mirror of https://github.com/OffchainLabs/prysm.git
synced 2026-01-09 21:38:05 -05:00

Compare commits (23 commits)
execution-... · prestonvan

SHA1:
a8354db435, e770f0fe1f, 30b8fba2ac, 5d94fd48ca, 176d763091, 97dc9ebbc8,
6a74dcf35b, a775798d89, e2442bb0a6, 2669006694, 5722a5793c, 96aba590b5,
2162ffb05f, d41805a39c, 819632dfc5, 10fffa6e7c, 5cda86bb93, f2dcc9a570,
63354b5bb7, e48f0aef41, ab1defc5de, b0e15f3c8f, e01a898264
@@ -63,7 +63,6 @@ go_library(
 "//config/features:go_default_library",
 "//config/fieldparams:go_default_library",
 "//config/params:go_default_library",
-"//consensus-types/forks/bellatrix:go_default_library",
 "//consensus-types/interfaces:go_default_library",
 "//consensus-types/primitives:go_default_library",
 "//consensus-types/wrapper:go_default_library",
@@ -51,7 +51,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
 if !isExecutionBlk {
 return nil, nil
 }
-headPayload, err := headBlk.Body().ExecutionPayload()
+headPayload, err := headBlk.Body().Execution()
 if err != nil {
 log.WithError(err).Error("Could not get execution payload for head block")
 return nil, nil
@@ -59,7 +59,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
 finalizedHash := s.ForkChoicer().FinalizedPayloadBlockHash()
 justifiedHash := s.ForkChoicer().JustifiedPayloadBlockHash()
 fcs := &enginev1.ForkchoiceState{
-HeadBlockHash: headPayload.BlockHash,
+HeadBlockHash: headPayload.BlockHash(),
 SafeBlockHash: justifiedHash[:],
 FinalizedBlockHash: finalizedHash[:],
 }
@@ -78,7 +78,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
 forkchoiceUpdatedOptimisticNodeCount.Inc()
 log.WithFields(logrus.Fields{
 "headSlot": headBlk.Slot(),
-"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
+"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash())),
 "finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
 }).Info("Called fork choice updated with optimistic block")
 err := s.optimisticCandidateBlock(ctx, headBlk)
@@ -149,6 +149,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
 var pId [8]byte
 copy(pId[:], payloadID[:])
 s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId)
+} else if hasAttr && payloadID == nil {
+log.WithFields(logrus.Fields{
+"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
+"slot": headBlk.Slot(),
+}).Error("Received nil payload ID on VALID engine response")
 }
 return payloadID, nil
 }
@@ -163,11 +168,11 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
 if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
 return params.BeaconConfig().ZeroHash, nil
 }
-payload, err := blk.Block().Body().ExecutionPayload()
+payload, err := blk.Block().Body().Execution()
 if err != nil {
 return [32]byte{}, errors.Wrap(err, "could not get execution payload")
 }
-return bytesutil.ToBytes32(payload.BlockHash), nil
+return bytesutil.ToBytes32(payload.BlockHash()), nil
 }

 // notifyForkchoiceUpdate signals execution engine on a new payload.
@@ -193,7 +198,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
 if !enabled {
 return true, nil
 }
-payload, err := body.ExecutionPayload()
+payload, err := body.Execution()
 if err != nil {
 return false, errors.Wrap(invalidBlock{error: err}, "could not get execution payload")
 }
@@ -206,7 +211,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
 newPayloadOptimisticNodeCount.Inc()
 log.WithFields(logrus.Fields{
 "slot": blk.Block().Slot(),
-"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
+"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
 }).Info("Called new payload with optimistic block")
 return false, s.optimisticCandidateBlock(ctx, blk.Block())
 case powchain.ErrInvalidPayloadStatus:
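These hunks swap direct field access on the protobuf payload (payload.BlockHash) for accessor methods on a wrapped execution-data value (payload.BlockHash()), obtained from Body().Execution() instead of Body().ExecutionPayload(). A minimal sketch of the consumer-side pattern, using a hypothetical pared-down interface (the real one is Prysm's interfaces.ExecutionData and has many more accessors):

package main

import "fmt"

// executionData is a hypothetical subset of the accessor interface these hunks program against.
type executionData interface {
	BlockHash() []byte
	ParentHash() []byte
}

// fixedPayload is a toy implementation used only to exercise the accessors.
type fixedPayload struct{ block, parent [32]byte }

func (p fixedPayload) BlockHash() []byte  { return p.block[:] }
func (p fixedPayload) ParentHash() []byte { return p.parent[:] }

// payloadSummary formats the fields a caller needs without touching the raw protobuf struct.
func payloadSummary(p executionData) string {
	return fmt.Sprintf("block=%#x parent=%#x", p.BlockHash(), p.ParentHash())
}

func main() {
	fmt.Println(payloadSummary(fixedPayload{block: [32]byte{0x01}, parent: [32]byte{0x02}}))
}

Because callers see only methods, the same code path can serve full payloads and payload headers, which is what the wrapper.WrappedExecutionPayloadHeader hunks later in this comparison rely on.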
@@ -220,7 +220,7 @@ func (s *Service) headBlock() interfaces.SignedBeaconBlock {
 // It does a full copy on head state for immutability.
 // This is a lock free version.
 func (s *Service) headState(ctx context.Context) state.BeaconState {
-_, span := trace.StartSpan(ctx, "blockChain.headState")
+ctx, span := trace.StartSpan(ctx, "blockChain.headState")
 defer span.End()

 return s.head.state.Copy()
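This hunk, and the deposit-cache and Altair precompute hunks further down, reassign the context returned by trace.StartSpan instead of discarding it, so that any downstream call is parented to the new span. A minimal sketch of why that matters, written directly against go.opencensus.io/trace (treat the exact import path as an assumption; Prysm wraps a similar API):

package main

import (
	"context"

	"go.opencensus.io/trace"
)

// parentWork keeps the context returned by StartSpan so childWork's span is
// recorded as a child of this one. Discarding it (the old `_, span := ...` form)
// still times parentWork, but breaks the parent/child link in traces.
func parentWork(ctx context.Context) {
	ctx, span := trace.StartSpan(ctx, "example.parentWork")
	defer span.End()
	childWork(ctx)
}

func childWork(ctx context.Context) {
	_, span := trace.StartSpan(ctx, "example.childWork")
	defer span.End()
	// ... actual work ...
}

func main() {
	parentWork(context.Background())
}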
@@ -45,12 +45,16 @@ func logStateTransitionData(b interfaces.BeaconBlock) error {
 log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
 }
 if b.Version() == version.Bellatrix {
-p, err := b.Body().ExecutionPayload()
+p, err := b.Body().Execution()
 if err != nil {
 return err
 }
-log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash)))
-log = log.WithField("txCount", len(p.Transactions))
+log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
+txs, err := p.Transactions()
+if err != nil {
+return err
+}
+log = log.WithField("txCount", len(txs))
 }
 log.Info("Finished applying state transition")
 return nil
@@ -98,18 +102,18 @@ func logPayload(block interfaces.BeaconBlock) error {
 if !isExecutionBlk {
 return nil
 }
-payload, err := block.Body().ExecutionPayload()
+payload, err := block.Body().Execution()
 if err != nil {
 return err
 }
-if payload.GasLimit == 0 {
+if payload.GasLimit() == 0 {
 return errors.New("gas limit should not be 0")
 }
-gasUtilized := float64(payload.GasUsed) / float64(payload.GasLimit)
+gasUtilized := float64(payload.GasUsed()) / float64(payload.GasLimit())

 log.WithFields(logrus.Fields{
-"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
-"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash)),
+"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
+"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
 "blockNumber": payload.BlockNumber,
 "gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
 }).Debug("Synced new payload")
@@ -35,7 +35,7 @@ func TestReportEpochMetrics_SlashedValidatorOutOfBound(t *testing.T) {
 require.NoError(t, err)
 v.Slashed = true
 require.NoError(t, h.UpdateValidatorAtIndex(0, v))
-require.NoError(t, h.AppendCurrentEpochAttestations(&eth.PendingAttestation{InclusionDelay: 1, Data: util.NewAttestationUtil().HydrateAttestationData(&eth.AttestationData{})}))
+require.NoError(t, h.AppendCurrentEpochAttestations(&eth.PendingAttestation{InclusionDelay: 1, Data: util.HydrateAttestationData(&eth.AttestationData{})}))
 err = reportEpochMetrics(context.Background(), h, h)
 require.ErrorContains(t, "slot 0 out of bounds", err)
 }
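Most of the test hunks in this comparison make the same mechanical change: the util.AttestationUtil{} value (or util.NewAttestationUtil() constructor) disappears and the hydrate/generate helpers become package-level functions such as util.HydrateAttestation, util.HydrateAttestationData, util.HydrateIndexedAttestation, util.NewAttestation, and util.GenerateAttestations. A toy sketch of the call-shape change, with a hypothetical stand-in type since the real ethpb messages are not reproduced here:

package main

import "fmt"

// attestation is a toy stand-in for ethpb.Attestation, used only to illustrate the call shape.
type attestation struct{ slot uint64 }

// hydrateAttestation mirrors the package-level helper style the tests switch to:
// a plain function instead of a method on a throwaway AttestationUtil value.
func hydrateAttestation(a *attestation) *attestation {
	if a == nil {
		a = &attestation{}
	}
	return a
}

func main() {
	// Old shape: au := util.AttestationUtil{}; a := au.HydrateAttestation(...)
	// New shape: a := util.HydrateAttestation(...)
	a := hydrateAttestation(&attestation{slot: 5})
	fmt.Println(a.slot)
}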
@@ -15,7 +15,6 @@ import (
 types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
 "github.com/prysmaticlabs/prysm/consensus-types/wrapper"
 "github.com/prysmaticlabs/prysm/encoding/bytesutil"
-enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
 "github.com/prysmaticlabs/prysm/time/slots"
 "github.com/sirupsen/logrus"
 )
@@ -41,17 +40,17 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea
 if err := wrapper.BeaconBlockIsNil(b); err != nil {
 return err
 }
-payload, err := b.Block().Body().ExecutionPayload()
+payload, err := b.Block().Body().Execution()
 if err != nil {
 return err
 }
-if payload == nil {
+if payload.IsNil() {
 return errors.New("nil execution payload")
 }
 if err := validateTerminalBlockHash(b.Block().Slot(), payload); err != nil {
 return errors.Wrap(err, "could not validate terminal block hash")
 }
-mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash)
+mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash())
 if err != nil {
 return errors.Wrap(err, "could not get merge block parent hash and total difficulty")
 }
@@ -71,7 +70,7 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea

 log.WithFields(logrus.Fields{
 "slot": b.Block().Slot(),
-"mergeBlockHash": common.BytesToHash(payload.ParentHash).String(),
+"mergeBlockHash": common.BytesToHash(payload.ParentHash()).String(),
 "mergeBlockParentHash": common.BytesToHash(mergeBlockParentHash).String(),
 "terminalTotalDifficulty": params.BeaconConfig().TerminalTotalDifficulty,
 "mergeBlockTotalDifficulty": mergeBlockTD,
@@ -85,7 +84,7 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea

 // getBlkParentHashAndTD retrieves the parent hash and total difficulty of the given block.
 func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]byte, *uint256.Int, error) {
-blk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(ctx, common.BytesToHash(blkHash))
+blk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(ctx, common.BytesToHash(blkHash), false /* no txs */)
 if err != nil {
 return nil, nil, errors.Wrap(err, "could not get pow block")
 }
@@ -110,14 +109,14 @@ func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]
 // assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
 // assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
 // return
-func validateTerminalBlockHash(blkSlot types.Slot, payload *enginev1.ExecutionPayload) error {
+func validateTerminalBlockHash(blkSlot types.Slot, payload interfaces.ExecutionData) error {
 if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) == [32]byte{} {
 return nil
 }
 if params.BeaconConfig().TerminalBlockHashActivationEpoch > slots.ToEpoch(blkSlot) {
 return errors.New("terminal block hash activation epoch not reached")
 }
-if !bytes.Equal(payload.ParentHash, params.BeaconConfig().TerminalBlockHash.Bytes()) {
+if !bytes.Equal(payload.ParentHash(), params.BeaconConfig().TerminalBlockHash.Bytes()) {
 return errors.New("parent hash does not match terminal block hash")
 }
 return nil
@@ -211,16 +211,22 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
 }

 func Test_validateTerminalBlockHash(t *testing.T) {
-require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
+wrapped, err := wrapper.WrappedExecutionPayload(&enginev1.ExecutionPayload{})
+require.NoError(t, err)
+require.NoError(t, validateTerminalBlockHash(1, wrapped))

 cfg := params.BeaconConfig()
 cfg.TerminalBlockHash = [32]byte{0x01}
 params.OverrideBeaconConfig(cfg)
-require.ErrorContains(t, "terminal block hash activation epoch not reached", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
+require.ErrorContains(t, "terminal block hash activation epoch not reached", validateTerminalBlockHash(1, wrapped))

 cfg.TerminalBlockHashActivationEpoch = 0
 params.OverrideBeaconConfig(cfg)
-require.ErrorContains(t, "parent hash does not match terminal block hash", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
+require.ErrorContains(t, "parent hash does not match terminal block hash", validateTerminalBlockHash(1, wrapped))

-require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{ParentHash: cfg.TerminalBlockHash.Bytes()}))
+wrapped, err = wrapper.WrappedExecutionPayload(&enginev1.ExecutionPayload{
+ParentHash: cfg.TerminalBlockHash.Bytes(),
+})
+require.NoError(t, err)
+require.NoError(t, validateTerminalBlockHash(1, wrapped))
 }
@@ -70,7 +70,6 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
 require.NoError(t, err)
 require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, blkWithValidStateRoot))

-au := util.AttestationUtil{}
 tests := []struct {
 name string
 a *ethpb.Attestation
@@ -78,17 +77,17 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
 }{
 {
 name: "attestation's data slot not aligned with target vote",
-a: au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
+a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
 wantedErr: "slot 32 does not match target epoch 0",
 },
 {
 name: "no pre state for attestations's target block",
-a: au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
+a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
 wantedErr: "could not get pre state for epoch 0",
 },
 {
 name: "process attestation doesn't match current epoch",
-a: au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
+a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
 Root: BlkWithStateBadAttRoot[:]}}}),
 wantedErr: "target epoch 100 does not match current epoch",
 },
@@ -177,7 +176,6 @@ func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
 require.NoError(t, err)
 require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, blkWithValidStateRoot))

-au := util.AttestationUtil{}
 tests := []struct {
 name string
 a *ethpb.Attestation
@@ -185,17 +183,17 @@ func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
 }{
 {
 name: "attestation's data slot not aligned with target vote",
-a: au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
+a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
 wantedErr: "slot 32 does not match target epoch 0",
 },
 {
 name: "no pre state for attestations's target block",
-a: au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
+a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
 wantedErr: "could not get pre state for epoch 0",
 },
 {
 name: "process attestation doesn't match current epoch",
-a: au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
+a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
 Root: BlkWithStateBadAttRoot[:]}}}),
 wantedErr: "target epoch 100 does not match current epoch",
 },
@@ -251,7 +249,7 @@ func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
 genesisState, pks := util.DeterministicGenesisState(t, 64)
 service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
 require.NoError(t, service.saveGenesisData(ctx, genesisState))
-att, err := util.NewAttestationUtil().GenerateAttestations(genesisState, pks, 1, 0, false)
+att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
 require.NoError(t, err)
 tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
 copied := genesisState.Copy()
@@ -281,7 +279,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
 genesisState, pks := util.DeterministicGenesisState(t, 64)
 service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
 require.NoError(t, service.saveGenesisData(ctx, genesisState))
-att, err := util.NewAttestationUtil().GenerateAttestations(genesisState, pks, 1, 0, false)
+att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
 require.NoError(t, err)
 tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
 copied := genesisState.Copy()
@@ -424,7 +422,8 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
 opts := testServiceOptsWithDB(t)
 service, err := NewService(ctx, opts...)
 require.NoError(t, err)
-d := util.NewAttestationUtil().HydrateAttestationData(&ethpb.AttestationData{})
+
+d := util.HydrateAttestationData(&ethpb.AttestationData{})
 require.Equal(t, errBlockNotFoundInCacheOrDB, service.verifyBeaconBlock(ctx, d))
 }
@@ -17,7 +17,6 @@ import (
 "github.com/prysmaticlabs/prysm/beacon-chain/state"
 "github.com/prysmaticlabs/prysm/config/features"
 "github.com/prysmaticlabs/prysm/config/params"
-"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
 "github.com/prysmaticlabs/prysm/consensus-types/interfaces"
 types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
 "github.com/prysmaticlabs/prysm/consensus-types/wrapper"
@@ -587,11 +586,15 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
 }

 // Skip validation if block has an empty payload.
-payload, err := blk.Block().Body().ExecutionPayload()
+payload, err := blk.Block().Body().Execution()
 if err != nil {
 return invalidBlock{error: err}
 }
-if bellatrix.IsEmptyPayload(payload) {
+isEmpty, err := wrapper.IsEmptyExecutionData(payload)
+if err != nil {
+return err
+}
+if isEmpty {
 return nil
 }
@@ -602,11 +605,16 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
 }

 // Skip validation if the block is not a merge transition block.
-atTransition, err := blocks.IsMergeTransitionBlockUsingPreStatePayloadHeader(stateHeader, blk.Block().Body())
+// To reach here. The payload must be non-empty. If the state header is empty then it's at transition.
+wh, err := wrapper.WrappedExecutionPayloadHeader(stateHeader)
 if err != nil {
-return errors.Wrap(err, "could not check if merge block is terminal")
+return err
 }
-if !atTransition {
+empty, err := wrapper.IsEmptyExecutionData(wh)
+if err != nil {
+return err
+}
+if !empty {
 return nil
 }
 return s.validateMergeBlock(ctx, blk)
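The rewritten validateMergeTransitionBlock reduces the "is this the merge transition block?" question to two emptiness checks: the block must carry a non-empty payload while the pre-state's payload header is still empty. A self-contained toy sketch of that decision, where isEmpty stands in for wrapper.IsEmptyExecutionData (an assumption about its semantics: all-zero fields count as empty):

package main

import "fmt"

// isEmpty is a stand-in for wrapper.IsEmptyExecutionData: it reports whether
// every field of a payload (or payload header) is zero-valued.
func isEmpty(fields [][]byte) bool {
	for _, f := range fields {
		for _, b := range f {
			if b != 0 {
				return false
			}
		}
	}
	return true
}

// isMergeTransition mirrors the decision above: run the merge-block checks only
// when the block has a real payload but the pre-state header is still empty.
func isMergeTransition(blockPayload, preStateHeader [][]byte) bool {
	return !isEmpty(blockPayload) && isEmpty(preStateHeader)
}

func main() {
	payload := [][]byte{{0x01}}           // block carries a real payload
	header := [][]byte{make([]byte, 32)}  // pre-state header is still all zeros
	fmt.Println(isMergeTransition(payload, header)) // true -> validateMergeBlock would run
}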
@@ -1489,9 +1489,11 @@ func Test_getStateVersionAndPayload(t *testing.T) {
 name: "bellatrix state",
 st: func() state.BeaconState {
 s, _ := util.DeterministicGenesisStateBellatrix(t, 1)
-require.NoError(t, s.SetLatestExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{
+wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{
 BlockNumber: 1,
-}))
+})
+require.NoError(t, err)
+require.NoError(t, s.SetLatestExecutionPayloadHeader(wrappedHeader))
 return s
 }(),
 version: version.Bellatrix,
@@ -1543,6 +1545,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
 name: "state older than Bellatrix, nil payload",
 stateVersion: 1,
 payload: nil,
+errString: "attempted to wrap nil",
 },
 {
 name: "state older than Bellatrix, empty payload",
@@ -1569,6 +1572,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
 name: "state is Bellatrix, nil payload",
 stateVersion: 2,
 payload: nil,
+errString: "attempted to wrap nil",
 },
 {
 name: "state is Bellatrix, empty payload",
@@ -1618,7 +1622,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
 payload: &enginev1.ExecutionPayload{
 ParentHash: aHash[:],
 },
-errString: "nil header or block body",
+errString: "attempted to wrap nil object",
 },
 }
 for _, tt := range tests {
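These test hunks show that SetLatestExecutionPayloadHeader now expects a wrapped header rather than the raw enginev1.ExecutionPayloadHeader proto. A sketch of the new setter sequence, using only the calls that appear in the hunks (it compiles only inside the Prysm module at this revision, so treat the exact signatures as assumptions):

package example

import (
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
	enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
)

// setHeader wraps the raw proto header first and stores only the wrapped value on the state,
// mirroring the updated test above.
func setHeader(st state.BeaconState, h *enginev1.ExecutionPayloadHeader) error {
	wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(h)
	if err != nil {
		return err // a nil proto fails here ("attempted to wrap nil object")
	}
	return st.SetLatestExecutionPayloadHeader(wrappedHeader)
}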
@@ -1664,9 +1668,8 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
 service, err := NewService(ctx, opts...)
 require.NoError(t, err)

-au := util.AttestationUtil{}
 beaconState, privKeys := util.DeterministicGenesisState(t, 100)
-att1 := au.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
+att1 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
 Data: &ethpb.AttestationData{
 Source: &ethpb.Checkpoint{Epoch: 1},
 },
@@ -1681,7 +1684,7 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
 aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
 att1.Signature = aggregateSig.Marshal()

-att2 := au.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
+att2 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
 AttestingIndices: []uint64{0, 1},
 })
 signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
@@ -60,7 +60,7 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
 require.NoError(t, err)

 wanted := "FFG and LMD votes are not consistent"
-a := util.NewAttestationUtil().NewAttestation()
+a := util.NewAttestation()
 a.Data.Target.Epoch = 1
 a.Data.Target.Root = []byte{'a'}
 a.Data.BeaconBlockRoot = r33[:]
@@ -85,7 +85,8 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
 util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
 r33, err := b33.Block.HashTreeRoot()
 require.NoError(t, err)
-a := util.NewAttestationUtil().NewAttestation()
+
+a := util.NewAttestation()
 a.Data.Target.Epoch = 1
 a.Data.Target.Root = r32[:]
 a.Data.BeaconBlockRoot = r33[:]
@@ -105,7 +106,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
 genesisState, pks := util.DeterministicGenesisState(t, 64)
 require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
 require.NoError(t, service.saveGenesisData(ctx, genesisState))
-atts, err := util.NewAttestationUtil().GenerateAttestations(genesisState, pks, 1, 0, false)
+atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
 require.NoError(t, err)
 tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
 copied := genesisState.Copy()
@@ -226,7 +227,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
 require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))

 // Generate attestatios for this block in Slot 1
-atts, err := util.NewAttestationUtil().GenerateAttestations(copied, pks, 1, 1, false)
+atts, err := util.GenerateAttestations(copied, pks, 1, 1, false)
 require.NoError(t, err)
 require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
 // Verify the target is in forchoice
@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library")
 go_library(
 name = "go_default_library",
 srcs = [
-"error.go",
 "metric.go",
 "option.go",
 "service.go",
@@ -1,7 +0,0 @@
-package builder
-
-import "github.com/pkg/errors"
-
-var (
-ErrNotRunning = errors.New("builder is not running")
-)
@@ -2,6 +2,7 @@ package builder

 import (
 "context"
+"fmt"
 "time"

 "github.com/pkg/errors"
@@ -21,7 +22,6 @@
 type BlockBuilder interface {
 SubmitBlindedBlock(ctx context.Context, block *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error)
 GetHeader(ctx context.Context, slot types.Slot, parentHash [32]byte, pubKey [48]byte) (*ethpb.SignedBuilderBid, error)
-Status() error
 RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
 Configured() bool
 }
@@ -60,6 +60,12 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
 return nil, err
 }
 s.c = c
+
+// Is the builder up?
+if err := s.c.Status(ctx); err != nil {
+return nil, fmt.Errorf("could not connect to builder: %v", err)
+}
+
 log.WithField("endpoint", c.NodeURL()).Info("Builder has been configured")
 }
 return s, nil
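NewService now probes the configured builder endpoint at construction and refuses to start if it is unreachable. A self-contained sketch of the same fail-fast pattern with a hypothetical client interface (the real builder client type and its Status signature are Prysm-internal, so treat them as assumptions):

package main

import (
	"context"
	"errors"
	"fmt"
)

// statusChecker is a hypothetical stand-in for the builder client: anything that can
// report whether the remote builder endpoint is healthy.
type statusChecker interface {
	Status(ctx context.Context) error
}

type service struct{ c statusChecker }

// newService refuses to construct the service when the builder cannot be reached,
// mirroring the check added to NewService in the hunk above.
func newService(ctx context.Context, c statusChecker) (*service, error) {
	if err := c.Status(ctx); err != nil {
		return nil, fmt.Errorf("could not connect to builder: %v", err)
	}
	return &service{c: c}, nil
}

type downClient struct{}

func (downClient) Status(_ context.Context) error { return errors.New("connection refused") }

func main() {
	if _, err := newService(context.Background(), downClient{}); err != nil {
		fmt.Println(err) // could not connect to builder: connection refused
	}
}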
@@ -15,7 +15,6 @@ type MockBuilderService struct {
 ErrSubmitBlindedBlock error
 Bid *ethpb.SignedBuilderBid
 ErrGetHeader error
-ErrStatus error
 ErrRegisterValidator error
 }
@@ -34,11 +33,6 @@ func (s *MockBuilderService) GetHeader(context.Context, types.Slot, [32]byte, [4
 return s.Bid, s.ErrGetHeader
 }

-// Status for mocking.
-func (s *MockBuilderService) Status() error {
-return s.ErrStatus
-}
-
 // RegisterValidator for mocking.
 func (s *MockBuilderService) RegisterValidator(context.Context, []*ethpb.SignedValidatorRegistrationV1) error {
 return s.ErrRegisterValidator
@@ -77,7 +77,7 @@ func New() (*DepositCache, error) {
 // InsertDeposit into the database. If deposit or block number are nil
 // then this method does nothing.
 func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error {
-_, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
+ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
 defer span.End()
 if d == nil {
 log.WithFields(logrus.Fields{
@@ -111,7 +111,7 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo

 // InsertDepositContainers inserts a set of deposit containers into our deposit cache.
 func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*ethpb.DepositContainer) {
-_, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
+ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
 defer span.End()
 dc.depositsLock.Lock()
 defer dc.depositsLock.Unlock()
@@ -130,7 +130,7 @@ func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*eth

 // InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
 func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1DepositIndex int64) {
-_, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
+ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
 defer span.End()
 dc.depositsLock.Lock()
 defer dc.depositsLock.Unlock()
@@ -180,7 +180,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit

 // AllDepositContainers returns all historical deposit containers.
 func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer {
-_, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
+ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
 defer span.End()
 dc.depositsLock.RLock()
 defer dc.depositsLock.RUnlock()
@@ -191,7 +191,7 @@ func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.Depos
 // AllDeposits returns a list of historical deposits until the given block number
 // (inclusive). If no block is specified then this method returns all historical deposits.
 func (dc *DepositCache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
-_, span := trace.StartSpan(ctx, "DepositsCache.AllDeposits")
+ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDeposits")
 defer span.End()
 dc.depositsLock.RLock()
 defer dc.depositsLock.RUnlock()
@@ -212,7 +212,7 @@ func (dc *DepositCache) allDeposits(untilBlk *big.Int) []*ethpb.Deposit {
 // DepositsNumberAndRootAtHeight returns number of deposits made up to blockheight and the
 // root that corresponds to the latest deposit at that blockheight.
 func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
-_, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
+ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
 defer span.End()
 dc.depositsLock.RLock()
 defer dc.depositsLock.RUnlock()
@@ -228,7 +228,7 @@ func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, block
 // DepositByPubkey looks through historical deposits and finds one which contains
 // a certain public key within its deposit data.
 func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
-_, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
+ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
 defer span.End()
 dc.depositsLock.RLock()
 defer dc.depositsLock.RUnlock()
@@ -249,7 +249,7 @@ func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*et

 // FinalizedDeposits returns the finalized deposits trie.
 func (dc *DepositCache) FinalizedDeposits(ctx context.Context) *FinalizedDeposits {
-_, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
+ctx, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
 defer span.End()
 dc.depositsLock.RLock()
 defer dc.depositsLock.RUnlock()

@@ -29,7 +29,7 @@ type PendingDepositsFetcher interface {
 // InsertPendingDeposit into the database. If deposit or block number are nil
 // then this method does nothing.
 func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
-_, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
+ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
 defer span.End()
 if d == nil {
 log.WithFields(logrus.Fields{
@@ -66,7 +66,7 @@ func (dc *DepositCache) PendingDeposits(ctx context.Context, untilBlk *big.Int)
 // PendingContainers returns a list of deposit containers until the given block number
 // (inclusive).
 func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer {
-_, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
+ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
 defer span.End()
 dc.depositsLock.RLock()
 defer dc.depositsLock.RUnlock()
@@ -90,7 +90,7 @@ func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int
 // RemovePendingDeposit from the database. The deposit is indexed by the
 // Index. This method does nothing if deposit ptr is nil.
 func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Deposit) {
-_, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
+ctx, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
 defer span.End()

 if d == nil {
@@ -128,7 +128,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos

 // PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
 func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
-_, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
+ctx, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
 defer span.End()

 if merkleTreeIndex == 0 {
@@ -27,7 +27,7 @@ import (

 func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
 attestations := []*ethpb.Attestation{
-util.NewAttestationUtil().HydrateAttestation(&ethpb.Attestation{
+util.HydrateAttestation(&ethpb.Attestation{
 Data: &ethpb.AttestationData{
 Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
 Slot: 5,
@@ -55,7 +55,7 @@ func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
 }

 func TestProcessAttestations_NeitherCurrentNorPrevEpoch(t *testing.T) {
-att := util.NewAttestationUtil().HydrateAttestation(&ethpb.Attestation{
+att := util.HydrateAttestation(&ethpb.Attestation{
 Data: &ethpb.AttestationData{
 Source: &ethpb.Checkpoint{Epoch: 0, Root: []byte("hello-world")},
 Target: &ethpb.Checkpoint{Epoch: 0}}})
@@ -201,7 +201,7 @@ func TestProcessAttestations_OK(t *testing.T) {
 aggBits.SetBitAt(0, true)
 var mockRoot [32]byte
 copy(mockRoot[:], "hello-world")
-att := util.NewAttestationUtil().HydrateAttestation(&ethpb.Attestation{
+att := util.HydrateAttestation(&ethpb.Attestation{
 Data: &ethpb.AttestationData{
 Source: &ethpb.Checkpoint{Root: mockRoot[:]},
 Target: &ethpb.Checkpoint{Root: mockRoot[:]},
@@ -15,7 +15,7 @@ import (

 // InitializePrecomputeValidators precomputes individual validator for its attested balances and the total sum of validators attested balances of the epoch.
 func InitializePrecomputeValidators(ctx context.Context, beaconState state.BeaconState) ([]*precompute.Validator, *precompute.Balance, error) {
-_, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
+ctx, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
 defer span.End()
 vals := make([]*precompute.Validator, beaconState.NumValidators())
 bal := &precompute.Balance{}
@@ -76,7 +76,7 @@ func ProcessInactivityScores(
 beaconState state.BeaconState,
 vals []*precompute.Validator,
 ) (state.BeaconState, []*precompute.Validator, error) {
-_, span := trace.StartSpan(ctx, "altair.ProcessInactivityScores")
+ctx, span := trace.StartSpan(ctx, "altair.ProcessInactivityScores")
 defer span.End()

 cfg := params.BeaconConfig()
@@ -144,7 +144,7 @@ func ProcessEpochParticipation(
 bal *precompute.Balance,
 vals []*precompute.Validator,
 ) ([]*precompute.Validator, *precompute.Balance, error) {
-_, span := trace.StartSpan(ctx, "altair.ProcessEpochParticipation")
+ctx, span := trace.StartSpan(ctx, "altair.ProcessEpochParticipation")
 defer span.End()

 cp, err := beaconState.CurrentEpochParticipation()
@@ -31,7 +31,6 @@ go_library(
 "//beacon-chain/state:go_default_library",
 "//config/fieldparams:go_default_library",
 "//config/params:go_default_library",
-"//consensus-types/forks/bellatrix:go_default_library",
 "//consensus-types/interfaces:go_default_library",
 "//consensus-types/primitives:go_default_library",
 "//consensus-types/wrapper:go_default_library",
@@ -89,7 +88,6 @@ go_test(
 "//beacon-chain/state/v1:go_default_library",
 "//config/fieldparams:go_default_library",
 "//config/params:go_default_library",
-"//consensus-types/forks/bellatrix:go_default_library",
 "//consensus-types/primitives:go_default_library",
 "//consensus-types/wrapper:go_default_library",
 "//container/trie:go_default_library",
@@ -25,7 +25,7 @@ import (

 func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
 beaconState, privKeys := util.DeterministicGenesisState(t, 100)
-data := util.NewAttestationUtil().HydrateAttestationData(&ethpb.AttestationData{
+data := util.HydrateAttestationData(&ethpb.AttestationData{
 Source: &ethpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
 Target: &ethpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
 })
@@ -85,7 +85,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
 func TestVerifyAttestationNoVerifySignature_IncorrectSlotTargetEpoch(t *testing.T) {
 beaconState, _ := util.DeterministicGenesisState(t, 1)

-att := util.NewAttestationUtil().HydrateAttestation(&ethpb.Attestation{
+att := util.HydrateAttestation(&ethpb.Attestation{
 Data: &ethpb.AttestationData{
 Slot: params.BeaconConfig().SlotsPerEpoch,
 Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
@@ -218,7 +218,7 @@ func TestConvertToIndexed_OK(t *testing.T) {

 var sig [fieldparams.BLSSignatureLength]byte
 copy(sig[:], "signed")
-att := util.NewAttestationUtil().HydrateAttestation(&ethpb.Attestation{
+att := util.HydrateAttestation(&ethpb.Attestation{
 Signature: sig[:],
 })
 for _, tt := range tests {
@@ -261,12 +261,11 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) {
 RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
 })
 require.NoError(t, err)
-au := util.AttestationUtil{}
 tests := []struct {
 attestation *ethpb.IndexedAttestation
 }{
 {attestation: &ethpb.IndexedAttestation{
-Data: au.HydrateAttestationData(&ethpb.AttestationData{
+Data: util.HydrateAttestationData(&ethpb.AttestationData{
 Target: &ethpb.Checkpoint{
 Epoch: 2,
 },
@@ -276,7 +275,7 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) {
 Signature: make([]byte, fieldparams.BLSSignatureLength),
 }},
 {attestation: &ethpb.IndexedAttestation{
-Data: au.HydrateAttestationData(&ethpb.AttestationData{
+Data: util.HydrateAttestationData(&ethpb.AttestationData{
 Target: &ethpb.Checkpoint{
 Epoch: 1,
 },
@@ -285,7 +284,7 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) {
 Signature: make([]byte, fieldparams.BLSSignatureLength),
 }},
 {attestation: &ethpb.IndexedAttestation{
-Data: au.HydrateAttestationData(&ethpb.AttestationData{
+Data: util.HydrateAttestationData(&ethpb.AttestationData{
 Target: &ethpb.Checkpoint{
 Epoch: 4,
 },
@@ -294,7 +293,7 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) {
 Signature: make([]byte, fieldparams.BLSSignatureLength),
 }},
 {attestation: &ethpb.IndexedAttestation{
-Data: au.HydrateAttestationData(&ethpb.AttestationData{
+Data: util.HydrateAttestationData(&ethpb.AttestationData{
 Target: &ethpb.Checkpoint{
 Epoch: 7,
 },
@@ -412,8 +411,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {

 comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
 require.NoError(t, err)
-au := util.AttestationUtil{}
-att1 := au.HydrateAttestation(&ethpb.Attestation{
+att1 := util.HydrateAttestation(&ethpb.Attestation{
 AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
 Data: &ethpb.AttestationData{
 Slot: 1,
@@ -432,7 +430,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {

 comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1*params.BeaconConfig().SlotsPerEpoch+1 /*slot*/, 1 /*committeeIndex*/)
 require.NoError(t, err)
-att2 := au.HydrateAttestation(&ethpb.Attestation{
+att2 := util.HydrateAttestation(&ethpb.Attestation{
 AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
 Data: &ethpb.AttestationData{
 Slot: 1*params.BeaconConfig().SlotsPerEpoch + 1,
@@ -472,8 +470,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing

 comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
 require.NoError(t, err)
-au := util.AttestationUtil{}
-att1 := au.HydrateAttestation(&ethpb.Attestation{
+att1 := util.HydrateAttestation(&ethpb.Attestation{
 AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
 Data: &ethpb.AttestationData{
 Slot: 1,
@@ -492,7 +489,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing

 comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
 require.NoError(t, err)
-att2 := au.HydrateAttestation(&ethpb.Attestation{
+att2 := util.HydrateAttestation(&ethpb.Attestation{
 AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
 Data: &ethpb.AttestationData{
 Slot: 1,
@@ -537,8 +534,7 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {

 comm1, err := helpers.BeaconCommitteeFromState(ctx, st, 1 /*slot*/, 0 /*committeeIndex*/)
 require.NoError(t, err)
-au := util.AttestationUtil{}
-att1 := au.HydrateAttestation(&ethpb.Attestation{
+att1 := util.HydrateAttestation(&ethpb.Attestation{
 AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
 Data: &ethpb.AttestationData{
 Slot: 1,
@@ -557,7 +553,7 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {

 comm2, err := helpers.BeaconCommitteeFromState(ctx, st, 1 /*slot*/, 1 /*committeeIndex*/)
 require.NoError(t, err)
-att2 := au.HydrateAttestation(&ethpb.Attestation{
+att2 := util.HydrateAttestation(&ethpb.Attestation{
 AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
 Data: &ethpb.AttestationData{
 Slot: 1,
@@ -19,12 +19,11 @@ import (
 )

 func TestSlashableAttestationData_CanSlash(t *testing.T) {
-au := util.AttestationUtil{}
-att1 := au.HydrateAttestationData(&ethpb.AttestationData{
+att1 := util.HydrateAttestationData(&ethpb.AttestationData{
 Target: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
 Source: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)},
 })
-att2 := au.HydrateAttestationData(&ethpb.AttestationData{
+att2 := util.HydrateAttestationData(&ethpb.AttestationData{
 Target: &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
 Source: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'B'}, 32)},
 })
@@ -36,10 +35,9 @@ func TestSlashableAttestationData_CanSlash(t *testing.T) {
 }

 func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
-au := util.AttestationUtil{}
 slashings := []*ethpb.AttesterSlashing{{
-Attestation_1: au.HydrateIndexedAttestation(&ethpb.IndexedAttestation{}),
-Attestation_2: au.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
+Attestation_1: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{}),
+Attestation_2: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
 Data: &ethpb.AttestationData{
 Source: &ethpb.Checkpoint{Epoch: 1},
 Target: &ethpb.Checkpoint{Epoch: 1}},
@@ -73,16 +71,15 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
 })
 require.NoError(t, err)

-au := util.AttestationUtil{}
 slashings := []*ethpb.AttesterSlashing{
 {
-Attestation_1: au.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
+Attestation_1: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
 Data: &ethpb.AttestationData{
 Source: &ethpb.Checkpoint{Epoch: 1},
 },
 AttestingIndices: make([]uint64, params.BeaconConfig().MaxValidatorsPerCommittee+1),
 }),
-Attestation_2: au.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
+Attestation_2: util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
 AttestingIndices: make([]uint64, params.BeaconConfig().MaxValidatorsPerCommittee+1),
 }),
 },
@@ -105,8 +102,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
 vv.WithdrawableEpoch = types.Epoch(params.BeaconConfig().SlotsPerEpoch)
 }

-au := util.AttestationUtil{}
-att1 := au.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
+att1 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
 Data: &ethpb.AttestationData{
 Source: &ethpb.Checkpoint{Epoch: 1},
 },
@@ -121,7 +117,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
 aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
 att1.Signature = aggregateSig.Marshal()

-att2 := au.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
+att2 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
 AttestingIndices: []uint64{0, 1},
 })
 signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
@@ -175,8 +171,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusAltair(t *testing.T) {
 vv.WithdrawableEpoch = types.Epoch(params.BeaconConfig().SlotsPerEpoch)
 }

-au := util.AttestationUtil{}
-att1 := au.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
+att1 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
 Data: &ethpb.AttestationData{
 Source: &ethpb.Checkpoint{Epoch: 1},
 },
@@ -191,7 +186,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusAltair(t *testing.T) {
 aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
 att1.Signature = aggregateSig.Marshal()

-att2 := au.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
+att2 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
 AttestingIndices: []uint64{0, 1},
 })
 signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
@@ -245,8 +240,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
 vv.WithdrawableEpoch = types.Epoch(params.BeaconConfig().SlotsPerEpoch)
 }

-au := util.AttestationUtil{}
-att1 := au.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
+att1 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
 Data: &ethpb.AttestationData{
 Source: &ethpb.Checkpoint{Epoch: 1},
 },
@@ -261,7 +255,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
 aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
 att1.Signature = aggregateSig.Marshal()

-att2 := au.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
+att2 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
 AttestingIndices: []uint64{0, 1},
 })
 signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)

@@ -39,9 +39,8 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
 expectedSlashedVal := 2800

 root1 := [32]byte{'d', 'o', 'u', 'b', 'l', 'e', '1'}
-au := util.AttestationUtil{}
 att1 := &ethpb.IndexedAttestation{
-Data: au.HydrateAttestationData(&ethpb.AttestationData{Target: &ethpb.Checkpoint{Epoch: 0, Root: root1[:]}}),
+Data: util.HydrateAttestationData(&ethpb.AttestationData{Target: &ethpb.Checkpoint{Epoch: 0, Root: root1[:]}}),
 AttestingIndices: setA,
 Signature: make([]byte, 96),
 }
@@ -59,7 +58,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {

 root2 := [32]byte{'d', 'o', 'u', 'b', 'l', 'e', '2'}
 att2 := &ethpb.IndexedAttestation{
-Data: au.HydrateAttestationData(&ethpb.AttestationData{
+Data: util.HydrateAttestationData(&ethpb.AttestationData{
 Target: &ethpb.Checkpoint{Root: root2[:]},
 }),
 AttestingIndices: setB,
@@ -7,7 +7,6 @@ import (
 "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
 "github.com/prysmaticlabs/prysm/beacon-chain/core/time"
 "github.com/prysmaticlabs/prysm/beacon-chain/state"
-"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
 "github.com/prysmaticlabs/prysm/consensus-types/interfaces"
 "github.com/prysmaticlabs/prysm/consensus-types/wrapper"
 "github.com/prysmaticlabs/prysm/encoding/bytesutil"
@@ -39,20 +38,15 @@ func IsMergeTransitionComplete(st state.BeaconState) (bool, error) {
 if err != nil {
 return false, err
 }
-return !bellatrix.IsEmptyHeader(h), nil
-}
-
-// IsMergeTransitionBlockUsingPreStatePayloadHeader returns true if the input block is the terminal merge block.
-// Terminal merge block must be associated with an empty payload header.
-// This assumes the header `h` is referenced as the parent state for block body `body.
-func IsMergeTransitionBlockUsingPreStatePayloadHeader(h *enginev1.ExecutionPayloadHeader, body interfaces.BeaconBlockBody) (bool, error) {
-if h == nil || body == nil {
-return false, errors.New("nil header or block body")
-}
-if !bellatrix.IsEmptyHeader(h) {
-return false, nil
-}
-return IsExecutionBlock(body)
+wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(h)
+if err != nil {
+return false, err
+}
+isEmpty, err := wrapper.IsEmptyExecutionData(wrappedHeader)
+if err != nil {
+return false, err
+}
+return !isEmpty, nil
 }

@@ -64,7 +58,7 @@ func IsExecutionBlock(body interfaces.BeaconBlockBody) (bool, error) {
 if body == nil {
 return false, errors.New("nil block body")
 }
-payload, err := body.ExecutionPayload()
+payload, err := body.Execution()
 switch {
 case errors.Is(err, wrapper.ErrUnsupportedField):
 return false, nil
@@ -72,7 +66,11 @@ func IsExecutionBlock(body interfaces.BeaconBlockBody) (bool, error) {
 return false, err
 default:
 }
-return !bellatrix.IsEmptyPayload(payload), nil
+isEmpty, err := wrapper.IsEmptyExecutionData(payload)
+if err != nil {
+return false, err
+}
+return !isEmpty, nil
 }
 // IsExecutionEnabled returns true if the beacon chain can begin executing.
@@ -98,7 +96,15 @@ func IsExecutionEnabled(st state.BeaconState, body interfaces.BeaconBlockBody) (
 // IsExecutionEnabledUsingHeader returns true if the execution is enabled using post processed payload header and block body.
 // This is an optimized version of IsExecutionEnabled where beacon state is not required as an argument.
 func IsExecutionEnabledUsingHeader(header *enginev1.ExecutionPayloadHeader, body interfaces.BeaconBlockBody) (bool, error) {
-if !bellatrix.IsEmptyHeader(header) {
+wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(header)
+if err != nil {
+return false, err
+}
+isEmpty, err := wrapper.IsEmptyExecutionData(wrappedHeader)
+if err != nil {
+return false, err
+}
+if !isEmpty {
 return true, nil
 }
 return IsExecutionBlock(body)
@@ -116,7 +122,7 @@ func IsPreBellatrixVersion(v int) bool {
 // # Verify consistency of the parent hash with respect to the previous execution payload header
 // if is_merge_complete(state):
 // assert payload.parent_hash == state.latest_execution_payload_header.block_hash
-func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload *enginev1.ExecutionPayload) error {
+func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.ExecutionData) error {
 complete, err := IsMergeTransitionComplete(st)
 if err != nil {
 return err
@@ -129,7 +135,7 @@ func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload *enginev1.E
 if err != nil {
 return err
 }
-if !bytes.Equal(payload.ParentHash, header.BlockHash) {
+if !bytes.Equal(payload.ParentHash(), header.BlockHash) {
 return errors.New("incorrect block hash")
 }
 return nil
@@ -143,20 +149,20 @@ func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload *enginev1.E
 // assert payload.random == get_randao_mix(state, get_current_epoch(state))
 // # Verify timestamp
 // assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
-func ValidatePayload(st state.BeaconState, payload *enginev1.ExecutionPayload) error {
+func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) error {
 random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
 if err != nil {
 return err
 }

-if !bytes.Equal(payload.PrevRandao, random) {
+if !bytes.Equal(payload.PrevRandao(), random) {
 return ErrInvalidPayloadPrevRandao
 }
 t, err := slots.ToTime(st.GenesisTime(), st.Slot())
 if err != nil {
 return err
 }
-if payload.Timestamp != uint64(t.Unix()) {
+if payload.Timestamp() != uint64(t.Unix()) {
 return ErrInvalidPayloadTimeStamp
 }
 return nil
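ValidatePayload enforces the two spec assertions quoted in the comments above: the payload's prev_randao must equal the current randao mix, and its timestamp must equal the computed slot time. A self-contained sketch of the same two checks with the state-derived inputs passed in directly (the numbers in main are illustrative only; the real code obtains them via helpers.RandaoMix and slots.ToTime):

package main

import (
	"bytes"
	"errors"
	"fmt"
)

var (
	errInvalidPrevRandao = errors.New("payload prev_randao does not match state randao mix")
	errInvalidTimestamp  = errors.New("payload timestamp does not match slot time")
)

// validatePayloadFields mirrors the two assertions in ValidatePayload above.
func validatePayloadFields(prevRandao, randaoMix []byte, timestamp, slotTime uint64) error {
	if !bytes.Equal(prevRandao, randaoMix) {
		return errInvalidPrevRandao
	}
	if timestamp != slotTime {
		return errInvalidTimestamp
	}
	return nil
}

func main() {
	mix := []byte{0xaa, 0xbb}
	slotTime := uint64(1606824023 + 100*12) // genesis time + slot*seconds-per-slot, illustrative values
	fmt.Println(validatePayloadFields([]byte{0xaa, 0xbb}, mix, slotTime, slotTime)) // <nil>
	fmt.Println(validatePayloadFields([]byte{0x01}, mix, slotTime, slotTime))       // prev_randao error
}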
@@ -194,27 +200,29 @@ func ValidatePayload(st state.BeaconState, payload *enginev1.ExecutionPayload) e
|
||||
// block_hash=payload.block_hash,
|
||||
// transactions_root=hash_tree_root(payload.transactions),
|
||||
// )
|
||||
func ProcessPayload(st state.BeaconState, payload *enginev1.ExecutionPayload) (state.BeaconState, error) {
|
||||
func ProcessPayload(st state.BeaconState, payload interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := ValidatePayload(st, payload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
header, err := bellatrix.PayloadToHeader(payload)
|
||||
header, err := wrapper.PayloadToHeader(payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := st.SetLatestExecutionPayloadHeader(header); err != nil {
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := st.SetLatestExecutionPayloadHeader(wrappedHeader); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// ValidatePayloadHeaderWhenMergeCompletes validates the payload header when the merge completes.
|
||||
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header *enginev1.ExecutionPayloadHeader) error {
|
||||
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interfaces.ExecutionData) error {
|
||||
// Skip validation if the state is not merge compatible.
|
||||
complete, err := IsMergeTransitionComplete(st)
|
||||
if err != nil {
|
||||
@@ -228,20 +236,20 @@ func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header *engin
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(header.ParentHash, h.BlockHash) {
|
||||
if !bytes.Equal(header.ParentHash(), h.BlockHash) {
|
||||
return ErrInvalidPayloadBlockHash
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidatePayloadHeader validates the payload header.
|
||||
func ValidatePayloadHeader(st state.BeaconState, header *enginev1.ExecutionPayloadHeader) error {
|
||||
func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionData) error {
|
||||
// Validate header's random mix matches with state in current epoch
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(header.PrevRandao, random) {
|
||||
if !bytes.Equal(header.PrevRandao(), random) {
|
||||
return ErrInvalidPayloadPrevRandao
|
||||
}
|
||||
|
||||
@@ -250,22 +258,20 @@ func ValidatePayloadHeader(st state.BeaconState, header *enginev1.ExecutionPaylo
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if header.Timestamp != uint64(t.Unix()) {
|
||||
if header.Timestamp() != uint64(t.Unix()) {
|
||||
return ErrInvalidPayloadTimeStamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessPayloadHeader processes the payload header.
|
||||
func ProcessPayloadHeader(st state.BeaconState, header *enginev1.ExecutionPayloadHeader) (state.BeaconState, error) {
|
||||
func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
if err := ValidatePayloadHeaderWhenMergeCompletes(st, header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := ValidatePayloadHeader(st, header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := st.SetLatestExecutionPayloadHeader(header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -278,9 +284,9 @@ func GetBlockPayloadHash(blk interfaces.BeaconBlock) ([32]byte, error) {
|
||||
if IsPreBellatrixVersion(blk.Version()) {
|
||||
return payloadHash, nil
|
||||
}
|
||||
payload, err := blk.Body().ExecutionPayload()
|
||||
payload, err := blk.Body().Execution()
|
||||
if err != nil {
|
||||
return payloadHash, err
|
||||
}
|
||||
return bytesutil.ToBytes32(payload.BlockHash), nil
|
||||
return bytesutil.ToBytes32(payload.BlockHash()), nil
|
||||
}
|
||||
|
||||
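GetBlockPayloadHash above now reads the payload through the version-agnostic Execution() accessor, so the same path serves full and blinded Bellatrix block bodies. A hedged caller-side sketch (the blocks package qualifier is assumed from the surrounding tests):

hash, err := blocks.GetBlockPayloadHash(blk.Block())
if err != nil {
	return errors.Wrap(err, "could not get payload hash")
}
_ = hash // stays at its zero value for pre-Bellatrix blocks, per the early return above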
@@ -9,7 +9,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
@@ -160,7 +159,9 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.payload))
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.payload)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
got, err := blocks.IsMergeTransitionComplete(st)
|
||||
require.NoError(t, err)
|
||||
if got != tt.want {
|
||||
@@ -170,185 +171,6 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_IsMergeTransitionBlockUsingPayloadHeader(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
payload *enginev1.ExecutionPayload
|
||||
header *enginev1.ExecutionPayloadHeader
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "empty header, empty payload",
|
||||
payload: emptyPayload(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "non-empty header, empty payload",
|
||||
payload: emptyPayload(),
|
||||
header: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
}(),
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has parent hash",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has fee recipient",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.FeeRecipientLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has state root",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has receipt root",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has logs bloom",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has random",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has base fee",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has block hash",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has tx",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.Transactions = [][]byte{{'a'}}
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has extra data",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.ExtraData = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has block number",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.BlockNumber = 1
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has gas limit",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.GasLimit = 1
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has gas used",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.GasUsed = 1
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has timestamp",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.Timestamp = 1
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Body.ExecutionPayload = tt.payload
|
||||
body, err := wrapper.WrappedBeaconBlockBody(blk.Block.Body)
|
||||
require.NoError(t, err)
|
||||
got, err := blocks.IsMergeTransitionBlockUsingPreStatePayloadHeader(tt.header, body)
|
||||
require.NoError(t, err)
|
||||
if got != tt.want {
|
||||
t.Errorf("MergeTransitionBlock() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_IsExecutionBlock(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -442,7 +264,9 @@ func Test_IsExecutionEnabled(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.header))
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Body.ExecutionPayload = tt.payload
|
||||
body, err := wrapper.WrappedBeaconBlockBody(blk.Block.Body)
|
||||
@@ -567,8 +391,12 @@ func Test_ValidatePayloadWhenMergeCompletes(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.header))
|
||||
err := blocks.ValidatePayloadWhenMergeCompletes(st, tt.payload)
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(tt.payload)
|
||||
require.NoError(t, err)
|
||||
err = blocks.ValidatePayloadWhenMergeCompletes(st, wrappedPayload)
|
||||
if err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
@@ -616,7 +444,9 @@ func Test_ValidatePayload(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := blocks.ValidatePayload(st, tt.payload)
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(tt.payload)
|
||||
require.NoError(t, err)
|
||||
err = blocks.ValidatePayload(st, wrappedPayload)
|
||||
if err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
@@ -664,12 +494,14 @@ func Test_ProcessPayload(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, err := blocks.ProcessPayload(st, tt.payload)
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(tt.payload)
|
||||
require.NoError(t, err)
|
||||
st, err := blocks.ProcessPayload(st, wrappedPayload)
|
||||
if err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
require.Equal(t, tt.err, err)
|
||||
want, err := bellatrix.PayloadToHeader(tt.payload)
|
||||
want, err := wrapper.PayloadToHeader(wrappedPayload)
|
||||
require.Equal(t, tt.err, err)
|
||||
got, err := st.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
@@ -717,7 +549,9 @@ func Test_ProcessPayloadHeader(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, err := blocks.ProcessPayloadHeader(st, tt.header)
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
st, err := blocks.ProcessPayloadHeader(st, wrappedHeader)
|
||||
if err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
@@ -768,7 +602,9 @@ func Test_ValidatePayloadHeader(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := blocks.ValidatePayloadHeader(st, tt.header)
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
err = blocks.ValidatePayloadHeader(st, wrappedHeader)
|
||||
require.Equal(t, tt.err, err)
|
||||
})
|
||||
}
|
||||
@@ -777,7 +613,9 @@ func Test_ValidatePayloadHeader(t *testing.T) {
|
||||
func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
emptySt := st.Copy()
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{BlockHash: []byte{'a'}}))
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{BlockHash: []byte{'a'}})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
@@ -816,7 +654,9 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := blocks.ValidatePayloadHeaderWhenMergeCompletes(tt.state, tt.header)
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
err = blocks.ValidatePayloadHeaderWhenMergeCompletes(tt.state, wrappedHeader)
|
||||
require.Equal(t, tt.err, err)
|
||||
})
|
||||
}
|
||||
@@ -824,7 +664,9 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
|
||||
|
||||
func Test_PayloadToHeader(t *testing.T) {
|
||||
p := emptyPayload()
|
||||
h, err := bellatrix.PayloadToHeader(p)
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(p)
|
||||
require.NoError(t, err)
|
||||
h, err := wrapper.PayloadToHeader(wrappedPayload)
|
||||
require.NoError(t, err)
|
||||
txRoot, err := ssz.TransactionsRoot(p.Transactions)
|
||||
require.NoError(t, err)
|
||||
@@ -863,7 +705,9 @@ func Test_PayloadToHeader(t *testing.T) {
|
||||
|
||||
func BenchmarkBellatrixComplete(b *testing.B) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(b, 1)
|
||||
require.NoError(b, st.SetLatestExecutionPayloadHeader(emptyPayloadHeader()))
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(emptyPayloadHeader())
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
// pre computed instances of validators attesting records and total
|
||||
// balances attested in an epoch.
|
||||
func New(ctx context.Context, s state.BeaconState) ([]*Validator, *Balance, error) {
|
||||
_, span := trace.StartSpan(ctx, "precomputeEpoch.New")
|
||||
ctx, span := trace.StartSpan(ctx, "precomputeEpoch.New")
|
||||
defer span.End()
|
||||
|
||||
pValidators := make([]*Validator, s.NumValidators())
|
||||
|
||||
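The tracing hunks in this diff, here and in the DB store below, all make the same change: keep the context returned by trace.StartSpan instead of discarding it, so spans opened further down the call chain nest under the correct parent. A hedged illustration of the idiom, with a placeholder span name and downstream call:

func tracedOperation(ctx context.Context) error {
	ctx, span := trace.StartSpan(ctx, "Example.Operation") // placeholder span name
	defer span.End()
	return doWork(ctx) // doWork stands in for whatever traced call follows
}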
@@ -290,24 +290,17 @@ func ProcessBlockForStateRoot(
|
||||
return nil, errors.Wrap(err, "could not check if execution is enabled")
|
||||
}
|
||||
if enabled {
|
||||
executionData, err := blk.Body().Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if blk.IsBlinded() {
|
||||
header, err := blk.Body().ExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state, err = b.ProcessPayloadHeader(state, header)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution payload header")
|
||||
}
|
||||
state, err = b.ProcessPayloadHeader(state, executionData)
|
||||
} else {
|
||||
payload, err := blk.Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state, err = b.ProcessPayload(state, payload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution payload")
|
||||
}
|
||||
state, err = b.ProcessPayload(state, executionData)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution data")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -133,16 +133,15 @@ func TestProcessBlock_IncorrectProcessExits(t *testing.T) {
|
||||
}),
|
||||
},
|
||||
}
|
||||
au := util.AttestationUtil{}
|
||||
attesterSlashings := []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: ðpb.IndexedAttestation{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{}),
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{}),
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
Attestation_2: ðpb.IndexedAttestation{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{}),
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{}),
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
@@ -153,7 +152,7 @@ func TestProcessBlock_IncorrectProcessExits(t *testing.T) {
|
||||
blockRoots = append(blockRoots, []byte{byte(i)})
|
||||
}
|
||||
require.NoError(t, beaconState.SetBlockRoots(blockRoots))
|
||||
blockAtt := au.HydrateAttestation(ðpb.Attestation{
|
||||
blockAtt := util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Root: bytesutil.PadTo([]byte("hello-world"), 32)},
|
||||
},
|
||||
@@ -256,8 +255,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState,
|
||||
require.NoError(t, beaconState.SetValidators(validators))
|
||||
|
||||
mockRoot2 := [32]byte{'A'}
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
att1 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: mockRoot2[:]},
|
||||
},
|
||||
@@ -273,7 +271,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState,
|
||||
att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
mockRoot3 := [32]byte{'B'}
|
||||
att2 := au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
att2 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: mockRoot3[:]},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
|
||||
@@ -303,7 +301,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState,
|
||||
|
||||
aggBits := bitfield.NewBitlist(1)
|
||||
aggBits.SetBitAt(0, true)
|
||||
blockAtt := au.HydrateAttestation(ðpb.Attestation{
|
||||
blockAtt := util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: beaconState.Slot(),
|
||||
Target: ðpb.Checkpoint{Epoch: time.CurrentEpoch(beaconState)},
|
||||
|
||||
@@ -17,6 +17,7 @@ go_library(
|
||||
"log.go",
|
||||
"migration.go",
|
||||
"migration_archived_index.go",
|
||||
"migration_blinded_beacon_blocks.go",
|
||||
"migration_block_slot_index.go",
|
||||
"migration_state_validators.go",
|
||||
"powchain.go",
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
|
||||
// LastArchivedSlot from the db.
|
||||
func (s *Store) LastArchivedSlot(ctx context.Context) (types.Slot, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedSlot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedSlot")
|
||||
defer span.End()
|
||||
var index types.Slot
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
@@ -26,7 +26,7 @@ func (s *Store) LastArchivedSlot(ctx context.Context) (types.Slot, error) {
|
||||
|
||||
// LastArchivedRoot from the db.
|
||||
func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedRoot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedRoot")
|
||||
defer span.End()
|
||||
|
||||
var blockRoot []byte
|
||||
@@ -44,7 +44,7 @@ func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
|
||||
// ArchivedPointRoot returns the block root of an archived point from the DB.
|
||||
// This is essential for cold state management and to restore a cold state.
|
||||
func (s *Store) ArchivedPointRoot(ctx context.Context, slot types.Slot) [32]byte {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.ArchivedPointRoot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.ArchivedPointRoot")
|
||||
defer span.End()
|
||||
|
||||
var blockRoot []byte
|
||||
@@ -61,7 +61,7 @@ func (s *Store) ArchivedPointRoot(ctx context.Context, slot types.Slot) [32]byte
|
||||
|
||||
// HasArchivedPoint returns true if an archived point exists in DB.
|
||||
func (s *Store) HasArchivedPoint(ctx context.Context, slot types.Slot) bool {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.HasArchivedPoint")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasArchivedPoint")
|
||||
defer span.End()
|
||||
var exists bool
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
@@ -53,7 +54,7 @@ func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.Signe
|
||||
// at the time the chain was started, used to initialize the database and chain
|
||||
// without syncing from genesis.
|
||||
func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.OriginCheckpointBlockRoot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.OriginCheckpointBlockRoot")
|
||||
defer span.End()
|
||||
|
||||
var root [32]byte
|
||||
@@ -72,7 +73,7 @@ func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
|
||||
// BackfillBlockRoot keeps track of the highest block available before the OriginCheckpointBlockRoot
|
||||
func (s *Store) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
|
||||
defer span.End()
|
||||
|
||||
var root [32]byte
|
||||
@@ -130,7 +131,7 @@ func (s *Store) Blocks(ctx context.Context, f *filters.QueryFilter) ([]interface
|
||||
encoded := bkt.Get(keys[i])
|
||||
blk, err := unmarshalBlock(ctx, encoded)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "could not unmarshal block with key %#x", keys[i])
|
||||
}
|
||||
blocks = append(blocks, blk)
|
||||
blockRoots = append(blockRoots, bytesutil.ToBytes32(keys[i]))
|
||||
@@ -168,7 +169,7 @@ func (s *Store) BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]b
|
||||
|
||||
// HasBlock checks if a block by root exists in the db.
|
||||
func (s *Store) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.HasBlock")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasBlock")
|
||||
defer span.End()
|
||||
if v, ok := s.blockCache.Get(string(blockRoot[:])); v != nil && ok {
|
||||
return true
|
||||
@@ -304,6 +305,16 @@ func (s *Store) SaveBlocks(ctx context.Context, blocks []interfaces.SignedBeacon
|
||||
if err := updateValueForIndices(ctx, indicesForBlocks[i], blockRoots[i], tx); err != nil {
|
||||
return errors.Wrap(err, "could not update DB indices")
|
||||
}
|
||||
if features.Get().EnableOnlyBlindedBeaconBlocks {
|
||||
blindedBlock, err := blk.ToBlinded()
|
||||
if err != nil {
|
||||
if !errors.Is(err, wrapper.ErrUnsupportedVersion) {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
blk = blindedBlock
|
||||
}
|
||||
}
|
||||
s.blockCache.Set(string(blockRoots[i]), blk, int64(len(encodedBlocks[i])))
|
||||
if err := bkt.Put(blockRoots[i], encodedBlocks[i]); err != nil {
|
||||
return err
|
||||
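In the SaveBlocks hunk above, a ToBlinded failure with wrapper.ErrUnsupportedVersion is deliberately tolerated: pre-Bellatrix blocks have no blinded form, so the full block stays in the cache, keeping the cached copy consistent with what is persisted. A hedged restatement of that fallback, using only names that appear in this diff:

blindedBlock, err := blk.ToBlinded()
switch {
case errors.Is(err, wrapper.ErrUnsupportedVersion):
	// Phase0/Altair blocks cannot be blinded; keep blk unchanged.
case err != nil:
	return err
default:
	blk = blindedBlock
}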
@@ -315,7 +326,7 @@ func (s *Store) SaveBlocks(ctx context.Context, blocks []interfaces.SignedBeacon
|
||||
|
||||
// SaveHeadBlockRoot to the db.
|
||||
func (s *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
|
||||
defer span.End()
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
hasStateSummary := s.hasStateSummaryBytes(tx, blockRoot)
|
||||
@@ -366,7 +377,7 @@ func (s *Store) GenesisBlockRoot(ctx context.Context) ([32]byte, error) {
|
||||
|
||||
// SaveGenesisBlockRoot to the db.
|
||||
func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveGenesisBlockRoot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveGenesisBlockRoot")
|
||||
defer span.End()
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(blocksBucket)
|
||||
@@ -379,7 +390,7 @@ func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) er
|
||||
// This value is used by a running beacon chain node to locate the state at the beginning
|
||||
// of the chain history, in places where genesis would typically be used.
|
||||
func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginCheckpointBlockRoot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginCheckpointBlockRoot")
|
||||
defer span.End()
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(blocksBucket)
|
||||
@@ -390,7 +401,7 @@ func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32
|
||||
// SaveBackfillBlockRoot is used to keep track of the most recently backfilled block root when
|
||||
// the node was initialized via checkpoint sync.
|
||||
func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
|
||||
defer span.End()
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(blocksBucket)
|
||||
@@ -489,7 +500,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id types.Validato
|
||||
// SaveFeeRecipientsByValidatorIDs saves the fee recipients for validator ids.
|
||||
// Error is returned if `ids` and `recipients` are not the same length.
|
||||
func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, feeRecipients []common.Address) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveFeeRecipientByValidatorID")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveFeeRecipientByValidatorID")
|
||||
defer span.End()
|
||||
|
||||
if len(ids) != len(feeRecipients) {
|
||||
@@ -527,7 +538,7 @@ func (s *Store) RegistrationByValidatorID(ctx context.Context, id types.Validato
|
||||
// SaveRegistrationsByValidatorIDs saves the validator registrations for validator ids.
|
||||
// Error is returned if `ids` and `registrations` are not the same length.
|
||||
func (s *Store) SaveRegistrationsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveRegistrationsByValidatorIDs")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveRegistrationsByValidatorIDs")
|
||||
defer span.End()
|
||||
|
||||
if len(ids) != len(regs) {
|
||||
@@ -614,7 +625,7 @@ func blockRootsBySlotRange(
|
||||
bkt *bolt.Bucket,
|
||||
startSlotEncoded, endSlotEncoded, startEpochEncoded, endEpochEncoded, slotStepEncoded interface{},
|
||||
) ([][]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
|
||||
defer span.End()
|
||||
|
||||
// Return nothing when all slot parameters are missing
|
||||
@@ -679,7 +690,7 @@ func blockRootsBySlotRange(
|
||||
|
||||
// blockRootsBySlot retrieves the block roots by slot
|
||||
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][32]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
|
||||
defer span.End()
|
||||
|
||||
bkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
@@ -700,7 +711,7 @@ func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][32]
|
||||
// a map of bolt DB index buckets corresponding to each particular key for indices for
|
||||
// data, such as (shard indices bucket -> shard 5).
|
||||
func createBlockIndicesFromBlock(ctx context.Context, block interfaces.BeaconBlock) map[string][]byte {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromBlock")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromBlock")
|
||||
defer span.End()
|
||||
indicesByBucket := make(map[string][]byte)
|
||||
// Every index has a unique bucket for fast, binary-search
|
||||
@@ -728,7 +739,7 @@ func createBlockIndicesFromBlock(ctx context.Context, block interfaces.BeaconBlo
|
||||
// objects. If a certain filter criterion does not apply to
|
||||
// blocks, an appropriate error is returned.
|
||||
func createBlockIndicesFromFilters(ctx context.Context, f *filters.QueryFilter) (map[string][]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromFilters")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromFilters")
|
||||
defer span.End()
|
||||
indicesByBucket := make(map[string][]byte)
|
||||
for k, v := range f.Filters() {
|
||||
@@ -758,7 +769,7 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.SignedBeaconBlock
|
||||
var err error
|
||||
enc, err = snappy.Decode(nil, enc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "could not snappy decode block")
|
||||
}
|
||||
var rawBlock ssz.Unmarshaler
|
||||
switch {
|
||||
@@ -766,23 +777,23 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.SignedBeaconBlock
|
||||
// Marshal block bytes to altair beacon block.
|
||||
rawBlock = ðpb.SignedBeaconBlockAltair{}
|
||||
if err := rawBlock.UnmarshalSSZ(enc[len(altairKey):]); err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "could not unmarshal Altair block")
|
||||
}
|
||||
case hasBellatrixKey(enc):
|
||||
rawBlock = ðpb.SignedBeaconBlockBellatrix{}
|
||||
if err := rawBlock.UnmarshalSSZ(enc[len(bellatrixKey):]); err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "could not unmarshal Bellatrix block")
|
||||
}
|
||||
case hasBellatrixBlindKey(enc):
|
||||
rawBlock = ðpb.SignedBlindedBeaconBlockBellatrix{}
|
||||
if err := rawBlock.UnmarshalSSZ(enc[len(bellatrixBlindKey):]); err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "could not unmarshal blinded Bellatrix block")
|
||||
}
|
||||
default:
|
||||
// Marshal block bytes to phase 0 beacon block.
|
||||
rawBlock = ðpb.SignedBeaconBlock{}
|
||||
if err := rawBlock.UnmarshalSSZ(enc); err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "could not unmarshal Phase0 block")
|
||||
}
|
||||
}
|
||||
return wrapper.WrappedSignedBeaconBlock(rawBlock)
|
||||
@@ -790,19 +801,41 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.SignedBeaconBlock
|
||||
|
||||
// marshal versioned beacon block from struct type down to bytes.
|
||||
func marshalBlock(_ context.Context, blk interfaces.SignedBeaconBlock) ([]byte, error) {
|
||||
obj, err := blk.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var encodedBlock []byte
|
||||
var err error
|
||||
blockToSave := blk
|
||||
if features.Get().EnableOnlyBlindedBeaconBlocks {
|
||||
blindedBlock, err := blk.ToBlinded()
|
||||
switch {
|
||||
case errors.Is(err, wrapper.ErrUnsupportedVersion):
|
||||
encodedBlock, err = blk.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not marshal non-blinded block")
|
||||
}
|
||||
case err != nil:
|
||||
return nil, errors.Wrap(err, "could not convert block to blinded format")
|
||||
default:
|
||||
encodedBlock, err = blindedBlock.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not marshal blinded block")
|
||||
}
|
||||
blockToSave = blindedBlock
|
||||
}
|
||||
} else {
|
||||
encodedBlock, err = blk.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
switch blk.Version() {
|
||||
switch blockToSave.Version() {
|
||||
case version.BellatrixBlind:
|
||||
return snappy.Encode(nil, append(bellatrixBlindKey, obj...)), nil
|
||||
return snappy.Encode(nil, append(bellatrixBlindKey, encodedBlock...)), nil
|
||||
case version.Bellatrix:
|
||||
return snappy.Encode(nil, append(bellatrixKey, obj...)), nil
|
||||
return snappy.Encode(nil, append(bellatrixKey, encodedBlock...)), nil
|
||||
case version.Altair:
|
||||
return snappy.Encode(nil, append(altairKey, obj...)), nil
|
||||
return snappy.Encode(nil, append(altairKey, encodedBlock...)), nil
|
||||
case version.Phase0:
|
||||
return snappy.Encode(nil, obj), nil
|
||||
return snappy.Encode(nil, encodedBlock), nil
|
||||
default:
|
||||
return nil, errors.New("Unknown block version")
|
||||
}
|
||||
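The reworked marshalBlock above means that, with EnableOnlyBlindedBeaconBlocks active, full Bellatrix blocks are persisted in blinded form, which is exactly what the test updates that follow encode. A minimal hedged sketch of the round trip, assuming the flag and helpers shown elsewhere in this diff:

features.Init(&features.Flags{EnableOnlyBlindedBeaconBlocks: true})

require.NoError(t, db.SaveBlock(ctx, blk))
retrieved, err := db.Block(ctx, blockRoot)
require.NoError(t, err)

wanted := blk
if _, err := blk.PbBellatrixBlock(); err == nil { // only full Bellatrix blocks have a blinded form
	wanted, err = blk.ToBlinded()
	require.NoError(t, err)
}
assert.Equal(t, true, proto.Equal(wanted.Proto(), retrieved.Proto()))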
|
||||
@@ -134,11 +134,17 @@ func TestStore_BlocksCRUD(t *testing.T) {
|
||||
retrievedBlock, err := db.Block(ctx, blockRoot)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, nil, retrievedBlock, "Expected nil block")
|
||||
|
||||
require.NoError(t, db.SaveBlock(ctx, blk))
|
||||
assert.Equal(t, true, db.HasBlock(ctx, blockRoot), "Expected block to exist in the db")
|
||||
retrievedBlock, err = db.Block(ctx, blockRoot)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(blk.Proto(), retrievedBlock.Proto()), "Wanted: %v, received: %v", blk, retrievedBlock)
|
||||
wanted := retrievedBlock
|
||||
if _, err := retrievedBlock.PbBellatrixBlock(); err == nil {
|
||||
wanted, err = retrievedBlock.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(wanted.Proto(), retrievedBlock.Proto()), "Wanted: %v, received: %v", wanted, retrievedBlock)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -314,7 +320,13 @@ func TestStore_BlocksCRUD_NoCache(t *testing.T) {
|
||||
assert.Equal(t, true, db.HasBlock(ctx, blockRoot), "Expected block to exist in the db")
|
||||
retrievedBlock, err = db.Block(ctx, blockRoot)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(blk.Proto(), retrievedBlock.Proto()), "Wanted: %v, received: %v", blk, retrievedBlock)
|
||||
|
||||
wanted := blk
|
||||
if _, err := blk.PbBellatrixBlock(); err == nil {
|
||||
wanted, err = blk.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(wanted.Proto(), retrievedBlock.Proto()), "Wanted: %v, received: %v", wanted, retrievedBlock)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -524,7 +536,12 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
|
||||
root := roots[0]
|
||||
b, err := db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(block1.Proto(), b.Proto()), "Wanted: %v, received: %v", block1, b)
|
||||
wanted := block1
|
||||
if _, err := block1.PbBellatrixBlock(); err == nil {
|
||||
wanted, err = wanted.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(wanted.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted, b)
|
||||
|
||||
_, roots, err = db.HighestRootsBelowSlot(ctx, 11)
|
||||
require.NoError(t, err)
|
||||
@@ -533,7 +550,12 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
|
||||
root = roots[0]
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(block2.Proto(), b.Proto()), "Wanted: %v, received: %v", block2, b)
|
||||
wanted2 := block2
|
||||
if _, err := block2.PbBellatrixBlock(); err == nil {
|
||||
wanted2, err = block2.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(wanted2.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted2, b)
|
||||
|
||||
_, roots, err = db.HighestRootsBelowSlot(ctx, 101)
|
||||
require.NoError(t, err)
|
||||
@@ -542,7 +564,12 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
|
||||
root = roots[0]
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(block3.Proto(), b.Proto()), "Wanted: %v, received: %v", block3, b)
|
||||
wanted = block3
|
||||
if _, err := block3.PbBellatrixBlock(); err == nil {
|
||||
wanted, err = wanted.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(wanted.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted, b)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -569,7 +596,12 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
|
||||
root := roots[0]
|
||||
b, err := db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(block1.Proto(), b.Proto()), "Wanted: %v, received: %v", block1, b)
|
||||
wanted := block1
|
||||
if _, err := block1.PbBellatrixBlock(); err == nil {
|
||||
wanted, err = block1.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(wanted.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted, b)
|
||||
|
||||
_, roots, err = db.HighestRootsBelowSlot(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -577,7 +609,12 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
|
||||
root = roots[0]
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), b.Proto()), "Wanted: %v, received: %v", genesisBlock, b)
|
||||
wanted = genesisBlock
|
||||
if _, err := genesisBlock.PbBellatrixBlock(); err == nil {
|
||||
wanted, err = genesisBlock.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(wanted.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted, b)
|
||||
|
||||
_, roots, err = db.HighestRootsBelowSlot(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
@@ -585,7 +622,12 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
|
||||
root = roots[0]
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), b.Proto()), "Wanted: %v, received: %v", genesisBlock, b)
|
||||
wanted = genesisBlock
|
||||
if _, err := genesisBlock.PbBellatrixBlock(); err == nil {
|
||||
wanted, err = genesisBlock.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(wanted.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted, b)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -671,15 +713,31 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
|
||||
assert.Equal(t, 0, len(retrievedBlocks), "Unexpected number of blocks received, expected none")
|
||||
retrievedBlocks, err = db.BlocksBySlot(ctx, 20)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(b1.Proto(), retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", b1, retrievedBlocks[0])
|
||||
|
||||
wanted := b1
|
||||
if _, err := b1.PbBellatrixBlock(); err == nil {
|
||||
wanted, err = b1.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(retrievedBlocks[0].Proto(), wanted.Proto()), "Wanted: %v, received: %v", retrievedBlocks[0], wanted)
|
||||
assert.Equal(t, true, len(retrievedBlocks) > 0, "Expected to have blocks")
|
||||
retrievedBlocks, err = db.BlocksBySlot(ctx, 100)
|
||||
require.NoError(t, err)
|
||||
if len(retrievedBlocks) != 2 {
|
||||
t.Fatalf("Expected 2 blocks, received %d blocks", len(retrievedBlocks))
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(b2.Proto(), retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", b2, retrievedBlocks[0])
|
||||
assert.Equal(t, true, proto.Equal(b3.Proto(), retrievedBlocks[1].Proto()), "Wanted: %v, received: %v", b3, retrievedBlocks[1])
|
||||
wanted = b2
|
||||
if _, err := b2.PbBellatrixBlock(); err == nil {
|
||||
wanted, err = b2.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(wanted.Proto(), retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", retrievedBlocks[0], wanted)
|
||||
wanted = b3
|
||||
if _, err := b3.PbBellatrixBlock(); err == nil {
|
||||
wanted, err = b3.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(retrievedBlocks[1].Proto(), wanted.Proto()), "Wanted: %v, received: %v", retrievedBlocks[1], wanted)
|
||||
assert.Equal(t, true, len(retrievedBlocks) > 0, "Expected to have blocks")
|
||||
|
||||
hasBlockRoots, retrievedBlockRoots, err := db.BlockRootsBySlot(ctx, 1)
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
// DepositContractAddress returns contract address is the address of
|
||||
// the deposit contract on the proof of work chain.
|
||||
func (s *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.DepositContractAddress")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.DepositContractAddress")
|
||||
defer span.End()
|
||||
var addr []byte
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
@@ -27,7 +27,7 @@ func (s *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {
|
||||
|
||||
// SaveDepositContractAddress to the db. It returns an error if an address has been previously saved.
|
||||
func (s *Store) SaveDepositContractAddress(ctx context.Context, addr common.Address) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.VerifyContractAddress")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.VerifyContractAddress")
|
||||
defer span.End()
|
||||
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
)
|
||||
|
||||
func decode(ctx context.Context, data []byte, dst proto.Message) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.decode")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.decode")
|
||||
defer span.End()
|
||||
|
||||
data, err := snappy.Decode(nil, data)
|
||||
@@ -27,7 +27,7 @@ func decode(ctx context.Context, data []byte, dst proto.Message) error {
|
||||
}
|
||||
|
||||
func encode(ctx context.Context, msg proto.Message) ([]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.encode")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.encode")
|
||||
defer span.End()
|
||||
|
||||
if msg == nil || reflect.ValueOf(msg).IsNil() {
|
||||
|
||||
@@ -166,7 +166,7 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
|
||||
// Note: beacon blocks from the latest finalized epoch return true, whether or not they are
|
||||
// considered canonical in the "head view" of the beacon node.
|
||||
func (s *Store) IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.IsFinalizedBlock")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.IsFinalizedBlock")
|
||||
defer span.End()
|
||||
|
||||
var exists bool
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
)
|
||||
|
||||
@@ -9,4 +10,7 @@ func init() {
|
||||
if err := params.SetActive(params.MainnetTestConfig()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
features.Init(&features.Flags{
|
||||
EnableOnlyBlindedBeaconBlocks: true,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
@@ -14,6 +15,7 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
prombolt "github.com/prysmaticlabs/prombbolt"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/iface"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/io/file"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
@@ -183,8 +185,13 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = prometheus.Register(createBoltCollector(kv.db))
|
||||
return kv, err
|
||||
if err = prometheus.Register(createBoltCollector(kv.db)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = kv.checkNeedsResync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return kv, nil
|
||||
}
|
||||
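With the NewKVStore changes above, the function now returns early if the Prometheus collector cannot be registered and additionally runs checkNeedsResync before handing back the store. A hedged caller-side sketch (the path variable and empty config are illustrative only):

store, err := kv.NewKVStore(ctx, dataDir, &kv.Config{})
if err != nil {
	// Surfaces both collector-registration failures and the
	// "must resync" error from checkNeedsResync.
	return errors.Wrap(err, "could not open beacon chain database")
}
_ = store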
|
||||
// ClearDB removes the previously stored database in the data directory.
|
||||
@@ -216,6 +223,23 @@ func (s *Store) DatabasePath() string {
|
||||
return s.databasePath
|
||||
}
|
||||
|
||||
func (s *Store) checkNeedsResync() error {
return s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(migrationsBucket)
hasDisabledFeature := !features.Get().EnableOnlyBlindedBeaconBlocks
if hasDisabledFeature && bkt.Get(migrationBlindedBeaconBlocksKey) != nil {
return fmt.Errorf(
"you have disabled the flag %s, and your node must resync to ensure your "+
"database is compatible. If you do not want to resync, please re-enable the %s flag",
features.EnableOnlyBlindedBeaconBlocks.Name,
features.EnableOnlyBlindedBeaconBlocks.Name,
)
}
return nil
})
}
||||
|
||||
func createBuckets(tx *bolt.Tx, buckets ...[]byte) error {
|
||||
for _, bucket := range buckets {
|
||||
if _, err := tx.CreateBucketIfNotExists(bucket); err != nil {
|
||||
|
||||
@@ -4,7 +4,9 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// setupDB instantiates and returns a Store instance.
|
||||
@@ -16,3 +18,17 @@ func setupDB(t testing.TB) *Store {
|
||||
})
|
||||
return db
|
||||
}
|
||||
|
||||
func Test_checkNeedsResync(t *testing.T) {
|
||||
store := setupDB(t)
|
||||
resetFn := features.InitWithReset(&features.Flags{
|
||||
EnableOnlyBlindedBeaconBlocks: false,
|
||||
})
|
||||
defer resetFn()
|
||||
require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(migrationsBucket)
|
||||
return bkt.Put(migrationBlindedBeaconBlocksKey, migrationCompleted)
|
||||
}))
|
||||
err := store.checkNeedsResync()
|
||||
require.ErrorContains(t, "your node must resync", err)
|
||||
}
|
||||
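Test_checkNeedsResync above covers the failure path. A hedged companion sketch of the passing path (flag still enabled while the migration marker exists), built only from names already present in this diff:

resetFn := features.InitWithReset(&features.Flags{EnableOnlyBlindedBeaconBlocks: true})
defer resetFn()
require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
	return tx.Bucket(migrationsBucket).Put(migrationBlindedBeaconBlocksKey, migrationCompleted)
}))
// With the flag left on, the marker alone should not force a resync.
require.NoError(t, store.checkNeedsResync())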
|
||||
@@ -14,6 +14,7 @@ var migrations = []migration{
|
||||
migrateArchivedIndex,
|
||||
migrateBlockSlotIndex,
|
||||
migrateStateValidators,
|
||||
migrateBlindedBeaconBlocksEnabled,
|
||||
}
|
||||
|
||||
// RunMigrations defined in the migrations array.
|
||||
|
||||
27
beacon-chain/db/kv/migration_blinded_beacon_blocks.go
Normal file
@@ -0,0 +1,27 @@
package kv

import (
"bytes"
"context"

"github.com/prysmaticlabs/prysm/config/features"
bolt "go.etcd.io/bbolt"
)

var migrationBlindedBeaconBlocksKey = []byte("blinded-beacon-blocks-enabled")

func migrateBlindedBeaconBlocksEnabled(ctx context.Context, db *bolt.DB) error {
if !features.Get().EnableOnlyBlindedBeaconBlocks {
return nil // Only write to the migrations bucket if the feature is enabled.
}
if updateErr := db.Update(func(tx *bolt.Tx) error {
mb := tx.Bucket(migrationsBucket)
if b := mb.Get(migrationBlindedBeaconBlocksKey); bytes.Equal(b, migrationCompleted) {
return nil // Migration already completed.
}
return mb.Put(migrationBlindedBeaconBlocksKey, migrationCompleted)
}); updateErr != nil {
return updateErr
}
return nil
}
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
|
||||
// SavePowchainData saves the pow chain data.
|
||||
func (s *Store) SavePowchainData(ctx context.Context, data *v2.ETH1ChainData) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SavePowchainData")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SavePowchainData")
|
||||
defer span.End()
|
||||
|
||||
if data == nil {
|
||||
@@ -36,7 +36,7 @@ func (s *Store) SavePowchainData(ctx context.Context, data *v2.ETH1ChainData) er
|
||||
|
||||
// PowchainData retrieves the powchain data.
|
||||
func (s *Store) PowchainData(ctx context.Context) (*v2.ETH1ChainData, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.PowchainData")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.PowchainData")
|
||||
defer span.End()
|
||||
|
||||
var data *v2.ETH1ChainData
|
||||
|
||||
@@ -338,7 +338,7 @@ func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx
|
||||
|
||||
// HasState checks if a state by root exists in the db.
|
||||
func (s *Store) HasState(ctx context.Context, blockRoot [32]byte) bool {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.HasState")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasState")
|
||||
defer span.End()
|
||||
hasState := false
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
@@ -615,7 +615,7 @@ func (s *Store) validatorEntries(ctx context.Context, blockRoot [32]byte) ([]*et
|
||||
|
||||
// retrieves and assembles the state information from multiple buckets.
|
||||
func (s *Store) stateBytes(ctx context.Context, blockRoot [32]byte) ([]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.stateBytes")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.stateBytes")
|
||||
defer span.End()
|
||||
var dst []byte
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
@@ -738,7 +738,7 @@ func (s *Store) HighestSlotStatesBelow(ctx context.Context, slot types.Slot) ([]
|
||||
// a map of bolt DB index buckets corresponding to each particular key for indices for
|
||||
// data, such as (shard indices bucket -> shard 5).
|
||||
func createStateIndicesFromStateSlot(ctx context.Context, slot types.Slot) map[string][]byte {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.createStateIndicesFromState")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.createStateIndicesFromState")
|
||||
defer span.End()
|
||||
indicesByBucket := make(map[string][]byte)
|
||||
// Every index has a unique bucket for fast, binary-search
|
||||
|
||||
@@ -64,7 +64,7 @@ func (s *Store) StateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.St
|
||||
|
||||
// HasStateSummary returns true if a state summary exists in DB.
|
||||
func (s *Store) HasStateSummary(ctx context.Context, blockRoot [32]byte) bool {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.HasStateSummary")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasStateSummary")
|
||||
defer span.End()
|
||||
|
||||
var hasSummary bool
|
||||
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
// we might find roots `0x23` and `0x45` stored under that index. We can then
|
||||
// do a batch read for attestations corresponding to those roots.
|
||||
func lookupValuesForIndices(ctx context.Context, indicesByBucket map[string][]byte, tx *bolt.Tx) [][][]byte {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.lookupValuesForIndices")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.lookupValuesForIndices")
|
||||
defer span.End()
|
||||
values := make([][][]byte, 0, len(indicesByBucket))
|
||||
for k, v := range indicesByBucket {
|
||||
@@ -37,7 +37,7 @@ func lookupValuesForIndices(ctx context.Context, indicesByBucket map[string][]by
|
||||
// values stored at said index. Typically, indices are roots of data that can then
|
||||
// be used for reads or batch reads from the DB.
|
||||
func updateValueForIndices(ctx context.Context, indicesByBucket map[string][]byte, root []byte, tx *bolt.Tx) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.updateValueForIndices")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.updateValueForIndices")
|
||||
defer span.End()
|
||||
for k, idx := range indicesByBucket {
|
||||
bkt := tx.Bucket([]byte(k))
|
||||
@@ -63,7 +63,7 @@ func updateValueForIndices(ctx context.Context, indicesByBucket map[string][]byt
|
||||
|
||||
// deleteValueForIndices clears a root stored at each index.
|
||||
func deleteValueForIndices(ctx context.Context, indicesByBucket map[string][]byte, root []byte, tx *bolt.Tx) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.deleteValueForIndices")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteValueForIndices")
|
||||
defer span.End()
|
||||
for k, idx := range indicesByBucket {
|
||||
bkt := tx.Bucket([]byte(k))
|
||||
|
||||
@@ -31,7 +31,7 @@ const (
|
||||
func (s *Store) LastEpochWrittenForValidators(
|
||||
ctx context.Context, validatorIndices []types.ValidatorIndex,
|
||||
) ([]*slashertypes.AttestedEpochForValidator, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.LastEpochWrittenForValidators")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastEpochWrittenForValidators")
|
||||
defer span.End()
|
||||
attestedEpochs := make([]*slashertypes.AttestedEpochForValidator, 0)
|
||||
encodedIndices := make([][]byte, len(validatorIndices))
|
||||
@@ -63,7 +63,7 @@ func (s *Store) LastEpochWrittenForValidators(
|
||||
func (s *Store) SaveLastEpochsWrittenForValidators(
|
||||
ctx context.Context, epochByValidator map[types.ValidatorIndex]types.Epoch,
|
||||
) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveLastEpochsWrittenForValidators")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastEpochsWrittenForValidators")
|
||||
defer span.End()
|
||||
encodedIndices := make([][]byte, 0, len(epochByValidator))
|
||||
encodedEpochs := make([][]byte, 0, len(epochByValidator))
|
||||
@@ -183,7 +183,7 @@ func (s *Store) CheckAttesterDoubleVotes(
|
||||
func (s *Store) AttestationRecordForValidator(
|
||||
ctx context.Context, validatorIdx types.ValidatorIndex, targetEpoch types.Epoch,
|
||||
) (*slashertypes.IndexedAttestationWrapper, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.AttestationRecordForValidator")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.AttestationRecordForValidator")
|
||||
defer span.End()
|
||||
var record *slashertypes.IndexedAttestationWrapper
|
||||
encIdx := encodeValidatorIndex(validatorIdx)
|
||||
@@ -215,7 +215,7 @@ func (s *Store) SaveAttestationRecordsForValidators(
|
||||
ctx context.Context,
|
||||
attestations []*slashertypes.IndexedAttestationWrapper,
|
||||
) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveAttestationRecordsForValidators")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveAttestationRecordsForValidators")
|
||||
defer span.End()
|
||||
encodedTargetEpoch := make([][]byte, len(attestations))
|
||||
encodedRecords := make([][]byte, len(attestations))
|
||||
@@ -259,7 +259,7 @@ func (s *Store) SaveAttestationRecordsForValidators(
|
||||
func (s *Store) LoadSlasherChunks(
|
||||
ctx context.Context, kind slashertypes.ChunkKind, diskKeys [][]byte,
|
||||
) ([][]uint16, []bool, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.LoadSlasherChunk")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.LoadSlasherChunk")
|
||||
defer span.End()
|
||||
chunks := make([][]uint16, 0)
|
||||
var exists []bool
|
||||
@@ -290,7 +290,7 @@ func (s *Store) LoadSlasherChunks(
|
||||
func (s *Store) SaveSlasherChunks(
|
||||
ctx context.Context, kind slashertypes.ChunkKind, chunkKeys [][]byte, chunks [][]uint16,
|
||||
) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveSlasherChunks")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveSlasherChunks")
|
||||
defer span.End()
|
||||
encodedKeys := make([][]byte, len(chunkKeys))
|
||||
encodedChunks := make([][]byte, len(chunkKeys))
|
||||
@@ -320,7 +320,7 @@ func (s *Store) SaveSlasherChunks(
|
||||
func (s *Store) CheckDoubleBlockProposals(
|
||||
ctx context.Context, proposals []*slashertypes.SignedBlockHeaderWrapper,
|
||||
) ([]*ethpb.ProposerSlashing, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.CheckDoubleBlockProposals")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.CheckDoubleBlockProposals")
|
||||
defer span.End()
|
||||
proposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposals))
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
@@ -359,7 +359,7 @@ func (s *Store) CheckDoubleBlockProposals(
|
||||
func (s *Store) BlockProposalForValidator(
|
||||
ctx context.Context, validatorIdx types.ValidatorIndex, slot types.Slot,
|
||||
) (*slashertypes.SignedBlockHeaderWrapper, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.BlockProposalForValidator")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlockProposalForValidator")
|
||||
defer span.End()
|
||||
var record *slashertypes.SignedBlockHeaderWrapper
|
||||
key, err := keyForValidatorProposal(slot, validatorIdx)
|
||||
@@ -387,7 +387,7 @@ func (s *Store) BlockProposalForValidator(
func (s *Store) SaveBlockProposals(
ctx context.Context, proposals []*slashertypes.SignedBlockHeaderWrapper,
) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveBlockProposals")
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlockProposals")
defer span.End()
encodedKeys := make([][]byte, len(proposals))
encodedProposals := make([][]byte, len(proposals))

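The recurring one-line change across these slasher DB hunks swaps `_, span := trace.StartSpan(ctx, ...)` for `ctx, span := trace.StartSpan(ctx, ...)`, so the context that carries the newly started span is the one handed to the bucket operations below it, letting child spans nest under their parent. A minimal sketch of the difference, assuming the OpenCensus `go.opencensus.io/trace` package these call sites appear to use:

```go
package tracingexample

import (
	"context"

	"go.opencensus.io/trace"
)

// parentChildSpans shows why the context returned by StartSpan should be kept:
// only the returned context carries the parent span, so spans started from it
// are linked correctly in the trace.
func parentChildSpans(ctx context.Context) {
	// Old pattern: discarding the returned context orphans any child span.
	_, orphaned := trace.StartSpan(ctx, "BeaconDB.Example.orphaned")
	orphaned.End()

	// New pattern: keep the returned context so callees can nest spans.
	ctx, span := trace.StartSpan(ctx, "BeaconDB.Example")
	defer span.End()

	_, child := trace.StartSpan(ctx, "BeaconDB.Example.child") // nests under "BeaconDB.Example"
	child.End()
}
```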
@@ -67,6 +67,7 @@ go_test(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
|
||||
@@ -89,7 +89,7 @@ func (f *ForkChoice) Head(
|
||||
// ProcessAttestation processes an attestation for vote accounting. It iterates over the validator indices
// and updates their votes accordingly.
|
||||
func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []uint64, blockRoot [32]byte, targetEpoch types.Epoch) {
|
||||
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.ProcessAttestation")
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.ProcessAttestation")
|
||||
defer span.End()
|
||||
f.votesLock.Lock()
|
||||
defer f.votesLock.Unlock()
|
||||
|
||||
@@ -2,9 +2,11 @@ package doublylinkedtree
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
@@ -281,3 +283,109 @@ func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
|
||||
require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))
|
||||
|
||||
}
|
||||
|
||||
// Pow | Pos
|
||||
//
|
||||
// CA -- A -- B -- C-----D
|
||||
// \ \--------------E
|
||||
// \
|
||||
// ----------------------F -- G
|
||||
// B is INVALID
|
||||
//
|
||||
func TestSetOptimisticToInvalid_ForkAtMerge(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
st, root, err := prepareForkchoiceState(ctx, 100, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 101, [32]byte{'a'}, [32]byte{'r'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 102, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 103, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 104, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 105, [32]byte{'e'}, [32]byte{'b'}, [32]byte{'E'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 106, [32]byte{'f'}, [32]byte{'r'}, [32]byte{'F'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 107, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 4, len(roots))
|
||||
sort.Slice(roots, func(i, j int) bool {
|
||||
return bytesutil.BytesToUint64BigEndian(roots[i][:]) < bytesutil.BytesToUint64BigEndian(roots[j][:])
|
||||
})
|
||||
require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
|
||||
}
|
||||
|
||||
// Pow | Pos
|
||||
//
|
||||
// CA -------- B -- C-----D
|
||||
// \ \--------------E
|
||||
// \
|
||||
// --A -------------------------F -- G
|
||||
// B is INVALID
|
||||
//
|
||||
func TestSetOptimisticToInvalid_ForkAtMerge_bis(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
st, root, err := prepareForkchoiceState(ctx, 100, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 101, [32]byte{'a'}, [32]byte{'r'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 102, [32]byte{'b'}, [32]byte{}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 103, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 104, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 105, [32]byte{'e'}, [32]byte{'b'}, [32]byte{'E'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 106, [32]byte{'f'}, [32]byte{'a'}, [32]byte{'F'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 107, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{})
require.NoError(t, err)
require.Equal(t, 4, len(roots))
sort.Slice(roots, func(i, j int) bool {
return bytesutil.BytesToUint64BigEndian(roots[i][:]) < bytesutil.BytesToUint64BigEndian(roots[j][:])
})
require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
}

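Both new doubly-linked-tree tests compare the invalidated roots as a set rather than a sequence: `SetOptimisticToInvalid` gives no ordering guarantee, so the roots are sorted before `require.DeepEqual`. A small sketch of that normalization step in isolation, using lexicographic byte comparison as a slight variation on the `bytesutil` comparison in the tests above:

```go
package forkchoicetest

import (
	"bytes"
	"sort"
)

// sortRoots orders 32-byte roots lexicographically so that two slices holding
// the same roots in different orders compare equal afterwards.
func sortRoots(roots [][32]byte) {
	sort.Slice(roots, func(i, j int) bool {
		return bytes.Compare(roots[i][:], roots[j][:]) < 0
	})
}
```

Sorting both the returned slice and the expected slice before the deep comparison keeps the assertion stable regardless of the order the fork choice store walks its children in.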
@@ -478,3 +478,21 @@ func TestForkChoice_computeProposerBoostScore(t *testing.T) {
|
||||
require.Equal(t, uint64(8), score)
|
||||
})
|
||||
}
|
||||
|
||||
// Regression test (11053)
|
||||
func TestForkChoice_missingPreviousProposerBoost(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
balances := make([]uint64, 64) // 64 active validators.
|
||||
for i := 0; i < len(balances); i++ {
|
||||
balances[i] = 10
|
||||
}
|
||||
driftGenesisTime(f, 1, 0)
|
||||
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
f.store.previousProposerBoostRoot = [32]byte{'p'}
|
||||
_, err = f.Head(ctx, balances)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -27,7 +27,9 @@ func (s *Store) applyProposerBoostScore(newBalances []uint64) error {
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
if !ok || previousNode == nil {
return errInvalidProposerBoostRoot
s.previousProposerBoostRoot = [32]byte{}
log.WithError(errInvalidProposerBoostRoot).Errorf(fmt.Sprintf("invalid prev root %#x", s.previousProposerBoostRoot))
return nil
}
previousNode.balance -= s.previousProposerBoostScore
}
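The change to `applyProposerBoostScore` in both branches replaces a hard `errInvalidProposerBoostRoot` return with a reset-and-log path, so a boost root whose node has been pruned from `nodeByRoot` no longer fails every subsequent head computation (the regression exercised by the test above). A condensed sketch of that shape, with simplified types standing in for the store fields rather than Prysm's exact implementation:

```go
package forkchoicesketch

type node struct {
	balance uint64
}

// applyBoost tolerates a stale boost root: if the node it points at is gone
// (for example pruned at finalization), clear the root and carry on instead of
// returning an error that would fail every later Head call.
func applyBoost(nodeByRoot map[[32]byte]*node, boostRoot *[32]byte, score uint64) {
	if *boostRoot == ([32]byte{}) {
		return
	}
	n, ok := nodeByRoot[*boostRoot]
	if !ok || n == nil {
		*boostRoot = [32]byte{} // reset the stale reference and keep going
		return
	}
	n.balance -= score
}
```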
@@ -35,7 +37,9 @@ func (s *Store) applyProposerBoostScore(newBalances []uint64) error {
|
||||
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
|
||||
currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
|
||||
if !ok || currentNode == nil {
|
||||
return errInvalidProposerBoostRoot
|
||||
s.proposerBoostRoot = [32]byte{}
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf(fmt.Sprintf("invalid current root %#x", s.proposerBoostRoot))
|
||||
return nil
|
||||
}
|
||||
proposerScore, err = computeProposerBoostScore(newBalances)
|
||||
if err != nil {
|
||||
@@ -63,7 +67,7 @@ func (s *Store) PruneThreshold() uint64 {
|
||||
// head starts from justified root and then follows the best descendant links
|
||||
// to find the best block for head. This function assumes a lock on s.nodesLock
|
||||
func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.head")
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.head")
|
||||
defer span.End()
|
||||
s.checkpointsLock.RLock()
|
||||
defer s.checkpointsLock.RUnlock()
|
||||
@@ -109,7 +113,7 @@ func (s *Store) insert(ctx context.Context,
|
||||
slot types.Slot,
|
||||
root, parentRoot, payloadHash [fieldparams.RootLength]byte,
|
||||
justifiedEpoch, finalizedEpoch types.Epoch) (*Node, error) {
|
||||
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
|
||||
defer span.End()
|
||||
|
||||
s.nodesLock.Lock()
|
||||
@@ -196,7 +200,7 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
|
||||
// root is different than the current store finalized root, and the number of the store has met prune threshold.
|
||||
// This function does not prune for invalid optimistically synced nodes, it deals only with pruning upon finalization
|
||||
func (s *Store) prune(ctx context.Context) error {
|
||||
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Prune")
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Prune")
|
||||
defer span.End()
|
||||
|
||||
s.nodesLock.Lock()
|
||||
|
||||
@@ -19,7 +19,7 @@ func computeDeltas(
|
||||
oldBalances, newBalances []uint64,
|
||||
slashedIndices map[types.ValidatorIndex]bool,
|
||||
) ([]int, []Vote, error) {
|
||||
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.computeDeltas")
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.computeDeltas")
|
||||
defer span.End()
|
||||
|
||||
deltas := make([]int, count)
|
||||
|
||||
@@ -452,3 +452,103 @@ func TestSetOptimisticToInvalid_BogusLVH_RotNotImported(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(invalidRoots))
|
||||
}
|
||||
|
||||
// Pow | Pos
|
||||
//
|
||||
// CA -- A -- B -- C-----D
|
||||
// \ \--------------E
|
||||
// \
|
||||
// ----------------------F -- G
|
||||
// B is INVALID
|
||||
//
|
||||
func TestSetOptimisticToInvalid_ForkAtMerge(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
st, root, err := prepareForkchoiceState(ctx, 100, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 101, [32]byte{'a'}, [32]byte{'r'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 102, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 103, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 104, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 105, [32]byte{'e'}, [32]byte{'b'}, [32]byte{'E'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 106, [32]byte{'f'}, [32]byte{'r'}, [32]byte{'F'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 107, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 4, len(roots))
|
||||
require.Equal(t, true, slicesEqual(roots, [][32]byte{[32]byte{'b'}, [32]byte{'c'}, [32]byte{'d'}, [32]byte{'e'}}))
|
||||
}
|
||||
|
||||
// Pow | Pos
|
||||
//
|
||||
// CA -------- B -- C-----D
|
||||
// \ \--------------E
|
||||
// \
|
||||
// --A -------------------------F -- G
|
||||
// B is INVALID
|
||||
//
|
||||
func TestSetOptimisticToInvalid_ForkAtMerge_bis(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
st, root, err := prepareForkchoiceState(ctx, 100, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 101, [32]byte{'a'}, [32]byte{'r'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 102, [32]byte{'b'}, [32]byte{}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 103, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 104, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 105, [32]byte{'e'}, [32]byte{'b'}, [32]byte{'E'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 106, [32]byte{'f'}, [32]byte{'a'}, [32]byte{'F'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 107, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'c'}, [32]byte{})
require.NoError(t, err)
require.Equal(t, 1, len(roots))
require.Equal(t, [32]byte{'d'}, roots[0])
}

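The proto-array version of TestSetOptimisticToInvalid_ForkAtMerge above compares the returned roots with a `slicesEqual` helper instead of sorting them first. The helper itself is not part of this hunk; a hypothetical order-insensitive implementation could look like the following sketch (assuming no duplicate roots within a slice, which holds for these tests):

```go
package protoarraytest

// slicesEqual reports whether a and b contain the same 32-byte roots,
// ignoring order.
func slicesEqual(a, b [][32]byte) bool {
	if len(a) != len(b) {
		return false
	}
	seen := make(map[[32]byte]bool, len(a))
	for _, r := range a {
		seen[r] = true
	}
	for _, r := range b {
		if !seen[r] {
			return false
		}
	}
	return true
}
```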
@@ -82,7 +82,7 @@ func (f *ForkChoice) Head(ctx context.Context, justifiedStateBalances []uint64)
|
||||
// ProcessAttestation processes an attestation for vote accounting. It iterates over the validator indices
// and updates their votes accordingly.
|
||||
func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []uint64, blockRoot [32]byte, targetEpoch types.Epoch) {
|
||||
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.ProcessAttestation")
|
||||
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.ProcessAttestation")
|
||||
defer span.End()
|
||||
f.votesLock.Lock()
|
||||
defer f.votesLock.Unlock()
|
||||
@@ -470,7 +470,7 @@ func (s *Store) insert(ctx context.Context,
|
||||
slot types.Slot,
|
||||
root, parent, payloadHash [32]byte,
|
||||
justifiedEpoch, finalizedEpoch types.Epoch) (*Node, error) {
|
||||
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.insert")
|
||||
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.insert")
|
||||
defer span.End()
|
||||
|
||||
s.nodesLock.Lock()
|
||||
@@ -541,7 +541,7 @@ func (s *Store) insert(ctx context.Context,
|
||||
func (s *Store) applyWeightChanges(
|
||||
ctx context.Context, newBalances []uint64, delta []int,
|
||||
) error {
|
||||
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.applyWeightChanges")
|
||||
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.applyWeightChanges")
|
||||
defer span.End()
|
||||
|
||||
// The length of the nodes can not be different than length of the delta.
|
||||
@@ -746,7 +746,7 @@ func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) err
|
||||
// prune prunes the store with the new finalized root. The tree is only
|
||||
// pruned if the number of the nodes in store has met prune threshold.
|
||||
func (s *Store) prune(ctx context.Context) error {
|
||||
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.prune")
|
||||
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.prune")
|
||||
defer span.End()
|
||||
|
||||
s.nodesLock.Lock()
|
||||
|
||||
@@ -18,7 +18,6 @@ import (
)

func TestProcessSlashings(t *testing.T) {
au := util.AttestationUtil{}
tests := []struct {
name string
block *ethpb.BeaconBlock
@@ -78,13 +77,13 @@ func TestProcessSlashings(t *testing.T) {
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Attestation_1: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{1, 3, 4},
|
||||
}),
|
||||
Attestation_2: au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Attestation_2: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1, 5, 6},
|
||||
}),
|
||||
},
|
||||
@@ -100,13 +99,13 @@ func TestProcessSlashings(t *testing.T) {
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Attestation_1: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{1, 3, 4},
|
||||
}),
|
||||
Attestation_2: au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Attestation_2: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{3, 5, 6},
|
||||
}),
|
||||
},
|
||||
|
||||
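The churn in this test file (and in the attestation-pool tests further down) is one mechanical refactor: the `util.AttestationUtil{}` receiver is dropped and the hydrate helpers become package-level functions, so call sites change from `au.HydrateIndexedAttestation(...)` to `util.HydrateIndexedAttestation(...)`. A sketch of the two shapes side by side, using a trimmed stand-in type because the real `ethpb` structs are much larger, and assuming the helpers' job is simply to fill zero-valued required fields:

```go
package util

// Attestation is a stand-in for ethpb.Attestation, trimmed to what the sketch needs.
type Attestation struct {
	Signature []byte
}

// Before the refactor: a method on an empty utility struct that callers had to
// instantiate first (au := AttestationUtil{}; au.HydrateAttestation(...)).
type AttestationUtil struct{}

func (AttestationUtil) HydrateAttestation(a *Attestation) *Attestation {
	return hydrate(a)
}

// After the refactor: the same behavior as a package-level function, which is
// what the updated tests call as util.HydrateAttestation(...).
func HydrateAttestation(a *Attestation) *Attestation {
	return hydrate(a)
}

// hydrate fills zero-valued required fields with correctly sized defaults.
func hydrate(a *Attestation) *Attestation {
	if a.Signature == nil {
		a.Signature = make([]byte, 96)
	}
	return a
}
```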
@@ -172,7 +172,9 @@ func TestStart(t *testing.T) {
|
||||
time.Sleep(1000 * time.Millisecond)
|
||||
require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance")
|
||||
require.LogsContain(t, hook, "\"Starting service\" ValidatorIndices=\"[1 2 12 15]\"")
|
||||
s.Lock()
|
||||
require.Equal(t, s.isLogging, true, "monitor is not running")
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
func TestInitializePerformanceStructures(t *testing.T) {
|
||||
|
||||
@@ -694,6 +694,7 @@ func (b *BeaconNode) registerSyncService() error {
regularsync.WithStateGen(b.stateGen),
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),
regularsync.WithExecutionPayloadReconstructor(web3Service),
)
return b.services.RegisterService(rs)
}
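The sync-service registration gains one more functional option, `regularsync.WithExecutionPayloadReconstructor(web3Service)`, following the same `With...` option pattern as its neighbors. A generic sketch of that pattern with hypothetical names, not the regularsync package itself:

```go
package optionsketch

// Service is a toy service with two injectable dependencies.
type Service struct {
	stateGen             interface{}
	payloadReconstructor interface{}
}

// Option mutates a Service during construction.
type Option func(*Service)

func WithStateGen(sg interface{}) Option {
	return func(s *Service) { s.stateGen = sg }
}

func WithExecutionPayloadReconstructor(r interface{}) Option {
	return func(s *Service) { s.payloadReconstructor = r }
}

// NewService applies each option in order, so wiring a new dependency is just
// one more With... call appended at the registration site.
func NewService(opts ...Option) *Service {
	s := &Service{}
	for _, o := range opts {
		o(s)
	}
	return s
}
```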
@@ -799,49 +800,50 @@ func (b *BeaconNode) registerRPCService() error {
|
||||
|
||||
p2pService := b.fetchP2P()
|
||||
rpcService := rpc.NewService(b.ctx, &rpc.Config{
|
||||
Host: host,
|
||||
Port: port,
|
||||
BeaconMonitoringHost: beaconMonitoringHost,
|
||||
BeaconMonitoringPort: beaconMonitoringPort,
|
||||
CertFlag: cert,
|
||||
KeyFlag: key,
|
||||
BeaconDB: b.db,
|
||||
Broadcaster: p2pService,
|
||||
PeersFetcher: p2pService,
|
||||
PeerManager: p2pService,
|
||||
MetadataProvider: p2pService,
|
||||
ChainInfoFetcher: chainService,
|
||||
HeadUpdater: chainService,
|
||||
HeadFetcher: chainService,
|
||||
CanonicalFetcher: chainService,
|
||||
ForkFetcher: chainService,
|
||||
FinalizationFetcher: chainService,
|
||||
BlockReceiver: chainService,
|
||||
AttestationReceiver: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
GenesisFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
AttestationsPool: b.attestationPool,
|
||||
ExitPool: b.exitPool,
|
||||
SlashingsPool: b.slashingsPool,
|
||||
SlashingChecker: slasherService,
|
||||
SyncCommitteeObjectPool: b.syncCommitteePool,
|
||||
POWChainService: web3Service,
|
||||
POWChainInfoFetcher: web3Service,
|
||||
ChainStartFetcher: chainStartFetcher,
|
||||
MockEth1Votes: mockEth1DataVotes,
|
||||
SyncService: syncService,
|
||||
DepositFetcher: depositFetcher,
|
||||
PendingDepositFetcher: b.depositCache,
|
||||
BlockNotifier: b,
|
||||
StateNotifier: b,
|
||||
OperationNotifier: b,
|
||||
StateGen: b.stateGen,
|
||||
EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
|
||||
MaxMsgSize: maxMsgSize,
|
||||
ProposerIdsCache: b.proposerIdsCache,
|
||||
ExecutionEngineCaller: web3Service,
|
||||
BlockBuilder: b.fetchBuilderService(),
|
||||
ExecutionEngineCaller: web3Service,
|
||||
ExecutionPayloadReconstructor: web3Service,
|
||||
Host: host,
|
||||
Port: port,
|
||||
BeaconMonitoringHost: beaconMonitoringHost,
|
||||
BeaconMonitoringPort: beaconMonitoringPort,
|
||||
CertFlag: cert,
|
||||
KeyFlag: key,
|
||||
BeaconDB: b.db,
|
||||
Broadcaster: p2pService,
|
||||
PeersFetcher: p2pService,
|
||||
PeerManager: p2pService,
|
||||
MetadataProvider: p2pService,
|
||||
ChainInfoFetcher: chainService,
|
||||
HeadUpdater: chainService,
|
||||
HeadFetcher: chainService,
|
||||
CanonicalFetcher: chainService,
|
||||
ForkFetcher: chainService,
|
||||
FinalizationFetcher: chainService,
|
||||
BlockReceiver: chainService,
|
||||
AttestationReceiver: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
GenesisFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
AttestationsPool: b.attestationPool,
|
||||
ExitPool: b.exitPool,
|
||||
SlashingsPool: b.slashingsPool,
|
||||
SlashingChecker: slasherService,
|
||||
SyncCommitteeObjectPool: b.syncCommitteePool,
|
||||
POWChainService: web3Service,
|
||||
POWChainInfoFetcher: web3Service,
|
||||
ChainStartFetcher: chainStartFetcher,
|
||||
MockEth1Votes: mockEth1DataVotes,
|
||||
SyncService: syncService,
|
||||
DepositFetcher: depositFetcher,
|
||||
PendingDepositFetcher: b.depositCache,
|
||||
BlockNotifier: b,
|
||||
StateNotifier: b,
|
||||
OperationNotifier: b,
|
||||
StateGen: b.stateGen,
|
||||
EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
|
||||
MaxMsgSize: maxMsgSize,
|
||||
ProposerIdsCache: b.proposerIdsCache,
|
||||
BlockBuilder: b.fetchBuilderService(),
|
||||
})
|
||||
|
||||
return b.services.RegisterService(rpcService)
|
||||
|
||||
@@ -37,7 +37,7 @@ func (c *AttCaches) AggregateUnaggregatedAttestationsBySlotIndex(ctx context.Con
|
||||
}
|
||||
|
||||
func (c *AttCaches) aggregateUnaggregatedAttestations(ctx context.Context, unaggregatedAtts []*ethpb.Attestation) error {
|
||||
_, span := trace.StartSpan(ctx, "operations.attestations.kv.aggregateUnaggregatedAttestations")
|
||||
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.aggregateUnaggregatedAttestations")
|
||||
defer span.End()
|
||||
|
||||
attsByDataRoot := make(map[[32]byte][]*ethpb.Attestation, len(unaggregatedAtts))
|
||||
@@ -168,7 +168,7 @@ func (c *AttCaches) AggregatedAttestations() []*ethpb.Attestation {
|
||||
// AggregatedAttestationsBySlotIndex returns the aggregated attestations in cache,
|
||||
// filtered by committee index and slot.
|
||||
func (c *AttCaches) AggregatedAttestationsBySlotIndex(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) []*ethpb.Attestation {
|
||||
_, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndex")
|
||||
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.AggregatedAttestationsBySlotIndex")
|
||||
defer span.End()
|
||||
|
||||
atts := make([]*ethpb.Attestation, 0)
|
||||
|
||||
@@ -23,15 +23,14 @@ func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
sig1 := priv.Sign([]byte{'a'})
|
||||
sig2 := priv.Sign([]byte{'b'})
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1001}, Signature: sig1.Marshal()})
|
||||
att2 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1010}, Signature: sig1.Marshal()})
|
||||
att3 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1100}, Signature: sig1.Marshal()})
|
||||
att4 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1001}, Signature: sig2.Marshal()})
|
||||
att5 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1001}, Signature: sig1.Marshal()})
|
||||
att6 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1010}, Signature: sig1.Marshal()})
|
||||
att7 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1100}, Signature: sig1.Marshal()})
|
||||
att8 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1001}, Signature: sig2.Marshal()})
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1001}, Signature: sig1.Marshal()})
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1010}, Signature: sig1.Marshal()})
|
||||
att3 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1100}, Signature: sig1.Marshal()})
|
||||
att4 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1001}, Signature: sig2.Marshal()})
|
||||
att5 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1001}, Signature: sig1.Marshal()})
|
||||
att6 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1010}, Signature: sig1.Marshal()})
|
||||
att7 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1100}, Signature: sig1.Marshal()})
|
||||
att8 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1001}, Signature: sig2.Marshal()})
|
||||
atts := []*ethpb.Attestation{att1, att2, att3, att4, att5, att6, att7, att8}
|
||||
require.NoError(t, cache.SaveUnaggregatedAttestations(atts))
|
||||
require.NoError(t, cache.AggregateUnaggregatedAttestations(context.Background()))
|
||||
@@ -43,7 +42,7 @@ func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) {
|
||||
func TestKV_Aggregated_AggregateUnaggregatedAttestationsBySlotIndex(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
genData := func(slot types.Slot, committeeIndex types.CommitteeIndex) *ethpb.AttestationData {
|
||||
return util.NewAttestationUtil().HydrateAttestationData(ðpb.AttestationData{
|
||||
return util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: slot,
|
||||
CommitteeIndex: committeeIndex,
|
||||
})
|
||||
@@ -96,7 +95,6 @@ func TestKV_Aggregated_AggregateUnaggregatedAttestationsBySlotIndex(t *testing.T
|
||||
}
|
||||
|
||||
func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
|
||||
au := util.AttestationUtil{}
|
||||
tests := []struct {
|
||||
name string
|
||||
att *ethpb.Attestation
|
||||
@@ -115,14 +113,14 @@ func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "not aggregated",
|
||||
att: au.HydrateAttestation(ðpb.Attestation{
|
||||
att: util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0b10100}}),
|
||||
wantErrString: "attestation is not aggregated",
|
||||
},
|
||||
{
|
||||
name: "invalid hash",
|
||||
att: ðpb.Attestation{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
BeaconBlockRoot: []byte{0b0},
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b10111},
|
||||
@@ -131,7 +129,7 @@ func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "already seen",
|
||||
att: au.HydrateAttestation(ðpb.Attestation{
|
||||
att: util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 100,
|
||||
},
|
||||
@@ -141,7 +139,7 @@ func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "normal save",
|
||||
att: au.HydrateAttestation(ðpb.Attestation{
|
||||
att: util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
},
|
||||
@@ -150,7 +148,7 @@ func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
|
||||
count: 1,
|
||||
},
|
||||
}
|
||||
r, err := hashFn(au.HydrateAttestationData(ðpb.AttestationData{
|
||||
r, err := hashFn(util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 100,
|
||||
}))
|
||||
require.NoError(t, err)
|
||||
@@ -174,7 +172,6 @@ func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKV_Aggregated_SaveAggregatedAttestations(t *testing.T) {
|
||||
au := util.AttestationUtil{}
|
||||
tests := []struct {
|
||||
name string
|
||||
atts []*ethpb.Attestation
|
||||
@@ -184,9 +181,9 @@ func TestKV_Aggregated_SaveAggregatedAttestations(t *testing.T) {
|
||||
{
|
||||
name: "no duplicates",
|
||||
atts: []*ethpb.Attestation{
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1},
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1},
|
||||
AggregationBits: bitfield.Bitlist{0b1101}}),
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1},
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1},
|
||||
AggregationBits: bitfield.Bitlist{0b1101}}),
|
||||
},
|
||||
count: 1,
|
||||
@@ -210,7 +207,6 @@ func TestKV_Aggregated_SaveAggregatedAttestations(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKV_Aggregated_SaveAggregatedAttestations_SomeGoodSomeBad(t *testing.T) {
|
||||
au := util.AttestationUtil{}
|
||||
tests := []struct {
|
||||
name string
|
||||
atts []*ethpb.Attestation
|
||||
@@ -220,9 +216,9 @@ func TestKV_Aggregated_SaveAggregatedAttestations_SomeGoodSomeBad(t *testing.T)
|
||||
{
|
||||
name: "the first attestation is bad",
|
||||
atts: []*ethpb.Attestation{
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1},
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1},
|
||||
AggregationBits: bitfield.Bitlist{0b1100}}),
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1},
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1},
|
||||
AggregationBits: bitfield.Bitlist{0b1101}}),
|
||||
},
|
||||
count: 1,
|
||||
@@ -247,11 +243,10 @@ func TestKV_Aggregated_SaveAggregatedAttestations_SomeGoodSomeBad(t *testing.T)
|
||||
|
||||
func TestKV_Aggregated_AggregatedAttestations(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
au := util.AttestationUtil{}
|
||||
|
||||
att1 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att2 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att3 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att3 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
atts := []*ethpb.Attestation{att1, att2, att3}
|
||||
|
||||
for _, att := range atts {
|
||||
@@ -266,17 +261,16 @@ func TestKV_Aggregated_AggregatedAttestations(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKV_Aggregated_DeleteAggregatedAttestation(t *testing.T) {
|
||||
au := util.AttestationUtil{}
|
||||
t.Run("nil attestation", func(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
assert.ErrorContains(t, "attestation can't be nil", cache.DeleteAggregatedAttestation(nil))
|
||||
att := au.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b10101}, Data: ðpb.AttestationData{Slot: 2}})
|
||||
att := util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b10101}, Data: ðpb.AttestationData{Slot: 2}})
|
||||
assert.NoError(t, cache.DeleteAggregatedAttestation(att))
|
||||
})
|
||||
|
||||
t.Run("non aggregated attestation", func(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
att := au.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b1001}, Data: ðpb.AttestationData{Slot: 2}})
|
||||
att := util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b1001}, Data: ðpb.AttestationData{Slot: 2}})
|
||||
err := cache.DeleteAggregatedAttestation(att)
|
||||
assert.ErrorContains(t, "attestation is not aggregated", err)
|
||||
})
|
||||
@@ -298,16 +292,16 @@ func TestKV_Aggregated_DeleteAggregatedAttestation(t *testing.T) {
|
||||
|
||||
t.Run("nonexistent attestation", func(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
att := au.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b1111}, Data: ðpb.AttestationData{Slot: 2}})
|
||||
att := util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b1111}, Data: ðpb.AttestationData{Slot: 2}})
|
||||
assert.NoError(t, cache.DeleteAggregatedAttestation(att))
|
||||
})
|
||||
|
||||
t.Run("non-filtered deletion", func(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
att1 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11010}})
|
||||
att2 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b11010}})
|
||||
att3 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b11010}})
|
||||
att4 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b10101}})
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11010}})
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b11010}})
|
||||
att3 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b11010}})
|
||||
att4 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b10101}})
|
||||
atts := []*ethpb.Attestation{att1, att2, att3, att4}
|
||||
require.NoError(t, cache.SaveAggregatedAttestations(atts))
|
||||
require.NoError(t, cache.DeleteAggregatedAttestation(att1))
|
||||
@@ -320,10 +314,10 @@ func TestKV_Aggregated_DeleteAggregatedAttestation(t *testing.T) {
|
||||
|
||||
t.Run("filtered deletion", func(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
att1 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b110101}})
|
||||
att2 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110111}})
|
||||
att3 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110100}})
|
||||
att4 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110101}})
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b110101}})
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110111}})
|
||||
att3 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110100}})
|
||||
att4 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110101}})
|
||||
atts := []*ethpb.Attestation{att1, att2, att3, att4}
|
||||
require.NoError(t, cache.SaveAggregatedAttestations(atts))
|
||||
|
||||
@@ -340,7 +334,6 @@ func TestKV_Aggregated_DeleteAggregatedAttestation(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
|
||||
au := util.AttestationUtil{}
|
||||
tests := []struct {
|
||||
name string
|
||||
existing []*ethpb.Attestation
|
||||
@@ -364,7 +357,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "empty cache aggregated",
|
||||
input: au.HydrateAttestation(ðpb.Attestation{
|
||||
input: util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
},
|
||||
@@ -373,7 +366,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "empty cache unaggregated",
|
||||
input: au.HydrateAttestation(ðpb.Attestation{
|
||||
input: util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
},
|
||||
@@ -383,13 +376,13 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
|
||||
{
|
||||
name: "single attestation in cache with exact match",
|
||||
existing: []*ethpb.Attestation{{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1111}},
|
||||
},
|
||||
input: ðpb.Attestation{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1111}},
|
||||
@@ -398,13 +391,13 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
|
||||
{
|
||||
name: "single attestation in cache with subset aggregation",
|
||||
existing: []*ethpb.Attestation{{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1111}},
|
||||
},
|
||||
input: ðpb.Attestation{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1110}},
|
||||
@@ -413,13 +406,13 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
|
||||
{
|
||||
name: "single attestation in cache with superset aggregation",
|
||||
existing: []*ethpb.Attestation{{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1110}},
|
||||
},
|
||||
input: ðpb.Attestation{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1111}},
|
||||
@@ -429,20 +422,20 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
|
||||
name: "multiple attestations with same data in cache with overlapping aggregation, input is subset",
|
||||
existing: []*ethpb.Attestation{
|
||||
{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1111000},
|
||||
},
|
||||
{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1100111},
|
||||
},
|
||||
},
|
||||
input: ðpb.Attestation{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1100000}},
|
||||
@@ -452,20 +445,20 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
|
||||
name: "multiple attestations with same data in cache with overlapping aggregation and input is superset",
|
||||
existing: []*ethpb.Attestation{
|
||||
{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1111000},
|
||||
},
|
||||
{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1100111},
|
||||
},
|
||||
},
|
||||
input: ðpb.Attestation{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1111111}},
|
||||
@@ -475,20 +468,20 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
|
||||
name: "multiple attestations with different data in cache",
|
||||
existing: []*ethpb.Attestation{
|
||||
{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 2,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1111000},
|
||||
},
|
||||
{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 3,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1100111},
|
||||
},
|
||||
},
|
||||
input: ðpb.Attestation{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1111111}},
|
||||
@@ -498,14 +491,14 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
|
||||
name: "attestations with different bitlist lengths",
|
||||
existing: []*ethpb.Attestation{
|
||||
{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 2,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1111000},
|
||||
},
|
||||
},
|
||||
input: ðpb.Attestation{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Slot: 2,
|
||||
}),
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
@@ -547,9 +540,8 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
|
||||
func TestKV_Aggregated_DuplicateAggregatedAttestations(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att2 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1111}})
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1111}})
|
||||
atts := []*ethpb.Attestation{att1, att2}
|
||||
|
||||
for _, att := range atts {
|
||||
|
||||
@@ -14,17 +14,16 @@ import (
|
||||
func TestKV_BlockAttestation_CanSaveRetrieve(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att2 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att3 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att3 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
atts := []*ethpb.Attestation{att1, att2, att3}
|
||||
|
||||
for _, att := range atts {
|
||||
require.NoError(t, cache.SaveBlockAttestation(att))
|
||||
}
|
||||
// Diff bit length should not panic.
|
||||
att4 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b11011}})
|
||||
att4 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b11011}})
|
||||
if err := cache.SaveBlockAttestation(att4); err != bitfield.ErrBitlistDifferentLength {
|
||||
t.Errorf("Unexpected error: wanted %v, got %v", bitfield.ErrBitlistDifferentLength, err)
|
||||
}
|
||||
@@ -41,10 +40,9 @@ func TestKV_BlockAttestation_CanSaveRetrieve(t *testing.T) {
|
||||
func TestKV_BlockAttestation_CanDelete(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att2 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att3 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att3 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
atts := []*ethpb.Attestation{att1, att2, att3}
|
||||
|
||||
for _, att := range atts {
|
||||
|
||||
@@ -14,10 +14,9 @@ import (
|
||||
func TestKV_Forkchoice_CanSaveRetrieve(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att2 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att3 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att3 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
atts := []*ethpb.Attestation{att1, att2, att3}
|
||||
|
||||
for _, att := range atts {
|
||||
@@ -36,10 +35,9 @@ func TestKV_Forkchoice_CanSaveRetrieve(t *testing.T) {
|
||||
func TestKV_Forkchoice_CanDelete(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att2 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att3 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att3 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
atts := []*ethpb.Attestation{att1, att2, att3}
|
||||
|
||||
for _, att := range atts {
|
||||
@@ -57,10 +55,9 @@ func TestKV_Forkchoice_CanDelete(t *testing.T) {
|
||||
func TestKV_Forkchoice_CanCount(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att2 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att3 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
att3 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}})
|
||||
atts := []*ethpb.Attestation{att1, att2, att3}
|
||||
|
||||
for _, att := range atts {
|
||||
|
||||
@@ -12,21 +12,20 @@ import (
|
||||
func TestAttCaches_hasSeenBit(t *testing.T) {
|
||||
c := NewAttCaches()
|
||||
|
||||
au := util.AttestationUtil{}
|
||||
seenA1 := au.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b10000011}})
|
||||
seenA2 := au.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b11100000}})
|
||||
seenA1 := util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b10000011}})
|
||||
seenA2 := util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b11100000}})
|
||||
require.NoError(t, c.insertSeenBit(seenA1))
|
||||
require.NoError(t, c.insertSeenBit(seenA2))
|
||||
tests := []struct {
|
||||
att *ethpb.Attestation
|
||||
want bool
|
||||
}{
|
||||
{att: au.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b10000000}}), want: true},
|
||||
{att: au.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b10000001}}), want: true},
|
||||
{att: au.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b11100000}}), want: true},
|
||||
{att: au.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b10000011}}), want: true},
|
||||
{att: au.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b10001000}}), want: false},
|
||||
{att: au.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b11110111}}), want: false},
|
||||
{att: util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b10000000}}), want: true},
|
||||
{att: util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b10000001}}), want: true},
|
||||
{att: util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b11100000}}), want: true},
|
||||
{att: util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b10000011}}), want: true},
|
||||
{att: util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b10001000}}), want: false},
|
||||
{att: util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b11110111}}), want: false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
got, err := c.hasSeenBit(tt.att)
|
||||
@@ -39,8 +38,7 @@ func TestAttCaches_hasSeenBit(t *testing.T) {
|
||||
|
||||
func TestAttCaches_insertSeenBitDuplicates(t *testing.T) {
|
||||
c := NewAttCaches()
|
||||
|
||||
att1 := util.NewAttestationUtil().HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b10000011}})
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b10000011}})
|
||||
r, err := hashFn(att1.Data)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.insertSeenBit(att1))
|
||||
|
||||
@@ -71,7 +71,7 @@ func (c *AttCaches) UnaggregatedAttestations() ([]*ethpb.Attestation, error) {
|
||||
// UnaggregatedAttestationsBySlotIndex returns the unaggregated attestations in cache,
|
||||
// filtered by committee index and slot.
|
||||
func (c *AttCaches) UnaggregatedAttestationsBySlotIndex(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) []*ethpb.Attestation {
|
||||
_, span := trace.StartSpan(ctx, "operations.attestations.kv.UnaggregatedAttestationsBySlotIndex")
|
||||
ctx, span := trace.StartSpan(ctx, "operations.attestations.kv.UnaggregatedAttestationsBySlotIndex")
|
||||
defer span.End()
|
||||
|
||||
atts := make([]*ethpb.Attestation, 0)
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
)
|
||||
|
||||
func TestKV_Unaggregated_SaveUnaggregatedAttestation(t *testing.T) {
|
||||
au := util.AttestationUtil{}
|
||||
tests := []struct {
|
||||
name string
|
||||
att *ethpb.Attestation
|
||||
@@ -44,12 +43,12 @@ func TestKV_Unaggregated_SaveUnaggregatedAttestation(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "normal save",
|
||||
att: au.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b0001}}),
|
||||
att: util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b0001}}),
|
||||
count: 1,
|
||||
},
|
||||
{
|
||||
name: "already seen",
|
||||
att: au.HydrateAttestation(ðpb.Attestation{
|
||||
att: util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 100,
|
||||
},
|
||||
@@ -58,7 +57,7 @@ func TestKV_Unaggregated_SaveUnaggregatedAttestation(t *testing.T) {
|
||||
count: 0,
|
||||
},
|
||||
}
|
||||
r, err := hashFn(au.HydrateAttestationData(ðpb.AttestationData{Slot: 100}))
|
||||
r, err := hashFn(util.HydrateAttestationData(ðpb.AttestationData{Slot: 100}))
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, tt := range tests {
|
||||
@@ -84,7 +83,6 @@ func TestKV_Unaggregated_SaveUnaggregatedAttestation(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKV_Unaggregated_SaveUnaggregatedAttestations(t *testing.T) {
|
||||
au := util.AttestationUtil{}
|
||||
tests := []struct {
|
||||
name string
|
||||
atts []*ethpb.Attestation
|
||||
@@ -94,18 +92,18 @@ func TestKV_Unaggregated_SaveUnaggregatedAttestations(t *testing.T) {
|
||||
{
|
||||
name: "unaggregated only",
|
||||
atts: []*ethpb.Attestation{
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}}),
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}}),
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}}),
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}}),
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}}),
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}}),
|
||||
},
|
||||
count: 3,
|
||||
},
|
||||
{
|
||||
name: "has aggregated",
|
||||
atts: []*ethpb.Attestation{
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}}),
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}}),
|
||||
{AggregationBits: bitfield.Bitlist{0b1111}, Data: ðpb.AttestationData{Slot: 2}},
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}}),
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}}),
|
||||
},
|
||||
wantErrString: "attestation is aggregated",
|
||||
count: 1,
|
||||
@@ -143,11 +141,10 @@ func TestKV_Unaggregated_DeleteUnaggregatedAttestation(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("successful deletion", func(t *testing.T) {
|
||||
au := util.AttestationUtil{}
|
||||
cache := NewAttCaches()
|
||||
att1 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b101}})
|
||||
att2 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110}})
|
||||
att3 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b110}})
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b101}})
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b110}})
|
||||
att3 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b110}})
|
||||
atts := []*ethpb.Attestation{att1, att2, att3}
|
||||
require.NoError(t, cache.SaveUnaggregatedAttestations(atts))
|
||||
for _, att := range atts {
|
||||
@@ -160,8 +157,7 @@ func TestKV_Unaggregated_DeleteUnaggregatedAttestation(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKV_Unaggregated_DeleteSeenUnaggregatedAttestations(t *testing.T) {
|
||||
au := util.AttestationUtil{}
|
||||
d := au.HydrateAttestationData(ðpb.AttestationData{})
|
||||
d := util.HydrateAttestationData(ðpb.AttestationData{})
|
||||
|
||||
t.Run("no attestations", func(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
@@ -173,9 +169,9 @@ func TestKV_Unaggregated_DeleteSeenUnaggregatedAttestations(t *testing.T) {
|
||||
t.Run("none seen", func(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
atts := []*ethpb.Attestation{
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1001}}),
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1010}}),
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1100}}),
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1001}}),
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1010}}),
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1100}}),
|
||||
}
|
||||
require.NoError(t, cache.SaveUnaggregatedAttestations(atts))
|
||||
assert.Equal(t, 3, cache.UnaggregatedAttestationCount())
|
||||
@@ -190,9 +186,9 @@ func TestKV_Unaggregated_DeleteSeenUnaggregatedAttestations(t *testing.T) {
|
||||
t.Run("some seen", func(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
atts := []*ethpb.Attestation{
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1001}}),
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1010}}),
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1100}}),
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1001}}),
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1010}}),
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1100}}),
|
||||
}
|
||||
require.NoError(t, cache.SaveUnaggregatedAttestations(atts))
|
||||
assert.Equal(t, 3, cache.UnaggregatedAttestationCount())
|
||||
@@ -215,9 +211,9 @@ func TestKV_Unaggregated_DeleteSeenUnaggregatedAttestations(t *testing.T) {
|
||||
t.Run("all seen", func(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
atts := []*ethpb.Attestation{
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1001}}),
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1010}}),
|
||||
au.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1100}}),
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1001}}),
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1010}}),
|
||||
util.HydrateAttestation(ðpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b1100}}),
|
||||
}
|
||||
require.NoError(t, cache.SaveUnaggregatedAttestations(atts))
|
||||
assert.Equal(t, 3, cache.UnaggregatedAttestationCount())
|
||||
@@ -240,10 +236,9 @@ func TestKV_Unaggregated_DeleteSeenUnaggregatedAttestations(t *testing.T) {
|
||||
func TestKV_Unaggregated_UnaggregatedAttestationsBySlotIndex(t *testing.T) {
|
||||
cache := NewAttCaches()
|
||||
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b101}})
|
||||
att2 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1, CommitteeIndex: 2}, AggregationBits: bitfield.Bitlist{0b110}})
|
||||
att3 := au.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b110}})
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b101}})
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1, CommitteeIndex: 2}, AggregationBits: bitfield.Bitlist{0b110}})
|
||||
att3 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2, CommitteeIndex: 1}, AggregationBits: bitfield.Bitlist{0b110}})
|
||||
atts := []*ethpb.Attestation{att1, att2, att3}
|
||||
|
||||
for _, att := range atts {
|
||||
|
||||
@@ -234,7 +234,7 @@ func TestSeenAttestations_PresentInCache(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
|
||||
require.NoError(t, err)
|
||||
|
||||
ad1 := util.NewAttestationUtil().HydrateAttestationData(ðpb.AttestationData{})
|
||||
ad1 := util.HydrateAttestationData(ðpb.AttestationData{})
|
||||
att1 := ðpb.Attestation{Data: ad1, Signature: []byte{'A'}, AggregationBits: bitfield.Bitlist{0x13} /* 0b00010011 */}
|
||||
got, err := s.seen(att1)
|
||||
require.NoError(t, err)
|
||||
@@ -252,9 +252,9 @@ func TestSeenAttestations_PresentInCache(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_seen(t *testing.T) {
|
||||
au := util.AttestationUtil{}
|
||||
ad1 := au.HydrateAttestationData(ðpb.AttestationData{Slot: 1})
|
||||
ad2 := au.HydrateAttestationData(ðpb.AttestationData{Slot: 2})
|
||||
ad1 := util.HydrateAttestationData(ðpb.AttestationData{Slot: 1})
|
||||
|
||||
ad2 := util.HydrateAttestationData(ðpb.AttestationData{Slot: 2})
|
||||
|
||||
// Attestation are checked in order of this list.
|
||||
tests := []struct {
|
||||
|
||||
@@ -26,9 +26,9 @@ func TestPruneExpired_Ticker(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
au := util.AttestationUtil{}
|
||||
ad1 := au.HydrateAttestationData(ðpb.AttestationData{})
|
||||
ad2 := au.HydrateAttestationData(ðpb.AttestationData{Slot: 1})
|
||||
ad1 := util.HydrateAttestationData(ðpb.AttestationData{})
|
||||
|
||||
ad2 := util.HydrateAttestationData(ðpb.AttestationData{Slot: 1})
|
||||
|
||||
atts := []*ethpb.Attestation{
|
||||
{Data: ad1, AggregationBits: bitfield.Bitlist{0b1000, 0b1}, Signature: make([]byte, fieldparams.BLSSignatureLength)},
|
||||
@@ -85,9 +85,9 @@ func TestPruneExpired_PruneExpiredAtts(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
|
||||
require.NoError(t, err)
|
||||
|
||||
au := util.AttestationUtil{}
|
||||
ad1 := au.HydrateAttestationData(ðpb.AttestationData{})
|
||||
ad2 := au.HydrateAttestationData(ðpb.AttestationData{})
|
||||
ad1 := util.HydrateAttestationData(ðpb.AttestationData{})
|
||||
|
||||
ad2 := util.HydrateAttestationData(ðpb.AttestationData{})
|
||||
|
||||
att1 := ðpb.Attestation{Data: ad1, AggregationBits: bitfield.Bitlist{0b1101}}
|
||||
att2 := ðpb.Attestation{Data: ad1, AggregationBits: bitfield.Bitlist{0b1111}}
|
||||
|
||||
@@ -33,7 +33,7 @@ func NewPool() *Pool {
func (p *Pool) PendingAttesterSlashings(ctx context.Context, state state.ReadOnlyBeaconState, noLimit bool) []*ethpb.AttesterSlashing {
p.lock.Lock()
defer p.lock.Unlock()
_, span := trace.StartSpan(ctx, "operations.PendingAttesterSlashing")
ctx, span := trace.StartSpan(ctx, "operations.PendingAttesterSlashing")
defer span.End()

// Update prom metric.
@@ -80,7 +80,7 @@ func (p *Pool) PendingAttesterSlashings(ctx context.Context, state state.ReadOnl
func (p *Pool) PendingProposerSlashings(ctx context.Context, state state.ReadOnlyBeaconState, noLimit bool) []*ethpb.ProposerSlashing {
p.lock.Lock()
defer p.lock.Unlock()
_, span := trace.StartSpan(ctx, "operations.PendingProposerSlashing")
ctx, span := trace.StartSpan(ctx, "operations.PendingProposerSlashing")
defer span.End()

// Update prom metric.
@@ -187,7 +187,7 @@ func (p *Pool) InsertProposerSlashing(
) error {
p.lock.Lock()
defer p.lock.Unlock()
_, span := trace.StartSpan(ctx, "operations.InsertProposerSlashing")
ctx, span := trace.StartSpan(ctx, "operations.InsertProposerSlashing")
defer span.End()

if err := blocks.VerifyProposerSlashing(state, slashing); err != nil {

@@ -66,7 +66,7 @@ func (p *Pool) PendingExits(state state.ReadOnlyBeaconState, slot types.Slot, no
// InsertVoluntaryExit into the pool. This method is a no-op if the pending exit already exists,
// or the validator is already exited.
func (p *Pool) InsertVoluntaryExit(ctx context.Context, state state.ReadOnlyBeaconState, exit *ethpb.SignedVoluntaryExit) {
_, span := trace.StartSpan(ctx, "exitPool.InsertVoluntaryExit")
ctx, span := trace.StartSpan(ctx, "exitPool.InsertVoluntaryExit")
defer span.End()
p.lock.Lock()
defer p.lock.Unlock()

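A recurring fix in these hunks changes `_, span := trace.StartSpan(ctx, ...)` to `ctx, span := trace.StartSpan(ctx, ...)`. StartSpan returns a derived context carrying the new span, so discarding it means downstream calls are not attributed to that span. A minimal sketch of the pattern (the `doWork` and `process` names are placeholders, not code from this diff):

// Sketch only: keep the derived context so child operations nest under this span.
func (p *Pool) doWork(ctx context.Context) error {
	ctx, span := trace.StartSpan(ctx, "operations.doWork")
	defer span.End()
	// Spans started inside process become children of "operations.doWork".
	return p.process(ctx)
}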
@@ -198,7 +198,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
|
||||
|
||||
// method to broadcast messages to other peers in our gossip mesh.
|
||||
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
|
||||
_, span := trace.StartSpan(ctx, "p2p.broadcastObject")
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.broadcastObject")
|
||||
defer span.End()
|
||||
|
||||
span.AddAttributes(trace.StringAttribute("topic", topic))
|
||||
|
||||
@@ -162,7 +162,7 @@ func TestService_BroadcastAttestation(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
|
||||
msg := util.NewAttestationUtil().HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.NewBitlist(7)})
|
||||
msg := util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.NewBitlist(7)})
|
||||
subnet := uint64(5)
|
||||
|
||||
topic := AttestationSubnetTopicFormat
|
||||
@@ -323,7 +323,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
go p.listenForNewNodes()
|
||||
go p2.listenForNewNodes()
|
||||
|
||||
msg := util.NewAttestationUtil().HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.NewBitlist(7)})
|
||||
msg := util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.NewBitlist(7)})
|
||||
topic := AttestationSubnetTopicFormat
|
||||
GossipTypeMapping[reflect.TypeOf(msg)] = topic
|
||||
digest, err := p.currentForkDigest()
|
||||
|
||||
@@ -105,7 +105,6 @@ go_test(
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/forks/bellatrix:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
|
||||
@@ -60,7 +60,7 @@ type ExecutionPayloadReconstructor interface {
// EngineCaller defines a client that can interact with an Ethereum
// execution node's engine service via JSON-RPC.
type EngineCaller interface {
NewPayload(ctx context.Context, payload *pb.ExecutionPayload) ([]byte, error)
NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error)
ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs *pb.PayloadAttributes,
) (*pb.PayloadIDBytes, []byte, error)
@@ -68,12 +68,12 @@ type EngineCaller interface {
ExchangeTransitionConfiguration(
ctx context.Context, cfg *pb.TransitionConfiguration,
) error
ExecutionBlockByHash(ctx context.Context, hash common.Hash) (*pb.ExecutionBlock, error)
ExecutionBlockByHash(ctx context.Context, hash common.Hash, withTxs bool) (*pb.ExecutionBlock, error)
GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error)
}

// NewPayload calls the engine_newPayloadV1 method via JSON-RPC.
func (s *Service) NewPayload(ctx context.Context, payload *pb.ExecutionPayload) ([]byte, error) {
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
defer span.End()
start := time.Now()
@@ -84,7 +84,11 @@ func (s *Service) NewPayload(ctx context.Context, payload *pb.ExecutionPayload)
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
result := &pb.PayloadStatus{}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethod, payload)
payloadPb, ok := payload.Proto().(*pb.ExecutionPayload)
if !ok {
return nil, errors.New("execution data must be an execution payload")
}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethod, payloadPb)
if err != nil {
return nil, handleRPCError(err)
}
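NewPayload now accepts the generic `interfaces.ExecutionData` and unwraps it via `Proto()` before the JSON-RPC call, so callers wrap their proto payloads first, as the updated IPC and HTTP tests below do. A minimal sketch of a caller, assuming `wrapper.WrappedExecutionPayload` as used elsewhere in this diff; `submitPayload` is a hypothetical helper, not part of the change:

// Sketch only: wrap a proto payload before handing it to any EngineCaller.
func submitPayload(ctx context.Context, engine EngineCaller, payloadPb *pb.ExecutionPayload) ([]byte, error) {
	wrapped, err := wrapper.WrappedExecutionPayload(payloadPb)
	if err != nil {
		return nil, err
	}
	// The service implementation unwraps via wrapped.Proto() before calling engine_newPayloadV1.
	return engine.NewPayload(ctx, wrapped)
}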
@@ -243,7 +247,7 @@ func (s *Service) GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error
if parentHash == params.BeaconConfig().ZeroHash {
return nil, false, nil
}
parentBlk, err := s.ExecutionBlockByHash(ctx, parentHash)
parentBlk, err := s.ExecutionBlockByHash(ctx, parentHash, false /* no txs */)
if err != nil {
return nil, false, errors.Wrap(err, "could not get parent execution block")
}
@@ -292,12 +296,11 @@ func (s *Service) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock,

// ExecutionBlockByHash fetches an execution engine block by hash by calling
// eth_blockByHash via JSON-RPC.
func (s *Service) ExecutionBlockByHash(ctx context.Context, hash common.Hash) (*pb.ExecutionBlock, error) {
func (s *Service) ExecutionBlockByHash(ctx context.Context, hash common.Hash, withTxs bool) (*pb.ExecutionBlock, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExecutionBlockByHash")
defer span.End()

result := &pb.ExecutionBlock{}
err := s.rpcClient.CallContext(ctx, result, ExecutionBlockByHashMethod, hash, false /* no full transaction objects */)
err := s.rpcClient.CallContext(ctx, result, ExecutionBlockByHashMethod, hash, withTxs)
return result, handleRPCError(err)
}

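The new `withTxs` flag is forwarded as the second argument of `eth_getBlockByHash`, letting callers choose header-only lookups (terminal-block search) or full-transaction lookups (payload reconstruction). A minimal sketch of the two call styles; `fetchBlocks` and its hash arguments are placeholders rather than code from this diff:

// Sketch only: the boolean now expresses whether transaction bodies are needed.
func fetchBlocks(ctx context.Context, s *Service, parentHash, blockHash common.Hash) error {
	// Header-only lookup, e.g. while walking parents toward the terminal block.
	if _, err := s.ExecutionBlockByHash(ctx, parentHash, false /* no txs */); err != nil {
		return err
	}
	// Full lookup when transactions are needed to rebuild an execution payload.
	_, err := s.ExecutionBlockByHash(ctx, blockHash, true /* with txs */)
	return err
}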
@@ -312,12 +315,12 @@ func (s *Service) ReconstructFullBellatrixBlock(
if !blindedBlock.Block().IsBlinded() {
return nil, errors.New("can only reconstruct block from blinded block format")
}
header, err := blindedBlock.Block().Body().ExecutionPayloadHeader()
header, err := blindedBlock.Block().Body().Execution()
if err != nil {
return nil, err
}
executionBlockHash := common.BytesToHash(header.BlockHash)
executionBlock, err := s.ExecutionBlockByHash(ctx, executionBlockHash)
executionBlockHash := common.BytesToHash(header.BlockHash())
executionBlock, err := s.ExecutionBlockByHash(ctx, executionBlockHash, true /* with txs */)
if err != nil {
return nil, fmt.Errorf("could not fetch execution block with txs by hash %#x: %v", executionBlockHash, err)
}
@@ -337,15 +340,15 @@ func (s *Service) ReconstructFullBellatrixBlock(
}

func fullPayloadFromExecutionBlock(
header *pb.ExecutionPayloadHeader, block *pb.ExecutionBlock,
header interfaces.ExecutionData, block *pb.ExecutionBlock,
) (*pb.ExecutionPayload, error) {
if header == nil || block == nil {
if header.IsNil() || block == nil {
return nil, errors.New("execution block and header cannot be nil")
}
if !bytes.Equal(header.BlockHash, block.Hash[:]) {
if !bytes.Equal(header.BlockHash(), block.Hash[:]) {
return nil, fmt.Errorf(
"block hash field in execution header %#x does not match execution block hash %#x",
header.BlockHash,
header.BlockHash(),
block.Hash,
)
}
@@ -358,18 +361,18 @@ func fullPayloadFromExecutionBlock(
txs[i] = txBin
}
return &pb.ExecutionPayload{
ParentHash: header.ParentHash,
FeeRecipient: header.FeeRecipient,
StateRoot: header.StateRoot,
ReceiptsRoot: header.ReceiptsRoot,
LogsBloom: header.LogsBloom,
PrevRandao: header.PrevRandao,
BlockNumber: header.BlockNumber,
GasLimit: header.GasLimit,
GasUsed: header.GasUsed,
Timestamp: header.Timestamp,
ExtraData: header.ExtraData,
BaseFeePerGas: header.BaseFeePerGas,
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: block.Hash[:],
Transactions: txs,
}, nil

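After this hunk, every header field is read through the `interfaces.ExecutionData` accessors, so the same reconstruction path serves wrapped payloads and wrapped payload headers alike. A minimal sketch of wrapping a header and reading it back through the accessors, assuming `wrapper.WrappedExecutionPayloadHeader` as used in the tests further down; `headerHash` is a hypothetical helper:

// Sketch only: accessor methods replace direct struct-field reads such as h.BlockHash.
func headerHash(h *pb.ExecutionPayloadHeader) ([]byte, error) {
	wrapped, err := wrapper.WrappedExecutionPayloadHeader(h)
	if err != nil {
		return nil, err
	}
	if wrapped.IsNil() {
		return nil, errors.New("nil execution payload header")
	}
	return wrapped.BlockHash(), nil
}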
@@ -8,10 +8,14 @@ import (
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/beacon"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
@@ -68,6 +72,47 @@ func FuzzForkChoiceResponse(f *testing.F) {
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzExchangeTransitionConfiguration(f *testing.F) {
|
||||
valHash := common.Hash([32]byte{0xFF, 0x01})
|
||||
ttd := hexutil.Big(*big.NewInt(math.MaxInt))
|
||||
seed := &beacon.TransitionConfigurationV1{
|
||||
TerminalTotalDifficulty: &ttd,
|
||||
TerminalBlockHash: valHash,
|
||||
TerminalBlockNumber: hexutil.Uint64(math.MaxUint64),
|
||||
}
|
||||
|
||||
output, err := json.Marshal(seed)
|
||||
assert.NoError(f, err)
|
||||
f.Add(output)
|
||||
f.Fuzz(func(t *testing.T, jsonBlob []byte) {
|
||||
gethResp := &beacon.TransitionConfigurationV1{}
|
||||
prysmResp := &pb.TransitionConfiguration{}
|
||||
gethErr := json.Unmarshal(jsonBlob, gethResp)
|
||||
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, fmt.Sprintf("geth and prysm unmarshaller return inconsistent errors. %v and %v", gethErr, prysmErr))
|
||||
// Nothing to marshal if we have an error.
|
||||
if gethErr != nil {
|
||||
return
|
||||
}
|
||||
gethBlob, gethErr := json.Marshal(gethResp)
|
||||
prysmBlob, prysmErr := json.Marshal(prysmResp)
|
||||
if gethErr != nil {
|
||||
t.Errorf("%s %s", gethResp.TerminalTotalDifficulty.String(), prysmResp.TerminalTotalDifficulty)
|
||||
}
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, fmt.Sprintf("geth and prysm unmarshaller return inconsistent errors. %v and %v", gethErr, prysmErr))
|
||||
if gethErr != nil {
|
||||
t.Errorf("%s %s", gethResp.TerminalTotalDifficulty.String(), prysmResp.TerminalTotalDifficulty)
|
||||
}
|
||||
newGethResp := &beacon.TransitionConfigurationV1{}
|
||||
newGethErr := json.Unmarshal(prysmBlob, newGethResp)
|
||||
assert.NoError(t, newGethErr)
|
||||
|
||||
newGethResp2 := &beacon.TransitionConfigurationV1{}
|
||||
newGethErr = json.Unmarshal(gethBlob, newGethResp2)
|
||||
assert.NoError(t, newGethErr)
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzExecutionPayload(f *testing.F) {
|
||||
logsBloom := [256]byte{'j', 'u', 'n', 'k'}
|
||||
execData := &beacon.ExecutableDataV1{
|
||||
@@ -109,6 +154,133 @@ func FuzzExecutionPayload(f *testing.F) {
|
||||
newGethErr = json.Unmarshal(gethBlob, newGethResp2)
|
||||
assert.NoError(t, newGethErr)
|
||||
|
||||
assert.DeepEqual(t, newGethResp.LogsBloom, newGethResp2.LogsBloom)
|
||||
assert.DeepEqual(t, newGethResp, newGethResp2)
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzExecutionBlock(f *testing.F) {
|
||||
logsBloom := [256]byte{'j', 'u', 'n', 'k'}
|
||||
addr := common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87")
|
||||
innerData := &types.DynamicFeeTx{
|
||||
ChainID: big.NewInt(math.MaxInt),
|
||||
Nonce: math.MaxUint64,
|
||||
GasTipCap: big.NewInt(math.MaxInt),
|
||||
GasFeeCap: big.NewInt(math.MaxInt),
|
||||
Gas: math.MaxUint64,
|
||||
To: &addr,
|
||||
Value: big.NewInt(math.MaxInt),
|
||||
Data: []byte{'r', 'a', 'n', 'd', 'o', 'm'},
|
||||
|
||||
// Signature values
|
||||
V: big.NewInt(0),
|
||||
R: big.NewInt(math.MaxInt),
|
||||
S: big.NewInt(math.MaxInt),
|
||||
}
|
||||
tx := types.NewTx(innerData)
|
||||
execBlock := &pb.ExecutionBlock{
|
||||
Header: types.Header{
|
||||
ParentHash: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
Root: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
ReceiptHash: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
Bloom: types.Bloom(logsBloom),
|
||||
Number: big.NewInt(math.MaxInt),
|
||||
GasLimit: math.MaxUint64,
|
||||
GasUsed: math.MaxUint64,
|
||||
Time: 100,
|
||||
Extra: nil,
|
||||
BaseFee: big.NewInt(math.MaxInt),
|
||||
Difficulty: big.NewInt(math.MaxInt),
|
||||
},
|
||||
Hash: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
TotalDifficulty: "999999999999999999999999999999999999999",
|
||||
Transactions: []*types.Transaction{tx, tx},
|
||||
}
|
||||
output, err := json.Marshal(execBlock)
|
||||
assert.NoError(f, err)
|
||||
|
||||
f.Add(output)
|
||||
|
||||
f.Fuzz(func(t *testing.T, jsonBlob []byte) {
|
||||
gethResp := make(map[string]interface{})
|
||||
prysmResp := &pb.ExecutionBlock{}
|
||||
gethErr := json.Unmarshal(jsonBlob, &gethResp)
|
||||
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
|
||||
// Nothing to marshal if we have an error.
|
||||
if gethErr != nil || prysmErr != nil {
|
||||
return
|
||||
}
|
||||
|
||||
assert.NoError(t, validateBlockConsistency(prysmResp, gethResp))
|
||||
|
||||
gethBlob, gethErr := json.Marshal(gethResp)
|
||||
prysmBlob, prysmErr := json.Marshal(prysmResp)
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, "geth and prysm unmarshaller return inconsistent errors")
|
||||
newGethResp := make(map[string]interface{})
|
||||
newGethErr := json.Unmarshal(prysmBlob, &newGethResp)
|
||||
assert.NoError(t, newGethErr)
|
||||
newGethResp2 := make(map[string]interface{})
|
||||
newGethErr = json.Unmarshal(gethBlob, &newGethResp2)
|
||||
assert.NoError(t, newGethErr)
|
||||
|
||||
assert.DeepEqual(t, newGethResp, newGethResp2)
|
||||
compareHeaders(t, jsonBlob)
|
||||
})
|
||||
}
|
||||
|
||||
func compareHeaders(t *testing.T, jsonBlob []byte) {
|
||||
gethResp := &types.Header{}
|
||||
prysmResp := &pb.ExecutionBlock{}
|
||||
gethErr := json.Unmarshal(jsonBlob, gethResp)
|
||||
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, fmt.Sprintf("geth and prysm unmarshaller return inconsistent errors. %v and %v", gethErr, prysmErr))
|
||||
// Nothing to marshal if we have an error.
|
||||
if gethErr != nil {
|
||||
return
|
||||
}
|
||||
|
||||
gethBlob, gethErr := json.Marshal(gethResp)
|
||||
prysmBlob, prysmErr := json.Marshal(prysmResp.Header)
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, "geth and prysm unmarshaller return inconsistent errors")
|
||||
newGethResp := &types.Header{}
|
||||
newGethErr := json.Unmarshal(prysmBlob, newGethResp)
|
||||
assert.NoError(t, newGethErr)
|
||||
newGethResp2 := &types.Header{}
|
||||
newGethErr = json.Unmarshal(gethBlob, newGethResp2)
|
||||
assert.NoError(t, newGethErr)
|
||||
|
||||
assert.DeepEqual(t, newGethResp, newGethResp2)
|
||||
}
|
||||
|
||||
func validateBlockConsistency(execBlock *pb.ExecutionBlock, jsonMap map[string]interface{}) error {
|
||||
blockVal := reflect.ValueOf(execBlock).Elem()
|
||||
bType := reflect.TypeOf(execBlock).Elem()
|
||||
|
||||
fieldnum := bType.NumField()
|
||||
|
||||
for i := 0; i < fieldnum; i++ {
|
||||
field := bType.Field(i)
|
||||
fName := field.Tag.Get("json")
|
||||
if field.Name == "Header" {
|
||||
continue
|
||||
}
|
||||
if fName == "" {
|
||||
return errors.Errorf("Field %s had no json tag", field.Name)
|
||||
}
|
||||
fVal, ok := jsonMap[fName]
|
||||
if !ok {
|
||||
return errors.Errorf("%s doesn't exist in json map for field %s", fName, field.Name)
|
||||
}
|
||||
jsonVal := fVal
|
||||
bVal := blockVal.Field(i).Interface()
|
||||
if field.Name == "Hash" {
|
||||
jsonVal = common.HexToHash(jsonVal.(string))
|
||||
}
|
||||
if field.Name == "Transactions" {
|
||||
continue
|
||||
}
|
||||
if !reflect.DeepEqual(jsonVal, bVal) {
|
||||
return errors.Errorf("fields dont match, %v and %v are not equal for field %s", jsonVal, bVal, field.Name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -20,7 +20,6 @@ import (
|
||||
mocks "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
@@ -32,6 +31,7 @@ import (
|
||||
var (
|
||||
_ = ExecutionPayloadReconstructor(&Service{})
|
||||
_ = EngineCaller(&Service{})
|
||||
_ = ExecutionPayloadReconstructor(&Service{})
|
||||
_ = EngineCaller(&mocks.EngineClient{})
|
||||
)
|
||||
|
||||
@@ -66,7 +66,9 @@ func TestClient_IPC(t *testing.T) {
|
||||
require.Equal(t, true, ok)
|
||||
req, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
latestValidHash, err := srv.NewPayload(ctx, req)
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(req)
|
||||
require.NoError(t, err)
|
||||
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
|
||||
})
|
||||
@@ -87,7 +89,7 @@ func TestClient_IPC(t *testing.T) {
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
require.Equal(t, true, ok)
|
||||
arg := common.BytesToHash([]byte("foo"))
|
||||
resp, err := srv.ExecutionBlockByHash(ctx, arg)
|
||||
resp, err := srv.ExecutionBlockByHash(ctx, arg, true /* with txs */)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
@@ -231,7 +233,9 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadSetup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
resp, err := client.NewPayload(ctx, execPayload)
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(execPayload)
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want.LatestValidHash, resp)
|
||||
})
|
||||
@@ -243,7 +247,9 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadSetup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
resp, err := client.NewPayload(ctx, execPayload)
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(execPayload)
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload)
|
||||
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
@@ -255,7 +261,9 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadSetup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
resp, err := client.NewPayload(ctx, execPayload)
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(execPayload)
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload)
|
||||
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
@@ -267,7 +275,9 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadSetup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
resp, err := client.NewPayload(ctx, execPayload)
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(execPayload)
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload)
|
||||
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
|
||||
require.DeepEqual(t, want.LatestValidHash, resp)
|
||||
})
|
||||
@@ -279,7 +289,9 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadSetup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
resp, err := client.NewPayload(ctx, execPayload)
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(execPayload)
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload)
|
||||
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
@@ -385,7 +397,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
resp, err := service.ExecutionBlockByHash(ctx, arg)
|
||||
resp, err := service.ExecutionBlockByHash(ctx, arg, true /* with txs */)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
@@ -447,7 +459,9 @@ func TestReconstructFullBellatrixBlock(t *testing.T) {
|
||||
jsonPayload["size"] = encodedNum
|
||||
jsonPayload["baseFeePerGas"] = encodedNum
|
||||
|
||||
header, err := bellatrix.PayloadToHeader(payload)
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(payload)
|
||||
require.NoError(t, err)
|
||||
header, err := wrapper.PayloadToHeader(wrappedPayload)
|
||||
require.NoError(t, err)
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -478,9 +492,9 @@ func TestReconstructFullBellatrixBlock(t *testing.T) {
|
||||
reconstructed, err := service.ReconstructFullBellatrixBlock(ctx, wrapped)
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := reconstructed.Block().Body().ExecutionPayload()
|
||||
got, err := reconstructed.Block().Body().Execution()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, payload, got)
|
||||
require.DeepEqual(t, payload, got.Proto())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -983,16 +997,6 @@ func Test_fullPayloadFromExecutionBlock(t *testing.T) {
|
||||
want *pb.ExecutionPayload
|
||||
err string
|
||||
}{
|
||||
{
|
||||
name: "nil header fails",
|
||||
args: args{header: nil, block: &pb.ExecutionBlock{}},
|
||||
err: "cannot be nil",
|
||||
},
|
||||
{
|
||||
name: "nil block fails",
|
||||
args: args{header: &pb.ExecutionPayloadHeader{}, block: nil},
|
||||
err: "cannot be nil",
|
||||
},
|
||||
{
|
||||
name: "block hash field in header and block hash mismatch",
|
||||
args: args{
|
||||
@@ -1022,7 +1026,8 @@ func Test_fullPayloadFromExecutionBlock(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := fullPayloadFromExecutionBlock(tt.args.header, tt.args.block)
|
||||
wrapped, err := wrapper.WrappedExecutionPayloadHeader(tt.args.header)
|
||||
got, err := fullPayloadFromExecutionBlock(wrapped, tt.args.block)
|
||||
if (err != nil) && !strings.Contains(err.Error(), tt.err) {
|
||||
t.Fatalf("Wanted err %s got %v", tt.err, err)
|
||||
}
|
||||
|
||||
@@ -0,0 +1,2 @@
|
||||
go test fuzz v1
|
||||
[]byte("{}")
|
||||
@@ -0,0 +1,2 @@
|
||||
go test fuzz v1
|
||||
[]byte("{\"terminAlTotAlDiffiCultY\":\"0\"}")
|
||||
@@ -0,0 +1,2 @@
|
||||
go test fuzz v1
|
||||
[]byte("{\"baseFeePerGas\":\"0x7fffffffffffffff\",\"difficulty\":\"0x7fffffffffffffff\",\"extraData\":\"0x\",\"gasLimit\":\"0xffffffffffffffff\",\"gasUsed\":\"0xffffffffffffffff\",\"hash\":\"0xff01ff01ff01ff01ff01ff01ff01ff01000000000000000000000g0000000000\",\"logsBloom\":\"0x6a756e6b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"miner\":\"0x0000000000000000000000000000000000000000\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"nonce\":\"0x0000000000000000\",\"number\":\"0x7fffffffffffffff\",\"parentHash\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"receiptsRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"sha3Uncles\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"stateRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"timestamp\":\"0x64\",\"totalDifficulty\":\"999999999999999999999999999999999999999\",\"transactions\":[{\"type\":\"0x2\",\"nonce\":\"0xffffffffffffffff\",\"gasPrice\":null,\"maxPriorityFeePerGas\":\"0x7fffffffffffffff\",\"maxFeePerGas\":\"0x7fffffffffffffff\",\"gas\":\"0xffffffffffffffff\",\"value\":\"0x7fffffffffffffff\",\"input\":\"0x72616e646f6d\",\"v\":\"0x0\",\"r\":\"0x7fffffffffffffff\",\"s\":\"0x7fffffffffffffff\",\"to\":\"0x095e7baea6a6c7c4c2dfeb977efac326af552d87\",\"chainId\":\"0x7fffffffffffffff\",\"accessList\":[],\"hash\":\"0x26db3ef2c0e5945b24088d6a4165d0bb2959abd848b57891aa041b72518548ab\"},{\"type\":\"0x2\",\"nonce\":\"0xffffffffffffffff\",\"gasPrice\":null,\"maxPriorityFeePerGas\":\"0x7fffffffffffffff\",\"maxFeePerGas\":\"0x7fffffffffffffff\",\"gas\":\"0xffffffffffffffff\",\"value\":\"0x7fffffffffffffff\",\"input\":\"0x72616e646f6d\",\"v\":\"0x0\",\"r\":\"0x7fffffffffffffff\",\"s\":\"0x7fffffffffffffff\",\"to\":\"0x095e7baea6a6c7c4c2dfeb977efac326af552d87\",\"chainId\":\"0x7fffffffffffffff\",\"accessList\":[],\"hash\":\"0x26db3ef2c0e5945b24088d6a4165d0bb2959abd848b57891aa041b72518548ab\"}],\"transactionsRoot\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}")
|
||||
@@ -0,0 +1,2 @@
|
||||
go test fuzz v1
|
||||
[]byte("{}")
|
||||
@@ -0,0 +1,2 @@
|
||||
go test fuzz v1
|
||||
[]byte("{\"0000000000000\":{},\"pAYloAdId\":\"\"}")
|
||||
@@ -18,6 +18,8 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -9,30 +9,34 @@ import (
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
)
|
||||
|
||||
// EngineClient --
|
||||
type EngineClient struct {
|
||||
NewPayloadResp []byte
|
||||
PayloadIDBytes *pb.PayloadIDBytes
|
||||
ForkChoiceUpdatedResp []byte
|
||||
ExecutionPayload *pb.ExecutionPayload
|
||||
ExecutionBlock *pb.ExecutionBlock
|
||||
Err error
|
||||
ErrLatestExecBlock error
|
||||
ErrExecBlockByHash error
|
||||
ErrForkchoiceUpdated error
|
||||
ErrNewPayload error
|
||||
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
|
||||
TerminalBlockHash []byte
|
||||
TerminalBlockHashExists bool
|
||||
OverrideValidHash [32]byte
|
||||
NewPayloadResp []byte
|
||||
PayloadIDBytes *pb.PayloadIDBytes
|
||||
ForkChoiceUpdatedResp []byte
|
||||
ExecutionPayload *pb.ExecutionPayload
|
||||
ExecutionBlock *pb.ExecutionBlock
|
||||
Err error
|
||||
ErrLatestExecBlock error
|
||||
ErrExecBlockByHash error
|
||||
ErrForkchoiceUpdated error
|
||||
ErrNewPayload error
|
||||
ExecutionPayloadByBlockHash map[[32]byte]*pb.ExecutionPayload
|
||||
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
|
||||
NumReconstructedPayloads uint64
|
||||
TerminalBlockHash []byte
|
||||
TerminalBlockHashExists bool
|
||||
OverrideValidHash [32]byte
|
||||
}
|
||||
|
||||
// NewPayload --
|
||||
func (e *EngineClient) NewPayload(_ context.Context, _ *pb.ExecutionPayload) ([]byte, error) {
|
||||
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData) ([]byte, error) {
|
||||
return e.NewPayloadResp, e.ErrNewPayload
|
||||
}
|
||||
|
||||
@@ -62,7 +66,7 @@ func (e *EngineClient) LatestExecutionBlock(_ context.Context) (*pb.ExecutionBlo
|
||||
}
|
||||
|
||||
// ExecutionBlockByHash --
|
||||
func (e *EngineClient) ExecutionBlockByHash(_ context.Context, h common.Hash) (*pb.ExecutionBlock, error) {
|
||||
func (e *EngineClient) ExecutionBlockByHash(_ context.Context, h common.Hash, _ bool) (*pb.ExecutionBlock, error) {
|
||||
b, ok := e.BlockByHashMap[h]
|
||||
if !ok {
|
||||
return nil, errors.New("block not found")
|
||||
@@ -70,6 +74,24 @@ func (e *EngineClient) ExecutionBlockByHash(_ context.Context, h common.Hash) (*
return b, e.ErrExecBlockByHash
}

func (e *EngineClient) ReconstructFullBellatrixBlock(
_ context.Context, blindedBlock interfaces.SignedBeaconBlock,
) (interfaces.SignedBeaconBlock, error) {
if !blindedBlock.Block().IsBlinded() {
return nil, errors.New("block must be blinded")
}
header, err := blindedBlock.Block().Body().Execution()
if err != nil {
return nil, err
}
payload, ok := e.ExecutionPayloadByBlockHash[bytesutil.ToBytes32(header.BlockHash())]
if !ok {
return nil, errors.New("block not found")
}
e.NumReconstructedPayloads++
return wrapper.BuildSignedBeaconBlockFromExecutionPayload(blindedBlock, payload)
}

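A test can seed this mock with a payload keyed by the blinded header's block hash and then assert on `NumReconstructedPayloads`; the GetBlockV2 test further down wires an empty map the same way. A minimal sketch, where `blockHash`, `payload`, and `blindedBlk` are assumed test fixtures rather than code from this diff:

// Sketch only: exercise the mock reconstruction path in a test.
engine := &EngineClient{
	ExecutionPayloadByBlockHash: map[[32]byte]*pb.ExecutionPayload{
		blockHash: payload,
	},
}
full, err := engine.ReconstructFullBellatrixBlock(ctx, blindedBlk)
require.NoError(t, err)
require.Equal(t, uint64(1), engine.NumReconstructedPayloads)
_ = full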
// GetTerminalBlockHash --
|
||||
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error) {
|
||||
ttd := new(big.Int)
|
||||
@@ -98,7 +120,7 @@ func (e *EngineClient) GetTerminalBlockHash(ctx context.Context) ([]byte, bool,
|
||||
if parentHash == params.BeaconConfig().ZeroHash {
|
||||
return nil, false, nil
|
||||
}
|
||||
parentBlk, err := e.ExecutionBlockByHash(ctx, parentHash)
|
||||
parentBlk, err := e.ExecutionBlockByHash(ctx, parentHash, false /* with txs */)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get parent execution block")
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@ go_library(
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
|
||||
"//beacon-chain/rpc/statefetcher:go_default_library",
|
||||
@@ -88,6 +89,7 @@ go_test(
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits/mock:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//beacon-chain/powchain/testing:go_default_library",
|
||||
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
|
||||
"//beacon-chain/rpc/statefetcher:go_default_library",
|
||||
@@ -101,6 +103,7 @@ go_test(
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/service:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
|
||||
@@ -474,8 +474,42 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
ExecutionOptimistic: isOptimistic,
}, nil
}
if _, err := blk.PbBlindedBellatrixBlock(); err == nil {
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBellatrixBlock(ctx, blk)
if err != nil {
return nil, status.Errorf(
codes.Internal,
"Could not reconstruct full execution payload to create signed beacon block: %v",
err,
)
}
bellatrixBlk, err = signedFullBlock.PbBellatrixBlock()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
v2Blk, err := migration.V1Alpha1BeaconBlockBellatrixToV2(bellatrixBlk.Block)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
return &ethpbv2.BlockResponseV2{
Version: ethpbv2.Version_BELLATRIX,
Data: &ethpbv2.SignedBeaconBlockContainerV2{
Message: &ethpbv2.SignedBeaconBlockContainerV2_BellatrixBlock{BellatrixBlock: v2Blk},
Signature: blk.Signature(),
},
ExecutionOptimistic: isOptimistic,
}, nil
}
// ErrUnsupportedBellatrixBlock means that we have another block type
if !errors.Is(err, wrapper.ErrUnsupportedBellatrixBlock) {
if !errors.Is(err, wrapper.ErrUnsupportedBlindedBellatrixBlock) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}

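The sentinel check also changes here: after attempting the blinded Bellatrix path, only `wrapper.ErrUnsupportedBlindedBellatrixBlock` means "not this block version, keep falling through"; any other error is a genuine failure. A condensed sketch of that fallthrough, mirroring the handler above rather than adding new behavior:

// Sketch only: distinguish "wrong block version" from genuine failures.
if _, err := blk.PbBlindedBellatrixBlock(); err != nil {
	if !errors.Is(err, wrapper.ErrUnsupportedBlindedBellatrixBlock) {
		return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
	}
	// Not a blinded Bellatrix block: fall through to the next supported version.
}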
@@ -9,12 +9,14 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
mockp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
powchaintesting "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/proto/migration"
|
||||
@@ -41,11 +43,10 @@ func fillDBTestBlocks(ctx context.Context, t *testing.T, beaconDB db.Database) (
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = i
|
||||
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.NewAttestation()
|
||||
att1 := util.NewAttestation()
|
||||
att1.Data.Slot = i
|
||||
att1.Data.CommitteeIndex = types.CommitteeIndex(i)
|
||||
att2 := au.NewAttestation()
|
||||
att2 := util.NewAttestation()
|
||||
att2.Data.Slot = i
|
||||
att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
|
||||
b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
|
||||
@@ -85,11 +86,10 @@ func fillDBTestBlocksAltair(ctx context.Context, t *testing.T, beaconDB db.Datab
|
||||
b := util.NewBeaconBlockAltair()
|
||||
b.Block.Slot = i
|
||||
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.NewAttestation()
|
||||
att1 := util.NewAttestation()
|
||||
att1.Data.Slot = i
|
||||
att1.Data.CommitteeIndex = types.CommitteeIndex(i)
|
||||
att2 := au.NewAttestation()
|
||||
att2 := util.NewAttestation()
|
||||
att2.Data.Slot = i
|
||||
att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
|
||||
b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
|
||||
@@ -128,11 +128,10 @@ func fillDBTestBlocksBellatrix(ctx context.Context, t *testing.T, beaconDB db.Da
|
||||
b := util.NewBeaconBlockBellatrix()
|
||||
b.Block.Slot = i
|
||||
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.NewAttestation()
|
||||
att1 := util.NewAttestation()
|
||||
att1.Data.Slot = i
|
||||
att1.Data.CommitteeIndex = types.CommitteeIndex(i)
|
||||
att2 := au.NewAttestation()
|
||||
att2 := util.NewAttestation()
|
||||
att2.Data.Slot = i
|
||||
att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
|
||||
b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
|
||||
@@ -1290,6 +1289,9 @@ func TestServer_GetBlockV2(t *testing.T) {
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
ExecutionPayloadReconstructor: &powchaintesting.EngineClient{
|
||||
ExecutionPayloadByBlockHash: map[[32]byte]*enginev1.ExecutionPayload{},
|
||||
},
|
||||
}
|
||||
|
||||
genBlk, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
|
||||
// GetForkSchedule retrieve all scheduled upcoming forks this node is aware of.
|
||||
func (_ *Server) GetForkSchedule(ctx context.Context, _ *emptypb.Empty) (*ethpb.ForkScheduleResponse, error) {
|
||||
_, span := trace.StartSpan(ctx, "beacon.GetForkSchedule")
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.GetForkSchedule")
|
||||
defer span.End()
|
||||
|
||||
schedule := params.BeaconConfig().ForkVersionSchedule
|
||||
@@ -57,7 +57,7 @@ func (_ *Server) GetForkSchedule(ctx context.Context, _ *emptypb.Empty) (*ethpb.
|
||||
// - any value starting with 0x in the spec is returned as a hex string.
|
||||
// - all other values are returned as number.
|
||||
func (_ *Server) GetSpec(ctx context.Context, _ *emptypb.Empty) (*ethpb.SpecResponse, error) {
|
||||
_, span := trace.StartSpan(ctx, "beacon.GetSpec")
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.GetSpec")
|
||||
defer span.End()
|
||||
|
||||
data, err := prepareConfigSpec()
|
||||
@@ -69,7 +69,7 @@ func (_ *Server) GetSpec(ctx context.Context, _ *emptypb.Empty) (*ethpb.SpecResp
|
||||
|
||||
// GetDepositContract retrieves deposit contract address and genesis fork version.
|
||||
func (_ *Server) GetDepositContract(ctx context.Context, _ *emptypb.Empty) (*ethpb.DepositContractResponse, error) {
|
||||
_, span := trace.StartSpan(ctx, "beaconv1.GetDepositContract")
|
||||
ctx, span := trace.StartSpan(ctx, "beaconv1.GetDepositContract")
|
||||
defer span.End()
|
||||
|
||||
return ðpb.DepositContractResponse{
|
||||
|
||||
@@ -24,7 +24,7 @@ import (
|
||||
// ListPoolAttestations retrieves attestations known by the node but
|
||||
// not necessarily incorporated into any block. Allows filtering by committee index or slot.
|
||||
func (bs *Server) ListPoolAttestations(ctx context.Context, req *ethpbv1.AttestationsPoolRequest) (*ethpbv1.AttestationsPoolResponse, error) {
|
||||
_, span := trace.StartSpan(ctx, "beacon.ListPoolAttestations")
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.ListPoolAttestations")
|
||||
defer span.End()
|
||||
|
||||
attestations := bs.AttestationsPool.AggregatedAttestations()
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
v1alpha1validator "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/validator"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/statefetcher"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
@@ -21,22 +22,23 @@ import (
|
||||
// Server defines a server implementation of the gRPC Beacon Chain service,
|
||||
// providing RPC endpoints to access data relevant to the Ethereum Beacon Chain.
|
||||
type Server struct {
|
||||
BeaconDB db.ReadOnlyDatabase
|
||||
ChainInfoFetcher blockchain.ChainInfoFetcher
|
||||
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlockNotifier blockfeed.Notifier
|
||||
OperationNotifier operation.Notifier
|
||||
Broadcaster p2p.Broadcaster
|
||||
AttestationsPool attestations.Pool
|
||||
SlashingsPool slashings.PoolManager
|
||||
VoluntaryExitsPool voluntaryexits.PoolManager
|
||||
StateGenService stategen.StateManager
|
||||
StateFetcher statefetcher.Fetcher
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
V1Alpha1ValidatorServer *v1alpha1validator.Server
|
||||
SyncChecker sync.Checker
|
||||
CanonicalHistory *stategen.CanonicalHistory
|
||||
HeadUpdater blockchain.HeadUpdater
|
||||
BeaconDB db.ReadOnlyDatabase
|
||||
ChainInfoFetcher blockchain.ChainInfoFetcher
|
||||
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlockNotifier blockfeed.Notifier
|
||||
OperationNotifier operation.Notifier
|
||||
Broadcaster p2p.Broadcaster
|
||||
AttestationsPool attestations.Pool
|
||||
SlashingsPool slashings.PoolManager
|
||||
VoluntaryExitsPool voluntaryexits.PoolManager
|
||||
StateGenService stategen.StateManager
|
||||
StateFetcher statefetcher.Fetcher
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
V1Alpha1ValidatorServer *v1alpha1validator.Server
|
||||
SyncChecker sync.Checker
|
||||
CanonicalHistory *stategen.CanonicalHistory
|
||||
HeadUpdater blockchain.HeadUpdater
|
||||
ExecutionPayloadReconstructor powchain.ExecutionPayloadReconstructor
|
||||
}
|
||||
|
||||
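The `ExecutionPayloadReconstructor` field added to this Server is what the GetBlockV2 handler above relies on. A minimal sketch of the test wiring, matching the GetBlockV2 test earlier in this diff (other Server fields elided; production wiring would point at the powchain service rather than the mock):

// Sketch only: wire the mock reconstructor into the eth/beacon Server for tests.
bs := &Server{
	ExecutionPayloadReconstructor: &powchaintesting.EngineClient{
		ExecutionPayloadByBlockHash: map[[32]byte]*enginev1.ExecutionPayload{},
	},
}
_ = bs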
@@ -27,7 +27,7 @@ type stateRequest struct {
|
||||
|
||||
// GetGenesis retrieves details of the chain's genesis which can be used to identify chain.
|
||||
func (bs *Server) GetGenesis(ctx context.Context, _ *emptypb.Empty) (*ethpb.GenesisResponse, error) {
|
||||
_, span := trace.StartSpan(ctx, "beacon.GetGenesis")
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.GetGenesis")
|
||||
defer span.End()
|
||||
|
||||
genesisTime := bs.GenesisTimeFetcher.GenesisTime()
|
||||
|
||||
@@ -142,7 +142,7 @@ func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.StateReq
|
||||
|
||||
// ListForkChoiceHeads retrieves the leaves of the current fork choice tree.
|
||||
func (ds *Server) ListForkChoiceHeads(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ForkChoiceHeadsResponse, error) {
|
||||
_, span := trace.StartSpan(ctx, "debug.ListForkChoiceHeads")
|
||||
ctx, span := trace.StartSpan(ctx, "debug.ListForkChoiceHeads")
|
||||
defer span.End()
|
||||
|
||||
headRoots, headSlots := ds.HeadFetcher.ChainHeads()
|
||||
@@ -161,7 +161,7 @@ func (ds *Server) ListForkChoiceHeads(ctx context.Context, _ *emptypb.Empty) (*e
|
||||
|
||||
// ListForkChoiceHeadsV2 retrieves the leaves of the current fork choice tree.
|
||||
func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (*ethpbv2.ForkChoiceHeadsResponse, error) {
|
||||
_, span := trace.StartSpan(ctx, "debug.ListForkChoiceHeadsV2")
|
||||
ctx, span := trace.StartSpan(ctx, "debug.ListForkChoiceHeadsV2")
|
||||
defer span.End()
|
||||
|
||||
headRoots, headSlots := ds.HeadFetcher.ChainHeads()
|
||||
|
||||
@@ -92,7 +92,7 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
|
||||
srv, ctrl, mockStream := setupServer(ctx, t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
wantedAttV1alpha1 := util.NewAttestationUtil().HydrateAttestation(ð.Attestation{
|
||||
wantedAttV1alpha1 := util.HydrateAttestation(ð.Attestation{
|
||||
Data: ð.AttestationData{
|
||||
Slot: 8,
|
||||
},
|
||||
@@ -127,7 +127,7 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
|
||||
defer ctrl.Finish()
|
||||
|
||||
wantedAttV1alpha1 := ð.AggregateAttestationAndProof{
|
||||
Aggregate: util.NewAttestationUtil().HydrateAttestation(ð.Attestation{}),
|
||||
Aggregate: util.HydrateAttestation(ð.Attestation{}),
|
||||
}
|
||||
wantedAtt := migration.V1Alpha1AggregateAttAndProofToV1(wantedAttV1alpha1)
|
||||
genericResponse, err := anypb.New(wantedAtt)
|
||||
|
||||
@@ -38,7 +38,7 @@ var (

// GetIdentity retrieves data about the node's network presence.
func (ns *Server) GetIdentity(ctx context.Context, _ *emptypb.Empty) (*ethpb.IdentityResponse, error) {
	_, span := trace.StartSpan(ctx, "node.GetIdentity")
	ctx, span := trace.StartSpan(ctx, "node.GetIdentity")
	defer span.End()

	peerId := ns.PeerManager.PeerID().Pretty()
@@ -82,7 +82,7 @@ func (ns *Server) GetIdentity(ctx context.Context, _ *emptypb.Empty) (*ethpb.Ide

// GetPeer retrieves data about the given peer.
func (ns *Server) GetPeer(ctx context.Context, req *ethpb.PeerRequest) (*ethpb.PeerResponse, error) {
	_, span := trace.StartSpan(ctx, "node.GetPeer")
	ctx, span := trace.StartSpan(ctx, "node.GetPeer")
	defer span.End()

	peerStatus := ns.PeersFetcher.Peers()
@@ -135,7 +135,7 @@ func (ns *Server) GetPeer(ctx context.Context, req *ethpb.PeerRequest) (*ethpb.P

// ListPeers retrieves data about the node's network peers.
func (ns *Server) ListPeers(ctx context.Context, req *ethpb.PeersRequest) (*ethpb.PeersResponse, error) {
	_, span := trace.StartSpan(ctx, "node.ListPeers")
	ctx, span := trace.StartSpan(ctx, "node.ListPeers")
	defer span.End()

	peerStatus := ns.PeersFetcher.Peers()
@@ -231,7 +231,7 @@ func (ns *Server) ListPeers(ctx context.Context, req *ethpb.PeersRequest) (*ethp

// PeerCount retrieves retrieves number of known peers.
func (ns *Server) PeerCount(ctx context.Context, _ *emptypb.Empty) (*ethpb.PeerCountResponse, error) {
	_, span := trace.StartSpan(ctx, "node.PeerCount")
	ctx, span := trace.StartSpan(ctx, "node.PeerCount")
	defer span.End()

	peerStatus := ns.PeersFetcher.Peers()
@@ -249,7 +249,7 @@ func (ns *Server) PeerCount(ctx context.Context, _ *emptypb.Empty) (*ethpb.PeerC
// GetVersion requests that the beacon node identify information about its implementation in a
// format similar to a HTTP User-Agent field.
func (_ *Server) GetVersion(ctx context.Context, _ *emptypb.Empty) (*ethpb.VersionResponse, error) {
	_, span := trace.StartSpan(ctx, "node.GetVersion")
	ctx, span := trace.StartSpan(ctx, "node.GetVersion")
	defer span.End()

	v := fmt.Sprintf("Prysm/%s (%s %s)", version.SemanticVersion(), runtime.GOOS, runtime.GOARCH)
@@ -263,7 +263,7 @@ func (_ *Server) GetVersion(ctx context.Context, _ *emptypb.Empty) (*ethpb.Versi
// GetSyncStatus requests the beacon node to describe if it's currently syncing or not, and
// if it is, what block it is up to.
func (ns *Server) GetSyncStatus(ctx context.Context, _ *emptypb.Empty) (*ethpb.SyncingResponse, error) {
	_, span := trace.StartSpan(ctx, "node.GetSyncStatus")
	ctx, span := trace.StartSpan(ctx, "node.GetSyncStatus")
	defer span.End()

	headSlot := ns.HeadFetcher.HeadSlot()
@@ -288,7 +288,7 @@ func (vs *Server) ProduceBlock(ctx context.Context, req *ethpbv1.ProduceBlockReq

// ProduceBlockV2 requests the beacon node to produce a valid unsigned beacon block, which can then be signed by a proposer and submitted.
func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.ProduceBlockResponseV2, error) {
	_, span := trace.StartSpan(ctx, "validator.ProduceBlockV2")
	ctx, span := trace.StartSpan(ctx, "validator.ProduceBlockV2")
	defer span.End()

	if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
@@ -352,7 +352,7 @@ func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockR
//
// The produced block is in SSZ form.
func (vs *Server) ProduceBlockV2SSZ(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.SSZContainer, error) {
	_, span := trace.StartSpan(ctx, "validator.ProduceBlockV2SSZ")
	ctx, span := trace.StartSpan(ctx, "validator.ProduceBlockV2SSZ")
	defer span.End()

	if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
@@ -560,7 +560,7 @@ func (vs *Server) ProduceBlindedBlockSSZ(ctx context.Context, req *ethpbv1.Produ
func (vs *Server) PrepareBeaconProposer(
	ctx context.Context, request *ethpbv1.PrepareBeaconProposerRequest,
) (*emptypb.Empty, error) {
	_, span := trace.StartSpan(ctx, "validator.PrepareBeaconProposer")
	ctx, span := trace.StartSpan(ctx, "validator.PrepareBeaconProposer")
	defer span.End()
	var feeRecipients []common.Address
	var validatorIndices []types.ValidatorIndex
@@ -603,7 +603,7 @@ func (vs *Server) ProduceAttestationData(ctx context.Context, req *ethpbv1.Produ

// GetAggregateAttestation aggregates all attestations matching the given attestation data root and slot, returning the aggregated result.
func (vs *Server) GetAggregateAttestation(ctx context.Context, req *ethpbv1.AggregateAttestationRequest) (*ethpbv1.AggregateAttestationResponse, error) {
	_, span := trace.StartSpan(ctx, "validator.GetAggregateAttestation")
	ctx, span := trace.StartSpan(ctx, "validator.GetAggregateAttestation")
	defer span.End()

	allAtts := vs.AttestationsPool.AggregatedAttestations()
@@ -81,7 +81,7 @@ func TestServer_ListAttestations_Genesis(t *testing.T) {
		},
	}

	att := util.NewAttestationUtil().HydrateAttestation(&ethpb.Attestation{
	att := util.HydrateAttestation(&ethpb.Attestation{
		AggregationBits: bitfield.NewBitlist(0),
		Data: &ethpb.AttestationData{
			Slot: 2,
@@ -275,7 +275,7 @@ func TestServer_ListAttestations_Pagination_CustomPageParameters(t *testing.T) {
		blockExample := util.NewBeaconBlock()
		blockExample.Block.Slot = i
		blockExample.Block.Body.Attestations = []*ethpb.Attestation{
			util.NewAttestationUtil().HydrateAttestation(&ethpb.Attestation{
			util.HydrateAttestation(&ethpb.Attestation{
				Data: &ethpb.AttestationData{
					CommitteeIndex: s,
					Slot: i,
@@ -754,7 +754,7 @@ func TestServer_AttestationPool_Pagination_DefaultPageSize(t *testing.T) {

	atts := make([]*ethpb.Attestation, params.BeaconConfig().DefaultPageSize+1)
	for i := 0; i < len(atts); i++ {
		att := util.NewAttestationUtil().NewAttestation()
		att := util.NewAttestation()
		att.Data.Slot = types.Slot(i)
		atts[i] = att
	}
@@ -776,7 +776,7 @@ func TestServer_AttestationPool_Pagination_CustomPageSize(t *testing.T) {
	numAtts := 100
	atts := make([]*ethpb.Attestation, numAtts)
	for i := 0; i < len(atts); i++ {
		att := util.NewAttestationUtil().NewAttestation()
		att := util.NewAttestation()
		att.Data.Slot = types.Slot(i)
		atts[i] = att
	}
@@ -1031,11 +1031,10 @@ func TestServer_StreamAttestations_OnSlotTick(t *testing.T) {
		AttestationNotifier: chainService.OperationNotifier(),
	}

	au := util.AttestationUtil{}
	atts := []*ethpb.Attestation{
		au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}}),
		au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}}),
		au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}}),
		util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}}),
		util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1101}}),
		util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b1101}}),
	}

	mockStream := mock.NewMockBeaconChain_StreamAttestationsServer(ctrl)
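The attestation test hunks above all make the same mechanical swap: the util.AttestationUtil receiver is dropped in favor of package-level helpers such as util.HydrateAttestation and util.NewAttestation. A self-contained sketch of what a hydration helper of this kind typically does, with invented types rather than Prysm's actual ones:

package hydrateexample

// checkpoint, attestationData and attestation are hypothetical, trimmed-down
// stand-ins for the protobuf types used in the tests above.
type checkpoint struct{ Root []byte }

type attestationData struct {
	Slot            uint64
	BeaconBlockRoot []byte
	Source          *checkpoint
	Target          *checkpoint
}

type attestation struct {
	AggregationBits []byte
	Data            *attestationData
	Signature       []byte
}

// hydrateAttestation fills every nil field with a correctly sized default so a
// partially specified attestation is still usable (and marshalable) in a test.
func hydrateAttestation(a *attestation) *attestation {
	if a.Signature == nil {
		a.Signature = make([]byte, 96) // BLS signature length
	}
	if a.AggregationBits == nil {
		a.AggregationBits = []byte{0b1}
	}
	if a.Data == nil {
		a.Data = &attestationData{}
	}
	if a.Data.BeaconBlockRoot == nil {
		a.Data.BeaconBlockRoot = make([]byte, 32)
	}
	if a.Data.Source == nil {
		a.Data.Source = &checkpoint{Root: make([]byte, 32)}
	}
	if a.Data.Target == nil {
		a.Data.Target = &checkpoint{Root: make([]byte, 32)}
	}
	return a
}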
@@ -139,6 +139,12 @@ func convertToBlockContainer(blk interfaces.SignedBeaconBlock, root [32]byte, is
			return nil, err
		}
		ctr.Block = &ethpb.BeaconBlockContainer_BellatrixBlock{BellatrixBlock: rBlk}
	case version.BellatrixBlind:
		rBlk, err := blk.PbBlindedBellatrixBlock()
		if err != nil {
			return nil, err
		}
		ctr.Block = &ethpb.BeaconBlockContainer_BlindedBellatrixBlock{BlindedBellatrixBlock: rBlk}
	default:
		return nil, errors.Errorf("block type is not recognized: %d", blk.Version())
	}
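The hunk above adds a dedicated arm for blinded Bellatrix blocks, which carry an execution payload header rather than the full payload, so they populate their own container field. A toy version of that dispatch with invented types, purely to show the shape of the change:

package containerexample

import "fmt"

// fullBlock and blindedBlock are hypothetical stand-ins: the blinded variant
// holds only an execution payload header instead of the full payload.
type fullBlock struct{}
type blindedBlock struct{}

// container mirrors the oneof-style wrapper that convertToBlockContainer fills in.
type container struct {
	Bellatrix        *fullBlock
	BlindedBellatrix *blindedBlock
}

type blockVersion int

const (
	bellatrix blockVersion = iota
	bellatrixBlind
)

func toContainer(v blockVersion, full *fullBlock, blinded *blindedBlock) (*container, error) {
	switch v {
	case bellatrix:
		return &container{Bellatrix: full}, nil
	case bellatrixBlind:
		return &container{BlindedBellatrix: blinded}, nil
	default:
		return nil, fmt.Errorf("block type is not recognized: %d", v)
	}
}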
@@ -14,6 +14,7 @@ import (
	dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
	"github.com/prysmaticlabs/prysm/cmd"
	"github.com/prysmaticlabs/prysm/config/features"
	fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
@@ -871,13 +872,21 @@ func TestServer_ListBeaconBlocks_Genesis(t *testing.T) {
		blk.Block.ParentRoot = parentRoot[:]
		wrapped, err := wrapper.WrappedSignedBeaconBlock(blk)
		assert.NoError(t, err)
		blinded, err := wrapped.ToBlinded()
		assert.NoError(t, err)
		blindedProto, err := blinded.PbBlindedBellatrixBlock()
		assert.NoError(t, err)
		blkContainer := &ethpb.BeaconBlockContainer{
			Block: &ethpb.BeaconBlockContainer_BellatrixBlock{BellatrixBlock: blk}}
			Block: &ethpb.BeaconBlockContainer_BlindedBellatrixBlock{BlindedBellatrixBlock: blindedProto}}
		runListBlocksGenesis(t, wrapped, blkContainer)
	})
}

func runListBlocksGenesis(t *testing.T, blk interfaces.SignedBeaconBlock, blkContainer *ethpb.BeaconBlockContainer) {
	resetFn := features.InitWithReset(&features.Flags{
		EnableOnlyBlindedBeaconBlocks: true,
	})
	defer resetFn()
	db := dbTest.SetupDB(t)
	ctx := context.Background()
@@ -1525,7 +1525,7 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpoch(t *testing.T) {
	}

	atts := []*ethpb.PendingAttestation{{
		Data: util.NewAttestationUtil().HydrateAttestationData(&ethpb.AttestationData{}),
		Data: util.HydrateAttestationData(&ethpb.AttestationData{}),
		InclusionDelay: 1,
		AggregationBits: bitfield.NewBitlist(validatorCount / uint64(params.BeaconConfig().SlotsPerEpoch)),
	}}
@@ -1607,7 +1607,7 @@ func TestServer_GetValidatorParticipation_OrphanedUntilGenesis(t *testing.T) {
	}

	atts := []*ethpb.PendingAttestation{{
		Data: util.NewAttestationUtil().HydrateAttestationData(&ethpb.AttestationData{}),
		Data: util.HydrateAttestationData(&ethpb.AttestationData{}),
		InclusionDelay: 1,
		AggregationBits: bitfield.NewBitlist(validatorCount / uint64(params.BeaconConfig().SlotsPerEpoch)),
	}}
@@ -2292,10 +2292,9 @@ func TestServer_GetIndividualVotes_Working(t *testing.T) {
	require.NoError(t, beaconState.SetValidators(stateWithValidators.Validators()))

	bf := bitfield.NewBitlist(validators / uint64(params.BeaconConfig().SlotsPerEpoch))
	au := util.AttestationUtil{}
	att1 := au.NewAttestation()
	att1 := util.NewAttestation()
	att1.AggregationBits = bf
	att2 := au.NewAttestation()
	att2 := util.NewAttestation()
	att2.AggregationBits = bf
	rt := [32]byte{'A'}
	att1.Data.Target.Root = rt[:]
@@ -217,7 +217,7 @@ func generateAtt(state state.ReadOnlyBeaconState, index uint64, privKeys []bls.S
	aggBits := bitfield.NewBitlist(4)
	aggBits.SetBitAt(index, true)
	aggBits.SetBitAt(index+1, true)
	att := util.NewAttestationUtil().HydrateAttestation(&ethpb.Attestation{
	att := util.HydrateAttestation(&ethpb.Attestation{
		Data: &ethpb.AttestationData{CommitteeIndex: 1},
		AggregationBits: aggBits,
	})
@@ -254,7 +254,7 @@ func generateAtt(state state.ReadOnlyBeaconState, index uint64, privKeys []bls.S
func generateUnaggregatedAtt(state state.ReadOnlyBeaconState, index uint64, privKeys []bls.SecretKey) (*ethpb.Attestation, error) {
	aggBits := bitfield.NewBitlist(4)
	aggBits.SetBitAt(index, true)
	att := util.NewAttestationUtil().HydrateAttestation(&ethpb.Attestation{
	att := util.HydrateAttestation(&ethpb.Attestation{
		Data: &ethpb.AttestationData{
			CommitteeIndex: 1,
		},
@@ -83,7 +83,7 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
		OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
	}

	req := util.NewAttestationUtil().HydrateAttestation(&ethpb.Attestation{})
	req := util.HydrateAttestation(&ethpb.Attestation{})
	wanted := "Incorrect attestation signature"
	_, err := attesterServer.ProposeAttestation(context.Background(), req)
	assert.ErrorContains(t, wanted, err)
@@ -107,7 +107,7 @@ func (vs *Server) ProposeBlock(ctx context.Context, rBlk *ethpb.SignedBeaconBloc
func (vs *Server) PrepareBeaconProposer(
	ctx context.Context, request *ethpb.PrepareBeaconProposerRequest,
) (*emptypb.Empty, error) {
	_, span := trace.StartSpan(ctx, "validator.PrepareBeaconProposer")
	ctx, span := trace.StartSpan(ctx, "validator.PrepareBeaconProposer")
	defer span.End()
	var feeRecipients []common.Address
	var validatorIndices []types.ValidatorIndex
@@ -78,7 +78,7 @@ func (vs *Server) getAltairBeaconBlock(ctx context.Context, req *ethpb.BlockRequ
// getSyncAggregate retrieves the sync contributions from the pool to construct the sync aggregate object.
// The contributions are filtered based on matching of the input root and slot then profitability.
func (vs *Server) getSyncAggregate(ctx context.Context, slot types.Slot, root [32]byte) (*ethpb.SyncAggregate, error) {
	_, span := trace.StartSpan(ctx, "ProposerServer.getSyncAggregate")
	ctx, span := trace.StartSpan(ctx, "ProposerServer.getSyncAggregate")
	defer span.End()

	// Contributions have to match the input root
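The getSyncAggregate comment above describes the selection rule: keep only contributions matching the requested slot and block root, then prefer the most profitable one, i.e. the one with the most participation bits, per subcommittee. A hedged sketch of that rule with invented types, not Prysm's actual pool code:

package syncaggexample

import "math/bits"

// contribution is a hypothetical stand-in for a pooled sync committee contribution.
type contribution struct {
	Slot           uint64
	BlockRoot      [32]byte
	SubcommitteeID uint64
	Bits           []byte // participation bits within the subcommittee
}

// mostProfitable keeps, for each subcommittee, the matching contribution with the
// highest participation count — the filter-then-pick rule described above.
func mostProfitable(pool []contribution, slot uint64, root [32]byte) map[uint64]contribution {
	best := make(map[uint64]contribution)
	for _, c := range pool {
		if c.Slot != slot || c.BlockRoot != root {
			continue // contributions have to match the input root and slot
		}
		if cur, ok := best[c.SubcommitteeID]; !ok || popcount(c.Bits) > popcount(cur.Bits) {
			best[c.SubcommitteeID] = c
		}
	}
	return best
}

func popcount(b []byte) int {
	n := 0
	for _, x := range b {
		n += bits.OnesCount8(x)
	}
	return n
}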
@@ -14,22 +14,21 @@ import (
)

func TestProposer_ProposerAtts_sortByProfitability(t *testing.T) {
	au := util.AttestationUtil{}
	atts := proposerAtts([]*ethpb.Attestation{
		au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 4}, AggregationBits: bitfield.Bitlist{0b11100000}}),
		au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11000000}}),
		au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b11100000}}),
		au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 4}, AggregationBits: bitfield.Bitlist{0b11110000}}),
		au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11100000}}),
		au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b11000000}}),
		util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 4}, AggregationBits: bitfield.Bitlist{0b11100000}}),
		util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11000000}}),
		util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b11100000}}),
		util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 4}, AggregationBits: bitfield.Bitlist{0b11110000}}),
		util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11100000}}),
		util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b11000000}}),
	})
	want := proposerAtts([]*ethpb.Attestation{
		au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 4}, AggregationBits: bitfield.Bitlist{0b11110000}}),
		au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 4}, AggregationBits: bitfield.Bitlist{0b11100000}}),
		au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b11000000}}),
		au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b11100000}}),
		au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11100000}}),
		au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11000000}}),
		util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 4}, AggregationBits: bitfield.Bitlist{0b11110000}}),
		util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 4}, AggregationBits: bitfield.Bitlist{0b11100000}}),
		util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 3}, AggregationBits: bitfield.Bitlist{0b11000000}}),
		util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b11100000}}),
		util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11100000}}),
		util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b11000000}}),
	})
	atts, err := atts.sortByProfitability()
	if err != nil {
@@ -46,7 +45,7 @@ func TestProposer_ProposerAtts_sortByProfitabilityUsingMaxCover(t *testing.T) {
	getAtts := func(data []testData) proposerAtts {
		var atts proposerAtts
		for _, att := range data {
			atts = append(atts, util.NewAttestationUtil().HydrateAttestation(&ethpb.Attestation{
			atts = append(atts, util.HydrateAttestation(&ethpb.Attestation{
				Data: &ethpb.AttestationData{Slot: att.slot}, AggregationBits: att.bits}))
		}
		return atts
@@ -190,11 +189,10 @@ func TestProposer_ProposerAtts_sortByProfitabilityUsingMaxCover(t *testing.T) {
}

func TestProposer_ProposerAtts_dedup(t *testing.T) {
	au := util.AttestationUtil{}
	data1 := au.HydrateAttestationData(&ethpb.AttestationData{
	data1 := util.HydrateAttestationData(&ethpb.AttestationData{
		Slot: 4,
	})
	data2 := au.HydrateAttestationData(&ethpb.AttestationData{
	data2 := util.HydrateAttestationData(&ethpb.AttestationData{
		Slot: 5,
	})
	tests := []struct {
Some files were not shown because too many files have changed in this diff.