Compare commits

...

26 Commits

Author SHA1 Message Date
Potuz
9c13d47f4c fix off by one (#13529) 2024-01-26 00:05:56 +00:00
Justin Traglia
835dce5f6e Enable wastedassign linter & fix findings (#13507)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-01-25 17:07:48 +00:00
james-prysm
c4c28e4825 fixing small typo in error messages (#13525) 2024-01-25 04:56:17 +00:00
Radosław Kapka
c996109b3a Return payload value in Wei from /eth/v3/validator/blocks (#13497)
* Add value in Wei to execution payload

* simplify how payload is returned

* test fix

* fix issues

* review

* fix block handlers
2024-01-24 20:58:35 +00:00
terence
e397f8a2bd Skip origin root when cleaning dirty state (#13521)
* Skip origin root when cleaning dirty state

* Clean up
2024-01-24 17:22:50 +00:00
Radosław Kapka
6438060733 Clear cache everywhere in tests of core helpers (#13509) 2024-01-24 16:11:43 +00:00
Nishant Das
a2892b1ed5 clean up validate beacon block (#13517) 2024-01-24 05:48:15 +00:00
Nishant Das
f4ab2ca79f lower it (#13516) 2024-01-24 01:28:36 +00:00
kasey
dbcf5c29cd moving some blob rpc validation close to peer read (#13511)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-01-23 22:54:16 +00:00
james-prysm
c9fe53bc32 Blob API: make errors more generic (#13513)
* make api response more generic

* gaz
2024-01-23 20:07:46 +00:00
terence
8522febd88 Add Holesky Deneb Epoch (#13506)
* Add Holesky Deneb Epoch

* Fix fork version

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Fix config

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2024-01-23 19:29:17 +00:00
james-prysm
75a28310c2 fixing route to match specs (#13510) 2024-01-23 18:04:03 +00:00
kasey
1df173e701 Block backfilling (#12968)
* backfill service

* fix bug where origin state is never unlocked

* support mvslice states

* use renamed interface

* refactor db code to skip block cache for backfill

* lint

* add test for verifier.verify

* enable service in service init test

* cancellation cleanup

* adding nil checks to configset juggling

* assume blocks are available by default

As long as we're sure the AvailableBlocker is initialized correctly
during node startup, defaulting to assuming we aren't in a checkpoint
sync simplifies things greatly for tests.

* block saving path refactor and bugfix

* fix fillback test

* fix BackfillStatus init tests

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-01-23 07:54:30 +00:00
terence
3187a05a76 Align aggregated att gossip validations (#13490)
* Align aggregated att gossip validations

* Feedback on reusing existing methods

* Nishant's feedback

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2024-01-23 04:37:06 +00:00
Justin Traglia
4e24102237 Fix minor issue in blsToExecChange validator (#13498)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2024-01-23 03:26:57 +00:00
james-prysm
8dd5e96b29 re-enabling jwt on keymanager API (#13492)
* re-enabling jwt on keymanager API

* adding tests

* Update validator/rpc/intercepter.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* handling error in test

* remove debugging logs

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
2024-01-22 22:16:10 +00:00
james-prysm
4afb379f8d cleanup duties naming (#13451)
* updating some naming to reflect changes to duties

* fixing unit tests

* fixing more tests
2024-01-22 16:58:25 +00:00
Nishant Das
5a2453ac9c Add Debug State Transition Method (#13495)
* add it

* lint
2024-01-22 14:46:20 +00:00
Nishant Das
e610d2a5de fix it (#13496) 2024-01-22 14:26:14 +00:00
Preston Van Loon
233aaf2f9e e2e: Fix multiclient lighthouse flag removal (#13494) 2024-01-21 21:11:11 +00:00
Nishant Das
a49bdcaa1f fix it (#13493) 2024-01-20 16:15:38 +00:00
Gaki
bdd7b2caa9 chore: typo fix (#13461)
* messsage

* cancellation
2024-01-20 01:07:17 +00:00
terence
8de0e3804b Update Sepolia Deneb fork epoch (#13491) 2024-01-19 18:47:07 +00:00
Ying Quan Tan
bfb648067b Re-enable Slasher E2E Test (#13420)
* re-enable e2e slashing test #12415

* refactored slashing evaluator

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2024-01-19 04:44:27 +00:00
terence
852db1f3eb Remove debug setting highest slot log (#13488) 2024-01-19 04:25:15 +00:00
Nishant Das
5d3663ef8d update lighthouse and tests (#13470) 2024-01-19 03:46:36 +00:00
166 changed files with 5092 additions and 2077 deletions

View File

@@ -80,7 +80,6 @@ linters:
- thelper
- unparam
- varnamelen
- wastedassign
- wrapcheck
- wsl

View File

@@ -349,9 +349,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "9f66d8d5644982d3d0d2e3d2b9ebe77a5f96638a5d7fcd715599c32818195cb3",
strip_prefix = "holesky-ea39b9006210848e13f28d92e12a30548cecd41d",
url = "https://github.com/eth-clients/holesky/archive/ea39b9006210848e13f28d92e12a30548cecd41d.tar.gz", # 2023-09-21
sha256 = "5f4be6fd088683ea9db45c863b9c5a1884422449e5b59fd2d561d3ba0f73ffd9",
strip_prefix = "holesky-9d9aabf2d4de51334ee5fed6c79a4d55097d1a43",
url = "https://github.com/eth-clients/holesky/archive/9d9aabf2d4de51334ee5fed6c79a4d55097d1a43.tar.gz", # 2024-01-22
)
http_archive(

View File

@@ -165,7 +165,7 @@ func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToWei(b.p.Value))
}
// BlobKzgCommitments --
@@ -249,7 +249,7 @@ func (b builderBidDeneb) HashTreeRootWith(hh *ssz.Hasher) error {
// Header --
func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToWei(b.p.Value))
}
// BlobKzgCommitments --
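The hunk above switches the builder bid's header wrapping from a Gwei (uint64) value to a Wei value; a Wei amount needs a big.Int because 2^64-1 wei is only about 18.4 ETH, and the bid value arrives from the execution layer as a little-endian uint256 byte slice. A minimal sketch of that byte-order handling, assuming the conversion works roughly like this (illustrative only, not the actual blocks.PayloadValueToWei implementation):

package main

import (
	"fmt"
	"math/big"
)

// littleEndianToWei is a hypothetical helper: builder bids carry the value as
// a little-endian uint256, while big.Int.SetBytes expects big-endian bytes,
// so the slice is reversed before building the Wei amount.
func littleEndianToWei(le []byte) *big.Int {
	be := make([]byte, len(le))
	for i := range le {
		be[len(le)-1-i] = le[i]
	}
	return new(big.Int).SetBytes(be)
}

func main() {
	// 1 ETH = 10^18 wei, encoded little-endian as it would come from the EL.
	oneEth := new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
	le := make([]byte, 32)
	beBytes := oneEth.Bytes()
	for i := range beBytes {
		le[i] = beBytes[len(beBytes)-1-i]
	}
	fmt.Println(littleEndianToWei(le)) // 1000000000000000000
}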

View File

@@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
"math/big"
"net"
"net/http"
"net/url"
@@ -357,7 +358,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayloadCapella(p, 0)
payload, err := blocks.WrappedExecutionPayloadCapella(p, big.NewInt(0))
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
@@ -394,7 +395,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayloadDeneb(p, 0)
payload, err := blocks.WrappedExecutionPayloadDeneb(p, big.NewInt(0))
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}

View File

@@ -325,7 +325,10 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
}
// The proposer indices cache takes the target root for the previous
// epoch as key
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e-1)
if e > 0 {
e = e - 1
}
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
if err != nil {
log.WithError(err).Error("could not update proposer index state-root map")
return nil
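The guard added here matters because primitives.Epoch is an unsigned integer: at the genesis epoch, e-1 wraps around to a huge value instead of going negative, which is the off-by-one the commit fixes. A minimal sketch of the failure mode with an unsigned epoch type (illustrative, not Prysm code):

package main

import "fmt"

type Epoch uint64

func main() {
	var e Epoch = 0
	// Without the guard, e-1 wraps to 2^64-1 rather than producing -1,
	// so the lookup would be keyed by a nonsensical epoch at genesis.
	fmt.Println(e - 1) // 18446744073709551615

	// The guarded form from the diff only decrements when e > 0.
	if e > 0 {
		e = e - 1
	}
	fmt.Println(e) // 0
}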

View File

@@ -911,7 +911,6 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
name: "state older than Bellatrix, nil payload",
stateVersion: 1,
payload: nil,
errString: "attempted to wrap nil",
},
{
name: "state older than Bellatrix, empty payload",
@@ -940,7 +939,6 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
name: "state is Bellatrix, nil payload",
stateVersion: 2,
payload: nil,
errString: "attempted to wrap nil",
},
{
name: "state is Bellatrix, empty payload",

View File

@@ -2,6 +2,7 @@ package testing
import (
"context"
"math/big"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client/builder"
@@ -54,13 +55,13 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
}
return w, nil, s.ErrSubmitBlindedBlock
case version.Capella:
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, 0)
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, big.NewInt(0))
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap capella payload")
}
return w, nil, s.ErrSubmitBlindedBlock
case version.Deneb:
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb, 0)
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb, big.NewInt(0))
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
}

View File

@@ -115,6 +115,7 @@ func (p *ProposerIndicesCache) IndicesFromCheckpoint(c forkchoicetypes.Checkpoin
root, ok := p.rootMap[c]
p.Unlock()
if !ok {
ProposerIndicesCacheMiss.Inc()
return emptyIndices, ok
}
return p.ProposerIndices(c.Epoch+1, root)
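ProposerIndicesCacheMiss is presumably a Prometheus counter defined alongside the cache; the hunk simply increments it when the checkpoint-to-root lookup fails. A minimal sketch of how such a miss counter is typically declared with the Prometheus Go client (names below are assumptions, not necessarily Prysm's):

package cache

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// proposerIndicesCacheMiss counts lookups that fall through to recomputation.
// The metric name is illustrative; the real definition lives in Prysm's cache package.
var proposerIndicesCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
	Name: "proposer_indices_cache_miss",
	Help: "Number of proposer indices cache misses.",
})

func recordMiss() {
	proposerIndicesCacheMiss.Inc()
}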

View File

@@ -37,70 +37,69 @@ func TestProposerCache_Set(t *testing.T) {
func TestProposerCache_CheckpointAndPrune(t *testing.T) {
cache := NewProposerIndicesCache()
indices := [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}
root := [32]byte{'a'}
cpRoot := [32]byte{'b'}
copy(indices[3:], []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6})
for i := 1; i < 10; i++ {
root := [32]byte{byte(i)}
cache.Set(primitives.Epoch(i), root, indices)
cpRoot := [32]byte{byte(i - 1)}
cache.SetCheckpoint(forkchoicetypes.Checkpoint{Epoch: primitives.Epoch(i - 1), Root: cpRoot}, root)
}
received, ok := cache.ProposerIndices(1, root)
received, ok := cache.ProposerIndices(1, [32]byte{1})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.ProposerIndices(4, root)
received, ok = cache.ProposerIndices(4, [32]byte{4})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.ProposerIndices(9, root)
received, ok = cache.ProposerIndices(9, [32]byte{9})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 0, Root: cpRoot})
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 3, Root: cpRoot})
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{3}})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 4, Root: cpRoot})
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 4, Root: [32]byte{4}})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 8, Root: cpRoot})
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 8, Root: [32]byte{8}})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
cache.Prune(5)
emptyIndices := [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}
received, ok = cache.ProposerIndices(1, root)
received, ok = cache.ProposerIndices(1, [32]byte{1})
require.Equal(t, false, ok)
require.Equal(t, emptyIndices, received)
received, ok = cache.ProposerIndices(4, root)
received, ok = cache.ProposerIndices(4, [32]byte{4})
require.Equal(t, false, ok)
require.Equal(t, emptyIndices, received)
received, ok = cache.ProposerIndices(9, root)
received, ok = cache.ProposerIndices(9, [32]byte{9})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 0, Root: cpRoot})
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 0, Root: [32]byte{0}})
require.Equal(t, false, ok)
require.Equal(t, emptyIndices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 3, Root: cpRoot})
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{3}})
require.Equal(t, false, ok)
require.Equal(t, emptyIndices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 4, Root: cpRoot})
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 4, Root: [32]byte{4}})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 8, Root: cpRoot})
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 8, Root: [32]byte{8}})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
}

View File

@@ -1,6 +1,7 @@
package blocks_test
import (
"math/big"
"testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
@@ -609,7 +610,7 @@ func Test_ProcessPayloadCapella(t *testing.T) {
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
payload.PrevRandao = random
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, 0)
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, big.NewInt(0))
require.NoError(t, err)
_, err = blocks.ProcessPayload(st, wrapped)
require.NoError(t, err)
@@ -873,7 +874,7 @@ func emptyPayloadHeaderCapella() (interfaces.ExecutionData, error) {
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
}, 0)
}, big.NewInt(0))
}
func emptyPayload() *enginev1.ExecutionPayload {

View File

@@ -1,6 +1,7 @@
package blocks_test
import (
"math/big"
"math/rand"
"testing"
@@ -642,7 +643,10 @@ func TestProcessBlindWithdrawals(t *testing.T) {
require.NoError(t, err)
wdRoot, err := ssz.WithdrawalSliceRoot(test.Args.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]}, 0)
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(
&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]},
big.NewInt(0),
)
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, p)
if test.Control.ExpectedError {
@@ -1060,7 +1064,7 @@ func TestProcessWithdrawals(t *testing.T) {
}
st, err := prepareValidators(spb, test.Args)
require.NoError(t, err)
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, 0)
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, big.NewInt(0))
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, p)
if test.Control.ExpectedError {

View File

@@ -50,7 +50,6 @@ go_test(
"attestation_test.go",
"beacon_committee_test.go",
"block_test.go",
"main_test.go",
"randao_test.go",
"rewards_penalties_test.go",
"shuffle_test.go",

View File

@@ -20,6 +20,8 @@ import (
func TestAttestation_IsAggregator(t *testing.T) {
t.Run("aggregator", func(t *testing.T) {
helpers.ClearCache()
beaconState, privKeys := util.DeterministicGenesisState(t, 100)
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, 0, 0)
require.NoError(t, err)
@@ -30,6 +32,8 @@ func TestAttestation_IsAggregator(t *testing.T) {
})
t.Run("not aggregator", func(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
beaconState, privKeys := util.DeterministicGenesisState(t, 2048)
@@ -44,6 +48,8 @@ func TestAttestation_IsAggregator(t *testing.T) {
}
func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
helpers.ClearCache()
// Create 10 committees
committeeCount := uint64(10)
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
@@ -204,6 +210,8 @@ func Test_ValidateAttestationTime(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
helpers.ClearCache()
err := helpers.ValidateAttestationTime(tt.args.attSlot, tt.args.genesisTime,
params.BeaconConfig().MaximumGossipClockDisparityDuration())
if tt.wantedErr != "" {
@@ -216,6 +224,8 @@ func Test_ValidateAttestationTime(t *testing.T) {
}
func TestVerifyCheckpointEpoch_Ok(t *testing.T) {
helpers.ClearCache()
// Genesis was 6 epochs ago exactly.
offset := params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot * 6)
genesis := time.Now().Add(-1 * time.Second * time.Duration(offset))
@@ -285,6 +295,8 @@ func TestValidateNilAttestation(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
helpers.ClearCache()
if tt.errString != "" {
require.ErrorContains(t, tt.errString, helpers.ValidateNilAttestation(tt.attestation))
} else {
@@ -326,6 +338,8 @@ func TestValidateSlotTargetEpoch(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
helpers.ClearCache()
if tt.errString != "" {
require.ErrorContains(t, tt.errString, helpers.ValidateSlotTargetEpoch(tt.attestation.Data))
} else {

View File

@@ -379,7 +379,7 @@ func UpdateCachedCheckpointToStateRoot(state state.ReadOnlyBeaconState, cp *fork
if cp.Epoch <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
return nil
}
slot, err := slots.EpochEnd(cp.Epoch - 1)
slot, err := slots.EpochEnd(cp.Epoch)
if err != nil {
return err
}
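Assuming slots.EpochEnd(e) returns the last slot of epoch e, the change above shifts the cached checkpoint-to-state-root mapping from the end of the previous epoch to the end of the checkpoint's own epoch. A small worked sketch with the mainnet value of 32 slots per epoch (the helper below mirrors the assumed arithmetic, it is not the slots package itself):

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value

// epochEnd mirrors what slots.EpochEnd is assumed to compute:
// the last slot of the given epoch.
func epochEnd(epoch uint64) uint64 {
	return (epoch+1)*slotsPerEpoch - 1
}

func main() {
	cpEpoch := uint64(3)
	fmt.Println(epochEnd(cpEpoch - 1)) // 95  (old behavior: end of epoch 2)
	fmt.Println(epochEnd(cpEpoch))     // 127 (new behavior: end of epoch 3)
}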

View File

@@ -21,6 +21,8 @@ import (
)
func TestComputeCommittee_WithoutCache(t *testing.T) {
ClearCache()
// Create 10 committees
committeeCount := uint64(10)
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
@@ -71,6 +73,8 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
}
func TestComputeCommittee_RegressionTest(t *testing.T) {
ClearCache()
indices := []primitives.ValidatorIndex{1, 3, 8, 16, 18, 19, 20, 23, 30, 35, 43, 46, 47, 54, 56, 58, 69, 70, 71, 83, 84, 85, 91, 96, 100, 103, 105, 106, 112, 121, 127, 128, 129, 140, 142, 144, 146, 147, 149, 152, 153, 154, 157, 160, 173, 175, 180, 182, 188, 189, 191, 194, 201, 204, 217, 221, 226, 228, 230, 231, 239, 241, 249, 250, 255}
seed := [32]byte{68, 110, 161, 250, 98, 230, 161, 172, 227, 226, 99, 11, 138, 124, 201, 134, 38, 197, 0, 120, 6, 165, 122, 34, 19, 216, 43, 226, 210, 114, 165, 183}
index := uint64(215)
@@ -80,6 +84,8 @@ func TestComputeCommittee_RegressionTest(t *testing.T) {
}
func TestVerifyBitfieldLength_OK(t *testing.T) {
ClearCache()
bf := bitfield.Bitlist{0xFF, 0x01}
committeeSize := uint64(8)
assert.NoError(t, VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
@@ -91,7 +97,7 @@ func TestVerifyBitfieldLength_OK(t *testing.T) {
func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
ClearCache()
defer ClearCache()
epoch := primitives.Epoch(1)
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: 0, // Epoch 0.
@@ -103,7 +109,7 @@ func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
var activationEpoch primitives.Epoch
@@ -190,10 +196,10 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
},
}
defer ClearCache()
for i, tt := range tests {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
ClearCache()
validatorIndexToCommittee, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
require.NoError(t, err, "Failed to determine CommitteeAssignments")
cac := validatorIndexToCommittee[tt.index]
@@ -209,6 +215,8 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
}
func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
@@ -239,6 +247,8 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
}
func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *testing.T) {
ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
@@ -259,7 +269,7 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
ClearCache()
defer ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
@@ -380,9 +390,9 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
},
}
defer ClearCache()
for i, tt := range tests {
ClearCache()
require.NoError(t, state.SetSlot(tt.stateSlot))
err := VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
if tt.verificationFailure {
@@ -395,7 +405,7 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
ClearCache()
defer ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
indices := make([]primitives.ValidatorIndex, validatorCount)
@@ -425,7 +435,7 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
ClearCache()
defer ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
indices := make([]primitives.ValidatorIndex, validatorCount)

View File

@@ -60,6 +60,8 @@ func TestBlockRootAtSlot_CorrectBlockRoot(t *testing.T) {
}
for i, tt := range tests {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
helpers.ClearCache()
s.Slot = tt.stateSlot
state, err := state_native.InitializeFromProtoPhase0(s)
require.NoError(t, err)
@@ -110,6 +112,8 @@ func TestBlockRootAtSlot_OutOfBounds(t *testing.T) {
},
}
for _, tt := range tests {
helpers.ClearCache()
state.Slot = tt.stateSlot
s, err := state_native.InitializeFromProtoPhase0(state)
require.NoError(t, err)

View File

@@ -1,13 +0,0 @@
package helpers
import (
"os"
"testing"
)
// run ClearCache before each test to prevent cross-test side effects
func TestMain(m *testing.M) {
ClearCache()
code := m.Run()
os.Exit(code)
}
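The deleted TestMain only cleared the shared helper caches once per package run (before m.Run()), despite its comment, which is why the rest of this changeset moves ClearCache into each individual test and subtest. A minimal sketch of the per-test pattern the other hunks add (illustrative test name):

package helpers_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
)

// Each test clears the shared committee/proposer caches up front so state
// cached by a previously run test cannot leak into this one.
func TestSomething(t *testing.T) {
	helpers.ClearCache()
	// ... test body ...
}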

View File

@@ -40,6 +40,8 @@ func TestRandaoMix_OK(t *testing.T) {
},
}
for _, test := range tests {
ClearCache()
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
mix, err := RandaoMix(state, test.epoch)
require.NoError(t, err)
@@ -74,6 +76,8 @@ func TestRandaoMix_CopyOK(t *testing.T) {
},
}
for _, test := range tests {
ClearCache()
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
mix, err := RandaoMix(state, test.epoch)
require.NoError(t, err)
@@ -88,6 +92,8 @@ func TestRandaoMix_CopyOK(t *testing.T) {
}
func TestGenerateSeed_OK(t *testing.T) {
ClearCache()
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := 0; i < len(randaoMixes); i++ {
intInBytes := make([]byte, 32)

View File

@@ -14,6 +14,8 @@ import (
)
func TestTotalBalance_OK(t *testing.T) {
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: []*ethpb.Validator{
{EffectiveBalance: 27 * 1e9}, {EffectiveBalance: 28 * 1e9},
{EffectiveBalance: 32 * 1e9}, {EffectiveBalance: 40 * 1e9},
@@ -27,6 +29,8 @@ func TestTotalBalance_OK(t *testing.T) {
}
func TestTotalBalance_ReturnsEffectiveBalanceIncrement(t *testing.T) {
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: []*ethpb.Validator{}})
require.NoError(t, err)
@@ -47,6 +51,8 @@ func TestGetBalance_OK(t *testing.T) {
{i: 2, b: []uint64{0, 0, 0}},
}
for _, test := range tests {
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Balances: test.b})
require.NoError(t, err)
assert.Equal(t, test.b[test.i], state.Balances()[test.i], "Incorrect Validator balance")
@@ -62,6 +68,8 @@ func TestTotalActiveBalance(t *testing.T) {
{10000},
}
for _, test := range tests {
ClearCache()
validators := make([]*ethpb.Validator, 0)
for i := 0; i < test.vCount; i++ {
validators = append(validators, &ethpb.Validator{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, ExitEpoch: 1})
@@ -75,8 +83,6 @@ func TestTotalActiveBalance(t *testing.T) {
}
func TestTotalActiveBal_ReturnMin(t *testing.T) {
ClearCache()
defer ClearCache()
tests := []struct {
vCount int
}{
@@ -85,6 +91,8 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
{10000},
}
for _, test := range tests {
ClearCache()
validators := make([]*ethpb.Validator, 0)
for i := 0; i < test.vCount; i++ {
validators = append(validators, &ethpb.Validator{EffectiveBalance: 1, ExitEpoch: 1})
@@ -98,8 +106,6 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
}
func TestTotalActiveBalance_WithCache(t *testing.T) {
ClearCache()
defer ClearCache()
tests := []struct {
vCount int
wantCount int
@@ -109,6 +115,8 @@ func TestTotalActiveBalance_WithCache(t *testing.T) {
{vCount: 10000, wantCount: 10000},
}
for _, test := range tests {
ClearCache()
validators := make([]*ethpb.Validator, 0)
for i := 0; i < test.vCount; i++ {
validators = append(validators, &ethpb.Validator{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, ExitEpoch: 1})
@@ -133,6 +141,8 @@ func TestIncreaseBalance_OK(t *testing.T) {
{i: 2, b: []uint64{27 * 1e9, 28 * 1e9, 32 * 1e9}, nb: 33 * 1e9, eb: 65 * 1e9},
}
for _, test := range tests {
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{
{EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 4}},
@@ -157,6 +167,8 @@ func TestDecreaseBalance_OK(t *testing.T) {
{i: 3, b: []uint64{27 * 1e9, 28 * 1e9, 1, 28 * 1e9}, nb: 28 * 1e9, eb: 0},
}
for _, test := range tests {
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{
{EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 3}},
@@ -169,6 +181,8 @@ func TestDecreaseBalance_OK(t *testing.T) {
}
func TestFinalityDelay(t *testing.T) {
ClearCache()
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
base.FinalizedCheckpoint = &ethpb.Checkpoint{Epoch: 3}
beaconState, err := state_native.InitializeFromProtoPhase0(base)
@@ -199,6 +213,8 @@ func TestFinalityDelay(t *testing.T) {
}
func TestIsInInactivityLeak(t *testing.T) {
ClearCache()
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
base.FinalizedCheckpoint = &ethpb.Checkpoint{Epoch: 3}
beaconState, err := state_native.InitializeFromProtoPhase0(base)
@@ -269,6 +285,8 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
{i: 2, b: []uint64{math.MaxUint64, math.MaxUint64, math.MaxUint64}, nb: 33 * 1e9},
}
for _, test := range tests {
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{
{EffectiveBalance: 4}, {EffectiveBalance: 4}, {EffectiveBalance: 4}},

View File

@@ -13,6 +13,8 @@ import (
)
func TestShuffleList_InvalidValidatorCount(t *testing.T) {
ClearCache()
maxShuffleListSize = 20
list := make([]primitives.ValidatorIndex, 21)
if _, err := ShuffleList(list, [32]byte{123, 125}); err == nil {
@@ -23,6 +25,8 @@ func TestShuffleList_InvalidValidatorCount(t *testing.T) {
}
func TestShuffleList_OK(t *testing.T) {
ClearCache()
var list1 []primitives.ValidatorIndex
seed1 := [32]byte{1, 128, 12}
seed2 := [32]byte{2, 128, 12}
@@ -47,6 +51,8 @@ func TestShuffleList_OK(t *testing.T) {
}
func TestSplitIndices_OK(t *testing.T) {
ClearCache()
var l []uint64
numValidators := uint64(64000)
for i := uint64(0); i < numValidators; i++ {
@@ -61,6 +67,8 @@ func TestSplitIndices_OK(t *testing.T) {
}
func TestShuffleList_Vs_ShuffleIndex(t *testing.T) {
ClearCache()
var list []primitives.ValidatorIndex
listSize := uint64(1000)
seed := [32]byte{123, 42}
@@ -125,6 +133,8 @@ func BenchmarkShuffleList(b *testing.B) {
}
func TestShuffledIndex(t *testing.T) {
ClearCache()
var list []primitives.ValidatorIndex
listSize := uint64(399)
for i := primitives.ValidatorIndex(0); uint64(i) < listSize; i++ {
@@ -147,6 +157,8 @@ func TestShuffledIndex(t *testing.T) {
}
func TestSplitIndicesAndOffset_OK(t *testing.T) {
ClearCache()
var l []uint64
validators := uint64(64000)
for i := uint64(0); i < validators; i++ {

View File

@@ -18,7 +18,7 @@ import (
func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -49,7 +49,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -77,7 +77,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -105,7 +105,7 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -135,6 +135,8 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
}
func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -161,6 +163,8 @@ func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
}
func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -188,7 +192,7 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -219,7 +223,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -260,7 +264,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -288,7 +292,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -318,6 +322,8 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
}
func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -345,7 +351,7 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
@@ -372,6 +378,8 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
}
func TestUpdateSyncCommitteeCache_BadSlot(t *testing.T) {
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: 1,
})
@@ -388,6 +396,8 @@ func TestUpdateSyncCommitteeCache_BadSlot(t *testing.T) {
}
func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: primitives.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch - 1,
LatestBlockHeader: &ethpb.BeaconBlockHeader{StateRoot: params.BeaconConfig().ZeroHash[:]},
@@ -399,7 +409,7 @@ func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
ClearCache()
defer ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),

View File

@@ -179,8 +179,6 @@ func TestIsSlashableValidator_OK(t *testing.T) {
func TestBeaconProposerIndex_OK(t *testing.T) {
params.SetupTestConfigCleanup(t)
ClearCache()
defer ClearCache()
c := params.BeaconConfig()
c.MinGenesisActiveValidatorCount = 16384
params.OverrideBeaconConfig(c)
@@ -224,9 +222,9 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
},
}
defer ClearCache()
for _, tt := range tests {
ClearCache()
require.NoError(t, state.SetSlot(tt.slot))
result, err := BeaconProposerIndex(context.Background(), state)
require.NoError(t, err, "Failed to get shard and committees at slot")
@@ -235,9 +233,9 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
}
func TestBeaconProposerIndex_BadState(t *testing.T) {
params.SetupTestConfigCleanup(t)
ClearCache()
defer ClearCache()
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig()
c.MinGenesisActiveValidatorCount = 16384
params.OverrideBeaconConfig(c)
@@ -268,6 +266,8 @@ func TestBeaconProposerIndex_BadState(t *testing.T) {
}
func TestComputeProposerIndex_Compatibility(t *testing.T) {
ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
@@ -309,12 +309,16 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
}
func TestDelayedActivationExitEpoch_OK(t *testing.T) {
ClearCache()
epoch := primitives.Epoch(9999)
wanted := epoch + 1 + params.BeaconConfig().MaxSeedLookahead
assert.Equal(t, wanted, ActivationExitEpoch(epoch))
}
func TestActiveValidatorCount_Genesis(t *testing.T) {
ClearCache()
c := 1000
validators := make([]*ethpb.Validator, c)
for i := 0; i < len(validators); i++ {
@@ -348,7 +352,6 @@ func TestChurnLimit_OK(t *testing.T) {
{validatorCount: 1000000, wantedChurn: 15 /* validatorCount/churnLimitQuotient */},
{validatorCount: 2000000, wantedChurn: 30 /* validatorCount/churnLimitQuotient */},
}
defer ClearCache()
for _, test := range tests {
ClearCache()
@@ -382,9 +385,6 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
{1000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
{2000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
}
defer ClearCache()
for _, test := range tests {
ClearCache()
@@ -417,7 +417,7 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
// Test basic functionality of ActiveValidatorIndices without caching. This test will need to be
// rewritten when releasing some cache flag.
func TestActiveValidatorIndices(t *testing.T) {
farFutureEpoch := params.BeaconConfig().FarFutureEpoch
//farFutureEpoch := params.BeaconConfig().FarFutureEpoch
type args struct {
state *ethpb.BeaconState
epoch primitives.Epoch
@@ -428,7 +428,7 @@ func TestActiveValidatorIndices(t *testing.T) {
want []primitives.ValidatorIndex
wantedErr string
}{
{
/*{
name: "all_active_epoch_10",
args: args{
state: &ethpb.BeaconState{
@@ -559,7 +559,7 @@ func TestActiveValidatorIndices(t *testing.T) {
epoch: 10,
},
want: []primitives.ValidatorIndex{0, 2, 3},
},
},*/
{
name: "impossible_zero_validators", // Regression test for issue #13051
args: args{
@@ -569,22 +569,21 @@ func TestActiveValidatorIndices(t *testing.T) {
},
epoch: 10,
},
wantedErr: "no active validator indices",
wantedErr: "state has nil validator slice",
},
}
defer ClearCache()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ClearCache()
s, err := state_native.InitializeFromProtoPhase0(tt.args.state)
require.NoError(t, err)
require.NoError(t, s.SetValidators(tt.args.state.Validators))
got, err := ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
return
}
assert.DeepEqual(t, tt.want, got, "ActiveValidatorIndices()")
ClearCache()
})
}
}
@@ -685,6 +684,8 @@ func TestComputeProposerIndex(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ClearCache()
bState := &ethpb.BeaconState{Validators: tt.args.validators}
stTrie, err := state_native.InitializeFromProtoUnsafePhase0(bState)
require.NoError(t, err)
@@ -717,6 +718,8 @@ func TestIsEligibleForActivationQueue(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ClearCache()
assert.Equal(t, tt.want, IsEligibleForActivationQueue(tt.validator), "IsEligibleForActivationQueue()")
})
}
@@ -744,6 +747,8 @@ func TestIsIsEligibleForActivation(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ClearCache()
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
assert.Equal(t, tt.want, IsEligibleForActivation(s, tt.validator), "IsEligibleForActivation()")
@@ -782,6 +787,8 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
}
func TestLastActivatedValidatorIndex_OK(t *testing.T) {
ClearCache()
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
require.NoError(t, err)
@@ -805,6 +812,8 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) {
}
func TestProposerIndexFromCheckpoint(t *testing.T) {
ClearCache()
e := primitives.Epoch(2)
r := [32]byte{'a'}
root := [32]byte{'b'}

View File

@@ -202,3 +202,14 @@ func ParseWeakSubjectivityInputString(wsCheckpointString string) (*v1alpha1.Chec
Root: bRoot,
}, nil
}
// MinEpochsForBlockRequests computes the number of epochs of block history that we need to maintain,
// relative to the current epoch, per the p2p specs. This is used to compute the slot where backfill is complete.
// value defined:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#configuration
// MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33024, ~5 months)
// detailed rationale: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
func MinEpochsForBlockRequests() primitives.Epoch {
return params.BeaconConfig().MinValidatorWithdrawabilityDelay +
primitives.Epoch(params.BeaconConfig().ChurnLimitQuotient/2)
}
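With the mainnet config values, MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256 epochs and CHURN_LIMIT_QUOTIENT = 65536, the formula works out to 256 + 65536/2 = 33024 epochs, which matches the "~5 months" noted in the comment. A short worked sketch of that arithmetic (plain constants, not the params package):

package main

import "fmt"

func main() {
	// Mainnet config values per the consensus spec:
	//   MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256 epochs
	//   CHURN_LIMIT_QUOTIENT                = 65536
	const minValidatorWithdrawabilityDelay = 256
	const churnLimitQuotient = 65536

	minEpochs := minValidatorWithdrawabilityDelay + churnLimitQuotient/2
	fmt.Println(minEpochs) // 33024

	// Wall-clock time on mainnet (32 slots per epoch, 12 seconds per slot):
	days := float64(minEpochs*32*12) / 86400
	fmt.Printf("%.1f days\n", days) // 146.8 days, roughly 5 months
}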

View File

@@ -48,6 +48,7 @@ func TestWeakSubjectivity_ComputeWeakSubjectivityPeriod(t *testing.T) {
t.Run(fmt.Sprintf("valCount: %d, avgBalance: %d", tt.valCount, tt.avgBalance), func(t *testing.T) {
// Reset committee cache - as we need to recalculate active validator set for each test.
helpers.ClearCache()
got, err := helpers.ComputeWeakSubjectivityPeriod(context.Background(), genState(t, tt.valCount, tt.avgBalance), params.BeaconConfig())
require.NoError(t, err)
assert.Equal(t, tt.want, got, "valCount: %v, avgBalance: %v", tt.valCount, tt.avgBalance)
@@ -177,6 +178,8 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
helpers.ClearCache()
sr, _, e := tt.genWsCheckpoint()
got, err := helpers.IsWithinWeakSubjectivityPeriod(context.Background(), tt.epoch, tt.genWsState(), sr, e, params.BeaconConfig())
if tt.wantedErr != "" {
@@ -247,6 +250,8 @@ func TestWeakSubjectivity_ParseWeakSubjectivityInputString(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
helpers.ClearCache()
wsCheckpt, err := helpers.ParseWeakSubjectivityInputString(tt.input)
if tt.wantedErr != "" {
require.ErrorContains(t, tt.wantedErr, err)
@@ -281,3 +286,21 @@ func genState(t *testing.T, valCount, avgBalance uint64) state.BeaconState {
return beaconState
}
func TestMinEpochsForBlockRequests(t *testing.T) {
helpers.ClearCache()
params.SetActiveTestCleanup(t, params.MainnetConfig())
var expected primitives.Epoch = 33024
// expected value of 33024 via spec commentary:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
// MIN_EPOCHS_FOR_BLOCK_REQUESTS is calculated using the arithmetic from compute_weak_subjectivity_period found in the weak subjectivity guide. Specifically to find this max epoch range, we use the worst case event of a very large validator size (>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT).
//
// MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
// MIN_VALIDATOR_WITHDRAWABILITY_DELAY
// + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
// )
//
// Where MAX_SAFETY_DECAY = 100 and thus MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024 (~5 months).
require.Equal(t, expected, helpers.MinEpochsForBlockRequests())
}

View File

@@ -22,9 +22,6 @@ var ErrNotFoundOriginBlockRoot = kv.ErrNotFoundOriginBlockRoot
// ErrNotFoundBackfillBlockRoot wraps ErrNotFound for an error specific to the backfill block root.
var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot
// ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis
var ErrNotFoundGenesisBlockRoot = kv.ErrNotFoundGenesisBlockRoot
// IsNotFound allows callers to treat errors from a flat-file database, where the file record is missing,
// as equivalent to db.ErrNotFound.
func IsNotFound(err error) bool {

View File

@@ -13,9 +13,11 @@ go_library(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/slasher/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//monitoring/backup:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
],

View File

@@ -11,9 +11,11 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filters"
slashertypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/monitoring/backup"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
@@ -57,7 +59,7 @@ type ReadOnlyDatabase interface {
// origin checkpoint sync support
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
}
// NoHeadAccessDatabase defines a struct without access to chain head data.
@@ -68,6 +70,7 @@ type NoHeadAccessDatabase interface {
DeleteBlock(ctx context.Context, root [32]byte) error
SaveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) error
SaveBlocks(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock) error
SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error
SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error
// State related methods.
SaveState(ctx context.Context, state state.ReadOnlyBeaconState, blockRoot [32]byte) error
@@ -106,9 +109,10 @@ type HeadAccessDatabase interface {
SaveGenesisData(ctx context.Context, state state.BeaconState) error
EnsureEmbeddedGenesis(ctx context.Context) error
// initialization method needed for origin checkpoint sync
// Support for checkpoint sync and backfill.
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
}
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.
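The backfill commit message mentions an AvailableBlocker that decides whether a block is expected to be present locally; the new BackfillStatus accessor above is the persisted input to that decision. A minimal sketch of how a consumer might use it, assuming the LowSlot field from the roundtrip test later in this diff (the helper itself is hypothetical, not Prysm's API):

package availability

import (
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/proto/dbval"
)

// available is a hypothetical helper, not Prysm's AvailableBlocker: given the
// persisted BackfillStatus, it treats any slot at or above the backfill low
// slot as locally available and everything below it as still to be backfilled.
func available(bs *dbval.BackfillStatus, slot primitives.Slot) bool {
	return slot >= primitives.Slot(bs.LowSlot)
}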

View File

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"archived_point.go",
"backfill.go",
"backup.go",
"blocks.go",
"checkpoint.go",
@@ -48,6 +49,7 @@ go_library(
"//io/file:go_default_library",
"//monitoring/progress:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
@@ -73,6 +75,7 @@ go_test(
name = "go_default_test",
srcs = [
"archived_point_test.go",
"backfill_test.go",
"backup_test.go",
"blocks_test.go",
"checkpoint_test.go",
@@ -107,6 +110,7 @@ go_test(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/dbval:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/testing:go_default_library",

View File

@@ -0,0 +1,44 @@
package kv
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
"google.golang.org/protobuf/proto"
)
// SaveBackfillStatus encodes the given BackfillStatus protobuf struct and writes it to a single key in the db.
// This value is used by the backfill service to keep track of the range of blocks that need to be synced. It is also used by the
// code that serves blocks or regenerates states to keep track of what range of blocks are available.
func (s *Store) SaveBackfillStatus(ctx context.Context, bf *dbval.BackfillStatus) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillStatus")
defer span.End()
bfb, err := proto.Marshal(bf)
if err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(backfillStatusKey, bfb)
})
}
// BackfillStatus retrieves the most recently saved version of the BackfillStatus protobuf struct.
// This is used to persist information about backfill status across restarts.
func (s *Store) BackfillStatus(ctx context.Context) (*dbval.BackfillStatus, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.BackfillStatus")
defer span.End()
bf := &dbval.BackfillStatus{}
err := s.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
bs := bucket.Get(backfillStatusKey)
if len(bs) == 0 {
return errors.Wrap(ErrNotFound, "BackfillStatus not found")
}
return proto.Unmarshal(bs, bf)
})
return bf, err
}
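A hedged sketch of how the backfill service might use this pair of methods to persist progress, using only the LowSlot/LowRoot/LowParentRoot fields exercised by the roundtrip test below (the surrounding helper and interface are illustrative, not Prysm's actual service code):

package backfill

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/proto/dbval"
)

// statusStore is the slice of the beacon DB interface this sketch needs.
type statusStore interface {
	BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
	SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
}

// lowerWatermark illustrates persisting progress after importing an older
// batch: the low slot/root move downward toward genesis, so a restart can
// resume backfilling from the new low point.
func lowerWatermark(ctx context.Context, db statusStore, newLowSlot uint64, newLowRoot, newLowParent []byte) error {
	bs, err := db.BackfillStatus(ctx)
	if err != nil {
		return err
	}
	bs.LowSlot = newLowSlot
	bs.LowRoot = newLowRoot
	bs.LowParentRoot = newLowParent
	return db.SaveBackfillStatus(ctx, bs)
}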

View File

@@ -0,0 +1,35 @@
package kv
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"google.golang.org/protobuf/proto"
)
func TestBackfillRoundtrip(t *testing.T) {
db := setupDB(t)
b := &dbval.BackfillStatus{}
b.LowSlot = 23
b.LowRoot = bytesutil.PadTo([]byte("low"), 32)
b.LowParentRoot = bytesutil.PadTo([]byte("parent"), 32)
m, err := proto.Marshal(b)
require.NoError(t, err)
ub := &dbval.BackfillStatus{}
require.NoError(t, proto.Unmarshal(m, ub))
require.Equal(t, b.LowSlot, ub.LowSlot)
require.DeepEqual(t, b.LowRoot, ub.LowRoot)
require.DeepEqual(t, b.LowParentRoot, ub.LowParentRoot)
ctx := context.Background()
require.NoError(t, db.SaveBackfillStatus(ctx, b))
dbub, err := db.BackfillStatus(ctx)
require.NoError(t, err)
require.Equal(t, b.LowSlot, dbub.LowSlot)
require.DeepEqual(t, b.LowRoot, dbub.LowRoot)
require.DeepEqual(t, b.LowParentRoot, dbub.LowParentRoot)
}

View File

@@ -70,25 +70,6 @@ func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
return root, err
}
// BackfillBlockRoot keeps track of the highest block available before the OriginCheckpointBlockRoot
func (s *Store) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
defer span.End()
var root [32]byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
rootSlice := bkt.Get(backfillBlockRootKey)
if len(rootSlice) == 0 {
return ErrNotFoundBackfillBlockRoot
}
root = bytesutil.ToBytes32(rootSlice)
return nil
})
return root, err
}
// HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
func (s *Store) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
@@ -292,55 +273,95 @@ func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.ReadOnlySigned
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlocks")
defer span.End()
// Performing marshaling, hashing, and indexing outside the bolt transaction
// to minimize the time we hold the DB lock.
blockRoots := make([][]byte, len(blks))
encodedBlocks := make([][]byte, len(blks))
indicesForBlocks := make([]map[string][]byte, len(blks))
for i, blk := range blks {
blockRoot, err := blk.Block().HashTreeRoot()
robs := make([]blocks.ROBlock, len(blks))
for i := range blks {
rb, err := blocks.NewROBlock(blks[i])
if err != nil {
return err
return errors.Wrapf(err, "failed to make an ROBlock for a block in SaveBlocks")
}
enc, err := s.marshalBlock(ctx, blk)
if err != nil {
return err
}
blockRoots[i] = blockRoot[:]
encodedBlocks[i] = enc
indicesByBucket := createBlockIndicesFromBlock(ctx, blk.Block())
indicesForBlocks[i] = indicesByBucket
robs[i] = rb
}
saveBlinded, err := s.shouldSaveBlinded(ctx)
return s.SaveROBlocks(ctx, robs, true)
}
type blockBatchEntry struct {
root []byte
block interfaces.ReadOnlySignedBeaconBlock
enc []byte
updated bool
indices map[string][]byte
}
func prepareBlockBatch(blks []blocks.ROBlock, shouldBlind bool) ([]blockBatchEntry, error) {
batch := make([]blockBatchEntry, len(blks))
for i := range blks {
batch[i].root, batch[i].block = blks[i].RootSlice(), blks[i].ReadOnlySignedBeaconBlock
batch[i].indices = blockIndices(batch[i].block.Block().Slot(), batch[i].block.Block().ParentRoot())
if shouldBlind {
blinded, err := batch[i].block.ToBlinded()
if err != nil {
if !errors.Is(err, blocks.ErrUnsupportedVersion) {
return nil, errors.Wrapf(err, "could not convert block to blinded format for root %#x", batch[i].root)
}
// Pre-deneb blocks give ErrUnsupportedVersion; use the full block already in the batch entry.
} else {
batch[i].block = blinded
}
}
enc, err := encodeBlock(batch[i].block)
if err != nil {
return nil, errors.Wrapf(err, "failed to encode block for root %#x", batch[i].root)
}
batch[i].enc = enc
}
return batch, nil
}
func (s *Store) SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error {
shouldBlind, err := s.shouldSaveBlinded(ctx)
if err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
// Precompute expensive values outside the db transaction.
batch, err := prepareBlockBatch(blks, shouldBlind)
if err != nil {
return errors.Wrap(err, "failed to encode all blocks in batch for saving to the db")
}
err = s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
for i, blk := range blks {
if existingBlock := bkt.Get(blockRoots[i]); existingBlock != nil {
for i := range batch {
if exists := bkt.Get(batch[i].root); exists != nil {
continue
}
if err := updateValueForIndices(ctx, indicesForBlocks[i], blockRoots[i], tx); err != nil {
return errors.Wrap(err, "could not update DB indices")
if err := bkt.Put(batch[i].root, batch[i].enc); err != nil {
return errors.Wrapf(err, "could write block to db with root %#x", batch[i].root)
}
if saveBlinded {
blindedBlock, err := blk.ToBlinded()
if err != nil {
if !errors.Is(err, blocks.ErrUnsupportedVersion) {
return err
}
} else {
blk = blindedBlock
}
}
s.blockCache.Set(string(blockRoots[i]), blk, int64(len(encodedBlocks[i])))
if err := bkt.Put(blockRoots[i], encodedBlocks[i]); err != nil {
return err
if err := updateValueForIndices(ctx, batch[i].indices, batch[i].root, tx); err != nil {
return errors.Wrapf(err, "could not update DB indices for root %#x", batch[i].root)
}
batch[i].updated = true
}
return nil
})
if !cache {
return err
}
for i := range batch {
if batch[i].updated {
s.blockCache.Set(string(batch[i].root), batch[i].block, int64(len(batch[i].enc)))
}
}
return err
}
// blockIndices takes in a beacon block and returns
// a map of bolt DB index buckets corresponding to each particular key for indices for
// data, such as (shard indices bucket -> shard 5).
func blockIndices(slot primitives.Slot, parentRoot [32]byte) map[string][]byte {
return map[string][]byte{
string(blockSlotIndicesBucket): bytesutil.SlotToBytesBigEndian(slot),
string(blockParentRootIndicesBucket): parentRoot[:],
}
}
// SaveHeadBlockRoot to the db.
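SaveROBlocks takes a cache flag so that backfill, which writes large batches of old blocks, can skip the block cache (per the commit note "refactor db code to skip block cache for backfill"), while SaveBlocks keeps passing true. A minimal sketch of a backfill-style caller, assuming the NewROBlock constructor shown earlier in this hunk (the caller itself is illustrative, not Prysm's backfill service):

package backfillsketch

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
)

// saveDB is the slice of the beacon DB interface this sketch needs.
type saveDB interface {
	SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error
}

// importBatch wraps raw signed blocks as ROBlocks (hashing each root once up
// front) and writes them with cache=false so backfilled history does not
// evict recently seen blocks from the cache.
func importBatch(ctx context.Context, db saveDB, batch []interfaces.ReadOnlySignedBeaconBlock) error {
	robs := make([]blocks.ROBlock, len(batch))
	for i := range batch {
		rb, err := blocks.NewROBlock(batch[i])
		if err != nil {
			return err
		}
		robs[i] = rb
	}
	return db.SaveROBlocks(ctx, robs, false)
}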
@@ -417,17 +438,6 @@ func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32
})
}
// SaveBackfillBlockRoot is used to keep track of the most recently backfilled block root when
// the node was initialized via checkpoint sync.
func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(backfillBlockRootKey, blockRoot[:])
})
}
// HighestRootsBelowSlot returns roots from the database slot index from the highest slot below the input slot.
// The slot value at the beginning of the return list is the slot where the roots were found. This is helpful so that
// calling code can make decisions based on the slot without resolving the blocks to discover their slot (for instance
@@ -726,31 +736,6 @@ func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot primitives.Slot) ([
return [][32]byte{}, nil
}
// createBlockIndicesFromBlock takes in a beacon block and returns
// a map of bolt DB index buckets corresponding to each particular key for indices for
// data, such as (shard indices bucket -> shard 5).
func createBlockIndicesFromBlock(ctx context.Context, block interfaces.ReadOnlyBeaconBlock) map[string][]byte {
_, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromBlock")
defer span.End()
indicesByBucket := make(map[string][]byte)
// Every index has a unique bucket for fast, binary-search
// range scans for filtering across keys.
buckets := [][]byte{
blockSlotIndicesBucket,
}
indices := [][]byte{
bytesutil.SlotToBytesBigEndian(block.Slot()),
}
buckets = append(buckets, blockParentRootIndicesBucket)
parentRoot := block.ParentRoot()
indices = append(indices, parentRoot[:])
for i := 0; i < len(buckets); i++ {
indicesByBucket[string(buckets[i])] = indices[i]
}
return indicesByBucket
}
// createBlockFiltersFromIndices takes in filter criteria and returns
// a map with a single key-value pair: "block-parent-root-indices" -> parentRoot (array of bytes).
//
@@ -838,74 +823,44 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
return blocks.NewSignedBeaconBlock(rawBlock)
}
func encodeBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
key, err := keyForBlock(blk)
if err != nil {
return nil, errors.Wrap(err, "could not determine version encoding key for block")
}
enc, err := blk.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "could not marshal block")
}
dbfmt := make([]byte, len(key)+len(enc))
if len(key) > 0 {
copy(dbfmt, key)
}
copy(dbfmt[len(key):], enc)
return snappy.Encode(nil, dbfmt), nil
}
func keyForBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
switch blk.Version() {
case version.Deneb:
if blk.IsBlinded() {
return denebBlindKey, nil
}
return denebKey, nil
case version.Capella:
if blk.IsBlinded() {
return capellaBlindKey, nil
}
return capellaKey, nil
case version.Bellatrix:
if blk.IsBlinded() {
return bellatrixBlindKey, nil
}
return bellatrixKey, nil
case version.Altair:
return altairKey, nil
case version.Phase0:
return nil, nil
default:
return nil, fmt.Errorf("unsupported block version: %v", blk.Version())
}
}
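
The version key written by encodeBlock above has to be recognized again on the read path, which this hunk does not show. A hedged sketch of how a reader could strip the prefix after snappy decompression follows; the helper name splitBlockKey is hypothetical, and the actual unmarshal path in this package may differ.

// Hypothetical sketch only.
func splitBlockKey(dbBytes []byte) (key, ssz []byte, err error) {
	dec, err := snappy.Decode(nil, dbBytes) // reverse of the snappy.Encode in encodeBlock
	if err != nil {
		return nil, nil, err
	}
	for _, k := range [][]byte{denebBlindKey, denebKey, capellaBlindKey, capellaKey, bellatrixBlindKey, bellatrixKey, altairKey} {
		if bytes.HasPrefix(dec, k) {
			return k, dec[len(k):], nil
		}
	}
	// No prefix matched: phase0 blocks are stored without a version key.
	return nil, dec, nil
}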

View File

@@ -126,23 +126,6 @@ var blockTests = []struct {
},
}
func TestStore_SaveBackfillBlockRoot(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
_, err := db.BackfillBlockRoot(ctx)
require.ErrorIs(t, err, ErrNotFoundBackfillBlockRoot)
var expected [32]byte
copy(expected[:], []byte{0x23})
err = db.SaveBackfillBlockRoot(ctx, expected)
require.NoError(t, err)
actual, err := db.BackfillBlockRoot(ctx)
require.NoError(t, err)
require.Equal(t, expected, actual)
}
func TestStore_SaveBlock_NoDuplicates(t *testing.T) {
BlockCacheSize = 1
slot := primitives.Slot(20)

View File

@@ -21,3 +21,8 @@ var ErrNotFoundBackfillBlockRoot = errors.Wrap(ErrNotFound, "BackfillBlockRoot")
// ErrNotFoundFeeRecipient is a not found error specifically for the fee recipient getter
var ErrNotFoundFeeRecipient = errors.Wrap(ErrNotFound, "fee recipient")
var errEmptyBlockSlice = errors.New("[]blocks.ROBlock is empty")
var errIncorrectBlockParent = errors.New("unexpected missing or forked blocks in a []ROBlock")
var errFinalizedChildNotFound = errors.New("unable to find finalized root descending from backfill batch")
var errNotConnectedToFinalized = errors.New("unable to finalize backfill blocks, finalized parent_root does not match")

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -163,6 +164,83 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
return bkt.Put(previousFinalizedCheckpointKey, enc)
}
// BackfillFinalizedIndex updates the finalized index for a contiguous chain of blocks that are the ancestors of the
// given finalized child root. This is needed to update the finalized index during backfill, because the usual
// updateFinalizedBlockRoots has assumptions that are incompatible with backfill processing.
func (s *Store) BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BackfillFinalizedIndex")
defer span.End()
if len(blocks) == 0 {
return errEmptyBlockSlice
}
fbrs := make([]*ethpb.FinalizedBlockRootContainer, len(blocks))
encs := make([][]byte, len(blocks))
for i := range blocks {
pr := blocks[i].Block().ParentRoot()
fbrs[i] = &ethpb.FinalizedBlockRootContainer{
ParentRoot: pr[:],
// ChildRoot: will be filled in on the next iteration when we look at the descendant block.
}
if i == 0 {
continue
}
if blocks[i-1].Root() != blocks[i].Block().ParentRoot() {
return errors.Wrapf(errIncorrectBlockParent, "previous root=%#x, slot=%d; child parent_root=%#x, root=%#x, slot=%d",
blocks[i-1].Root(), blocks[i-1].Block().Slot(), blocks[i].Block().ParentRoot(), blocks[i].Root(), blocks[i].Block().Slot())
}
// We know the previous index is the parent of this one thanks to the assertion above,
// so we can set the ChildRoot of the previous value to the root of the current value.
fbrs[i-1].ChildRoot = blocks[i].RootSlice()
// Now that the value for fbrs[i-1] is complete, perform encoding here to minimize time in Update,
// which holds the global db lock.
penc, err := encode(ctx, fbrs[i-1])
if err != nil {
tracing.AnnotateError(span, err)
return err
}
encs[i-1] = penc
// The final element is the parent of finalizedChildRoot. This is checked inside the db transaction using
// the parent_root value stored in the index data for finalizedChildRoot.
if i == len(blocks)-1 {
fbrs[i].ChildRoot = finalizedChildRoot[:]
// Final element is complete, so it is pre-encoded like the others.
enc, err := encode(ctx, fbrs[i])
if err != nil {
tracing.AnnotateError(span, err)
return err
}
encs[i] = enc
}
}
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
child := bkt.Get(finalizedChildRoot[:])
if len(child) == 0 {
return errFinalizedChildNotFound
}
fcc := &ethpb.FinalizedBlockRootContainer{}
if err := decode(ctx, child, fcc); err != nil {
return errors.Wrapf(err, "unable to decode finalized block root container for root=%#x", finalizedChildRoot)
}
// Ensure that the existing finalized chain descends from the new segment.
if !bytes.Equal(fcc.ParentRoot, blocks[len(blocks)-1].RootSlice()) {
return errors.Wrapf(errNotConnectedToFinalized, "finalized block root container for root=%#x has parent_root=%#x, not %#x",
finalizedChildRoot, fcc.ParentRoot, blocks[len(blocks)-1].RootSlice())
}
// Update the finalized index with entries for each block in the new segment.
for i := range fbrs {
if err := bkt.Put(blocks[i].RootSlice(), encs[i]); err != nil {
return err
}
}
return nil
})
}
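
To make the linking above concrete, here is an illustration (not from the diff) of the containers BackfillFinalizedIndex would produce for a three-block segment A <- B <- C whose already-finalized child is D:

// Illustration only.
var aParent, a, b, c, d [32]byte // aParent is A's parent; a, b, c are the segment roots; d is finalizedChildRoot
entries := map[[32]byte]*ethpb.FinalizedBlockRootContainer{
	a: {ParentRoot: aParent[:], ChildRoot: b[:]},
	b: {ParentRoot: a[:], ChildRoot: c[:]},
	c: {ParentRoot: b[:], ChildRoot: d[:]},
}
_ = entries // each container is encoded and Put under its block root in finalizedBlockRootsIndexBucket
// Inside the transaction, the stored container for d must already have ParentRoot == c[:];
// otherwise errNotConnectedToFinalized is returned and nothing is written.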
// IsFinalizedBlock returns true if the block root is present in the finalized block root index.
// A beacon block root exists in this index if it is considered finalized and canonical.
// Note: beacon blocks from the latest finalized epoch return true, whether or not they are

View File

@@ -1,6 +1,7 @@
package kv
import (
"bytes"
"context"
"testing"
@@ -14,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
bolt "go.etcd.io/bbolt"
)
var genesisBlockRoot = bytesutil.ToBytes32([]byte{'G', 'E', 'N', 'E', 'S', 'I', 'S'})
@@ -234,3 +236,64 @@ func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte)
}
return ifaceBlocks
}
func TestStore_BackfillFinalizedIndex(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{}, [32]byte{}), errEmptyBlockSlice)
blks, err := consensusblocks.NewROBlockSlice(makeBlocks(t, 0, 66, [32]byte{}))
require.NoError(t, err)
// set up existing finalized block
ebpr := blks[64].Block().ParentRoot()
ebr := blks[64].Root()
chldr := blks[65].Root()
ebf := &ethpb.FinalizedBlockRootContainer{
ParentRoot: ebpr[:],
ChildRoot: chldr[:],
}
disjoint := []consensusblocks.ROBlock{
blks[0],
blks[2],
}
enc, err := encode(ctx, ebf)
require.NoError(t, err)
err = db.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
return bkt.Put(ebr[:], enc)
})
// reslice to remove the existing blocks
blks = blks[0:64]
// check the other error conditions with a descendant root that really doesn't exist
require.NoError(t, err)
require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, disjoint, [32]byte{}), errIncorrectBlockParent)
require.NoError(t, err)
require.ErrorIs(t, errFinalizedChildNotFound, db.BackfillFinalizedIndex(ctx, blks, [32]byte{}))
// use the real root so that it succeeds
require.NoError(t, db.BackfillFinalizedIndex(ctx, blks, ebr))
for i := range blks {
require.NoError(t, db.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
encfr := bkt.Get(blks[i].RootSlice())
require.Equal(t, true, len(encfr) > 0)
fr := &ethpb.FinalizedBlockRootContainer{}
require.NoError(t, decode(ctx, encfr, fr))
require.Equal(t, 32, len(fr.ParentRoot))
require.Equal(t, 32, len(fr.ChildRoot))
pr := blks[i].Block().ParentRoot()
require.Equal(t, true, bytes.Equal(fr.ParentRoot, pr[:]))
if i > 0 {
require.Equal(t, true, bytes.Equal(fr.ParentRoot, blks[i-1].RootSlice()))
}
if i < len(blks)-1 {
require.DeepEqual(t, fr.ChildRoot, blks[i+1].RootSlice())
}
if i == len(blks)-1 {
require.DeepEqual(t, fr.ChildRoot, ebr[:])
}
return nil
}))
}
}

View File

@@ -1,6 +1,7 @@
package kv
import (
"bytes"
"context"
"fmt"
"testing"
@@ -116,13 +117,25 @@ func Test_setupBlockStorageType(t *testing.T) {
root, err = wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
retrievedBlk, err = store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, true, retrievedBlk.IsBlinded())
wrappedBlinded, err := wrappedBlock.ToBlinded()
require.NoError(t, err)
require.DeepEqual(t, wrappedBlinded, retrievedBlk)
retrievedBlk, err = store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, true, retrievedBlk.IsBlinded())
// Compare retrieved value by root, and marshaled bytes.
mSrc, err := wrappedBlinded.MarshalSSZ()
require.NoError(t, err)
mTgt, err := retrievedBlk.MarshalSSZ()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(mSrc, mTgt))
rSrc, err := wrappedBlinded.Block().HashTreeRoot()
require.NoError(t, err)
rTgt, err := retrievedBlk.Block().HashTreeRoot()
require.NoError(t, err)
require.Equal(t, rSrc, rTgt)
})
t.Run("existing database with full blocks type should continue storing full blocks", func(t *testing.T) {
store := setupDB(t)
@@ -155,10 +168,21 @@ func Test_setupBlockStorageType(t *testing.T) {
root, err = wrappedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, store.SaveBlock(ctx, wrappedBlock))
retrievedBlk, err = store.Block(ctx, root)
require.NoError(t, err)
require.Equal(t, false, retrievedBlk.IsBlinded())
require.DeepEqual(t, wrappedBlock, retrievedBlk)
// Compare retrieved value by root, and marshaled bytes.
mSrc, err := wrappedBlock.MarshalSSZ()
require.NoError(t, err)
mTgt, err := retrievedBlk.MarshalSSZ()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(mSrc, mTgt))
rTgt, err := retrievedBlk.Block().HashTreeRoot()
require.NoError(t, err)
require.Equal(t, root, rTgt)
})
t.Run("existing database with blinded blocks type should error if user enables full blocks feature flag", func(t *testing.T) {
store := setupDB(t)

View File

@@ -61,8 +61,8 @@ var (
// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
backfillBlockRootKey = []byte("backfill-block-root")
// tracking data about an ongoing backfill
backfillStatusKey = []byte("backfill-status")
// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
lastArchivedIndexKey = []byte("last-archived")

View File

@@ -893,6 +893,7 @@ func createStateIndicesFromStateSlot(ctx context.Context, slot primitives.Slot)
//
// 3.) state with current finalized root
// 4.) unfinalized States
// 5.) not origin root
func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB. CleanUpDirtyStates")
defer span.End()
@@ -907,6 +908,11 @@ func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint pr
}
deletedRoots := make([][32]byte, 0)
oRoot, err := s.OriginCheckpointBlockRoot(ctx)
if err != nil {
return err
}
err = s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(stateSlotIndicesBucket)
return bkt.ForEach(func(k, v []byte) error {
@@ -914,15 +920,31 @@ func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint pr
return ctx.Err()
}
root := bytesutil.ToBytes32(v)
slot := bytesutil.BytesToSlotBigEndian(k)
mod := slot % slotsPerArchivedPoint
// The following conditions cover 1, 2, 3, 4 and 5 above.
if mod == 0 {
return nil
}
if mod > slotsPerArchivedPoint-slotsPerArchivedPoint/3 {
return nil
}
if bytesutil.ToBytes32(f.Root) == root {
return nil
}
if slot > finalizedSlot {
return nil
}
if oRoot == root {
return nil
}
deletedRoots = append(deletedRoots, root)
return nil
})
})
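
A quick worked example of the slot arithmetic above, using slotsPerArchivedPoint = 128 as in the tests below: a dirty state at slot 130 has mod = 2, which is non-zero and at most 128 - 128/3 = 86, so it is a deletion candidate unless it matches the finalized root, sits above the finalized slot, or is the origin checkpoint root. A state at slot 256 (mod = 0) or slot 230 (mod = 102 > 86) is always kept.

// Illustration with slotsPerArchivedPoint = 128 (the value used in the tests below).
mod := primitives.Slot(130) % 128   // = 2
keep := mod == 0 || mod > 128-128/3 // 2 > 86 is false, so this state is a deletion candidate
_ = keep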

View File

@@ -3,6 +3,7 @@ package kv
import (
"context"
"encoding/binary"
"math/big"
"math/rand"
"strconv"
"testing"
@@ -99,7 +100,7 @@ func TestState_CanSaveRetrieve(t *testing.T) {
BlockHash: make([]byte, 32),
TransactionsRoot: make([]byte, 32),
WithdrawalsRoot: make([]byte, 32),
}, 0)
}, big.NewInt(0))
require.NoError(t, err)
require.NoError(t, st.SetLatestExecutionPayloadHeader(p))
return st
@@ -124,7 +125,7 @@ func TestState_CanSaveRetrieve(t *testing.T) {
BlockHash: make([]byte, 32),
TransactionsRoot: make([]byte, 32),
WithdrawalsRoot: make([]byte, 32),
}, 0)
}, big.NewInt(0))
require.NoError(t, err)
require.NoError(t, st.SetLatestExecutionPayloadHeader(p))
return st
@@ -675,6 +676,7 @@ func TestStore_CleanUpDirtyStates_AboveThreshold(t *testing.T) {
genesisRoot := [32]byte{'a'}
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot))
require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot))
require.NoError(t, db.SaveOriginCheckpointBlockRoot(context.Background(), [32]byte{'a'}))
bRoots := make([][32]byte, 0)
slotsPerArchivedPoint := primitives.Slot(128)
@@ -720,6 +722,7 @@ func TestStore_CleanUpDirtyStates_Finalized(t *testing.T) {
genesisRoot := [32]byte{'a'}
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot))
require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot))
require.NoError(t, db.SaveOriginCheckpointBlockRoot(context.Background(), [32]byte{'a'}))
for i := primitives.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ {
b := util.NewBeaconBlock()
@@ -741,6 +744,35 @@ func TestStore_CleanUpDirtyStates_Finalized(t *testing.T) {
require.Equal(t, true, db.HasState(context.Background(), genesisRoot))
}
func TestStore_CleanUpDirtyStates_OriginRoot(t *testing.T) {
db := setupDB(t)
genesisState, err := util.NewBeaconState()
require.NoError(t, err)
r := [32]byte{'a'}
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), r))
require.NoError(t, db.SaveState(context.Background(), genesisState, r))
for i := primitives.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ {
b := util.NewBeaconBlock()
b.Block.Slot = i
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(context.Background(), wsb))
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(i))
require.NoError(t, db.SaveState(context.Background(), st, r))
}
require.NoError(t, db.SaveOriginCheckpointBlockRoot(context.Background(), r))
require.NoError(t, db.CleanUpDirtyStates(context.Background(), params.BeaconConfig().SlotsPerEpoch))
require.Equal(t, true, db.HasState(context.Background(), r))
}
func TestStore_CleanUpDirtyStates_DontDeleteNonFinalized(t *testing.T) {
db := setupDB(t)
@@ -749,6 +781,7 @@ func TestStore_CleanUpDirtyStates_DontDeleteNonFinalized(t *testing.T) {
genesisRoot := [32]byte{'a'}
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot))
require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot))
require.NoError(t, db.SaveOriginCheckpointBlockRoot(context.Background(), [32]byte{'a'}))
var unfinalizedRoots [][32]byte
for i := primitives.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ {

View File

@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
@@ -17,18 +18,6 @@ import (
// syncing, using the provided values as their point of origin. This is an alternative
// to syncing from genesis, and should only be run on an empty database.
func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error {
genesisRoot, err := s.GenesisBlockRoot(ctx)
if err != nil {
if errors.Is(err, ErrNotFoundGenesisBlockRoot) {
return errors.Wrap(err, "genesis block root not found: genesis must be provided for checkpoint sync")
}
return errors.Wrap(err, "genesis block root query error: checkpoint sync must verify genesis to proceed")
}
err = s.SaveBackfillBlockRoot(ctx, genesisRoot)
if err != nil {
return errors.Wrap(err, "unable to save genesis root as initial backfill starting point for checkpoint sync")
}
cf, err := detect.FromState(serState)
if err != nil {
return errors.Wrap(err, "could not sniff config+fork for origin state bytes")
@@ -50,11 +39,24 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
}
blk := wblk.Block()
// save block
blockRoot, err := blk.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not compute HashTreeRoot of checkpoint block")
}
pr := blk.ParentRoot()
bf := &dbval.BackfillStatus{
LowSlot: uint64(wblk.Block().Slot()),
LowRoot: blockRoot[:],
LowParentRoot: pr[:],
OriginRoot: blockRoot[:],
OriginSlot: uint64(wblk.Block().Slot()),
}
if err = s.SaveBackfillStatus(ctx, bf); err != nil {
return errors.Wrap(err, "unable to save backfill status data to db for checkpoint sync")
}
log.Infof("saving checkpoint block to db, w/ root=%#x", blockRoot)
if err := s.SaveBlock(ctx, wblk); err != nil {
return errors.Wrap(err, "could not save checkpoint block")

View File

@@ -113,7 +113,7 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
cursorNum := big.NewInt(0).SetUint64(latestBlkHeight)
cursorTime := latestBlkTime
numOfBlocks := uint64(0)
var numOfBlocks uint64
estimatedBlk := cursorNum.Uint64()
maxTimeBuffer := searchThreshold * params.BeaconConfig().SecondsPerETH1Block
// Terminate if we can't find an acceptable block after

View File

@@ -260,7 +260,7 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
if err != nil {
return nil, nil, false, handleRPCError(err)
}
ed, err := blocks.WrappedExecutionPayloadDeneb(result.Payload, blocks.PayloadValueToGwei(result.Value))
ed, err := blocks.WrappedExecutionPayloadDeneb(result.Payload, blocks.PayloadValueToWei(result.Value))
if err != nil {
return nil, nil, false, err
}
@@ -273,7 +273,7 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
if err != nil {
return nil, nil, false, handleRPCError(err)
}
ed, err := blocks.WrappedExecutionPayloadCapella(result.Payload, blocks.PayloadValueToGwei(result.Value))
ed, err := blocks.WrappedExecutionPayloadCapella(result.Payload, blocks.PayloadValueToWei(result.Value))
if err != nil {
return nil, nil, false, err
}
@@ -734,7 +734,7 @@ func fullPayloadFromExecutionBlock(
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
case version.Deneb:
ebg, err := header.ExcessBlobGas()
if err != nil {
@@ -763,7 +763,7 @@ func fullPayloadFromExecutionBlock(
Withdrawals: block.Withdrawals,
BlobGasUsed: bgu,
ExcessBlobGas: ebg,
}, 0) // We can't get the block value and don't care about the block value for this instance
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
default:
return nil, fmt.Errorf("unknown execution block version %d", block.Version)
}
@@ -811,7 +811,7 @@ func fullPayloadFromPayloadBody(
BlockHash: header.BlockHash(),
Transactions: body.Transactions,
Withdrawals: body.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
case version.Deneb:
ebg, err := header.ExcessBlobGas()
if err != nil {
@@ -840,7 +840,7 @@ func fullPayloadFromPayloadBody(
Withdrawals: body.Withdrawals,
ExcessBlobGas: ebg,
BlobGasUsed: bgu,
}, 0) // We can't get the block value and don't care about the block value for this instance
}, big.NewInt(0)) // We can't get the block value and don't care about the block value for this instance
default:
return nil, fmt.Errorf("unknown execution block version for payload %d", bVersion)
}
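
The switch from a plain integer to *big.Int in the wrapped-payload constructors above presumably reflects that payload values are now carried in wei (note the PayloadValueToGwei -> PayloadValueToWei rename) rather than gwei; a wei-denominated value can overflow uint64, as this illustrative sketch shows (imports "fmt", "math", "math/big"):

// Illustration only: roughly 18.45 ETH in wei already exceeds the uint64 range.
weiPerEth := new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
twentyEth := new(big.Int).Mul(big.NewInt(20), weiPerEth)
maxU64 := new(big.Int).SetUint64(math.MaxUint64) // math.MaxUint64 from the standard library
fmt.Println(twentyEth.Cmp(maxU64) > 0)           // true, so the value no longer fits in a uint64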

View File

@@ -127,7 +127,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
req, ok := fix["ExecutionPayloadCapella"].(*pb.ExecutionPayloadCapella)
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req, 0)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req, big.NewInt(0))
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.NoError(t, err)
@@ -476,7 +476,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV2Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.NoError(t, err)
@@ -490,7 +490,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV3Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, big.NewInt(0))
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
require.NoError(t, err)
@@ -518,7 +518,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV2Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
@@ -532,7 +532,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV3Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, big.NewInt(0))
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
@@ -560,7 +560,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV2Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
@@ -574,7 +574,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV3Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, big.NewInt(0))
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
@@ -602,7 +602,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV2Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, big.NewInt(0))
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
@@ -616,7 +616,7 @@ func TestClient_HTTP(t *testing.T) {
client := newPayloadV3Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, big.NewInt(0))
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
@@ -1537,7 +1537,7 @@ func Test_fullPayloadFromExecutionBlockCapella(t *testing.T) {
p, err := blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
BlockHash: wantedHash[:],
Transactions: [][]byte{},
}, 0)
}, big.NewInt(0))
require.NoError(t, err)
return p
},
@@ -1545,7 +1545,7 @@ func Test_fullPayloadFromExecutionBlockCapella(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayloadHeaderCapella(tt.args.header, 0)
wrapped, err := blocks.WrappedExecutionPayloadHeaderCapella(tt.args.header, big.NewInt(0))
require.NoError(t, err)
got, err := fullPayloadFromExecutionBlock(tt.args.version, wrapped, tt.args.block)
if err != nil {
@@ -1598,7 +1598,7 @@ func Test_fullPayloadFromExecutionBlockDeneb(t *testing.T) {
p, err := blocks.WrappedExecutionPayloadDeneb(&pb.ExecutionPayloadDeneb{
BlockHash: wantedHash[:],
Transactions: [][]byte{},
}, 0)
}, big.NewInt(0))
require.NoError(t, err)
return p
},
@@ -1606,7 +1606,7 @@ func Test_fullPayloadFromExecutionBlockDeneb(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayloadHeaderDeneb(tt.args.header, 0)
wrapped, err := blocks.WrappedExecutionPayloadHeaderDeneb(tt.args.header, big.NewInt(0))
require.NoError(t, err)
got, err := fullPayloadFromExecutionBlock(tt.args.version, wrapped, tt.args.block)
if err != nil {

View File

@@ -23,7 +23,6 @@ go_library(
"//consensus-types/payload-attribute:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",

View File

@@ -14,7 +14,6 @@ import (
payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
pb "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
@@ -63,14 +62,14 @@ func (e *EngineClient) ForkchoiceUpdated(
// GetPayload --
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, bool, error) {
if slots.ToEpoch(s) >= params.BeaconConfig().DenebForkEpoch {
ed, err := blocks.WrappedExecutionPayloadDeneb(e.ExecutionPayloadDeneb, math.Gwei(e.BlockValue))
ed, err := blocks.WrappedExecutionPayloadDeneb(e.ExecutionPayloadDeneb, big.NewInt(int64(e.BlockValue)))
if err != nil {
return nil, nil, false, err
}
return ed, e.BlobsBundle, e.BuilderOverride, nil
}
if slots.ToEpoch(s) >= params.BeaconConfig().CapellaForkEpoch {
ed, err := blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, math.Gwei(e.BlockValue))
ed, err := blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, big.NewInt(int64(e.BlockValue)))
if err != nil {
return nil, nil, false, err
}

View File

@@ -40,6 +40,7 @@ go_library(
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/rpc:go_default_library",
"//beacon-chain/slasher:go_default_library",
"//beacon-chain/startup:go_default_library",
@@ -47,6 +48,7 @@ go_library(
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//beacon-chain/sync/backfill:go_default_library",
"//beacon-chain/sync/backfill/coverage:go_default_library",
"//beacon-chain/sync/checkpoint:go_default_library",
"//beacon-chain/sync/genesis:go_default_library",
"//beacon-chain/sync/initial-sync:go_default_library",

View File

@@ -42,6 +42,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/slasher"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
@@ -49,6 +50,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
regularsync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill/coverage"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/checkpoint"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/genesis"
initialsync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync"
@@ -113,6 +115,7 @@ type BeaconNode struct {
CheckpointInitializer checkpoint.Initializer
forkChoicer forkchoice.ForkChoicer
clockWaiter startup.ClockWaiter
BackfillOpts []backfill.ServiceOption
initialSyncComplete chan struct{}
BlobStorage *filesystem.BlobStorage
blobRetentionEpochs primitives.Epoch
@@ -215,10 +218,23 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
return nil, err
}
bfs := backfill.NewStatus(beacon.db)
if err := bfs.Reload(ctx); err != nil {
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}
bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "backfill status initialization error")
}
pa := peers.NewAssigner(beacon.fetchP2P().Peers(), beacon.forkChoicer)
bf, err := backfill.NewService(ctx, bfs, beacon.clockWaiter, beacon.fetchP2P(), pa, beacon.BackfillOpts...)
if err != nil {
return nil, errors.Wrap(err, "error initializing backfill service")
}
if err := beacon.services.RegisterService(bf); err != nil {
return nil, errors.Wrap(err, "error registering backfill service")
}
log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
@@ -233,11 +249,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
beacon.verifyInitWaiter = verification.NewInitializerWaiter(
beacon.clockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen)
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}
log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return nil, err
@@ -534,8 +545,8 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
return nil
}
func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.Status, fc forkchoice.ForkChoicer) error {
opts := []stategen.Option{stategen.WithBackfillStatus(bfs)}
func (b *BeaconNode) startStateGen(ctx context.Context, bfs coverage.AvailableBlocker, fc forkchoice.ForkChoicer) error {
opts := []stategen.Option{stategen.WithAvailableBlocker(bfs)}
sg := stategen.New(b.db, fc, opts...)
cp, err := b.db.FinalizedCheckpoint(ctx)

View File

@@ -83,7 +83,7 @@ func postAltairMsgID(pmsg *pubsubpb.Message, fEpoch primitives.Epoch) string {
decodedData, err := encoder.DecodeSnappy(pmsg.Data, gossipPubSubSize)
if err != nil {
totalLength, err := math.AddInt(
len(params.BeaconConfig().MessageDomainValidSnappy),
len(params.BeaconConfig().MessageDomainInvalidSnappy),
len(topicLenBytes),
topicLen,
len(pmsg.Data),

View File

@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"assigner.go",
"log.go",
"status.go",
],
@@ -12,8 +13,10 @@ go_library(
"//cmd:__subpackages__",
],
deps = [
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -28,6 +31,7 @@ go_library(
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
@@ -36,6 +40,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"assigner_test.go",
"benchmark_test.go",
"peers_test.go",
"status_test.go",

View File

@@ -0,0 +1,78 @@
package peers
import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/sirupsen/logrus"
)
// FinalizedCheckpointer describes the minimum capability that Assigner needs from forkchoice.
// That is, the ability to retrieve the latest finalized checkpoint to help with peer evaluation.
type FinalizedCheckpointer interface {
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
}
// NewAssigner assists in the correct construction of an Assigner by code in other packages,
// ensuring all the important private member fields are given values.
// The FinalizedCheckpointer is used to retrieve the latest finalized checkpoint each time peers are requested.
// Peers that report an older finalized checkpoint are filtered out.
func NewAssigner(s *Status, fc FinalizedCheckpointer) *Assigner {
return &Assigner{
ps: s,
fc: fc,
}
}
// Assigner uses the "BestFinalized" peer scoring method to pick the next-best peer to receive rpc requests.
type Assigner struct {
ps *Status
fc FinalizedCheckpointer
}
// ErrInsufficientSuitable is a sentinel error, signaling that a peer couldn't be assigned because there are currently
// not enough peers that match our selection criteria to serve rpc requests. It is the responsibility of the caller to
// look for this error and continue to try calling Assign with appropriate backoff logic.
var ErrInsufficientSuitable = errors.New("no suitable peers")
func (a *Assigner) freshPeers() ([]peer.ID, error) {
required := params.BeaconConfig().MaxPeersToSync
if flags.Get().MinimumSyncPeers < required {
required = flags.Get().MinimumSyncPeers
}
_, peers := a.ps.BestFinalized(params.BeaconConfig().MaxPeersToSync, a.fc.FinalizedCheckpoint().Epoch)
if len(peers) < required {
log.WithFields(logrus.Fields{
"suitable": len(peers),
"required": required}).Warn("Unable to assign peer while suitable peers < required ")
return nil, ErrInsufficientSuitable
}
return peers, nil
}
// Assign uses the "BestFinalized" method to select the best peers that agree on a canonical block
// for the configured finalized epoch. At most `n` peers will be returned. The `busy` param can be used
// to filter out peers that we know we don't want to connect to, for instance if we are trying to limit
// the number of outbound requests to each peer from a given component.
func (a *Assigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
best, err := a.freshPeers()
if err != nil {
return nil, err
}
return pickBest(busy, n, best), nil
}
func pickBest(busy map[peer.ID]bool, n int, best []peer.ID) []peer.ID {
ps := make([]peer.ID, 0, n)
for _, p := range best {
if len(ps) == n {
return ps
}
if !busy[p] {
ps = append(ps, p)
}
}
return ps
}
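
A hedged usage sketch for the new Assigner; the caller name, batch size, and backoff policy below are illustrative and not from this PR. The caller tracks peers with requests in flight and backs off on ErrInsufficientSuitable, as the doc comment above prescribes.

// Illustrative caller in another package; assumes an *Assigner built via peers.NewAssigner.
func requestFromPeers(assigner *peers.Assigner) error {
	busy := map[peer.ID]bool{} // peers that already have a request in flight
	for {
		pids, err := assigner.Assign(busy, 2) // ask for up to 2 fresh peers
		if errors.Is(err, peers.ErrInsufficientSuitable) {
			time.Sleep(5 * time.Second) // arbitrary backoff; real callers pick their own policy
			continue
		}
		if err != nil {
			return err
		}
		for _, pid := range pids {
			busy[pid] = true
			// ... issue the rpc request to pid; clear busy[pid] when it completes ...
		}
		return nil
	}
}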

View File

@@ -0,0 +1,114 @@
package peers
import (
"fmt"
"testing"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestPickBest(t *testing.T) {
best := testPeerIds(10)
cases := []struct {
name string
busy map[peer.ID]bool
n int
best []peer.ID
expected []peer.ID
}{
{
name: "",
n: 0,
},
{
name: "none busy",
n: 1,
expected: best[0:1],
},
{
name: "all busy except last",
n: 1,
busy: testBusyMap(best[0 : len(best)-1]),
expected: best[len(best)-1:],
},
{
name: "all busy except i=5",
n: 1,
busy: testBusyMap(append(append([]peer.ID{}, best[0:5]...), best[6:]...)),
expected: []peer.ID{best[5]},
},
{
name: "all busy - 0 results",
n: 1,
busy: testBusyMap(best),
},
{
name: "first half busy",
n: 5,
busy: testBusyMap(best[0:5]),
expected: best[5:],
},
{
name: "back half busy",
n: 5,
busy: testBusyMap(best[5:]),
expected: best[0:5],
},
{
name: "pick all ",
n: 10,
expected: best,
},
{
name: "none available",
n: 10,
best: []peer.ID{},
},
{
name: "not enough",
n: 10,
best: best[0:1],
expected: best[0:1],
},
{
name: "not enough, some busy",
n: 10,
best: best[0:6],
busy: testBusyMap(best[0:5]),
expected: best[5:6],
},
}
for _, c := range cases {
name := fmt.Sprintf("n=%d", c.n)
if c.name != "" {
name += " " + c.name
}
t.Run(name, func(t *testing.T) {
if c.best == nil {
c.best = best
}
pb := pickBest(c.busy, c.n, c.best)
require.Equal(t, len(c.expected), len(pb))
for i := range c.expected {
require.Equal(t, c.expected[i], pb[i])
}
})
}
}
func testBusyMap(b []peer.ID) map[peer.ID]bool {
m := make(map[peer.ID]bool)
for i := range b {
m[b[i]] = true
}
return m
}
func testPeerIds(n int) []peer.ID {
pids := make([]peer.ID, n)
for i := range pids {
pids[i] = peer.ID(fmt.Sprintf("%d", i))
}
return pids
}

View File

@@ -471,7 +471,7 @@ func TestGetForkChoice(t *testing.T) {
require.NoError(t, store.UpdateFinalizedCheckpoint(fc))
s := &Server{ForkchoiceFetcher: &blockchainmock.ChainService{ForkChoiceStore: store}}
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/debug/fork_choice", nil)
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/debug/fork_choice", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

View File

@@ -559,7 +559,7 @@ func (b *SignedBlindedBeaconBlockBellatrix) ToGeneric() (*eth.GenericSignedBeaco
Block: bl,
Signature: sig,
}
return &eth.GenericSignedBeaconBlock{Block: &eth.GenericSignedBeaconBlock_BlindedBellatrix{BlindedBellatrix: block}, IsBlinded: true, PayloadValue: 0 /* can't get payload value from blinded block */}, nil
return &eth.GenericSignedBeaconBlock{Block: &eth.GenericSignedBeaconBlock_BlindedBellatrix{BlindedBellatrix: block}, IsBlinded: true}, nil
}
func (b *BlindedBeaconBlockBellatrix) ToGeneric() (*eth.GenericBeaconBlock, error) {
@@ -567,7 +567,7 @@ func (b *BlindedBeaconBlockBellatrix) ToGeneric() (*eth.GenericBeaconBlock, erro
if err != nil {
return nil, err
}
return &eth.GenericBeaconBlock{Block: &eth.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: block}, IsBlinded: true, PayloadValue: 0 /* can't get payload value from blinded block */}, nil
return &eth.GenericBeaconBlock{Block: &eth.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: block}, IsBlinded: true}, nil
}
func (b *BlindedBeaconBlockBellatrix) ToConsensus() (*eth.BlindedBeaconBlockBellatrix, error) {
@@ -1016,7 +1016,7 @@ func (b *SignedBlindedBeaconBlockCapella) ToGeneric() (*eth.GenericSignedBeaconB
Block: bl,
Signature: sig,
}
return &eth.GenericSignedBeaconBlock{Block: &eth.GenericSignedBeaconBlock_BlindedCapella{BlindedCapella: block}, IsBlinded: true, PayloadValue: 0 /* can't get payload value from blinded block */}, nil
return &eth.GenericSignedBeaconBlock{Block: &eth.GenericSignedBeaconBlock_BlindedCapella{BlindedCapella: block}, IsBlinded: true}, nil
}
func (b *BlindedBeaconBlockCapella) ToGeneric() (*eth.GenericBeaconBlock, error) {
@@ -1024,7 +1024,7 @@ func (b *BlindedBeaconBlockCapella) ToGeneric() (*eth.GenericBeaconBlock, error)
if err != nil {
return nil, err
}
return &eth.GenericBeaconBlock{Block: &eth.GenericBeaconBlock_BlindedCapella{BlindedCapella: block}, IsBlinded: true, PayloadValue: 0 /* can't get payload value from blinded block */}, nil
return &eth.GenericBeaconBlock{Block: &eth.GenericBeaconBlock_BlindedCapella{BlindedCapella: block}, IsBlinded: true}, nil
}
func (b *BlindedBeaconBlockCapella) ToConsensus() (*eth.BlindedBeaconBlockCapella, error) {

View File

@@ -31,11 +31,11 @@ func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBe
return false
}
if err != nil {
httputil.HandleError(w, "Could not get block from block ID: %s"+err.Error(), http.StatusInternalServerError)
httputil.HandleError(w, "Could not get block from block ID: "+err.Error(), http.StatusInternalServerError)
return false
}
if err = blocks.BeaconBlockIsNil(blk); err != nil {
httputil.HandleError(w, "Could not find requested block: %s"+err.Error(), http.StatusNotFound)
httputil.HandleError(w, "Could not find requested block: "+err.Error(), http.StatusNotFound)
return false
}
return true

View File

@@ -223,7 +223,7 @@ func (s *Server) produceBlockV3(ctx context.Context, w http.ResponseWriter, r *h
}
w.Header().Set(api.ExecutionPayloadBlindedHeader, fmt.Sprintf("%v", v1alpha1resp.IsBlinded))
w.Header().Set(api.ExecutionPayloadValueHeader, fmt.Sprintf("%d", v1alpha1resp.PayloadValue))
w.Header().Set(api.ExecutionPayloadValueHeader, v1alpha1resp.PayloadValue)
w.Header().Set(api.ConsensusBlockValueHeader, consensusBlockValue)
phase0Block, ok := v1alpha1resp.Block.(*eth.GenericBeaconBlock_Phase0)
@@ -310,7 +310,7 @@ func handleProducePhase0V3(
w http.ResponseWriter,
isSSZ bool,
blk *eth.GenericBeaconBlock_Phase0,
payloadValue uint64,
payloadValue string,
) {
if isSSZ {
sszResp, err := blk.Phase0.MarshalSSZ()
@@ -329,8 +329,8 @@ func handleProducePhase0V3(
httputil.WriteJson(w, &ProduceBlockV3Response{
Version: version.String(version.Phase0),
ExecutionPayloadBlinded: false,
ExecutionPayloadValue: fmt.Sprintf("%d", payloadValue), // mev not available at this point
ConsensusBlockValue: "", // rewards not applicable before altair
ExecutionPayloadValue: payloadValue, // mev not available at this point
ConsensusBlockValue: "", // rewards not applicable before altair
Data: jsonBytes,
})
}
@@ -339,7 +339,7 @@ func handleProduceAltairV3(
w http.ResponseWriter,
isSSZ bool,
blk *eth.GenericBeaconBlock_Altair,
executionPayloadValue uint64,
executionPayloadValue string,
consensusPayloadValue string,
) {
if isSSZ {
@@ -359,7 +359,7 @@ func handleProduceAltairV3(
httputil.WriteJson(w, &ProduceBlockV3Response{
Version: version.String(version.Altair),
ExecutionPayloadBlinded: false,
ExecutionPayloadValue: fmt.Sprintf("%d", executionPayloadValue), // mev not available at this point
ExecutionPayloadValue: executionPayloadValue, // mev not available at this point
ConsensusBlockValue: consensusPayloadValue,
Data: jsonBytes,
})
@@ -369,7 +369,7 @@ func handleProduceBellatrixV3(
w http.ResponseWriter,
isSSZ bool,
blk *eth.GenericBeaconBlock_Bellatrix,
executionPayloadValue uint64,
executionPayloadValue string,
consensusPayloadValue string,
) {
if isSSZ {
@@ -394,7 +394,7 @@ func handleProduceBellatrixV3(
httputil.WriteJson(w, &ProduceBlockV3Response{
Version: version.String(version.Bellatrix),
ExecutionPayloadBlinded: false,
ExecutionPayloadValue: fmt.Sprintf("%d", executionPayloadValue), // mev not available at this point
ExecutionPayloadValue: executionPayloadValue, // mev not available at this point
ConsensusBlockValue: consensusPayloadValue,
Data: jsonBytes,
})
@@ -404,7 +404,7 @@ func handleProduceBlindedBellatrixV3(
w http.ResponseWriter,
isSSZ bool,
blk *eth.GenericBeaconBlock_BlindedBellatrix,
executionPayloadValue uint64,
executionPayloadValue string,
consensusPayloadValue string,
) {
if isSSZ {
@@ -429,7 +429,7 @@ func handleProduceBlindedBellatrixV3(
httputil.WriteJson(w, &ProduceBlockV3Response{
Version: version.String(version.Bellatrix),
ExecutionPayloadBlinded: true,
ExecutionPayloadValue: fmt.Sprintf("%d", executionPayloadValue),
ExecutionPayloadValue: executionPayloadValue,
ConsensusBlockValue: consensusPayloadValue,
Data: jsonBytes,
})
@@ -439,7 +439,7 @@ func handleProduceBlindedCapellaV3(
w http.ResponseWriter,
isSSZ bool,
blk *eth.GenericBeaconBlock_BlindedCapella,
executionPayloadValue uint64,
executionPayloadValue string,
consensusPayloadValue string,
) {
if isSSZ {
@@ -464,7 +464,7 @@ func handleProduceBlindedCapellaV3(
httputil.WriteJson(w, &ProduceBlockV3Response{
Version: version.String(version.Capella),
ExecutionPayloadBlinded: true,
ExecutionPayloadValue: fmt.Sprintf("%d", executionPayloadValue),
ExecutionPayloadValue: executionPayloadValue,
ConsensusBlockValue: consensusPayloadValue,
Data: jsonBytes,
})
@@ -474,7 +474,7 @@ func handleProduceCapellaV3(
w http.ResponseWriter,
isSSZ bool,
blk *eth.GenericBeaconBlock_Capella,
executionPayloadValue uint64,
executionPayloadValue string,
consensusPayloadValue string,
) {
if isSSZ {
@@ -499,7 +499,7 @@ func handleProduceCapellaV3(
httputil.WriteJson(w, &ProduceBlockV3Response{
Version: version.String(version.Capella),
ExecutionPayloadBlinded: false,
ExecutionPayloadValue: fmt.Sprintf("%d", executionPayloadValue), // mev not available at this point
ExecutionPayloadValue: executionPayloadValue, // mev not available at this point
ConsensusBlockValue: consensusPayloadValue,
Data: jsonBytes,
})
@@ -509,7 +509,7 @@ func handleProduceBlindedDenebV3(
w http.ResponseWriter,
isSSZ bool,
blk *eth.GenericBeaconBlock_BlindedDeneb,
executionPayloadValue uint64,
executionPayloadValue string,
consensusPayloadValue string,
) {
if isSSZ {
@@ -534,7 +534,7 @@ func handleProduceBlindedDenebV3(
httputil.WriteJson(w, &ProduceBlockV3Response{
Version: version.String(version.Deneb),
ExecutionPayloadBlinded: true,
ExecutionPayloadValue: fmt.Sprintf("%d", executionPayloadValue),
ExecutionPayloadValue: executionPayloadValue,
ConsensusBlockValue: consensusPayloadValue,
Data: jsonBytes,
})
@@ -544,7 +544,7 @@ func handleProduceDenebV3(
w http.ResponseWriter,
isSSZ bool,
blk *eth.GenericBeaconBlock_Deneb,
executionPayloadValue uint64,
executionPayloadValue string,
consensusBlockValue string,
) {
if isSSZ {
@@ -570,7 +570,7 @@ func handleProduceDenebV3(
httputil.WriteJson(w, &ProduceBlockV3Response{
Version: version.String(version.Deneb),
ExecutionPayloadBlinded: false,
ExecutionPayloadValue: fmt.Sprintf("%d", executionPayloadValue), // mev not available at this point
ExecutionPayloadValue: executionPayloadValue, // mev not available at this point
ConsensusBlockValue: consensusBlockValue,
Data: jsonBytes,
})

View File

@@ -64,7 +64,7 @@ func TestProduceBlockV2(t *testing.T) {
writer.Body = &bytes.Buffer{}
server.ProduceBlockV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := fmt.Sprintf(`{"version":"phase0","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"","data":%s}`, string(jsonBytes))
want := fmt.Sprintf(`{"version":"phase0","execution_payload_blinded":false,"execution_payload_value":"","consensus_block_value":"","data":%s}`, string(jsonBytes))
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
require.Equal(t, want, body)
require.Equal(t, "phase0", writer.Header().Get(api.VersionHeader))
@@ -84,7 +84,6 @@ func TestProduceBlockV2(t *testing.T) {
SkipMevBoost: true,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
return block.Message.ToGeneric()
}())
server := &Server{
@@ -98,7 +97,7 @@ func TestProduceBlockV2(t *testing.T) {
writer.Body = &bytes.Buffer{}
server.ProduceBlockV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := fmt.Sprintf(`{"version":"altair","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
want := fmt.Sprintf(`{"version":"altair","execution_payload_blinded":false,"execution_payload_value":"","consensus_block_value":"10","data":%s}`, string(jsonBytes))
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
require.Equal(t, want, body)
require.Equal(t, "altair", writer.Header().Get(api.VersionHeader))
@@ -118,7 +117,10 @@ func TestProduceBlockV2(t *testing.T) {
SkipMevBoost: true,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
return block.Message.ToGeneric()
b, err := block.Message.ToGeneric()
require.NoError(t, err)
b.PayloadValue = "2000"
return b, nil
}())
server := &Server{
V1Alpha1Server: v1alpha1Server,
@@ -132,7 +134,7 @@ func TestProduceBlockV2(t *testing.T) {
writer.Body = &bytes.Buffer{}
server.ProduceBlockV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":false,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
require.Equal(t, want, body)
require.Equal(t, "bellatrix", writer.Header().Get(api.VersionHeader))
@@ -184,7 +186,10 @@ func TestProduceBlockV2(t *testing.T) {
SkipMevBoost: true,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
return block.Message.ToGeneric()
b, err := block.Message.ToGeneric()
require.NoError(t, err)
b.PayloadValue = "2000"
return b, nil
}())
server := &Server{
V1Alpha1Server: v1alpha1Server,
@@ -198,7 +203,7 @@ func TestProduceBlockV2(t *testing.T) {
writer.Body = &bytes.Buffer{}
server.ProduceBlockV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := fmt.Sprintf(`{"version":"capella","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
want := fmt.Sprintf(`{"version":"capella","execution_payload_blinded":false,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
require.Equal(t, want, body)
require.Equal(t, "capella", writer.Header().Get(api.VersionHeader))
@@ -216,10 +221,7 @@ func TestProduceBlockV2(t *testing.T) {
SkipMevBoost: true,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
g, err := block.Message.ToGeneric()
require.NoError(t, err)
g.PayloadValue = 2000 //some fake value
return g, err
return block.Message.ToGeneric()
}())
server := &Server{
V1Alpha1Server: v1alpha1Server,
@@ -253,7 +255,10 @@ func TestProduceBlockV2(t *testing.T) {
SkipMevBoost: true,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
return block.ToUnsigned().ToGeneric()
b, err := block.ToUnsigned().ToGeneric()
require.NoError(t, err)
b.PayloadValue = "2000"
return b, nil
}())
server := &Server{
V1Alpha1Server: v1alpha1Server,
@@ -267,7 +272,7 @@ func TestProduceBlockV2(t *testing.T) {
writer.Body = &bytes.Buffer{}
server.ProduceBlockV2(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":false,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
require.Equal(t, want, body)
require.Equal(t, "deneb", writer.Header().Get(api.VersionHeader))
@@ -566,7 +571,7 @@ func TestProduceBlockV2SSZ(t *testing.T) {
func() (*eth.GenericBeaconBlock, error) {
g, err := block.Message.ToGeneric()
require.NoError(t, err)
g.PayloadValue = 2000 //some fake value
g.PayloadValue = "2000"
return g, err
}())
server := &Server{
@@ -780,7 +785,10 @@ func TestProduceBlindedBlock(t *testing.T) {
SkipMevBoost: false,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
return block.Message.ToGeneric()
b, err := block.Message.ToGeneric()
require.NoError(t, err)
b.PayloadValue = "2000"
return b, nil
}())
server := &Server{
V1Alpha1Server: v1alpha1Server,
@@ -794,7 +802,7 @@ func TestProduceBlindedBlock(t *testing.T) {
writer.Body = &bytes.Buffer{}
server.ProduceBlindedBlock(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":true,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":true,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
require.Equal(t, want, body)
require.Equal(t, "bellatrix", writer.Header().Get(api.VersionHeader))
@@ -848,7 +856,7 @@ func TestProduceBlindedBlock(t *testing.T) {
func() (*eth.GenericBeaconBlock, error) {
g, err := block.Message.ToGeneric()
require.NoError(t, err)
g.PayloadValue = 2000 //some fake value
g.PayloadValue = "2000"
return g, err
}())
server := &Server{
@@ -915,7 +923,10 @@ func TestProduceBlindedBlock(t *testing.T) {
SkipMevBoost: false,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
return block.Message.ToGeneric()
b, err := block.Message.ToGeneric()
require.NoError(t, err)
b.PayloadValue = "2000"
return b, nil
}())
server := &Server{
V1Alpha1Server: v1alpha1Server,
@@ -929,7 +940,7 @@ func TestProduceBlindedBlock(t *testing.T) {
writer.Body = &bytes.Buffer{}
server.ProduceBlindedBlock(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":true,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":true,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
require.Equal(t, want, body)
require.Equal(t, "deneb", writer.Header().Get(api.VersionHeader))
@@ -1027,11 +1038,11 @@ func TestProduceBlockV3(t *testing.T) {
writer.Body = &bytes.Buffer{}
server.ProduceBlockV3(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := fmt.Sprintf(`{"version":"phase0","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"","data":%s}`, string(jsonBytes))
want := fmt.Sprintf(`{"version":"phase0","execution_payload_blinded":false,"execution_payload_value":"","consensus_block_value":"","data":%s}`, string(jsonBytes))
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
require.Equal(t, want, body)
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "phase0", writer.Header().Get(api.VersionHeader))
require.Equal(t, "", writer.Header().Get(api.ConsensusBlockValueHeader))
})
@@ -1063,11 +1074,11 @@ func TestProduceBlockV3(t *testing.T) {
writer.Body = &bytes.Buffer{}
server.ProduceBlockV3(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := fmt.Sprintf(`{"version":"altair","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
want := fmt.Sprintf(`{"version":"altair","execution_payload_blinded":false,"execution_payload_value":"","consensus_block_value":"10","data":%s}`, string(jsonBytes))
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
require.Equal(t, want, body)
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "altair", writer.Header().Get(api.VersionHeader))
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
})
@@ -1085,7 +1096,10 @@ func TestProduceBlockV3(t *testing.T) {
SkipMevBoost: false,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
return block.Message.ToGeneric()
b, err := block.Message.ToGeneric()
require.NoError(t, err)
b.PayloadValue = "2000"
return b, nil
}())
server := &Server{
V1Alpha1Server: v1alpha1Server,
@@ -1098,11 +1112,11 @@ func TestProduceBlockV3(t *testing.T) {
writer.Body = &bytes.Buffer{}
server.ProduceBlockV3(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":false,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
require.Equal(t, want, body)
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "bellatrix", writer.Header().Get(api.VersionHeader))
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
})
@@ -1120,7 +1134,10 @@ func TestProduceBlockV3(t *testing.T) {
SkipMevBoost: false,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
return block.Message.ToGeneric()
b, err := block.Message.ToGeneric()
require.NoError(t, err)
b.PayloadValue = "2000"
return b, nil
}())
server := &Server{
V1Alpha1Server: v1alpha1Server,
@@ -1133,11 +1150,11 @@ func TestProduceBlockV3(t *testing.T) {
writer.Body = &bytes.Buffer{}
server.ProduceBlockV3(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":true,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
want := fmt.Sprintf(`{"version":"bellatrix","execution_payload_blinded":true,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
require.Equal(t, want, body)
require.Equal(t, "true", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "bellatrix", writer.Header().Get(api.VersionHeader))
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
})
@@ -1155,7 +1172,10 @@ func TestProduceBlockV3(t *testing.T) {
SkipMevBoost: false,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
return block.Message.ToGeneric()
b, err := block.Message.ToGeneric()
require.NoError(t, err)
b.PayloadValue = "2000"
return b, nil
}())
server := &Server{
V1Alpha1Server: v1alpha1Server,
@@ -1168,11 +1188,11 @@ func TestProduceBlockV3(t *testing.T) {
writer.Body = &bytes.Buffer{}
server.ProduceBlockV3(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := fmt.Sprintf(`{"version":"capella","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
want := fmt.Sprintf(`{"version":"capella","execution_payload_blinded":false,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
require.Equal(t, want, body)
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "capella", writer.Header().Get(api.VersionHeader))
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
})
@@ -1192,7 +1212,7 @@ func TestProduceBlockV3(t *testing.T) {
func() (*eth.GenericBeaconBlock, error) {
g, err := block.Message.ToGeneric()
require.NoError(t, err)
g.PayloadValue = 2000 //some fake value
g.PayloadValue = "2000"
return g, err
}())
server := &Server{
@@ -1228,7 +1248,10 @@ func TestProduceBlockV3(t *testing.T) {
SkipMevBoost: false,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
return block.ToUnsigned().ToGeneric()
b, err := block.ToUnsigned().ToGeneric()
require.NoError(t, err)
b.PayloadValue = "2000"
return b, nil
}())
server := &Server{
V1Alpha1Server: v1alpha1Server,
@@ -1241,11 +1264,11 @@ func TestProduceBlockV3(t *testing.T) {
writer.Body = &bytes.Buffer{}
server.ProduceBlockV3(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":false,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":false,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
require.Equal(t, want, body)
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "deneb", writer.Header().Get(api.VersionHeader))
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
})
@@ -1263,7 +1286,10 @@ func TestProduceBlockV3(t *testing.T) {
SkipMevBoost: false,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
return block.Message.ToGeneric()
b, err := block.Message.ToGeneric()
require.NoError(t, err)
b.PayloadValue = "2000"
return b, nil
}())
server := &Server{
V1Alpha1Server: v1alpha1Server,
@@ -1276,11 +1302,11 @@ func TestProduceBlockV3(t *testing.T) {
writer.Body = &bytes.Buffer{}
server.ProduceBlockV3(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":true,"execution_payload_value":"0","consensus_block_value":"10","data":%s}`, string(jsonBytes))
want := fmt.Sprintf(`{"version":"deneb","execution_payload_blinded":true,"execution_payload_value":"2000","consensus_block_value":"10","data":%s}`, string(jsonBytes))
body := strings.ReplaceAll(writer.Body.String(), "\n", "")
require.Equal(t, want, body)
require.Equal(t, "true", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "deneb", writer.Header().Get(api.VersionHeader))
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
})
@@ -1383,7 +1409,7 @@ func TestProduceBlockV3SSZ(t *testing.T) {
require.NoError(t, err)
require.Equal(t, string(ssz), writer.Body.String())
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "phase0", writer.Header().Get(api.VersionHeader))
require.Equal(t, "", writer.Header().Get(api.ConsensusBlockValueHeader))
})
@@ -1421,7 +1447,7 @@ func TestProduceBlockV3SSZ(t *testing.T) {
require.NoError(t, err)
require.Equal(t, string(ssz), writer.Body.String())
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "altair", writer.Header().Get(api.VersionHeader))
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
})
@@ -1437,7 +1463,10 @@ func TestProduceBlockV3SSZ(t *testing.T) {
SkipMevBoost: false,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
return block.Message.ToGeneric()
b, err := block.Message.ToGeneric()
require.NoError(t, err)
b.PayloadValue = "2000"
return b, nil
}())
mockChainService := &blockchainTesting.ChainService{}
server := &Server{
@@ -1460,7 +1489,7 @@ func TestProduceBlockV3SSZ(t *testing.T) {
require.NoError(t, err)
require.Equal(t, string(ssz), writer.Body.String())
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "bellatrix", writer.Header().Get(api.VersionHeader))
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
})
@@ -1476,7 +1505,10 @@ func TestProduceBlockV3SSZ(t *testing.T) {
SkipMevBoost: false,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
return block.Message.ToGeneric()
b, err := block.Message.ToGeneric()
require.NoError(t, err)
b.PayloadValue = "2000"
return b, nil
}())
server := &Server{
V1Alpha1Server: v1alpha1Server,
@@ -1498,7 +1530,7 @@ func TestProduceBlockV3SSZ(t *testing.T) {
require.NoError(t, err)
require.Equal(t, string(ssz), writer.Body.String())
require.Equal(t, "true", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "bellatrix", writer.Header().Get(api.VersionHeader))
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
})
@@ -1514,7 +1546,10 @@ func TestProduceBlockV3SSZ(t *testing.T) {
SkipMevBoost: false,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
return block.Message.ToGeneric()
b, err := block.Message.ToGeneric()
require.NoError(t, err)
b.PayloadValue = "2000"
return b, nil
}())
server := &Server{
V1Alpha1Server: v1alpha1Server,
@@ -1536,7 +1571,7 @@ func TestProduceBlockV3SSZ(t *testing.T) {
require.NoError(t, err)
require.Equal(t, string(ssz), writer.Body.String())
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "capella", writer.Header().Get(api.VersionHeader))
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
})
@@ -1554,7 +1589,7 @@ func TestProduceBlockV3SSZ(t *testing.T) {
func() (*eth.GenericBeaconBlock, error) {
g, err := block.Message.ToGeneric()
require.NoError(t, err)
g.PayloadValue = 2000 //some fake value
g.PayloadValue = "2000"
return g, err
}())
server := &Server{
@@ -1593,7 +1628,10 @@ func TestProduceBlockV3SSZ(t *testing.T) {
SkipMevBoost: false,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
return block.ToUnsigned().ToGeneric()
b, err := block.ToUnsigned().ToGeneric()
require.NoError(t, err)
b.PayloadValue = "2000"
return b, nil
}())
server := &Server{
V1Alpha1Server: v1alpha1Server,
@@ -1615,7 +1653,7 @@ func TestProduceBlockV3SSZ(t *testing.T) {
require.NoError(t, err)
require.Equal(t, string(ssz), writer.Body.String())
require.Equal(t, "false", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "deneb", writer.Header().Get(api.VersionHeader))
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
})
@@ -1631,7 +1669,10 @@ func TestProduceBlockV3SSZ(t *testing.T) {
SkipMevBoost: false,
}).Return(
func() (*eth.GenericBeaconBlock, error) {
return block.Message.ToGeneric()
b, err := block.Message.ToGeneric()
require.NoError(t, err)
b.PayloadValue = "2000"
return b, nil
}())
server := &Server{
V1Alpha1Server: v1alpha1Server,
@@ -1653,7 +1694,7 @@ func TestProduceBlockV3SSZ(t *testing.T) {
require.NoError(t, err)
require.Equal(t, string(ssz), writer.Body.String())
require.Equal(t, "true", writer.Header().Get(api.ExecutionPayloadBlindedHeader))
require.Equal(t, "0", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "2000", writer.Header().Get(api.ExecutionPayloadValueHeader))
require.Equal(t, "deneb", writer.Header().Get(api.VersionHeader))
require.Equal(t, "10", writer.Header().Get(api.ConsensusBlockValueHeader))
})

View File

@@ -1198,14 +1198,14 @@ func TestGetAttestationData(t *testing.T) {
func TestProduceSyncCommitteeContribution(t *testing.T) {
root := bytesutil.PadTo([]byte("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"), 32)
sig := bls.NewAggregateSignature().Marshal()
messsage := &ethpbalpha.SyncCommitteeMessage{
message := &ethpbalpha.SyncCommitteeMessage{
Slot: 1,
BlockRoot: root,
ValidatorIndex: 0,
Signature: sig,
}
syncCommitteePool := synccommittee.NewStore()
require.NoError(t, syncCommitteePool.SaveSyncCommitteeMessage(messsage))
require.NoError(t, syncCommitteePool.SaveSyncCommitteeMessage(message))
server := Server{
CoreService: &core.Service{
HeadFetcher: &mockChain.ChainService{

View File

@@ -24,6 +24,7 @@ go_library(
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

View File

@@ -19,6 +19,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/time/slots"
log "github.com/sirupsen/logrus"
)
// BlockIdParseError represents an error scenario where a block ID could not be parsed.
@@ -210,7 +211,10 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
if len(indices) == 0 {
m, err := p.BlobStorage.Indices(bytesutil.ToBytes32(root))
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "could not retrieve blob indices for root %#x", root), Reason: core.Internal}
log.WithFields(log.Fields{
"block root": hexutil.Encode(root),
}).Error(errors.Wrapf(err, "could not retrieve blob indices for root %#x", root))
return nil, &core.RpcError{Err: fmt.Errorf("could not retrieve blob indices for root %#x", root), Reason: core.Internal}
}
for k, v := range m {
if v {
@@ -223,7 +227,11 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
for i, index := range indices {
vblob, err := p.BlobStorage.Get(bytesutil.ToBytes32(root), index)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "could not retrieve blob for block root %#x at index %d", root, index), Reason: core.Internal}
log.WithFields(log.Fields{
"block root": hexutil.Encode(root),
"blob index": index,
}).Error(errors.Wrapf(err, "could not retrieve blob for block root %#x at index %d", root, index))
return nil, &core.RpcError{Err: fmt.Errorf("could not retrieve blob for block root %#x at index %d", root, index), Reason: core.Internal}
}
blobs[i] = &vblob
}

View File

@@ -4,7 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"aggregator.go",
"assignments.go",
"duties.go",
"attester.go",
"blocks.go",
"construct_generic_block.go",
@@ -76,6 +76,7 @@ go_library(
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
@@ -178,7 +179,7 @@ go_test(
timeout = "moderate",
srcs = [
"aggregator_test.go",
"assignments_test.go",
"duties_test.go",
"attester_test.go",
"blocks_test.go",
"construct_generic_block_test.go",

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/math"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
@@ -22,7 +23,7 @@ func (vs *Server) constructGenericBeaconBlock(sBlk interfaces.SignedBeaconBlock,
}
isBlinded := sBlk.IsBlinded()
payloadValue := sBlk.ValueInGwei()
payloadValue := sBlk.ValueInWei()
switch sBlk.Version() {
case version.Deneb:
@@ -41,30 +42,30 @@ func (vs *Server) constructGenericBeaconBlock(sBlk interfaces.SignedBeaconBlock,
}
// Helper functions for constructing blocks for each version
func (vs *Server) constructDenebBlock(blockProto proto.Message, isBlinded bool, payloadValue uint64, bundle *enginev1.BlobsBundle) *ethpb.GenericBeaconBlock {
func (vs *Server) constructDenebBlock(blockProto proto.Message, isBlinded bool, payloadValue math.Wei, bundle *enginev1.BlobsBundle) *ethpb.GenericBeaconBlock {
if isBlinded {
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedDeneb{BlindedDeneb: blockProto.(*ethpb.BlindedBeaconBlockDeneb)}, IsBlinded: true, PayloadValue: payloadValue}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedDeneb{BlindedDeneb: blockProto.(*ethpb.BlindedBeaconBlockDeneb)}, IsBlinded: true, PayloadValue: (*payloadValue).String()}
}
denebContents := &ethpb.BeaconBlockContentsDeneb{Block: blockProto.(*ethpb.BeaconBlockDeneb)}
if bundle != nil {
denebContents.KzgProofs = bundle.Proofs
denebContents.Blobs = bundle.Blobs
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Deneb{Deneb: denebContents}, IsBlinded: false, PayloadValue: payloadValue}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Deneb{Deneb: denebContents}, IsBlinded: false, PayloadValue: (*payloadValue).String()}
}
func (vs *Server) constructCapellaBlock(pb proto.Message, isBlinded bool, payloadValue uint64) *ethpb.GenericBeaconBlock {
func (vs *Server) constructCapellaBlock(pb proto.Message, isBlinded bool, payloadValue math.Wei) *ethpb.GenericBeaconBlock {
if isBlinded {
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedCapella{BlindedCapella: pb.(*ethpb.BlindedBeaconBlockCapella)}, IsBlinded: true, PayloadValue: payloadValue}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedCapella{BlindedCapella: pb.(*ethpb.BlindedBeaconBlockCapella)}, IsBlinded: true, PayloadValue: (*payloadValue).String()}
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Capella{Capella: pb.(*ethpb.BeaconBlockCapella)}, IsBlinded: false, PayloadValue: payloadValue}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Capella{Capella: pb.(*ethpb.BeaconBlockCapella)}, IsBlinded: false, PayloadValue: (*payloadValue).String()}
}
func (vs *Server) constructBellatrixBlock(pb proto.Message, isBlinded bool, payloadValue uint64) *ethpb.GenericBeaconBlock {
func (vs *Server) constructBellatrixBlock(pb proto.Message, isBlinded bool, payloadValue math.Wei) *ethpb.GenericBeaconBlock {
if isBlinded {
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: pb.(*ethpb.BlindedBeaconBlockBellatrix)}, IsBlinded: true, PayloadValue: payloadValue}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: pb.(*ethpb.BlindedBeaconBlockBellatrix)}, IsBlinded: true, PayloadValue: (*payloadValue).String()}
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Bellatrix{Bellatrix: pb.(*ethpb.BeaconBlockBellatrix)}, IsBlinded: false, PayloadValue: payloadValue}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Bellatrix{Bellatrix: pb.(*ethpb.BeaconBlockBellatrix)}, IsBlinded: false, PayloadValue: (*payloadValue).String()}
}
func (vs *Server) constructAltairBlock(pb proto.Message) *ethpb.GenericBeaconBlock {
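A minimal sketch, assuming math.Wei wraps a *big.Int (as the (*payloadValue).String() calls above suggest), of how a Wei payload value becomes the decimal string carried in GenericBeaconBlock.PayloadValue:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Hypothetical payload value in wei; 2000 matches the fixtures used in the handler tests above.
	payloadValue := big.NewInt(2000)
	// The base-10 string form is what lands in PayloadValue and is echoed back
	// through api.ExecutionPayloadValueHeader in the tests above.
	fmt.Println(payloadValue.String()) // prints "2000"
}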

View File

@@ -46,7 +46,7 @@ func TestServer_setExecutionData(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
capellaTransitionState, _ := util.DeterministicGenesisStateCapella(t, 1)
wrappedHeaderCapella, err := blocks.WrappedExecutionPayloadHeaderCapella(&v1.ExecutionPayloadHeaderCapella{BlockNumber: 1}, 0)
wrappedHeaderCapella, err := blocks.WrappedExecutionPayloadHeaderCapella(&v1.ExecutionPayloadHeaderCapella{BlockNumber: 1}, big.NewInt(0))
require.NoError(t, err)
require.NoError(t, capellaTransitionState.SetLatestExecutionPayloadHeader(wrappedHeaderCapella))
b2pbCapella := util.NewBeaconBlockCapella()
@@ -59,7 +59,7 @@ func TestServer_setExecutionData(t *testing.T) {
require.NoError(t, beaconDB.SaveFeeRecipientsByValidatorIDs(context.Background(), []primitives.ValidatorIndex{0}, []common.Address{{}}))
denebTransitionState, _ := util.DeterministicGenesisStateDeneb(t, 1)
wrappedHeaderDeneb, err := blocks.WrappedExecutionPayloadHeaderDeneb(&v1.ExecutionPayloadHeaderDeneb{BlockNumber: 2}, 0)
wrappedHeaderDeneb, err := blocks.WrappedExecutionPayloadHeaderDeneb(&v1.ExecutionPayloadHeaderDeneb{BlockNumber: 2}, big.NewInt(0))
require.NoError(t, err)
require.NoError(t, denebTransitionState.SetLatestExecutionPayloadHeader(wrappedHeaderDeneb))
b2pbDeneb := util.NewBeaconBlockDeneb()
@@ -356,7 +356,7 @@ func TestServer_setExecutionData(t *testing.T) {
t.Run("Builder configured. Local block has higher value", func(t *testing.T) {
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
require.NoError(t, err)
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: 2}
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: 2 * 1e9}
b := blk.Block()
localPayload, _, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
require.NoError(t, err)
@@ -377,7 +377,7 @@ func TestServer_setExecutionData(t *testing.T) {
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
require.NoError(t, err)
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: 1}
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: 1 * 1e9}
b := blk.Block()
localPayload, _, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
require.NoError(t, err)
@@ -748,7 +748,7 @@ func TestServer_getPayloadHeader(t *testing.T) {
require.DeepEqual(t, want, h)
}
if tc.returnedHeaderCapella != nil {
want, err := blocks.WrappedExecutionPayloadHeaderCapella(tc.returnedHeaderCapella, 0) // value is a mock
want, err := blocks.WrappedExecutionPayloadHeaderCapella(tc.returnedHeaderCapella, big.NewInt(197121)) // value is a mock
require.NoError(t, err)
require.DeepEqual(t, want, h)
}
@@ -805,7 +805,7 @@ func Test_matchingWithdrawalsRoot(t *testing.T) {
})
t.Run("could not get builder withdrawals root", func(t *testing.T) {
local := &v1.ExecutionPayloadCapella{}
p, err := blocks.WrappedExecutionPayloadCapella(local, 0)
p, err := blocks.WrappedExecutionPayloadCapella(local, big.NewInt(0))
require.NoError(t, err)
header := &v1.ExecutionPayloadHeader{}
h, err := blocks.WrappedExecutionPayloadHeader(header)
@@ -815,10 +815,10 @@ func Test_matchingWithdrawalsRoot(t *testing.T) {
})
t.Run("withdrawals mismatch", func(t *testing.T) {
local := &v1.ExecutionPayloadCapella{}
p, err := blocks.WrappedExecutionPayloadCapella(local, 0)
p, err := blocks.WrappedExecutionPayloadCapella(local, big.NewInt(0))
require.NoError(t, err)
header := &v1.ExecutionPayloadHeaderCapella{}
h, err := blocks.WrappedExecutionPayloadHeaderCapella(header, 0)
h, err := blocks.WrappedExecutionPayloadHeaderCapella(header, big.NewInt(0))
require.NoError(t, err)
matched, err := matchingWithdrawalsRoot(p, h)
require.NoError(t, err)
@@ -832,13 +832,13 @@ func Test_matchingWithdrawalsRoot(t *testing.T) {
Amount: 3,
}}
local := &v1.ExecutionPayloadCapella{Withdrawals: wds}
p, err := blocks.WrappedExecutionPayloadCapella(local, 0)
p, err := blocks.WrappedExecutionPayloadCapella(local, big.NewInt(0))
require.NoError(t, err)
header := &v1.ExecutionPayloadHeaderCapella{}
wr, err := ssz.WithdrawalSliceRoot(wds, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
header.WithdrawalsRoot = wr[:]
h, err := blocks.WrappedExecutionPayloadHeaderCapella(header, 0)
h, err := blocks.WrappedExecutionPayloadHeaderCapella(header, big.NewInt(0))
require.NoError(t, err)
matched, err := matchingWithdrawalsRoot(p, h)
require.NoError(t, err)

View File

@@ -3,6 +3,7 @@ package validator
import (
"context"
"errors"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
@@ -60,7 +61,7 @@ func TestServer_getExecutionPayload(t *testing.T) {
}))
capellaTransitionState, _ := util.DeterministicGenesisStateCapella(t, 1)
wrappedHeaderCapella, err := blocks.WrappedExecutionPayloadHeaderCapella(&pb.ExecutionPayloadHeaderCapella{BlockNumber: 1}, 0)
wrappedHeaderCapella, err := blocks.WrappedExecutionPayloadHeaderCapella(&pb.ExecutionPayloadHeaderCapella{BlockNumber: 1}, big.NewInt(0))
require.NoError(t, err)
require.NoError(t, capellaTransitionState.SetLatestExecutionPayloadHeader(wrappedHeaderCapella))
b2pbCapella := util.NewBeaconBlockCapella()

View File

@@ -296,7 +296,7 @@ func (s *Service) initializeDebugServerRoutes(debugServer *debug.Server) {
s.cfg.Router.HandleFunc("/eth/v1/debug/beacon/states/{state_id}", debugServer.GetBeaconStateSSZ).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v2/debug/beacon/states/{state_id}", debugServer.GetBeaconStateV2).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v2/debug/beacon/heads", debugServer.GetForkChoiceHeadsV2).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v2/debug/fork_choice", debugServer.GetForkChoice).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/debug/fork_choice", debugServer.GetForkChoice).Methods(http.MethodGet)
}
// prysm internal routes

View File

@@ -36,6 +36,18 @@ func init() {
logrus.SetOutput(io.Discard)
}
func combineMaps(maps ...map[string][]string) map[string][]string {
combinedMap := make(map[string][]string)
for _, m := range maps {
for k, v := range m {
combinedMap[k] = v
}
}
return combinedMap
}
func TestServer_InitializeRoutes(t *testing.T) {
s := Service{
cfg: &Config{
@@ -58,80 +70,106 @@ func TestServer_InitializeRoutes(t *testing.T) {
s.initializePrysmNodeServerRoutes(&nodeprysm.Server{})
s.initializePrysmValidatorServerRoutes(&validatorprysm.Server{})
wantRouteList := map[string][]string{
"/eth/v1/beacon/rewards/blocks/{block_id}": {http.MethodGet},
"/eth/v1/beacon/rewards/attestations/{epoch}": {http.MethodPost},
"/eth/v1/beacon/rewards/sync_committee/{block_id}": {http.MethodPost},
"/eth/v1/builder/states/{state_id}/expected_withdrawals": {http.MethodGet},
"/eth/v1/beacon/blob_sidecars/{block_id}": {http.MethodGet},
"/eth/v1/validator/aggregate_attestation": {http.MethodGet},
"/eth/v1/validator/contribution_and_proofs": {http.MethodPost},
"/eth/v1/validator/aggregate_and_proofs": {http.MethodPost},
"/eth/v1/validator/sync_committee_contribution": {http.MethodGet},
"/eth/v1/validator/sync_committee_subscriptions": {http.MethodPost},
"/eth/v1/validator/beacon_committee_subscriptions": {http.MethodPost},
"/eth/v1/validator/attestation_data": {http.MethodGet},
"/eth/v1/validator/register_validator": {http.MethodPost},
"/eth/v1/validator/duties/attester/{epoch}": {http.MethodPost},
"/eth/v1/validator/duties/proposer/{epoch}": {http.MethodGet},
"/eth/v1/validator/duties/sync/{epoch}": {http.MethodPost},
"/eth/v1/validator/prepare_beacon_proposer": {http.MethodPost},
"/eth/v1/validator/liveness/{epoch}": {http.MethodPost},
"/eth/v2/validator/blocks/{slot}": {http.MethodGet},
"/eth/v1/validator/blinded_blocks/{slot}": {http.MethodGet},
"/eth/v3/validator/blocks/{slot}": {http.MethodGet},
"/eth/v1/node/syncing": {http.MethodGet},
"/eth/v1/node/identity": {http.MethodGet},
"/eth/v1/node/peers/{peer_id}": {http.MethodGet},
"/eth/v1/node/peers": {http.MethodGet},
"/eth/v1/node/peer_count": {http.MethodGet},
"/eth/v1/node/version": {http.MethodGet},
"/eth/v1/node/health": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/committees": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/fork": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/root": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/sync_committees": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/randao": {http.MethodGet},
"/eth/v1/beacon/blocks": {http.MethodPost},
"/eth/v1/beacon/blinded_blocks": {http.MethodPost},
"/eth/v2/beacon/blocks": {http.MethodPost},
"/eth/v2/beacon/blinded_blocks": {http.MethodPost},
"/eth/v1/beacon/blocks/{block_id}": {http.MethodGet},
"/eth/v2/beacon/blocks/{block_id}": {http.MethodGet},
"/eth/v1/beacon/blocks/{block_id}/attestations": {http.MethodGet},
"/eth/v1/beacon/blinded_blocks/{block_id}": {http.MethodGet},
"/eth/v1/beacon/blocks/{block_id}/root": {http.MethodGet},
"/eth/v1/beacon/pool/attestations": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/voluntary_exits": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/sync_committees": {http.MethodPost},
"/eth/v1/beacon/pool/bls_to_execution_changes": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/attester_slashings": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/proposer_slashings": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/headers": {http.MethodGet},
"/eth/v1/beacon/headers/{block_id}": {http.MethodGet},
beaconRoutes := map[string][]string{
"/eth/v1/beacon/genesis": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/root": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/fork": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/finality_checkpoints": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/validators": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/states/{state_id}/validators/{validator_id}": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/validator_balances": {http.MethodGet, http.MethodPost},
"/eth/v1/config/deposit_contract": {http.MethodGet},
"/eth/v1/config/fork_schedule": {http.MethodGet},
"/eth/v1/config/spec": {http.MethodGet},
"/eth/v1/events": {http.MethodGet},
"/eth/v1/beacon/light_client/bootstrap/{block_root}": {http.MethodGet},
"/eth/v1/beacon/light_client/updates": {http.MethodGet},
"/eth/v1/beacon/light_client/finality_update": {http.MethodGet},
"/eth/v1/beacon/light_client/optimistic_update": {http.MethodGet},
"/eth/v1/debug/beacon/states/{state_id}": {http.MethodGet},
"/eth/v2/debug/beacon/states/{state_id}": {http.MethodGet},
"/eth/v2/debug/beacon/heads": {http.MethodGet},
"/eth/v2/debug/fork_choice": {http.MethodGet},
"/prysm/v1/beacon/weak_subjectivity": {http.MethodGet},
"/prysm/node/trusted_peers": {http.MethodGet, http.MethodPost},
"/prysm/node/trusted_peers/{peer_id}": {http.MethodDelete},
"/prysm/validators/performance": {http.MethodPost},
"/eth/v1/beacon/states/{state_id}/validator_count": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/committees": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/sync_committees": {http.MethodGet},
"/eth/v1/beacon/states/{state_id}/randao": {http.MethodGet},
"/eth/v1/beacon/headers": {http.MethodGet},
"/eth/v1/beacon/headers/{block_id}": {http.MethodGet},
"/eth/v1/beacon/blinded_blocks": {http.MethodPost},
"/eth/v2/beacon/blinded_blocks": {http.MethodPost},
"/eth/v1/beacon/blocks": {http.MethodPost},
"/eth/v2/beacon/blocks": {http.MethodPost},
"/eth/v1/beacon/blocks/{block_id}": {http.MethodGet}, //deprecated
"/eth/v2/beacon/blocks/{block_id}": {http.MethodGet},
"/eth/v1/beacon/blocks/{block_id}/root": {http.MethodGet},
"/eth/v1/beacon/blocks/{block_id}/attestations": {http.MethodGet},
"/eth/v1/beacon/blob_sidecars/{block_id}": {http.MethodGet},
"/eth/v1/beacon/rewards/sync_committee/{block_id}": {http.MethodPost},
//"/eth/v1/beacon/deposit_snapshot": {http.MethodGet}, not implemented
"/eth/v1/beacon/rewards/blocks/{block_id}": {http.MethodGet},
"/eth/v1/beacon/rewards/attestations/{epoch}": {http.MethodPost},
"/eth/v1/beacon/blinded_blocks/{block_id}": {http.MethodGet},
"/eth/v1/beacon/light_client/bootstrap/{block_root}": {http.MethodGet},
"/eth/v1/beacon/light_client/updates": {http.MethodGet},
"/eth/v1/beacon/light_client/finality_update": {http.MethodGet},
"/eth/v1/beacon/light_client/optimistic_update": {http.MethodGet},
"/eth/v1/beacon/pool/attestations": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/attester_slashings": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/proposer_slashings": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/sync_committees": {http.MethodPost},
"/eth/v1/beacon/pool/voluntary_exits": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/bls_to_execution_changes": {http.MethodGet, http.MethodPost},
}
builderRoutes := map[string][]string{
"/eth/v1/builder/states/{state_id}/expected_withdrawals": {http.MethodGet},
}
configRoutes := map[string][]string{
"/eth/v1/config/fork_schedule": {http.MethodGet},
"/eth/v1/config/spec": {http.MethodGet},
"/eth/v1/config/deposit_contract": {http.MethodGet},
}
debugRoutes := map[string][]string{
"/eth/v1/debug/beacon/states/{state_id}": {http.MethodGet}, //deprecated
"/eth/v2/debug/beacon/states/{state_id}": {http.MethodGet},
"/eth/v2/debug/beacon/heads": {http.MethodGet},
"/eth/v1/debug/fork_choice": {http.MethodGet},
}
eventsRoutes := map[string][]string{
"/eth/v1/events": {http.MethodGet},
}
nodeRoutes := map[string][]string{
"/eth/v1/node/identity": {http.MethodGet},
"/eth/v1/node/peers": {http.MethodGet},
"/eth/v1/node/peers/{peer_id}": {http.MethodGet},
"/eth/v1/node/peer_count": {http.MethodGet},
"/eth/v1/node/version": {http.MethodGet},
"/eth/v1/node/syncing": {http.MethodGet},
"/eth/v1/node/health": {http.MethodGet},
}
validatorRoutes := map[string][]string{
"/eth/v1/validator/duties/attester/{epoch}": {http.MethodPost},
"/eth/v1/validator/duties/proposer/{epoch}": {http.MethodGet},
"/eth/v1/validator/duties/sync/{epoch}": {http.MethodPost},
"/eth/v2/validator/blocks/{slot}": {http.MethodGet}, //deprecated
"/eth/v3/validator/blocks/{slot}": {http.MethodGet},
"/eth/v1/validator/blinded_blocks/{slot}": {http.MethodGet}, //deprecated
"/eth/v1/validator/attestation_data": {http.MethodGet},
"/eth/v1/validator/aggregate_attestation": {http.MethodGet},
"/eth/v1/validator/aggregate_and_proofs": {http.MethodPost},
"/eth/v1/validator/beacon_committee_subscriptions": {http.MethodPost},
"/eth/v1/validator/sync_committee_subscriptions": {http.MethodPost},
//"/eth/v1/validator/beacon_committee_selections": {http.MethodPost}, // not implemented
"/eth/v1/validator/sync_committee_contribution": {http.MethodGet},
//"/eth/v1/validator/sync_committee_selections": {http.MethodPost}, // not implemented
"/eth/v1/validator/contribution_and_proofs": {http.MethodPost},
"/eth/v1/validator/prepare_beacon_proposer": {http.MethodPost},
"/eth/v1/validator/register_validator": {http.MethodPost},
"/eth/v1/validator/liveness/{epoch}": {http.MethodPost},
}
prysmCustomRoutes := map[string][]string{
"/prysm/v1/beacon/weak_subjectivity": {http.MethodGet},
"/prysm/node/trusted_peers": {http.MethodGet, http.MethodPost},
"/prysm/node/trusted_peers/{peer_id}": {http.MethodDelete},
"/prysm/validators/performance": {http.MethodPost},
"/eth/v1/beacon/states/{state_id}/validator_count": {http.MethodGet},
}
wantRouteList := combineMaps(beaconRoutes, builderRoutes, configRoutes, debugRoutes, eventsRoutes, nodeRoutes, validatorRoutes, prysmCustomRoutes)
gotRouteList := make(map[string][]string)
err := s.cfg.Router.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
tpl, err1 := route.GetPathTemplate()

View File

@@ -60,7 +60,7 @@ func NewFieldTrie(field types.FieldIndex, fieldInfo types.DataType, elements int
if err := validateElements(field, fieldInfo, elements, length); err != nil {
return nil, err
}
numOfElems := 0
var numOfElems int
if val, ok := elements.(sliceAccessor); ok {
numOfElems = val.Len(val.State())
} else {

View File

@@ -121,6 +121,7 @@ type ReadOnlyValidators interface {
ValidatorAtIndex(idx primitives.ValidatorIndex) (*ethpb.Validator, error)
ValidatorAtIndexReadOnly(idx primitives.ValidatorIndex) (ReadOnlyValidator, error)
ValidatorIndexByPubkey(key [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool)
PublicKeys() ([][fieldparams.BLSPubkeyLength]byte, error)
PubkeyAtIndex(idx primitives.ValidatorIndex) [fieldparams.BLSPubkeyLength]byte
NumValidators() int
ReadFromEveryValidator(f func(idx int, val ReadOnlyValidator) error) error

View File

@@ -1,6 +1,8 @@
package state_native
import (
"math/big"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
@@ -22,10 +24,10 @@ func (b *BeaconState) LatestExecutionPayloadHeader() (interfaces.ExecutionData,
}
if b.version == version.Capella {
return blocks.WrappedExecutionPayloadHeaderCapella(b.latestExecutionPayloadHeaderCapellaVal(), 0)
return blocks.WrappedExecutionPayloadHeaderCapella(b.latestExecutionPayloadHeaderCapellaVal(), big.NewInt(0))
}
return blocks.WrappedExecutionPayloadHeaderDeneb(b.latestExecutionPayloadHeaderDenebVal(), 0)
return blocks.WrappedExecutionPayloadHeaderDeneb(b.latestExecutionPayloadHeaderDenebVal(), big.NewInt(0))
}
// latestExecutionPayloadHeaderVal of the beacon state.

View File

@@ -181,6 +181,27 @@ func (b *BeaconState) PubkeyAtIndex(idx primitives.ValidatorIndex) [fieldparams.
return bytesutil.ToBytes48(v.PublicKey)
}
// PublicKeys builds a list of all validator public keys, with each key's index aligned to its validator index.
func (b *BeaconState) PublicKeys() ([][fieldparams.BLSPubkeyLength]byte, error) {
b.lock.RLock()
defer b.lock.RUnlock()
l := b.validatorsLen()
res := make([][fieldparams.BLSPubkeyLength]byte, l)
for i := 0; i < l; i++ {
if features.Get().EnableExperimentalState {
val, err := b.validatorsMultiValue.At(b, uint64(i))
if err != nil {
return nil, err
}
copy(res[i][:], val.PublicKey)
} else {
copy(res[i][:], b.validators[i].PublicKey)
}
}
return res, nil
}
// NumValidators returns the size of the validator registry.
func (b *BeaconState) NumValidators() int {
b.lock.RLock()

View File

@@ -2,6 +2,7 @@ package state_native_test
import (
"context"
"math/big"
"testing"
"github.com/prysmaticlabs/go-bitfield"
@@ -253,7 +254,7 @@ func TestComputeFieldRootsWithHasher_Capella(t *testing.T) {
require.NoError(t, beaconState.SetInactivityScores([]uint64{1, 2, 3}))
require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee("current")))
require.NoError(t, beaconState.SetNextSyncCommittee(syncCommittee("next")))
wrappedHeader, err := blocks.WrappedExecutionPayloadHeaderCapella(executionPayloadHeaderCapella(), 0)
wrappedHeader, err := blocks.WrappedExecutionPayloadHeaderCapella(executionPayloadHeaderCapella(), big.NewInt(0))
require.NoError(t, err)
require.NoError(t, beaconState.SetLatestExecutionPayloadHeader(wrappedHeader))
require.NoError(t, beaconState.SetNextWithdrawalIndex(123))

View File

@@ -31,7 +31,7 @@ go_library(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/sync/backfill:go_default_library",
"//beacon-chain/sync/backfill/coverage:go_default_library",
"//cache/lru:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -332,9 +332,8 @@ func (s *State) CombinedCache() *CombinedCache {
}
func (s *State) slotAvailable(slot primitives.Slot) bool {
// default to assuming node was initialized from genesis - backfill only needs to be specified for checkpoint sync
if s.backfillStatus == nil {
if s.avb == nil {
return true
}
return s.backfillStatus.SlotCovered(slot)
return s.avb.AvailableBlock(slot)
}

View File

@@ -13,7 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill/coverage"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
@@ -51,7 +51,7 @@ type State struct {
finalizedInfo *finalizedInfo
epochBoundaryStateCache *epochBoundaryState
saveHotStateDB *saveHotStateDbConfig
backfillStatus *backfill.Status
avb coverage.AvailableBlocker
migrationLock *sync.Mutex
fc forkchoice.ForkChoicer
}
@@ -78,9 +78,11 @@ type finalizedInfo struct {
// Option is a functional option for controlling the initialization of a *State value
type Option func(*State)
func WithBackfillStatus(bfs *backfill.Status) Option {
// WithAvailableBlocker gives stategen an AvailableBlocker, which is used to determine if a given
// block is available. This is necessary because backfill creates a hole in the block history.
func WithAvailableBlocker(avb coverage.AvailableBlocker) Option {
return func(sg *State) {
sg.backfillStatus = bfs
sg.avb = avb
}
}
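A minimal sketch, assuming coverage.AvailableBlocker is the single-method interface implied by the AvailableBlock(slot) call in slotAvailable above; the always-true implementation mirrors the nil default used for nodes synced from genesis:

package coverage

import "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"

// AvailableBlocker reports whether the block history covering a slot is present locally (assumed shape).
type AvailableBlocker interface {
	AvailableBlock(primitives.Slot) bool
}

// genesisAvailability treats every slot as available, matching stategen's behavior when no blocker is configured.
type genesisAvailability struct{}

func (genesisAvailability) AvailableBlock(primitives.Slot) bool { return true }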

View File

@@ -2,30 +2,78 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["status.go"],
srcs = [
"batch.go",
"batcher.go",
"metrics.go",
"pool.go",
"service.go",
"status.go",
"verify.go",
"worker.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime:go_default_library",
"//time/slots:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["status_test.go"],
srcs = [
"batcher_test.go",
"pool_test.go",
"service_test.go",
"status_test.go",
"verify_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/blocks/testing:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//proto/dbval:go_default_library",
"//runtime/interop:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -0,0 +1,141 @@
package backfill
import (
"fmt"
"time"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
)
// ErrChainBroken indicates a backfill batch can't be imported to the db because it is not known to be the ancestor
// of the canonical chain.
var ErrChainBroken = errors.New("batch is not the ancestor of a known finalized root")
type batchState int
func (s batchState) String() string {
switch s {
case batchNil:
return "nil"
case batchInit:
return "init"
case batchSequenced:
return "sequenced"
case batchErrRetryable:
return "error_retryable"
case batchImportable:
return "importable"
case batchImportComplete:
return "import_complete"
case batchEndSequence:
return "end_sequence"
default:
return "unknown"
}
}
const (
batchNil batchState = iota
batchInit
batchSequenced
batchErrRetryable
batchImportable
batchImportComplete
batchEndSequence
)
type batchId string
type batch struct {
firstScheduled time.Time
scheduled time.Time
seq int // sequence identifier, ie how many times the sequence() method has served this batch
retries int
begin primitives.Slot
end primitives.Slot // half-open interval, [begin, end), ie >= begin, < end.
results VerifiedROBlocks
err error
state batchState
pid peer.ID
}
func (b batch) logFields() log.Fields {
return map[string]interface{}{
"batch_id": b.id(),
"state": b.state.String(),
"scheduled": b.scheduled.String(),
"seq": b.seq,
"retries": b.retries,
"begin": b.begin,
"end": b.end,
"pid": b.pid,
}
}
func (b batch) replaces(r batch) bool {
if r.state == batchImportComplete {
return false
}
if b.begin != r.begin {
return false
}
if b.end != r.end {
return false
}
return b.seq >= r.seq
}
func (b batch) id() batchId {
return batchId(fmt.Sprintf("%d:%d", b.begin, b.end))
}
func (b batch) ensureParent(expected [32]byte) error {
tail := b.results[len(b.results)-1]
if tail.Root() != expected {
return errors.Wrapf(ErrChainBroken, "last parent_root=%#x, tail root=%#x", expected, tail.Root())
}
return nil
}
func (b batch) request() *eth.BeaconBlocksByRangeRequest {
return &eth.BeaconBlocksByRangeRequest{
StartSlot: b.begin,
Count: uint64(b.end - b.begin),
Step: 1,
}
}
func (b batch) withState(s batchState) batch {
if s == batchSequenced {
b.scheduled = time.Now()
switch b.state {
case batchErrRetryable:
b.retries += 1
log.WithFields(b.logFields()).Info("sequencing batch for retry")
case batchInit, batchNil:
b.firstScheduled = b.scheduled
}
}
if s == batchImportComplete {
backfillBatchTimeRoundtrip.Observe(float64(time.Since(b.firstScheduled).Milliseconds()))
log.WithFields(b.logFields()).Debug("Backfill batch imported.")
}
b.state = s
b.seq += 1
return b
}
func (b batch) withPeer(p peer.ID) batch {
b.pid = p
backfillBatchTimeWaiting.Observe(float64(time.Since(b.scheduled).Milliseconds()))
return b
}
func (b batch) withRetryableError(err error) batch {
b.err = err
return b.withState(batchErrRetryable)
}
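A short sketch, assumed to live inside package backfill (it uses the unexported batch type plus the standard errors and fmt packages), showing one batch walking through the state machine defined above; the slots and error are purely illustrative:

func exampleBatchLifecycle() {
	b := batch{begin: 100, end: 164, state: batchInit} // covers the half-open span [100, 164)
	b = b.withState(batchSequenced)                     // handed to a worker; firstScheduled is recorded
	b = b.withRetryableError(errors.New("rpc timeout")) // download failed; state becomes batchErrRetryable
	b = b.withState(batchSequenced)                     // re-sequenced; retries is incremented to 1
	b = b.withState(batchImportable)                    // blocks downloaded and signatures verified
	b = b.withState(batchImportComplete)                // imported; the roundtrip metric is observed
	fmt.Printf("%s -> %s after %d retry\n", b.id(), b.state, b.retries) // 100:164 -> import_complete after 1 retry
}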

View File

@@ -0,0 +1,197 @@
package backfill
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)
var errMaxBatches = errors.New("backfill batch requested in excess of max outstanding batches")
var errEndSequence = errors.New("sequence has terminated, no more backfill batches will be produced")
var errCannotDecreaseMinimum = errors.New("the minimum backfill slot can only be increased, not decreased")
type batchSequencer struct {
batcher batcher
seq []batch
}
// sequence() is meant as a verb "arrange in a particular order".
// sequence determines the next set of batches that should be worked on based on the state of the batches
// in its internal view. sequence relies on update() for updates to its view of the
// batches it has previously sequenced.
func (c *batchSequencer) sequence() ([]batch, error) {
s := make([]batch, 0)
// batch start slots are in descending order, c.seq[n].begin == c.seq[n+1].end
for i := range c.seq {
switch c.seq[i].state {
case batchInit, batchErrRetryable:
c.seq[i] = c.seq[i].withState(batchSequenced)
s = append(s, c.seq[i])
case batchNil:
// batchNil is the zero value of the batch type.
// This case means that we are initializing a batch that was created by the
// initial allocation of the seq slice, so the batcher needs to compute its bounds.
var b batch
if i == 0 {
// The first item in the list is a special case, subsequent items are initialized
// relative to the preceding batches.
b = c.batcher.before(c.batcher.max)
} else {
b = c.batcher.beforeBatch(c.seq[i-1])
}
c.seq[i] = b.withState(batchSequenced)
s = append(s, c.seq[i])
case batchEndSequence:
if len(s) == 0 {
s = append(s, c.seq[i])
}
default:
continue
}
}
if len(s) == 0 {
return nil, errMaxBatches
}
return s, nil
}
// update serves 2 roles.
// - updating batchSequencer's copy of the given batch.
// - removing batches that are completely imported from the sequence,
// so that they are not returned the next time import() is called, and populating
// seq with new batches that are ready to be worked on.
func (c *batchSequencer) update(b batch) {
done := 0
for i := 0; i < len(c.seq); i++ {
if b.replaces(c.seq[i]) {
c.seq[i] = b
}
// Assumes invariant that batches complete and update is called in order.
// This should be true because the code using the sequencer doesn't know the expected parent
// for a batch until it imports the previous batch.
if c.seq[i].state == batchImportComplete {
done += 1
continue
}
// Move the unfinished batches to overwrite the finished ones.
// eg consider [a,b,c,d,e] where a,b are done
// when i==2, done==2 (since done was incremented for a and b)
// so we want to copy c to a, then on i=3, d to b, then on i=4 e to c.
c.seq[i-done] = c.seq[i]
}
// Overwrite the moved batches with the next ones in the sequence.
// Continuing the example in the comment above, len(c.seq)==5, done=2, so i=3.
// We want to replace index 3 with the batch that should be processed after index 2,
// which was previously the earliest known batch, and index 4 with the batch that should
// be processed after index 3, the new earliest batch.
for i := len(c.seq) - done; i < len(c.seq); i++ {
c.seq[i] = c.batcher.beforeBatch(c.seq[i-1])
}
}
// importable returns all batches that are ready to be imported. This means they satisfy 2 conditions:
// - They are in state batchImportable, which means their data has been downloaded and proposer signatures have been verified.
// - There are no batches that are not in state batchImportable between them and the start of the slice. This ensures that they
// can be connected to the canonical chain, either because the root of the last block in the batch matches the parent_root of
// the oldest block in the canonical chain, or because the root of the last block in the batch matches the parent_root of the
// new block preceding them in the slice (which must connect to the batch before it, or to the canonical chain if it is first).
func (c *batchSequencer) importable() []batch {
imp := make([]batch, 0)
for i := range c.seq {
if c.seq[i].state == batchImportable {
imp = append(imp, c.seq[i])
continue
}
// as soon as we hit a batch with a different state, we return everything leading to it.
// If the first element isn't importable, we'll return an empty slice.
break
}
return imp
}
// moveMinimum enables the backfill service to change the slot where the batcher will start replying with
// batch state batchEndSequence (signaling that no new batches will be produced). This is done in response to
// epochs advancing, which shrinks the gap between <checkpoint slot> and <current slot>-MIN_EPOCHS_FOR_BLOCK_REQUESTS,
// allowing the node to download a smaller number of blocks.
func (c *batchSequencer) moveMinimum(min primitives.Slot) error {
if min < c.batcher.min {
return errCannotDecreaseMinimum
}
c.batcher.min = min
return nil
}
// countWithState provides a view into how many batches are in a particular state
// to be used for logging or metrics purposes.
func (c *batchSequencer) countWithState(s batchState) int {
n := 0
for i := 0; i < len(c.seq); i++ {
if c.seq[i].state == s {
n += 1
}
}
return n
}
// numTodo computes the number of remaining batches for metrics and logging purposes.
func (c *batchSequencer) numTodo() int {
if len(c.seq) == 0 {
return 0
}
lowest := c.seq[len(c.seq)-1]
todo := 0
if lowest.state != batchEndSequence {
todo = c.batcher.remaining(lowest.begin)
}
for _, b := range c.seq {
switch b.state {
case batchEndSequence, batchImportComplete, batchNil:
continue
default:
todo += 1
}
}
return todo
}
func newBatchSequencer(seqLen int, min, max, size primitives.Slot) *batchSequencer {
b := batcher{min: min, max: max, size: size}
seq := make([]batch, seqLen)
return &batchSequencer{batcher: b, seq: seq}
}
type batcher struct {
min primitives.Slot
max primitives.Slot
size primitives.Slot
}
func (r batcher) remaining(upTo primitives.Slot) int {
if r.min >= upTo {
return 0
}
delta := upTo - r.min
if delta%r.size != 0 {
return int(delta/r.size) + 1
}
return int(delta / r.size)
}
func (r batcher) beforeBatch(upTo batch) batch {
return r.before(upTo.begin)
}
func (r batcher) before(upTo primitives.Slot) batch {
// upTo is an exclusive upper bound. Requesting a batch before the lower bound of backfill signals the end of the
// backfill process.
if upTo <= r.min {
return batch{begin: upTo, end: upTo, state: batchEndSequence}
}
begin := r.min
if upTo > r.size+r.min {
begin = upTo - r.size
}
// batch.end is exclusive, .begin is inclusive, so the prev.end = next.begin
return batch{begin: begin, end: upTo, state: batchInit}
}
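A worked example of the batcher arithmetic above, using the bounds exercised by TestBatchSequencer below (min=0, size=64, max=11235):

// before(11235) -> batch{begin: 11171, end: 11235}, state batchInit  (upTo > min+size, so begin = upTo - size)
// before(40)    -> batch{begin: 0, end: 40}, state batchInit         (upTo <= min+size, so begin is clamped to min)
// before(0)     -> batch{begin: 0, end: 0}, state batchEndSequence   (upTo <= min signals the end of backfill)
// remaining(11235) = 11235/64 rounded up = 176 batches between min and the starting slot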

View File

@@ -0,0 +1,208 @@
package backfill
import (
"fmt"
"testing"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestBatcherBefore(t *testing.T) {
cases := []struct {
name string
b batcher
upTo []primitives.Slot
expect []batch
}{
{
name: "size 10",
b: batcher{min: 0, size: 10},
upTo: []primitives.Slot{33, 30, 10, 6},
expect: []batch{
{begin: 23, end: 33, state: batchInit},
{begin: 20, end: 30, state: batchInit},
{begin: 0, end: 10, state: batchInit},
{begin: 0, end: 6, state: batchInit},
},
},
{
name: "size 4",
b: batcher{min: 0, size: 4},
upTo: []primitives.Slot{33, 6, 4},
expect: []batch{
{begin: 29, end: 33, state: batchInit},
{begin: 2, end: 6, state: batchInit},
{begin: 0, end: 4, state: batchInit},
},
},
{
name: "trigger end",
b: batcher{min: 20, size: 10},
upTo: []primitives.Slot{33, 30, 25, 21, 20, 19},
expect: []batch{
{begin: 23, end: 33, state: batchInit},
{begin: 20, end: 30, state: batchInit},
{begin: 20, end: 25, state: batchInit},
{begin: 20, end: 21, state: batchInit},
{begin: 20, end: 20, state: batchEndSequence},
{begin: 19, end: 19, state: batchEndSequence},
},
},
}
for _, c := range cases {
for i := range c.upTo {
upTo := c.upTo[i]
expect := c.expect[i]
t.Run(fmt.Sprintf("%s upTo %d", c.name, upTo), func(t *testing.T) {
got := c.b.before(upTo)
require.Equal(t, expect.begin, got.begin)
require.Equal(t, expect.end, got.end)
require.Equal(t, expect.state, got.state)
})
}
}
}
func TestBatchSequencer(t *testing.T) {
var min, max, size primitives.Slot
seqLen := 8
min = 0
max = 11235
size = 64
seq := newBatchSequencer(seqLen, min, max, size)
expected := []batch{
{begin: 11171, end: 11235},
{begin: 11107, end: 11171},
{begin: 11043, end: 11107},
{begin: 10979, end: 11043},
{begin: 10915, end: 10979},
{begin: 10851, end: 10915},
{begin: 10787, end: 10851},
{begin: 10723, end: 10787},
}
got, err := seq.sequence()
require.Equal(t, seqLen, len(got))
for i := 0; i < seqLen; i++ {
g := got[i]
exp := expected[i]
require.NoError(t, err)
require.Equal(t, exp.begin, g.begin)
require.Equal(t, exp.end, g.end)
require.Equal(t, batchSequenced, g.state)
}
// This should give us the error indicating there are too many outstanding batches.
_, err = seq.sequence()
require.ErrorIs(t, err, errMaxBatches)
// Mark the last batch as retryable so we can call sequence() again.
last := seq.seq[len(seq.seq)-1]
// With this state, the batch should get served back to us as the next batch.
last.state = batchErrRetryable
seq.update(last)
nextS, err := seq.sequence()
require.Equal(t, 1, len(nextS))
next := nextS[0]
require.NoError(t, err)
require.Equal(t, last.begin, next.begin)
require.Equal(t, last.end, next.end)
// sequence() should replace the batchErrRetryable state with batchSequenced.
require.Equal(t, batchSequenced, next.state)
// No batches have been marked importable.
require.Equal(t, 0, len(seq.importable()))
// Mark the last batch importable. It should not show up in importable() yet, because batches only
// become importable in order, starting from the front of the sequence.
next.state = batchImportable
seq.update(next)
require.Equal(t, 0, len(seq.importable()))
first := seq.seq[0]
first.state = batchImportable
seq.update(first)
require.Equal(t, 1, len(seq.importable()))
require.Equal(t, len(seq.seq), seqLen)
// change the last element back to batchInit so that the importable test stays simple
last = seq.seq[len(seq.seq)-1]
last.state = batchInit
seq.update(last)
// ensure that the number of importable elements grows as the list is marked importable
for i := 0; i < len(seq.seq); i++ {
seq.seq[i].state = batchImportable
require.Equal(t, i+1, len(seq.importable()))
}
// reset everything to init
for i := 0; i < len(seq.seq); i++ {
seq.seq[i].state = batchInit
require.Equal(t, 0, len(seq.importable()))
}
// loop backwards and make sure importable is zero until the first element is importable
for i := len(seq.seq) - 1; i > 0; i-- {
seq.seq[i].state = batchImportable
require.Equal(t, 0, len(seq.importable()))
}
seq.seq[0].state = batchImportable
require.Equal(t, len(seq.seq), len(seq.importable()))
// reset everything to init again
for i := 0; i < len(seq.seq); i++ {
seq.seq[i].state = batchInit
require.Equal(t, 0, len(seq.importable()))
}
// set first 3 elements to importable. we should see them in the result for importable()
// and be able to use update to cycle them away.
seq.seq[0].state, seq.seq[1].state, seq.seq[2].state = batchImportable, batchImportable, batchImportable
require.Equal(t, 3, len(seq.importable()))
a, b, c, z := seq.seq[0], seq.seq[1], seq.seq[2], seq.seq[3]
require.NotEqual(t, z.begin, seq.seq[2].begin)
require.NotEqual(t, z.begin, seq.seq[1].begin)
require.NotEqual(t, z.begin, seq.seq[0].begin)
a.state, b.state, c.state = batchImportComplete, batchImportComplete, batchImportComplete
seq.update(a)
// follow z as it moves down the chain to the first spot
require.Equal(t, z.begin, seq.seq[2].begin)
require.NotEqual(t, z.begin, seq.seq[1].begin)
require.NotEqual(t, z.begin, seq.seq[0].begin)
seq.update(b)
require.NotEqual(t, z.begin, seq.seq[2].begin)
require.Equal(t, z.begin, seq.seq[1].begin)
require.NotEqual(t, z.begin, seq.seq[0].begin)
seq.update(c)
require.NotEqual(t, z.begin, seq.seq[2].begin)
require.NotEqual(t, z.begin, seq.seq[1].begin)
require.Equal(t, z.begin, seq.seq[0].begin)
// Check integrity of begin/end alignment across the sequence.
// Also update all the states to sequenced for the convenience of the next test.
for i := 1; i < len(seq.seq); i++ {
require.Equal(t, seq.seq[i].end, seq.seq[i-1].begin)
// won't touch the first element, which is fine because it is marked complete below.
seq.seq[i].state = batchSequenced
}
// set the min for the batcher close to the lowest slot. This will force the next batch to be partial and the batch
// after that to be the final batch.
newMin := seq.seq[len(seq.seq)-1].begin - 30
seq.batcher.min = newMin
first = seq.seq[0]
first.state = batchImportComplete
// update() with a complete state will cause the sequence to be extended with an additional batch
seq.update(first)
lastS, err := seq.sequence()
last = lastS[0]
require.NoError(t, err)
require.Equal(t, newMin, last.begin)
require.Equal(t, seq.seq[len(seq.seq)-2].begin, last.end)
// Mark the first batch done again. This time sequence() should hand back a single batch in the batchEndSequence state.
first = seq.seq[0]
first.state = batchImportComplete
// update() with a complete state will cause the sequence to be extended with an additional batch
seq.update(first)
endExp, err := seq.sequence()
require.NoError(t, err)
require.Equal(t, 1, len(endExp))
end := endExp[0]
require.Equal(t, batchEndSequence, end.state)
}


@@ -0,0 +1,9 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["coverage.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill/coverage",
visibility = ["//visibility:public"],
deps = ["//consensus-types/primitives:go_default_library"],
)


@@ -0,0 +1,9 @@
package coverage
import "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
// AvailableBlocker can be used to check whether there is a finalized block in the db for the given slot.
// This interface is typically fulfilled by backfill.Store.
type AvailableBlocker interface {
AvailableBlock(primitives.Slot) bool
}
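As a hedged illustration of how a consumer might depend on this interface rather than on the backfill package directly, the following sketch defines a hypothetical canServe helper and a trivial stub implementation; neither is part of this change, and in the node the backfill Store is the type that fulfills the interface.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill/coverage"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

// canServe gates serving a slot on backfill coverage.
func canServe(ab coverage.AvailableBlocker, slot primitives.Slot) bool {
	return ab.AvailableBlock(slot)
}

// stubBlocker is a trivial stand-in that treats everything at or above a fixed
// low slot (plus genesis) as available.
type stubBlocker struct{ low primitives.Slot }

func (s stubBlocker) AvailableBlock(sl primitives.Slot) bool { return sl == 0 || sl >= s.low }

func main() {
	ab := stubBlocker{low: 100}
	fmt.Println(canServe(ab, 50), canServe(ab, 150)) // false true
}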


@@ -0,0 +1,67 @@
package backfill
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
oldestBatch = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "backfill_earliest_wip_slot",
Help: "Earliest slot that has been assigned to a worker.",
},
)
batchesWaiting = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "backfill_importable_batches_waiting",
Help: "Number of batches that are ready to be imported once they can be connected to the existing chain.",
},
)
backfillRemainingBatches = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "backfill_remaining_batches",
Help: "Backfill remaining batches.",
},
)
backfillBatchesImported = promauto.NewCounter(
prometheus.CounterOpts{
Name: "backfill_batches_imported",
Help: "Number of backfill batches downloaded and imported.",
},
)
backfillBatchApproximateBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "backfill_batch_bytes_downloaded",
Help: "Count of bytes downloaded from peers",
},
)
backfillBatchTimeRoundtrip = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "backfill_batch_time_roundtrip",
Help: "Total time to import batch, from first scheduled to imported.",
Buckets: []float64{400, 800, 1600, 3200, 6400, 12800},
},
)
backfillBatchTimeWaiting = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "backfill_batch_time_waiting",
Help: "Time batch waited for a suitable peer.",
Buckets: []float64{50, 100, 300, 1000, 2000},
},
)
backfillBatchTimeDownloading = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "backfill_batch_time_download",
Help: "Time batch spent downloading blocks from peer.",
Buckets: []float64{100, 300, 1000, 2000, 4000, 8000},
},
)
backfillBatchTimeVerifying = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "backfill_batch_time_verify",
Help: "Time batch spent downloading blocks from peer.",
Buckets: []float64{100, 300, 1000, 2000, 4000, 8000},
},
)
)


@@ -0,0 +1,152 @@
package backfill
import (
"context"
"math"
"time"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
log "github.com/sirupsen/logrus"
)
type batchWorkerPool interface {
spawn(ctx context.Context, n int, clock *startup.Clock, a PeerAssigner, v *verifier)
todo(b batch)
complete() (batch, error)
}
type worker interface {
run(context.Context)
}
type newWorker func(id workerId, in, out chan batch, c *startup.Clock, v *verifier) worker
func defaultNewWorker(p p2p.P2P) newWorker {
return func(id workerId, in, out chan batch, c *startup.Clock, v *verifier) worker {
return newP2pWorker(id, p, in, out, c, v)
}
}
type p2pBatchWorkerPool struct {
maxBatches int
newWorker newWorker
toWorkers chan batch
fromWorkers chan batch
toRouter chan batch
fromRouter chan batch
shutdownErr chan error
endSeq []batch
ctx context.Context
cancel func()
}
var _ batchWorkerPool = &p2pBatchWorkerPool{}
func newP2PBatchWorkerPool(p p2p.P2P, maxBatches int) *p2pBatchWorkerPool {
nw := defaultNewWorker(p)
return &p2pBatchWorkerPool{
newWorker: nw,
toRouter: make(chan batch, maxBatches),
fromRouter: make(chan batch, maxBatches),
toWorkers: make(chan batch),
fromWorkers: make(chan batch),
maxBatches: maxBatches,
shutdownErr: make(chan error),
}
}
func (p *p2pBatchWorkerPool) spawn(ctx context.Context, n int, c *startup.Clock, a PeerAssigner, v *verifier) {
p.ctx, p.cancel = context.WithCancel(ctx)
go p.batchRouter(a)
for i := 0; i < n; i++ {
go p.newWorker(workerId(i), p.toWorkers, p.fromWorkers, c, v).run(p.ctx)
}
}
func (p *p2pBatchWorkerPool) todo(b batch) {
// Intercept batchEndSequence batches here, because workers don't know what to do with them. They are a signal
// that the batcher has stopped producing work and that the pool is close to winding down. See complete()
// to understand how the pool manages the state where all workers are idle
// and all incoming batches signal end of sequence.
if b.state == batchEndSequence {
p.endSeq = append(p.endSeq, b)
return
}
p.toRouter <- b
}
func (p *p2pBatchWorkerPool) complete() (batch, error) {
if len(p.endSeq) == p.maxBatches {
return p.endSeq[0], errEndSequence
}
select {
case b := <-p.fromRouter:
return b, nil
case err := <-p.shutdownErr:
return batch{}, errors.Wrap(err, "fatal error from backfill worker pool")
case <-p.ctx.Done():
log.WithError(p.ctx.Err()).Info("p2pBatchWorkerPool context canceled, shutting down")
return batch{}, p.ctx.Err()
}
}
func (p *p2pBatchWorkerPool) batchRouter(pa PeerAssigner) {
busy := make(map[peer.ID]bool)
todo := make([]batch, 0)
rt := time.NewTicker(time.Second)
earliest := primitives.Slot(math.MaxUint64)
for {
select {
case b := <-p.toRouter:
todo = append(todo, b)
case <-rt.C:
// Worker assignment can fail if Assign can't find a suitable peer.
// This ticker exists to periodically break out of the channel select
// to retry failed assignments.
case b := <-p.fromWorkers:
pid := b.pid
busy[pid] = false
p.fromRouter <- b
case <-p.ctx.Done():
log.WithError(p.ctx.Err()).Info("p2pBatchWorkerPool context canceled, shutting down")
p.shutdown(p.ctx.Err())
return
}
if len(todo) == 0 {
continue
}
// Try to assign as many outstanding batches as possible to peers and feed the assigned batches to workers.
assigned, err := pa.Assign(busy, len(todo))
if err != nil {
if errors.Is(err, peers.ErrInsufficientSuitable) {
// Transient error resulting from insufficient number of connected peers. Leave batches in
// queue and get to them whenever the peer situation is resolved.
continue
}
p.shutdown(err)
return
}
for _, pid := range assigned {
busy[pid] = true
todo[0].pid = pid
p.toWorkers <- todo[0].withPeer(pid)
if todo[0].begin < earliest {
earliest = todo[0].begin
oldestBatch.Set(float64(earliest))
}
todo = todo[1:]
}
}
}
func (p *p2pBatchWorkerPool) shutdown(err error) {
p.cancel()
p.shutdownErr <- err
}


@@ -0,0 +1,78 @@
package backfill
import (
"context"
"testing"
"time"
"github.com/libp2p/go-libp2p/core/peer"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)
type mockAssigner struct {
err error
assign []peer.ID
}
// Assign satisfies the PeerAssigner interface so that mockAssigner can be used in tests
// in place of the concrete p2p implementation of PeerAssigner.
func (m mockAssigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
if m.err != nil {
return nil, m.err
}
return m.assign, nil
}
var _ PeerAssigner = &mockAssigner{}
func TestPoolDetectAllEnded(t *testing.T) {
nw := 5
p2p := p2ptest.NewTestP2P(t)
ctx := context.Background()
ma := &mockAssigner{}
pool := newP2PBatchWorkerPool(p2p, nw)
st, err := util.NewBeaconState()
require.NoError(t, err)
keys, err := st.PublicKeys()
require.NoError(t, err)
v, err := newBackfillVerifier(st.GenesisValidatorsRoot(), keys)
require.NoError(t, err)
pool.spawn(ctx, nw, startup.NewClock(time.Now(), [32]byte{}), ma, v)
br := batcher{min: 10, size: 10}
endSeq := br.before(0)
require.Equal(t, batchEndSequence, endSeq.state)
for i := 0; i < nw; i++ {
pool.todo(endSeq)
}
b, err := pool.complete()
require.ErrorIs(t, err, errEndSequence)
require.Equal(t, b.end, endSeq.end)
}
type mockPool struct {
spawnCalled []int
finishedChan chan batch
finishedErr chan error
todoChan chan batch
}
func (m *mockPool) spawn(_ context.Context, _ int, _ *startup.Clock, _ PeerAssigner, _ *verifier) {
}
func (m *mockPool) todo(b batch) {
m.todoChan <- b
}
func (m *mockPool) complete() (batch, error) {
select {
case b := <-m.finishedChan:
return b, nil
case err := <-m.finishedErr:
return batch{}, err
}
}
var _ batchWorkerPool = &mockPool{}


@@ -0,0 +1,300 @@
package backfill
import (
"context"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
"github.com/prysmaticlabs/prysm/v4/runtime"
"github.com/prysmaticlabs/prysm/v4/time/slots"
log "github.com/sirupsen/logrus"
)
type Service struct {
ctx context.Context
store *Store
ms minimumSlotter
cw startup.ClockWaiter
enabled bool // service is disabled by default while feature is experimental
nWorkers int
batchSeq *batchSequencer
batchSize uint64
pool batchWorkerPool
verifier *verifier
p2p p2p.P2P
pa PeerAssigner
batchImporter batchImporter
}
var _ runtime.Service = (*Service)(nil)
type ServiceOption func(*Service) error
func WithEnableBackfill(enabled bool) ServiceOption {
return func(s *Service) error {
s.enabled = enabled
return nil
}
}
func WithWorkerCount(n int) ServiceOption {
return func(s *Service) error {
s.nWorkers = n
return nil
}
}
func WithBatchSize(n uint64) ServiceOption {
return func(s *Service) error {
s.batchSize = n
return nil
}
}
type minimumSlotter interface {
minimumSlot() primitives.Slot
setClock(*startup.Clock)
}
type defaultMinimumSlotter struct {
clock *startup.Clock
cw startup.ClockWaiter
ctx context.Context
}
func (d defaultMinimumSlotter) minimumSlot() primitives.Slot {
if d.clock == nil {
var err error
d.clock, err = d.cw.WaitForClock(d.ctx)
if err != nil {
log.WithError(err).Fatal("failed to obtain system/genesis clock, unable to start backfill service")
}
}
return minimumBackfillSlot(d.clock.CurrentSlot())
}
func (d defaultMinimumSlotter) setClock(c *startup.Clock) {
//nolint:all
d.clock = c
}
var _ minimumSlotter = &defaultMinimumSlotter{}
type batchImporter func(ctx context.Context, b batch, su *Store) (*dbval.BackfillStatus, error)
func defaultBatchImporter(ctx context.Context, b batch, su *Store) (*dbval.BackfillStatus, error) {
status := su.status()
if err := b.ensureParent(bytesutil.ToBytes32(status.LowParentRoot)); err != nil {
return status, err
}
// Import blocks to db and update db state to reflect the newly imported blocks.
// Other parts of the beacon node may use the same StatusUpdater instance
// via the coverage.AvailableBlocker interface to safely determine if a given slot has been backfilled.
status, err := su.fillBack(ctx, b.results)
if err != nil {
log.WithError(err).Fatal("Non-recoverable db error in backfill service, quitting.")
}
return status, nil
}
// PeerAssigner describes a type that provides an Assign method, which can assign the best peer
// to service an RPC request. The Assign method takes a map of peers that should be excluded,
// allowing the caller to avoid making multiple concurrent requests to the same peer.
type PeerAssigner interface {
Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error)
}
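For illustration only, here is a hedged sketch of a minimal PeerAssigner implementation that hands out up to n peers from a fixed list while skipping busy ones. The staticAssigner type is hypothetical, the backfill import path is assumed, and the real assigner used by the node also scores and filters peers before assigning.

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill"
)

// staticAssigner is a hypothetical, minimal PeerAssigner.
type staticAssigner struct{ peers []peer.ID }

var _ backfill.PeerAssigner = staticAssigner{}

// Assign returns up to n peers that are not in the busy set.
func (a staticAssigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
	assigned := make([]peer.ID, 0, n)
	for _, pid := range a.peers {
		if busy[pid] {
			continue
		}
		assigned = append(assigned, pid)
		if len(assigned) == n {
			break
		}
	}
	return assigned, nil
}

func main() {
	a := staticAssigner{peers: []peer.ID{"p1", "p2", "p3"}}
	got, _ := a.Assign(map[peer.ID]bool{"p1": true}, 2)
	fmt.Println(len(got)) // 2
}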
// NewService initializes the backfill Service. Like all implementations of the Service interface,
// the service won't begin its runloop until Start() is called.
func NewService(ctx context.Context, su *Store, cw startup.ClockWaiter, p p2p.P2P, pa PeerAssigner, opts ...ServiceOption) (*Service, error) {
s := &Service{
ctx: ctx,
store: su,
cw: cw,
ms: &defaultMinimumSlotter{cw: cw, ctx: ctx},
p2p: p,
pa: pa,
batchImporter: defaultBatchImporter,
}
for _, o := range opts {
if err := o(s); err != nil {
return nil, err
}
}
s.pool = newP2PBatchWorkerPool(p, s.nWorkers)
return s, nil
}
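A hedged wiring sketch of how a node might construct and start this service using the options defined above. The import path, the package name, and the pre-built dependencies (store, clock waiter, p2p service, peer assigner) are assumptions for illustration rather than code from this change.

package node

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/backfill"
)

// startBackfill shows option-based construction; su, cw, p2pSvc and pa are
// assumed to be built elsewhere during node startup.
func startBackfill(ctx context.Context, su *backfill.Store, cw startup.ClockWaiter, p2pSvc p2p.P2P, pa backfill.PeerAssigner) (*backfill.Service, error) {
	svc, err := backfill.NewService(ctx, su, cw, p2pSvc, pa,
		backfill.WithEnableBackfill(true), // the feature is disabled by default
		backfill.WithWorkerCount(2),
		backfill.WithBatchSize(64),
	)
	if err != nil {
		return nil, err
	}
	go svc.Start() // Start blocks until backfill completes or the context ends
	return svc, nil
}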
func (s *Service) initVerifier(ctx context.Context) (*verifier, error) {
cps, err := s.store.originState(ctx)
if err != nil {
return nil, err
}
keys, err := cps.PublicKeys()
if err != nil {
return nil, errors.Wrap(err, "Unable to retrieve public keys for all validators in the origin state")
}
return newBackfillVerifier(cps.GenesisValidatorsRoot(), keys)
}
func (s *Service) updateComplete() bool {
b, err := s.pool.complete()
if err != nil {
if errors.Is(err, errEndSequence) {
log.WithField("backfill_slot", b.begin).Info("Backfill is complete")
return true
}
log.WithError(err).Fatal("Non-recoverable error in backfill service, quitting.")
}
s.batchSeq.update(b)
return false
}
func (s *Service) importBatches(ctx context.Context) {
importable := s.batchSeq.importable()
imported := 0
defer func() {
if imported == 0 {
return
}
backfillBatchesImported.Add(float64(imported))
}()
for i := range importable {
ib := importable[i]
if len(ib.results) == 0 {
log.WithFields(ib.logFields()).Error("Batch with no results, skipping importer.")
}
_, err := s.batchImporter(ctx, ib, s.store)
if err != nil {
log.WithError(err).WithFields(ib.logFields()).Debug("Backfill batch failed to import.")
s.downscore(ib)
s.batchSeq.update(ib.withState(batchErrRetryable))
// If a batch fails, the subsequent batches are no longer considered importable.
break
}
s.batchSeq.update(ib.withState(batchImportComplete))
imported += 1
// Calling update with state=batchImportComplete will advance the batch list.
}
nt := s.batchSeq.numTodo()
log.WithField("imported", imported).WithField("importable", len(importable)).
WithField("batches_remaining", nt).
Info("Backfill batches processed.")
backfillRemainingBatches.Set(float64(nt))
}
func (s *Service) scheduleTodos() {
batches, err := s.batchSeq.sequence()
if err != nil {
// This typically means we have several importable batches, but they are stuck behind a batch that needs
// to complete first so that we can chain parent roots across batches.
// e.g. backfilling [[90..100), [80..90), [70..80)]: if we complete [70..80) and [80..90) but not [90..100),
// we can't move forward until [90..100) completes, because we need to confirm 99 connects to 100,
// and then we'll have the parent_root expected by 90 to ensure it matches the root for 89,
// at which point we know we can process [80..90).
if errors.Is(err, errMaxBatches) {
log.Debug("Backfill batches waiting for descendent batch to complete.")
return
}
}
for _, b := range batches {
s.pool.todo(b)
}
}
// Start begins the runloop of backfill.Service in the current goroutine.
func (s *Service) Start() {
if !s.enabled {
log.Info("Exiting backfill service; not enabled.")
return
}
ctx, cancel := context.WithCancel(s.ctx)
defer func() {
cancel()
}()
clock, err := s.cw.WaitForClock(ctx)
if err != nil {
log.WithError(err).Fatal("Backfill service failed to start while waiting for genesis data.")
}
s.ms.setClock(clock)
status := s.store.status()
// Exit early if there aren't going to be any batches to backfill.
if primitives.Slot(status.LowSlot) <= s.ms.minimumSlot() {
log.WithField("minimum_required_slot", s.ms.minimumSlot()).
WithField("backfill_lowest_slot", status.LowSlot).
Info("Exiting backfill service; minimum block retention slot > lowest backfilled block.")
return
}
s.verifier, err = s.initVerifier(ctx)
if err != nil {
log.WithError(err).Fatal("Unable to initialize backfill verifier, quitting.")
}
s.pool.spawn(ctx, s.nWorkers, clock, s.pa, s.verifier)
s.batchSeq = newBatchSequencer(s.nWorkers, s.ms.minimumSlot(), primitives.Slot(status.LowSlot), primitives.Slot(s.batchSize))
if err = s.initBatches(); err != nil {
log.WithError(err).Fatal("Non-recoverable error in backfill service, quitting.")
}
for {
if ctx.Err() != nil {
return
}
if allComplete := s.updateComplete(); allComplete {
return
}
s.importBatches(ctx)
batchesWaiting.Set(float64(s.batchSeq.countWithState(batchImportable)))
if err := s.batchSeq.moveMinimum(s.ms.minimumSlot()); err != nil {
log.WithError(err).Fatal("Non-recoverable error in backfill service, quitting.")
}
s.scheduleTodos()
}
}
func (s *Service) initBatches() error {
batches, err := s.batchSeq.sequence()
if err != nil {
return err
}
for _, b := range batches {
s.pool.todo(b)
}
return nil
}
func (s *Service) downscore(b batch) {
s.p2p.Peers().Scorers().BadResponsesScorer().Increment(b.pid)
}
func (s *Service) Stop() error {
return nil
}
func (s *Service) Status() error {
return nil
}
// minimumBackfillSlot determines the lowest slot that backfill needs to download based on looking back
// MIN_EPOCHS_FOR_BLOCK_REQUESTS from the current slot.
func minimumBackfillSlot(current primitives.Slot) primitives.Slot {
oe := helpers.MinEpochsForBlockRequests()
if oe > slots.MaxSafeEpoch() {
oe = slots.MaxSafeEpoch()
}
offset := slots.UnsafeEpochStart(oe)
if offset > current {
return 0
}
return current - offset
}
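To make the look-back concrete, here is a hedged standalone sketch of the same arithmetic with plain integers, assuming mainnet parameters of 32 slots per epoch and MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024 (roughly five months); the real function above reads these from the config helpers and guards against overflow via MaxSafeEpoch.

package main

import "fmt"

// minimumBackfillSlot mirrors the function above with assumed mainnet
// constants: the node only needs blocks newer than
// current_slot - MIN_EPOCHS_FOR_BLOCK_REQUESTS * SLOTS_PER_EPOCH.
func minimumBackfillSlot(current uint64) uint64 {
	const (
		slotsPerEpoch             = 32
		minEpochsForBlockRequests = 33024 // assumed mainnet spec value
	)
	offset := uint64(minEpochsForBlockRequests) * slotsPerEpoch // 1,056,768 slots
	if offset > current {
		return 0 // the whole chain back to genesis is within the retention window
	}
	return current - offset
}

func main() {
	fmt.Println(minimumBackfillSlot(8_000_000)) // 6943232
}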


@@ -0,0 +1,89 @@
package backfill
import (
"context"
"testing"
"time"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)
type mockMinimumSlotter struct {
min primitives.Slot
}
var _ minimumSlotter = &mockMinimumSlotter{}
func (m mockMinimumSlotter) minimumSlot() primitives.Slot {
return m.min
}
func (m mockMinimumSlotter) setClock(*startup.Clock) {
}
func TestServiceInit(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*300)
defer cancel()
db := &mockBackfillDB{}
su, err := NewUpdater(ctx, db)
require.NoError(t, err)
nWorkers := 5
var batchSize uint64 = 100
nBatches := nWorkers * 2
var high uint64 = 11235
originRoot := [32]byte{}
origin, err := util.NewBeaconState()
require.NoError(t, err)
db.states = map[[32]byte]state.BeaconState{originRoot: origin}
su.bs = &dbval.BackfillStatus{
LowSlot: high,
OriginRoot: originRoot[:],
}
remaining := nBatches
cw := startup.NewClockSynchronizer()
require.NoError(t, cw.SetClock(startup.NewClock(time.Now(), [32]byte{})))
pool := &mockPool{todoChan: make(chan batch, nWorkers), finishedChan: make(chan batch, nWorkers)}
p2pt := p2ptest.NewTestP2P(t)
srv, err := NewService(ctx, su, cw, p2pt, &mockAssigner{}, WithBatchSize(batchSize), WithWorkerCount(nWorkers), WithEnableBackfill(true))
require.NoError(t, err)
srv.ms = mockMinimumSlotter{min: primitives.Slot(high - batchSize*uint64(nBatches))}
srv.pool = pool
srv.batchImporter = func(context.Context, batch, *Store) (*dbval.BackfillStatus, error) {
return &dbval.BackfillStatus{}, nil
}
go srv.Start()
todo := make([]batch, 0)
todo = testReadN(t, ctx, pool.todoChan, nWorkers, todo)
require.Equal(t, nWorkers, len(todo))
for i := 0; i < remaining; i++ {
b := todo[i]
if b.state == batchSequenced {
b.state = batchImportable
}
pool.finishedChan <- b
todo = testReadN(t, ctx, pool.todoChan, 1, todo)
}
require.Equal(t, remaining+nWorkers, len(todo))
for i := remaining; i < remaining+nWorkers; i++ {
require.Equal(t, batchEndSequence, todo[i].state)
}
}
func testReadN(t *testing.T, ctx context.Context, c chan batch, n int, into []batch) []batch {
for i := 0; i < n; i++ {
select {
case b := <-c:
into = append(into, b)
case <-ctx.Done():
// hitting the context timeout means something went wrong, so fail the test.
t.Fatalf("timed out waiting for batch %d of %d", i+1, n)
}
}
return into
}


@@ -2,121 +2,167 @@ package backfill
import (
"context"
"sync"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
)
// NewStatus correctly initializes a Status value with the required database value.
func NewStatus(store DB) *Status {
return &Status{
var errBatchDisconnected = errors.New("Highest block root in backfill batch doesn't match next parent_root")
// NewUpdater correctly initializes a Store value with the required database value.
func NewUpdater(ctx context.Context, store BeaconDB) (*Store, error) {
s := &Store{
store: store,
}
status, err := s.store.BackfillStatus(ctx)
if err != nil {
if errors.Is(err, db.ErrNotFound) {
return s, s.recoverLegacy(ctx)
}
return nil, errors.Wrap(err, "db error while reading status of previous backfill")
}
s.swapStatus(status)
return s, nil
}
// Status provides a way to update and query the status of a backfill process that may be necessary to track when
// Store provides a way to update and query the status of a backfill process that may be necessary to track when
// a node was initialized via checkpoint sync. With checkpoint sync, there will be a gap in node history from genesis
// until the checkpoint sync origin block. Status provides the means to update the value keeping track of the lower
// end of the missing block range via the Advance() method, to check whether a Slot is missing from the database
// via the SlotCovered() method, and to see the current StartGap() and EndGap().
type Status struct {
start primitives.Slot
end primitives.Slot
store DB
// until the checkpoint sync origin block. Store provides the means to update the value keeping track of the lower
// end of the missing block range via the fillBack() method, and to check whether a Slot is present in the database
// via the AvailableBlock() method.
type Store struct {
sync.RWMutex
store BeaconDB
genesisSync bool
bs *dbval.BackfillStatus
}
// SlotCovered uses StartGap() and EndGap() to determine if the given slot is covered by the current chain history.
// If the slot is <= StartGap(), or >= EndGap(), the result is true.
// If the slot is between StartGap() and EndGap(), the result is false.
func (s *Status) SlotCovered(sl primitives.Slot) bool {
// AvailableBlock determines if the given slot is covered by the current chain history.
// If the slot is 0, or >= the lowest backfilled slot, the result is true.
// If the slot falls in the still-missing range below the lowest backfilled slot, the result is false.
func (s *Store) AvailableBlock(sl primitives.Slot) bool {
s.RLock()
defer s.RUnlock()
// short circuit if the node was synced from genesis
if s.genesisSync {
if s.genesisSync || sl == 0 || s.bs.LowSlot <= uint64(sl) {
return true
}
if s.StartGap() < sl && sl < s.EndGap() {
return false
}
return true
return false
}
// StartGap returns the slot at the beginning of the range that needs to be backfilled.
func (s *Status) StartGap() primitives.Slot {
return s.start
// status is a threadsafe method to access a copy of the BackfillStatus value.
func (s *Store) status() *dbval.BackfillStatus {
s.RLock()
defer s.RUnlock()
return &dbval.BackfillStatus{
LowSlot: s.bs.LowSlot,
LowRoot: s.bs.LowRoot,
LowParentRoot: s.bs.LowParentRoot,
OriginSlot: s.bs.OriginSlot,
OriginRoot: s.bs.OriginRoot,
}
}
// EndGap returns the slot at the end of the range that needs to be backfilled.
func (s *Status) EndGap() primitives.Slot {
return s.end
// fillBack saves the slice of blocks and updates the BackfillStatus LowSlot/Root/ParentRoot tracker to the values
// from the first block in the slice. This method assumes that the block slice has been fully validated and
// sorted in slot order by the calling function.
func (s *Store) fillBack(ctx context.Context, blocks []blocks.ROBlock) (*dbval.BackfillStatus, error) {
status := s.status()
if len(blocks) == 0 {
return status, nil
}
highest := blocks[len(blocks)-1]
// The root of the highest block needs to match the parent root of the previous status. The backfill service will do
// the same check, but this is an extra defensive layer in front of the db index.
if highest.Root() != bytesutil.ToBytes32(status.LowParentRoot) {
return nil, errors.Wrapf(errBatchDisconnected, "prev parent_root=%#x, root=%#x, prev slot=%d, slot=%d",
status.LowParentRoot, highest.Root(), status.LowSlot, highest.Block().Slot())
}
if err := s.store.SaveROBlocks(ctx, blocks, false); err != nil {
return nil, errors.Wrapf(err, "error saving backfill blocks")
}
// Update finalized block index.
if err := s.store.BackfillFinalizedIndex(ctx, blocks, bytesutil.ToBytes32(status.LowRoot)); err != nil {
return nil, errors.Wrapf(err, "failed to update finalized index for batch, connecting root %#x to previously finalized block %#x",
highest.Root(), status.LowRoot)
}
// Update backfill status based on the block with the lowest slot in the batch.
lowest := blocks[0]
pr := lowest.Block().ParentRoot()
status.LowSlot = uint64(lowest.Block().Slot())
status.LowRoot = lowest.RootSlice()
status.LowParentRoot = pr[:]
return status, s.saveStatus(ctx, status)
}
var ErrAdvancePastOrigin = errors.New("cannot advance backfill Status beyond the origin checkpoint slot")
// Advance advances the backfill position to the given slot & root.
// It updates the backfill block root entry in the database,
// and also updates the Status value's copy of the backfill position slot.
func (s *Status) Advance(ctx context.Context, upTo primitives.Slot, root [32]byte) error {
if upTo > s.end {
return errors.Wrapf(ErrAdvancePastOrigin, "advance slot=%d, origin slot=%d", upTo, s.end)
// recoverLegacy will check to see if the db is from a legacy checkpoint sync, and either build a new BackfillStatus
// or label the node as synced from genesis.
func (s *Store) recoverLegacy(ctx context.Context) error {
cpr, err := s.store.OriginCheckpointBlockRoot(ctx)
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
s.genesisSync = true
return nil
}
s.start = upTo
return s.store.SaveBackfillBlockRoot(ctx, root)
cpb, err := s.store.Block(ctx, cpr)
if err != nil {
return errors.Wrapf(err, "error retrieving block for origin checkpoint root=%#x", cpr)
}
if err := blocks.BeaconBlockIsNil(cpb); err != nil {
return errors.Wrapf(err, "nil block found for origin checkpoint root=%#x", cpr)
}
os := uint64(cpb.Block().Slot())
lpr := cpb.Block().ParentRoot()
bs := &dbval.BackfillStatus{
LowSlot: os,
LowRoot: cpr[:],
LowParentRoot: lpr[:],
OriginSlot: os,
OriginRoot: cpr[:],
}
return s.saveStatus(ctx, bs)
}
// Reload queries the database for backfill status, initializing the internal data and validating the database state.
func (s *Status) Reload(ctx context.Context) error {
cpRoot, err := s.store.OriginCheckpointBlockRoot(ctx)
if err != nil {
// mark genesis sync and short circuit further lookups
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
s.genesisSync = true
return nil
}
return err
}
cpBlock, err := s.store.Block(ctx, cpRoot)
if err != nil {
return errors.Wrapf(err, "error retrieving block for origin checkpoint root=%#x", cpRoot)
}
if err := blocks.BeaconBlockIsNil(cpBlock); err != nil {
return err
}
s.end = cpBlock.Block().Slot()
_, err = s.store.GenesisBlockRoot(ctx)
if err != nil {
if errors.Is(err, db.ErrNotFoundGenesisBlockRoot) {
return errors.Wrap(err, "genesis block root required for checkpoint sync")
}
func (s *Store) saveStatus(ctx context.Context, bs *dbval.BackfillStatus) error {
if err := s.store.SaveBackfillStatus(ctx, bs); err != nil {
return err
}
bfRoot, err := s.store.BackfillBlockRoot(ctx)
if err != nil {
if errors.Is(err, db.ErrNotFoundBackfillBlockRoot) {
return errors.Wrap(err, "found origin checkpoint block root, but no backfill block root")
}
return err
}
bfBlock, err := s.store.Block(ctx, bfRoot)
if err != nil {
return errors.Wrapf(err, "error retrieving block for backfill root=%#x", bfRoot)
}
if err := blocks.BeaconBlockIsNil(bfBlock); err != nil {
return err
}
s.start = bfBlock.Block().Slot()
s.swapStatus(bs)
return nil
}
// DB describes the set of DB methods that the Status type needs to function.
type DB interface {
SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error
GenesisBlockRoot(ctx context.Context) ([32]byte, error)
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
func (s *Store) swapStatus(bs *dbval.BackfillStatus) {
s.Lock()
defer s.Unlock()
s.bs = bs
}
// originState looks up the state for the checkpoint sync origin. This is a hack, because Store is the only
// thing that needs db access and it has the origin root handy, so it's convenient to look it up here. The state is
// needed by the verifier.
func (s *Store) originState(ctx context.Context) (state.BeaconState, error) {
return s.store.StateOrError(ctx, bytesutil.ToBytes32(s.status().OriginRoot))
}
// BeaconDB describes the set of DB methods that the Store type needs to function.
type BeaconDB interface {
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
OriginCheckpointBlockRoot(context.Context) ([32]byte, error)
Block(context.Context, [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error
StateOrError(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
}


@@ -1,15 +1,18 @@
package backfill
import (
"bytes"
"context"
"testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
blocktest "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks/testing"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
@@ -19,121 +22,125 @@ var errEmptyMockDBMethod = errors.New("uninitialized mock db method called")
type mockBackfillDB struct {
saveBackfillBlockRoot func(ctx context.Context, blockRoot [32]byte) error
genesisBlockRoot func(ctx context.Context) ([32]byte, error)
originCheckpointBlockRoot func(ctx context.Context) ([32]byte, error)
backfillBlockRoot func(ctx context.Context) ([32]byte, error)
block func(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
saveBackfillStatus func(ctx context.Context, status *dbval.BackfillStatus) error
backfillStatus func(context.Context) (*dbval.BackfillStatus, error)
status *dbval.BackfillStatus
err error
states map[[32]byte]state.BeaconState
blocks map[[32]byte]blocks.ROBlock
}
var _ DB = &mockBackfillDB{}
var _ BeaconDB = &mockBackfillDB{}
func (db *mockBackfillDB) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
if db.saveBackfillBlockRoot != nil {
return db.saveBackfillBlockRoot(ctx, blockRoot)
func (d *mockBackfillDB) StateOrError(_ context.Context, blockRoot [32]byte) (state.BeaconState, error) {
st, ok := d.states[blockRoot]
if !ok {
return nil, db.ErrNotFoundState
}
return errEmptyMockDBMethod
return st, nil
}
func (db *mockBackfillDB) GenesisBlockRoot(ctx context.Context) ([32]byte, error) {
if db.genesisBlockRoot != nil {
return db.genesisBlockRoot(ctx)
func (d *mockBackfillDB) SaveBackfillStatus(ctx context.Context, status *dbval.BackfillStatus) error {
if d.saveBackfillStatus != nil {
return d.saveBackfillStatus(ctx, status)
}
d.status = status
return nil
}
func (d *mockBackfillDB) BackfillStatus(ctx context.Context) (*dbval.BackfillStatus, error) {
if d.backfillStatus != nil {
return d.backfillStatus(ctx)
}
return d.status, nil
}
func (d *mockBackfillDB) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error) {
if d.originCheckpointBlockRoot != nil {
return d.originCheckpointBlockRoot(ctx)
}
return [32]byte{}, errEmptyMockDBMethod
}
func (db *mockBackfillDB) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error) {
if db.originCheckpointBlockRoot != nil {
return db.originCheckpointBlockRoot(ctx)
func (d *mockBackfillDB) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
if d.block != nil {
return d.block(ctx, blockRoot)
}
return [32]byte{}, errEmptyMockDBMethod
b, ok := d.blocks[blockRoot]
if !ok {
return nil, db.ErrNotFound
}
return b, nil
}
func (db *mockBackfillDB) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
if db.backfillBlockRoot != nil {
return db.backfillBlockRoot(ctx)
func (d *mockBackfillDB) SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error {
if d.blocks == nil {
d.blocks = make(map[[32]byte]blocks.ROBlock)
}
return [32]byte{}, errEmptyMockDBMethod
for i := range blks {
d.blocks[blks[i].Root()] = blks[i]
}
return nil
}
func (db *mockBackfillDB) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
if db.block != nil {
return db.block(ctx, blockRoot)
}
return nil, errEmptyMockDBMethod
func (d *mockBackfillDB) BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error {
return nil
}
func TestSlotCovered(t *testing.T) {
cases := []struct {
name string
slot primitives.Slot
status *Status
status *Store
result bool
}{
{
name: "below start true",
status: &Status{start: 1},
name: "genesis true",
status: &Store{bs: &dbval.BackfillStatus{LowSlot: 10}},
slot: 0,
result: true,
},
{
name: "above end true",
status: &Status{end: 1},
status: &Store{bs: &dbval.BackfillStatus{LowSlot: 1}},
slot: 2,
result: true,
},
{
name: "equal end true",
status: &Status{end: 1},
status: &Store{bs: &dbval.BackfillStatus{LowSlot: 1}},
slot: 1,
result: true,
},
{
name: "equal start true",
status: &Status{start: 2},
slot: 2,
result: true,
},
{
name: "between false",
status: &Status{start: 1, end: 3},
slot: 2,
result: false,
},
{
name: "genesisSync always true",
status: &Status{genesisSync: true},
status: &Store{genesisSync: true},
slot: 100,
result: true,
},
}
for _, c := range cases {
result := c.status.SlotCovered(c.slot)
require.Equal(t, c.result, result)
t.Run(c.name, func(t *testing.T) {
result := c.status.AvailableBlock(c.slot)
require.Equal(t, c.result, result)
})
}
}
func TestAdvance(t *testing.T) {
func TestStatusUpdater_FillBack(t *testing.T) {
ctx := context.Background()
saveBackfillBuf := make([][32]byte, 0)
mdb := &mockBackfillDB{
saveBackfillBlockRoot: func(ctx context.Context, root [32]byte) error {
saveBackfillBuf = append(saveBackfillBuf, root)
return nil
},
}
s := &Status{end: 100, store: mdb}
var root [32]byte
copy(root[:], []byte{0x23, 0x23})
require.NoError(t, s.Advance(ctx, 90, root))
require.Equal(t, root, saveBackfillBuf[0])
not := s.SlotCovered(95)
require.Equal(t, false, not)
// this should still be len 1 after failing to advance
require.Equal(t, 1, len(saveBackfillBuf))
require.ErrorIs(t, s.Advance(ctx, s.end+1, root), ErrAdvancePastOrigin)
// this has an element in it from the previous test, there shouldn't be an additional one
require.Equal(t, 1, len(saveBackfillBuf))
mdb := &mockBackfillDB{}
b, err := setupTestBlock(90)
require.NoError(t, err)
rob, err := blocks.NewROBlock(b)
require.NoError(t, err)
s := &Store{bs: &dbval.BackfillStatus{LowSlot: 100, LowParentRoot: rob.RootSlice()}, store: mdb}
require.Equal(t, false, s.AvailableBlock(95))
_, err = s.fillBack(ctx, []blocks.ROBlock{rob})
require.NoError(t, err)
require.Equal(t, true, s.AvailableBlock(95))
}
func goodBlockRoot(root [32]byte) func(ctx context.Context) ([32]byte, error) {
@@ -151,9 +158,8 @@ func setupTestBlock(slot primitives.Slot) (interfaces.ReadOnlySignedBeaconBlock,
return blocktest.SetBlockSlot(b, slot)
}
func TestReload(t *testing.T) {
func TestNewUpdater(t *testing.T) {
ctx := context.Background()
derp := errors.New("derp")
originSlot := primitives.Slot(100)
var originRoot [32]byte
@@ -163,164 +169,39 @@ func TestReload(t *testing.T) {
backfillSlot := primitives.Slot(50)
var backfillRoot [32]byte
copy(originRoot[:], []byte{0x02})
copy(backfillRoot[:], []byte{0x02})
backfillBlock, err := setupTestBlock(backfillSlot)
require.NoError(t, err)
var parentRoot [32]byte
copy(parentRoot[:], []byte{0x03})
var rootSlice = func(r [32]byte) []byte { return r[:] }
typicalBackfillStatus := &dbval.BackfillStatus{
LowSlot: 23,
LowRoot: backfillRoot[:],
LowParentRoot: parentRoot[:],
OriginSlot: 1123,
OriginRoot: originRoot[:],
}
cases := []struct {
name string
db DB
db BeaconDB
err error
expected *Status
expected *Store
}{
/*{
{
name: "origin not found, implying genesis sync ",
db: &mockBackfillDB{
genesisBlockRoot: goodBlockRoot(params.BeaconConfig().ZeroHash),
backfillStatus: func(context.Context) (*dbval.BackfillStatus, error) {
return nil, db.ErrNotFound
},
originCheckpointBlockRoot: func(ctx context.Context) ([32]byte, error) {
return [32]byte{}, db.ErrNotFoundOriginBlockRoot
}},
expected: &Status{genesisSync: true},
expected: &Store{genesisSync: true},
},
{
name: "genesis not found error",
err: db.ErrNotFoundGenesisBlockRoot,
name: "legacy recovery",
db: &mockBackfillDB{
genesisBlockRoot: func(ctx context.Context) ([32]byte, error) {
return [32]byte{}, db.ErrNotFoundGenesisBlockRoot
},
originCheckpointBlockRoot: goodBlockRoot(originRoot),
block: func(ctx context.Context, root [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
switch root {
case originRoot:
return originBlock, nil
}
return nil, nil
},
},
},
{
name: "other genesis error",
err: derp,
db: &mockBackfillDB{
genesisBlockRoot: func(ctx context.Context) ([32]byte, error) {
return [32]byte{}, derp
},
originCheckpointBlockRoot: goodBlockRoot(originRoot),
block: func(ctx context.Context, root [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
switch root {
case originRoot:
return originBlock, nil
}
return nil, nil
},
},
},
{
name: "origin other error",
db: &mockBackfillDB{
genesisBlockRoot: goodBlockRoot(params.BeaconConfig().ZeroHash),
originCheckpointBlockRoot: func(ctx context.Context) ([32]byte, error) {
return [32]byte{}, derp
}},
err: derp,
},
{
name: "origin root found, block missing",
db: &mockBackfillDB{
genesisBlockRoot: goodBlockRoot(params.BeaconConfig().ZeroHash),
originCheckpointBlockRoot: goodBlockRoot(originRoot),
block: func(ctx context.Context, root [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
return nil, nil
},
},
err: blocks.ErrNilSignedBeaconBlock,
},
{
name: "origin root found, block error",
db: &mockBackfillDB{
genesisBlockRoot: goodBlockRoot(params.BeaconConfig().ZeroHash),
originCheckpointBlockRoot: goodBlockRoot(originRoot),
block: func(ctx context.Context, root [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
return nil, derp
},
},
err: derp,
},
{
name: "origin root found, block found, backfill root not found",
db: &mockBackfillDB{
genesisBlockRoot: goodBlockRoot(params.BeaconConfig().ZeroHash),
originCheckpointBlockRoot: goodBlockRoot(originRoot),
block: func(ctx context.Context, root [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
return originBlock, nil
},
backfillBlockRoot: func(ctx context.Context) ([32]byte, error) {
return [32]byte{}, db.ErrNotFoundBackfillBlockRoot
},
},
err: db.ErrNotFoundBackfillBlockRoot,
},
{
name: "origin root found, block found, random backfill root err",
db: &mockBackfillDB{
genesisBlockRoot: goodBlockRoot(params.BeaconConfig().ZeroHash),
originCheckpointBlockRoot: goodBlockRoot(originRoot),
block: func(ctx context.Context, root [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
switch root {
case originRoot:
return originBlock, nil
case backfillRoot:
return nil, nil
}
return nil, derp
},
backfillBlockRoot: func(ctx context.Context) ([32]byte, error) {
return [32]byte{}, derp
},
},
err: derp,
},
{
name: "origin root found, block found, backfill root found, backfill block not found",
db: &mockBackfillDB{
genesisBlockRoot: goodBlockRoot(params.BeaconConfig().ZeroHash),
originCheckpointBlockRoot: goodBlockRoot(originRoot),
block: func(ctx context.Context, root [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
switch root {
case originRoot:
return originBlock, nil
case backfillRoot:
return nil, nil
}
return nil, derp
},
backfillBlockRoot: goodBlockRoot(backfillRoot),
},
err: blocks.ErrNilSignedBeaconBlock,
},
{
name: "origin root found, block found, backfill root found, backfill block random err",
db: &mockBackfillDB{
genesisBlockRoot: goodBlockRoot(params.BeaconConfig().ZeroHash),
originCheckpointBlockRoot: goodBlockRoot(originRoot),
block: func(ctx context.Context, root [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
switch root {
case originRoot:
return originBlock, nil
case backfillRoot:
return nil, derp
}
return nil, errors.New("not derp")
},
backfillBlockRoot: goodBlockRoot(backfillRoot),
},
err: derp,
},*/
{
name: "complete happy path",
db: &mockBackfillDB{
genesisBlockRoot: goodBlockRoot(params.BeaconConfig().ZeroHash),
originCheckpointBlockRoot: goodBlockRoot(originRoot),
block: func(ctx context.Context, root [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
switch root {
@@ -331,28 +212,44 @@ func TestReload(t *testing.T) {
}
return nil, errors.New("not derp")
},
backfillBlockRoot: goodBlockRoot(backfillRoot),
backfillStatus: func(context.Context) (*dbval.BackfillStatus, error) { return nil, db.ErrNotFound },
},
err: derp,
expected: &Status{genesisSync: false, start: backfillSlot, end: originSlot},
expected: &Store{bs: &dbval.BackfillStatus{
LowSlot: uint64(originSlot), OriginSlot: uint64(originSlot),
LowRoot: originRoot[:], OriginRoot: originRoot[:], LowParentRoot: rootSlice(originBlock.Block().ParentRoot()),
}},
},
{
name: "backfill found",
db: &mockBackfillDB{backfillStatus: func(ctx context.Context) (*dbval.BackfillStatus, error) {
return typicalBackfillStatus, nil
}},
expected: &Store{bs: typicalBackfillStatus},
},
}
for _, c := range cases {
s := &Status{
store: c.db,
}
err := s.Reload(ctx)
if err != nil {
require.ErrorIs(t, err, c.err)
continue
}
require.NoError(t, err)
if c.expected == nil {
continue
}
require.Equal(t, c.expected.genesisSync, s.genesisSync)
require.Equal(t, c.expected.start, s.start)
require.Equal(t, c.expected.end, s.end)
t.Run(c.name, func(t *testing.T) {
s, err := NewUpdater(ctx, c.db)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
if c.expected == nil {
return
}
require.Equal(t, c.expected.genesisSync, s.genesisSync)
if c.expected.genesisSync {
require.IsNil(t, s.bs)
return
}
require.Equal(t, c.expected.bs.LowSlot, s.bs.LowSlot)
require.Equal(t, c.expected.bs.OriginSlot, s.bs.OriginSlot)
require.Equal(t, true, bytes.Equal(c.expected.bs.OriginRoot, s.bs.OriginRoot))
require.Equal(t, true, bytes.Equal(c.expected.bs.LowRoot, s.bs.LowRoot))
require.Equal(t, true, bytes.Equal(c.expected.bs.LowParentRoot, s.bs.LowParentRoot))
})
}
}


@@ -0,0 +1,125 @@
package backfill
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/network/forks"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
var errInvalidBatchChain = errors.New("parent_root of block does not match the previous block's root")
var errProposerIndexTooHigh = errors.New("proposer index not present in origin state")
var errUnknownDomain = errors.New("runtime error looking up signing domain for fork")
// VerifiedROBlocks represents a slice of blocks that have passed signature verification.
type VerifiedROBlocks []blocks.ROBlock
type verifier struct {
keys [][fieldparams.BLSPubkeyLength]byte
maxVal primitives.ValidatorIndex
domain *domainCache
}
// TODO: rewrite this to use ROBlock.
func (vr verifier) verify(blks []interfaces.ReadOnlySignedBeaconBlock) (VerifiedROBlocks, error) {
var err error
result := make([]blocks.ROBlock, len(blks))
sigSet := bls.NewSet()
for i := range blks {
result[i], err = blocks.NewROBlock(blks[i])
if err != nil {
return nil, err
}
if i > 0 && result[i-1].Root() != result[i].Block().ParentRoot() {
p, b := result[i-1], result[i]
return nil, errors.Wrapf(errInvalidBatchChain,
"slot %d parent_root=%#x, slot %d root=%#x",
b.Block().Slot(), b.Block().ParentRoot(),
p.Block().Slot(), p.Root())
}
set, err := vr.blockSignatureBatch(result[i])
if err != nil {
return nil, err
}
sigSet.Join(set)
}
v, err := sigSet.Verify()
if err != nil {
return nil, errors.Wrap(err, "block signature verification error")
}
if !v {
return nil, errors.New("batch block signature verification failed")
}
return result, nil
}
func (vr verifier) blockSignatureBatch(b blocks.ROBlock) (*bls.SignatureBatch, error) {
pidx := b.Block().ProposerIndex()
if pidx > vr.maxVal {
return nil, errProposerIndexTooHigh
}
dom, err := vr.domain.forEpoch(slots.ToEpoch(b.Block().Slot()))
if err != nil {
return nil, err
}
sig := b.Signature()
pk := vr.keys[pidx][:]
root := b.Root()
rootF := func() ([32]byte, error) { return root, nil }
return signing.BlockSignatureBatch(pk, sig[:], dom, rootF)
}
func newBackfillVerifier(vr []byte, keys [][fieldparams.BLSPubkeyLength]byte) (*verifier, error) {
dc, err := newDomainCache(vr, params.BeaconConfig().DomainBeaconProposer,
forks.NewOrderedSchedule(params.BeaconConfig()))
if err != nil {
return nil, err
}
v := &verifier{
keys: keys,
domain: dc,
}
v.maxVal = primitives.ValidatorIndex(len(v.keys) - 1)
return v, nil
}
// domainCache provides a fast signing domain lookup by epoch.
type domainCache struct {
fsched forks.OrderedSchedule
forkDomains map[[4]byte][]byte
dType [bls.DomainByteLength]byte
}
func newDomainCache(vRoot []byte, dType [bls.DomainByteLength]byte, fsched forks.OrderedSchedule) (*domainCache, error) {
dc := &domainCache{
fsched: fsched,
forkDomains: make(map[[4]byte][]byte),
dType: dType,
}
for _, entry := range fsched {
d, err := signing.ComputeDomain(dc.dType, entry.Version[:], vRoot)
if err != nil {
return nil, errors.Wrapf(err, "failed to pre-compute signing domain for fork version=%#x", entry.Version)
}
dc.forkDomains[entry.Version] = d
}
return dc, nil
}
func (dc *domainCache) forEpoch(e primitives.Epoch) ([]byte, error) {
fork, err := dc.fsched.VersionForEpoch(e)
if err != nil {
return nil, err
}
d, ok := dc.forkDomains[fork]
if !ok {
return nil, errors.Wrapf(errUnknownDomain, "fork version=%#x, epoch=%d", fork, e)
}
return d, nil
}


@@ -0,0 +1,75 @@
package backfill
import (
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/network/forks"
"github.com/prysmaticlabs/prysm/v4/runtime/interop"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)
func TestDomainCache(t *testing.T) {
cfg := params.MainnetConfig()
vRoot, err := hexutil.Decode("0x0011223344556677889900112233445566778899001122334455667788990011")
dType := cfg.DomainBeaconProposer
require.NoError(t, err)
require.Equal(t, 32, len(vRoot))
fsched := forks.NewOrderedSchedule(cfg)
dc, err := newDomainCache(vRoot,
dType, fsched)
require.NoError(t, err)
require.Equal(t, len(fsched), len(dc.forkDomains))
for i := range fsched {
e := fsched[i].Epoch
ad, err := dc.forEpoch(e)
require.NoError(t, err)
ed, err := signing.ComputeDomain(dType, fsched[i].Version[:], vRoot)
require.NoError(t, err)
require.DeepEqual(t, ed, ad)
}
}
func testBlocksWithKeys(t *testing.T, nBlocks uint64, nBlobs int, vr []byte) ([]blocks.ROBlock, [][]blocks.ROBlob, []bls.SecretKey, []bls.PublicKey) {
blks := make([]blocks.ROBlock, nBlocks)
blbs := make([][]blocks.ROBlob, nBlocks)
sks, pks, err := interop.DeterministicallyGenerateKeys(0, nBlocks)
require.NoError(t, err)
prevRoot := [32]byte{}
for i := uint64(0); i < nBlocks; i++ {
block, blobs := util.GenerateTestDenebBlockWithSidecar(t, prevRoot, primitives.Slot(i), nBlobs, util.WithProposerSigning(primitives.ValidatorIndex(i), sks[i], vr))
prevRoot = block.Root()
blks[i] = block
blbs[i] = blobs
}
return blks, blbs, sks, pks
}
func TestVerify(t *testing.T) {
vr := make([]byte, 32)
copy(vr, []byte("yooooo"))
blks, _, _, pks := testBlocksWithKeys(t, 2, 0, vr)
pubkeys := make([][fieldparams.BLSPubkeyLength]byte, len(pks))
for i := range pks {
pubkeys[i] = bytesutil.ToBytes48(pks[i].Marshal())
}
v, err := newBackfillVerifier(vr, pubkeys)
require.NoError(t, err)
notrob := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
// We have to unwrap the ROBlocks for this code because that's what it expects (for now).
for i := range blks {
notrob[i] = blks[i].ReadOnlySignedBeaconBlock
}
vbs, err := v.verify(notrob)
require.NoError(t, err)
require.Equal(t, len(blks), len(vbs))
}


@@ -0,0 +1,72 @@
package backfill
import (
"context"
"time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
log "github.com/sirupsen/logrus"
)
type workerId int
type p2pWorker struct {
id workerId
todo chan batch
done chan batch
p2p p2p.P2P
v *verifier
c *startup.Clock
}
func (w *p2pWorker) run(ctx context.Context) {
for {
select {
case b := <-w.todo:
log.WithFields(b.logFields()).WithField("backfill_worker", w.id).Debug("Backfill worker received batch.")
w.done <- w.handle(ctx, b)
case <-ctx.Done():
log.WithField("backfill_worker", w.id).Info("Backfill worker exiting after context canceled.")
return
}
}
}
func (w *p2pWorker) handle(ctx context.Context, b batch) batch {
start := time.Now()
results, err := sync.SendBeaconBlocksByRangeRequest(ctx, w.c, w.p2p, b.pid, b.request(), nil)
dlt := time.Now()
backfillBatchTimeDownloading.Observe(float64(dlt.Sub(start).Milliseconds()))
if err != nil {
return b.withRetryableError(err)
}
vb, err := w.v.verify(results)
backfillBatchTimeVerifying.Observe(float64(time.Since(dlt).Milliseconds()))
if err != nil {
return b.withRetryableError(err)
}
// This is a hack to get the rough size of the batch. This helps us approximate the amount of memory needed
// to hold batches and relative sizes between batches, but will be inaccurate when it comes to measuring actual
// bytes downloaded from peers, mainly because the p2p messages are snappy compressed.
bdl := 0
for i := range vb {
bdl += vb[i].SizeSSZ()
}
backfillBatchApproximateBytes.Add(float64(bdl))
log.WithField("dlbytes", bdl).Debug("backfill batch bytes downloaded")
b.results = vb
return b.withState(batchImportable)
}
func newP2pWorker(id workerId, p p2p.P2P, todo, done chan batch, c *startup.Clock, v *verifier) *p2pWorker {
return &p2pWorker{
id: id,
todo: todo,
done: done,
p2p: p,
v: v,
c: c,
}
}
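For orientation, the worker above is driven entirely through its todo and done channels: a coordinator owns the channels, fans batches out, and collects them back. Below is a minimal, stdlib-only sketch of that same channel pattern, with job and result as stand-ins for the batch type; all names are illustrative and not part of this change.
package main
import (
	"context"
	"fmt"
	"sync"
)
type job struct{ id int }
type result struct{ id int }
// worker mirrors p2pWorker.run: block on the shared todo channel, hand each
// finished item back on done, and exit when the context is canceled.
func worker(ctx context.Context, todo <-chan job, done chan<- result) {
	for {
		select {
		case j := <-todo:
			done <- result{id: j.id}
		case <-ctx.Done():
			return
		}
	}
}
func main() {
	ctx, cancel := context.WithCancel(context.Background())
	todo, done := make(chan job), make(chan result)
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ { // three workers sharing one queue
		wg.Add(1)
		go func() { defer wg.Done(); worker(ctx, todo, done) }()
	}
	go func() { // feed five batches of work
		for i := 0; i < 5; i++ {
			todo <- job{id: i}
		}
	}()
	for i := 0; i < 5; i++ {
		fmt.Println("finished", (<-done).id)
	}
	cancel()
	wg.Wait()
}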


@@ -517,7 +517,7 @@ func (f *blocksFetcher) requestBlobs(ctx context.Context, req *p2ppb.BlobSidecar
"capacity": f.rateLimiter.Remaining(pid.String()),
"score": f.p2p.Peers().Scorers().BlockProviderScorer().FormatScorePretty(pid),
}).Debug("Requesting blobs")
// We're intentionally abusing the block rate limit here, treating blob requests as if they were blob requests.
// We're intentionally abusing the block rate limit here, treating blob requests as if they were block requests.
// Since blob requests take more bandwidth than blocks, we should improve how we account for the different kinds
// of requests, more in proportion to the cost of serving them.
if f.rateLimiter.Remaining(pid.String()) < int64(req.Count) {

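As the comment in this hunk notes, ranged blob requests are currently charged against the block rate limit even though a single slot can carry several sidecars. One hypothetical way to account for the cost more proportionally would be to weight each request by the maximum number of sidecars it can return; the constant and helpers below are purely illustrative and not part of this change.
package main
import "fmt"
// maxBlobsPerBlock mirrors fieldparams.MaxBlobsPerBlock for Deneb (assumed to be 6 here).
const maxBlobsPerBlock = 6
// blobRequestCost weights a BlobSidecarsByRange request by its worst-case
// sidecar count instead of by the number of slots requested.
func blobRequestCost(slotCount uint64) uint64 {
	return slotCount * maxBlobsPerBlock
}
// canServe is a hypothetical capacity check against a limiter's remaining tokens.
func canServe(remaining int64, slotCount uint64) bool {
	return remaining >= int64(blobRequestCost(slotCount))
}
func main() {
	fmt.Println(blobRequestCost(64), canServe(100, 64))
}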

@@ -25,14 +25,22 @@ import (
"github.com/sirupsen/logrus"
)
// ErrInvalidFetchedData is used to signal that an error occurred which should result in peer downscoring.
var ErrInvalidFetchedData = errors.New("invalid data returned from peer")
var errMaxRequestBlobSidecarsExceeded = errors.Wrap(ErrInvalidFetchedData, "peer exceeded req blob chunk tx limit")
var errBlobChunkedReadFailure = errors.New("failed to read stream of chunk-encoded blobs")
var errBlobUnmarshal = errors.New("Could not unmarshal chunk-encoded blob")
var errUnrequested = errors.New("Received BlobSidecar in response that was not requested")
var errBlobResponseOutOfBounds = errors.New("received BlobSidecar with slot outside BlobSidecarsByRangeRequest bounds")
// Any error from the following declaration block should result in peer downscoring.
var (
// ErrInvalidFetchedData is used to signal that an error occurred which should result in peer downscoring.
ErrInvalidFetchedData = errors.New("invalid data returned from peer")
errBlobIndexOutOfBounds = errors.Wrap(ErrInvalidFetchedData, "blob index out of range")
errMaxRequestBlobSidecarsExceeded = errors.Wrap(ErrInvalidFetchedData, "peer exceeded req blob chunk tx limit")
errChunkResponseSlotNotAsc = errors.Wrap(ErrInvalidFetchedData, "blob slot not higher than previous blob slot")
errChunkResponseIndexNotAsc = errors.Wrap(ErrInvalidFetchedData, "blob indices for a block must start at 0 and increase by 1")
errUnrequested = errors.Wrap(ErrInvalidFetchedData, "received BlobSidecar in response that was not requested")
errBlobResponseOutOfBounds = errors.Wrap(ErrInvalidFetchedData, "received BlobSidecar with slot outside BlobSidecarsByRangeRequest bounds")
errChunkResponseBlockMismatch = errors.Wrap(ErrInvalidFetchedData, "blob block details do not match")
errChunkResponseParentMismatch = errors.Wrap(ErrInvalidFetchedData, "parent root for response element doesn't match previous element root")
)
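Since every error in the block above wraps ErrInvalidFetchedData, callers can make the downscoring decision with a single errors.Is check rather than matching each sentinel individually. A small, self-contained sketch of that pattern follows; downscorePeer is a hypothetical stand-in for whatever peer-scoring call the sync service actually makes.
package main
import (
	"errors"
	"fmt"
)
// A local sentinel standing in for ErrInvalidFetchedData; concrete failures wrap it.
var errInvalidFetchedData = errors.New("invalid data returned from peer")
var errIndexOutOfBounds = fmt.Errorf("blob index out of range: %w", errInvalidFetchedData)
// downscorePeer is hypothetical; the real service would call into its peer scorer.
func downscorePeer(pid string) { fmt.Println("downscoring", pid) }
func handleResponseErr(pid string, err error) {
	if errors.Is(err, errInvalidFetchedData) {
		downscorePeer(pid)
	}
}
func main() {
	handleResponseErr("peer-1", errIndexOutOfBounds)
}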
// BeaconBlockProcessor defines a block processing function, which allows to start utilizing
// blocks even before all blocks are ready.
@@ -167,7 +175,8 @@ func SendBlobsByRangeRequest(ctx context.Context, tor blockchain.TemporalOracle,
if max > req.Count*fieldparams.MaxBlobsPerBlock {
max = req.Count * fieldparams.MaxBlobsPerBlock
}
return readChunkEncodedBlobs(stream, p2pApi.Encoding(), ctxMap, blobValidatorFromRangeReq(req), max)
blobVal := composeBlobValidations(blobValidatorFromRangeReq(req), newSequentialBlobValidator())
return readChunkEncodedBlobs(stream, p2pApi.Encoding(), ctxMap, blobVal, max)
}
func SendBlobSidecarByRoot(
@@ -198,6 +207,70 @@ func SendBlobSidecarByRoot(
type blobResponseValidation func(blocks.ROBlob) error
func composeBlobValidations(vf ...blobResponseValidation) blobResponseValidation {
return func(blob blocks.ROBlob) error {
for i := range vf {
if err := vf[i](blob); err != nil {
return err
}
}
return nil
}
}
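composeBlobValidations above simply runs its checks in order and returns the first failure, which is what lets the range-request validator and the new sequential validator be stacked in SendBlobsByRangeRequest. A stdlib-only analogue of the same composition pattern, using int values instead of ROBlobs purely for illustration:
package main
import (
	"errors"
	"fmt"
)
type check func(int) error
// compose mirrors composeBlobValidations: apply each check in order, stop at the first error.
func compose(checks ...check) check {
	return func(v int) error {
		for _, c := range checks {
			if err := c(v); err != nil {
				return err
			}
		}
		return nil
	}
}
func main() {
	nonNegative := func(v int) error {
		if v < 0 {
			return errors.New("negative value")
		}
		return nil
	}
	bounded := func(v int) error {
		if v > 10 {
			return errors.New("value too large")
		}
		return nil
	}
	validate := compose(nonNegative, bounded)
	fmt.Println(validate(5), validate(42)) // <nil>, then "value too large"
}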
type seqBlobValid struct {
prev *blocks.ROBlob
}
func (sbv *seqBlobValid) nextValid(blob blocks.ROBlob) error {
if blob.Index >= fieldparams.MaxBlobsPerBlock {
return errBlobIndexOutOfBounds
}
if sbv.prev == nil {
// The first blob we see for a block must have index 0.
if blob.Index != 0 {
return errChunkResponseIndexNotAsc
}
sbv.prev = &blob
return nil
}
if sbv.prev.Slot() == blob.Slot() {
if sbv.prev.BlockRoot() != blob.BlockRoot() {
return errors.Wrap(errChunkResponseBlockMismatch, "block roots do not match")
}
if sbv.prev.ParentRoot() != blob.ParentRoot() {
return errors.Wrap(errChunkResponseBlockMismatch, "block parent roots do not match")
}
// Blob indices in responses should be strictly monotonically incrementing.
if blob.Index != sbv.prev.Index+1 {
return errChunkResponseIndexNotAsc
}
} else {
// If the slot is adjacent we know there are no intervening blocks with missing blobs, so we can
// check that the new blob descends from the last seen.
if blob.Slot() == sbv.prev.Slot()+1 && blob.ParentRoot() != sbv.prev.BlockRoot() {
return errChunkResponseParentMismatch
}
// The first blob we see for a block must have index 0.
if blob.Index != 0 {
return errChunkResponseIndexNotAsc
}
// Blocks must be in ascending slot order.
if sbv.prev.Slot() >= blob.Slot() {
return errChunkResponseSlotNotAsc
}
}
sbv.prev = &blob
return nil
}
func newSequentialBlobValidator() blobResponseValidation {
sbv := &seqBlobValid{}
return func(blob blocks.ROBlob) error {
return sbv.nextValid(blob)
}
}
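newSequentialBlobValidator deliberately returns a stateful closure: seqBlobValid remembers the previously accepted sidecar so each new one can be checked against it, which is also why the tests below build a fresh validator per case. A stripped-down, hypothetical analogue of that stateful-closure shape, checking only that indices start at zero and increase by one:
package main
import (
	"errors"
	"fmt"
)
// newSeqCheck mirrors the shape of newSequentialBlobValidator: the returned
// closure keeps the last accepted index and validates each new one against it.
func newSeqCheck() func(idx uint64) error {
	var prev *uint64
	return func(idx uint64) error {
		switch {
		case prev == nil && idx != 0:
			return errors.New("first index must be 0")
		case prev != nil && idx != *prev+1:
			return errors.New("index must increase by exactly 1")
		}
		prev = &idx
		return nil
	}
}
func main() {
	check := newSeqCheck()
	for _, i := range []uint64{0, 1, 3} {
		fmt.Printf("index %d: %v\n", i, check(i))
	}
}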
func blobValidatorFromRootReq(req *p2ptypes.BlobSidecarsByRootReq) blobResponseValidation {
blobIds := make(map[[32]byte]map[uint64]bool)
for _, sc := range *req {


@@ -13,6 +13,7 @@ import (
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
p2pTypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -601,3 +602,88 @@ func TestBlobValidatorFromRangeReq(t *testing.T) {
})
}
}
func TestSeqBlobValid(t *testing.T) {
one, oneBlobs := generateTestBlockWithSidecars(t, [32]byte{}, 0, 3)
r1, err := one.Block.HashTreeRoot()
require.NoError(t, err)
two, twoBlobs := generateTestBlockWithSidecars(t, r1, 1, 3)
r2, err := two.Block.HashTreeRoot()
require.NoError(t, err)
_, oops := generateTestBlockWithSidecars(t, r2, 0, 4)
oops[1].SignedBlockHeader.Header.ParentRoot = bytesutil.PadTo([]byte("derp"), 32)
wrongRoot, err := blocks.NewROBlobWithRoot(oops[2].BlobSidecar, bytesutil.ToBytes32([]byte("parentderp")))
require.NoError(t, err)
oob := oops[3]
oob.Index = fieldparams.MaxBlobsPerBlock
cases := []struct {
name string
seq []blocks.ROBlob
err error
errAt int
}{
{
name: "all valid",
seq: append(append([]blocks.ROBlob{}, oneBlobs...), twoBlobs...),
},
{
name: "idx out of bounds",
seq: []blocks.ROBlob{oob},
err: errBlobIndexOutOfBounds,
},
{
name: "first index is not zero",
seq: []blocks.ROBlob{oneBlobs[1]},
err: errChunkResponseIndexNotAsc,
},
{
name: "index out of order, same block",
seq: []blocks.ROBlob{oneBlobs[1], oneBlobs[0]},
err: errChunkResponseIndexNotAsc,
},
{
name: "second block starts at idx 1",
seq: []blocks.ROBlob{oneBlobs[0], twoBlobs[1]},
err: errChunkResponseIndexNotAsc,
errAt: 1,
},
{
name: "slots not ascending",
seq: append(append([]blocks.ROBlob{}, twoBlobs...), oops...),
err: errChunkResponseSlotNotAsc,
errAt: len(twoBlobs),
},
{
name: "same slot, different root",
seq: []blocks.ROBlob{oops[0], wrongRoot},
err: errChunkResponseBlockMismatch,
errAt: 1,
},
{
name: "same slot, different parent root",
seq: []blocks.ROBlob{oops[0], oops[1]},
err: errChunkResponseBlockMismatch,
errAt: 1,
},
{
name: "next slot, different parent root",
seq: []blocks.ROBlob{oops[0], twoBlobs[0]},
err: errChunkResponseParentMismatch,
errAt: 1,
},
}
for _, c := range cases {
sbv := newSequentialBlobValidator()
t.Run(c.name, func(t *testing.T) {
for i := range c.seq {
err := sbv(c.seq[i])
if c.err != nil && i == c.errAt {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
}
})
}
}


@@ -210,7 +210,7 @@ func (s *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream
"error": err,
}).Debug("Invalid status message from peer")
respCode := byte(0)
var respCode byte
switch err {
case p2ptypes.ErrGeneric:
respCode = responseCodeServerError

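This hunk (and the agentString change further down) comes from enabling the wastedassign linter: a value that is assigned and then unconditionally overwritten before ever being read is dead, so a plain declaration is preferred. A contrived before/after sketch of the pattern being flagged, purely illustrative and not taken from the handler above:
package main
import "fmt"
// flagged: the initial zero assignment is never read, since every branch overwrites it.
func respCodeOld(serverErr bool) byte {
	respCode := byte(0) // wastedassign would report this line
	if serverErr {
		respCode = 1
	} else {
		respCode = 2
	}
	return respCode
}
// preferred: declare without a redundant initial assignment.
func respCodeNew(serverErr bool) byte {
	var respCode byte
	if serverErr {
		respCode = 1
	} else {
		respCode = 2
	}
	return respCode
}
func main() {
	fmt.Println(respCodeOld(true), respCodeNew(false))
}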

@@ -257,7 +257,8 @@ func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle s
func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, pubsub.ValidatorEx) {
return topic, func(ctx context.Context, pid peer.ID, msg *pubsub.Message) (res pubsub.ValidationResult) {
defer messagehandler.HandlePanic(ctx, msg)
res = pubsub.ValidationIgnore // Default: ignore any message that panics.
// Default: ignore any message that panics.
res = pubsub.ValidationIgnore // nolint:wastedassign
ctx, cancel := context.WithTimeout(ctx, pubsubMessageTimeout)
defer cancel()
messageReceivedCounter.WithLabelValues(topic).Inc()
@@ -781,10 +782,8 @@ func isDigestValid(digest [4]byte, genesis time.Time, genValRoot [32]byte) (bool
}
func agentString(pid peer.ID, hst host.Host) string {
agString := ""
ok := false
rawVersion, storeErr := hst.Peerstore().Get(pid, "AgentVersion")
agString, ok = rawVersion.(string)
agString, ok := rawVersion.(string)
if storeErr != nil || !ok {
agString = ""
}


@@ -152,10 +152,11 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.Signe
}
// Verify validator index is within the beacon committee.
if err := validateIndexInCommittee(ctx, bs, signed.Message.Aggregate, signed.Message.AggregatorIndex); err != nil {
result, err := s.validateIndexInCommittee(ctx, bs, signed.Message.Aggregate, signed.Message.AggregatorIndex)
if result != pubsub.ValidationAccept {
wrappedErr := errors.Wrapf(err, "Could not validate index in committee")
tracing.AnnotateError(span, wrappedErr)
return pubsub.ValidationReject, wrappedErr
return result, wrappedErr
}
// Verify selection proof reflects to the right validator.
@@ -216,15 +217,32 @@ func (s *Service) setAggregatorIndexEpochSeen(epoch primitives.Epoch, aggregator
s.seenAggregatedAttestationCache.Add(string(b), true)
}
// This validates the aggregator's index in state is within the beacon committee.
func validateIndexInCommittee(ctx context.Context, bs state.ReadOnlyBeaconState, a *ethpb.Attestation, validatorIndex primitives.ValidatorIndex) error {
// This validates the bitfield is correct and aggregator's index in state is within the beacon committee.
// It implements the following checks from the consensus spec:
// - [REJECT] The committee index is within the expected range -- i.e. `aggregate.data.index < get_committee_count_per_slot(state, aggregate.data.target.epoch)`.
// - [REJECT] The number of aggregation bits matches the committee size --
// i.e. len(aggregate.aggregation_bits) == len(get_beacon_committee(state, aggregate.data.slot, aggregate.data.index)).
// - [REJECT] The aggregate attestation has participants -- that is, len(get_attesting_indices(state, aggregate.data, aggregate.aggregation_bits)) >= 1.
// - [REJECT] The aggregator's validator index is within the committee --
// i.e. `aggregate_and_proof.aggregator_index in get_beacon_committee(state, aggregate.data.slot, aggregate.data.index)`.
func (s *Service) validateIndexInCommittee(ctx context.Context, bs state.ReadOnlyBeaconState, a *ethpb.Attestation, validatorIndex primitives.ValidatorIndex) (pubsub.ValidationResult, error) {
ctx, span := trace.StartSpan(ctx, "sync.validateIndexInCommittee")
defer span.End()
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return err
_, result, err := s.validateCommitteeIndex(ctx, a, bs)
if result != pubsub.ValidationAccept {
return result, err
}
committee, result, err := s.validateBitLength(ctx, a, bs)
if result != pubsub.ValidationAccept {
return result, err
}
if a.AggregationBits.Count() == 0 {
return pubsub.ValidationReject, errors.New("no attesting indices")
}
var withinCommittee bool
for _, i := range committee {
if validatorIndex == i {
@@ -233,10 +251,10 @@ func validateIndexInCommittee(ctx context.Context, bs state.ReadOnlyBeaconState,
}
}
if !withinCommittee {
return fmt.Errorf("validator index %d is not within the committee: %v",
return pubsub.ValidationReject, fmt.Errorf("validator index %d is not within the committee: %v",
validatorIndex, committee)
}
return nil
return pubsub.ValidationAccept, nil
}
// This validates selection proof by validating it's from the correct validator index of the slot.


@@ -37,6 +37,7 @@ func TestVerifyIndexInCommittee_CanVerify(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
service := &Service{}
validators := uint64(32)
s, _ := util.DeterministicGenesisState(t, validators)
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch))
@@ -51,10 +52,14 @@ func TestVerifyIndexInCommittee_CanVerify(t *testing.T) {
assert.NoError(t, err)
indices, err := attestation.AttestingIndices(att.AggregationBits, committee)
require.NoError(t, err)
require.NoError(t, validateIndexInCommittee(ctx, s, att, primitives.ValidatorIndex(indices[0])))
result, err := service.validateIndexInCommittee(ctx, s, att, primitives.ValidatorIndex(indices[0]))
require.NoError(t, err)
assert.Equal(t, pubsub.ValidationAccept, result)
wanted := "validator index 1000 is not within the committee"
assert.ErrorContains(t, wanted, validateIndexInCommittee(ctx, s, att, 1000))
result, err = service.validateIndexInCommittee(ctx, s, att, 1000)
assert.ErrorContains(t, wanted, err)
assert.Equal(t, pubsub.ValidationReject, result)
}
func TestVerifyIndexInCommittee_ExistsInBeaconCommittee(t *testing.T) {
@@ -66,18 +71,40 @@ func TestVerifyIndexInCommittee_ExistsInBeaconCommittee(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, validators)
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch))
bf := []byte{0xff}
att := &ethpb.Attestation{Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Epoch: 0}},
AggregationBits: bf}
Target: &ethpb.Checkpoint{Epoch: 0}}}
committee, err := helpers.BeaconCommitteeFromState(context.Background(), s, att.Data.Slot, att.Data.CommitteeIndex)
require.NoError(t, err)
require.NoError(t, validateIndexInCommittee(ctx, s, att, committee[0]))
bl := bitfield.NewBitlist(uint64(len(committee)))
att.AggregationBits = bl
service := &Service{}
result, err := service.validateIndexInCommittee(ctx, s, att, committee[0])
require.ErrorContains(t, "no attesting indices", err)
assert.Equal(t, pubsub.ValidationReject, result)
att.AggregationBits.SetBitAt(0, true)
result, err = service.validateIndexInCommittee(ctx, s, att, committee[0])
require.NoError(t, err)
assert.Equal(t, pubsub.ValidationAccept, result)
wanted := "validator index 1000 is not within the committee"
assert.ErrorContains(t, wanted, validateIndexInCommittee(ctx, s, att, 1000))
result, err = service.validateIndexInCommittee(ctx, s, att, 1000)
assert.ErrorContains(t, wanted, err)
assert.Equal(t, pubsub.ValidationReject, result)
att.AggregationBits = bitfield.NewBitlist(1)
result, err = service.validateIndexInCommittee(ctx, s, att, committee[0])
require.ErrorContains(t, "wanted participants bitfield length 4, got: 1", err)
assert.Equal(t, pubsub.ValidationReject, result)
att.Data.CommitteeIndex = 10000
result, err = service.validateIndexInCommittee(ctx, s, att, committee[0])
require.ErrorContains(t, "committee index 10000 > 2", err)
assert.Equal(t, pubsub.ValidationReject, result)
}
func TestVerifySelection_NotAnAggregator(t *testing.T) {


@@ -175,14 +175,9 @@ func (s *Service) validateUnaggregatedAttTopic(ctx context.Context, a *eth.Attes
ctx, span := trace.StartSpan(ctx, "sync.validateUnaggregatedAttTopic")
defer span.End()
valCount, err := helpers.ActiveValidatorCount(ctx, bs, slots.ToEpoch(a.Data.Slot))
if err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
count := helpers.SlotCommitteeCount(valCount)
if uint64(a.Data.CommitteeIndex) > count {
return pubsub.ValidationReject, errors.Errorf("committee index %d > %d", a.Data.CommitteeIndex, count)
valCount, result, err := s.validateCommitteeIndex(ctx, a, bs)
if result != pubsub.ValidationAccept {
return result, err
}
subnet := helpers.ComputeSubnetForAttestation(valCount, a)
format := p2p.GossipTypeMapping[reflect.TypeOf(&eth.Attestation{})]
@@ -198,21 +193,27 @@ func (s *Service) validateUnaggregatedAttTopic(ctx context.Context, a *eth.Attes
return pubsub.ValidationAccept, nil
}
func (s *Service) validateCommitteeIndex(ctx context.Context, a *eth.Attestation, bs state.ReadOnlyBeaconState) (uint64, pubsub.ValidationResult, error) {
valCount, err := helpers.ActiveValidatorCount(ctx, bs, slots.ToEpoch(a.Data.Slot))
if err != nil {
return 0, pubsub.ValidationIgnore, err
}
count := helpers.SlotCommitteeCount(valCount)
if uint64(a.Data.CommitteeIndex) > count {
return 0, pubsub.ValidationReject, errors.Errorf("committee index %d > %d", a.Data.CommitteeIndex, count)
}
return valCount, pubsub.ValidationAccept, nil
}
// This validates beacon unaggregated attestation using the given state, the validation consists of bitfield length and count consistency
// and signature verification.
func (s *Service) validateUnaggregatedAttWithState(ctx context.Context, a *eth.Attestation, bs state.ReadOnlyBeaconState) (pubsub.ValidationResult, error) {
ctx, span := trace.StartSpan(ctx, "sync.validateUnaggregatedAttWithState")
defer span.End()
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
// Verify number of aggregation bits matches the committee size.
if err := helpers.VerifyBitfieldLength(a.AggregationBits, uint64(len(committee))); err != nil {
return pubsub.ValidationReject, err
committee, result, err := s.validateBitLength(ctx, a, bs)
if result != pubsub.ValidationAccept {
return result, err
}
// Attestation must be unaggregated and the bit index must exist in the range of committee indices.
@@ -231,6 +232,20 @@ func (s *Service) validateUnaggregatedAttWithState(ctx context.Context, a *eth.A
return s.validateWithBatchVerifier(ctx, "attestation", set)
}
func (s *Service) validateBitLength(ctx context.Context, a *eth.Attestation, bs state.ReadOnlyBeaconState) ([]primitives.ValidatorIndex, pubsub.ValidationResult, error) {
committee, err := helpers.BeaconCommitteeFromState(ctx, bs, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return nil, pubsub.ValidationIgnore, err
}
// Verify number of aggregation bits matches the committee size.
if err := helpers.VerifyBitfieldLength(a.AggregationBits, uint64(len(committee))); err != nil {
return nil, pubsub.ValidationReject, err
}
return committee, pubsub.ValidationAccept, nil
}
// Returns true if the attestation was already seen for the participating validator for the slot.
func (s *Service) hasSeenCommitteeIndicesSlot(slot primitives.Slot, committeeID primitives.CommitteeIndex, aggregateBits []byte) bool {
s.seenUnAggregatedAttestationLock.RLock()


@@ -241,36 +241,11 @@ func (s *Service) validateBeaconBlock(ctx context.Context, blk interfaces.ReadOn
return err
}
if !s.cfg.chain.InForkchoice(blk.Block().ParentRoot()) {
s.setBadBlock(ctx, blockRoot)
return blockchain.ErrNotDescendantOfFinalized
}
parentState, err := s.cfg.stateGen.StateByRoot(ctx, blk.Block().ParentRoot())
parentState, err := s.validatePhase0Block(ctx, blk, blockRoot)
if err != nil {
return err
}
if err := blocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk); err != nil {
s.setBadBlock(ctx, blockRoot)
return err
}
// In the event the block is more than an epoch ahead from its
// parent state, we have to advance the state forward.
parentRoot := blk.Block().ParentRoot()
parentState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, parentState, parentRoot[:], blk.Block().Slot())
if err != nil {
return err
}
idx, err := helpers.BeaconProposerIndex(ctx, parentState)
if err != nil {
return err
}
if blk.Block().ProposerIndex() != idx {
s.setBadBlock(ctx, blockRoot)
return errors.New("incorrect proposer index")
}
if err = s.validateBellatrixBeaconBlock(ctx, parentState, blk.Block()); err != nil {
if errors.Is(err, ErrOptimisticParent) {
return err
@@ -282,6 +257,42 @@ func (s *Service) validateBeaconBlock(ctx context.Context, blk interfaces.ReadOn
return nil
}
// Validates beacon block according to phase 0 validity conditions.
// - Checks that the parent is in our forkchoice tree.
// - Validates that the proposer signature is valid.
// - Validates that the proposer index is valid.
func (s *Service) validatePhase0Block(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (state.BeaconState, error) {
if !s.cfg.chain.InForkchoice(blk.Block().ParentRoot()) {
s.setBadBlock(ctx, blockRoot)
return nil, blockchain.ErrNotDescendantOfFinalized
}
parentState, err := s.cfg.stateGen.StateByRoot(ctx, blk.Block().ParentRoot())
if err != nil {
return nil, err
}
if err := blocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk); err != nil {
return nil, err
}
// In the event the block is more than an epoch ahead from its
// parent state, we have to advance the state forward.
parentRoot := blk.Block().ParentRoot()
parentState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, parentState, parentRoot[:], blk.Block().Slot())
if err != nil {
return nil, err
}
idx, err := helpers.BeaconProposerIndex(ctx, parentState)
if err != nil {
return nil, err
}
if blk.Block().ProposerIndex() != idx {
s.setBadBlock(ctx, blockRoot)
return nil, errors.New("incorrect proposer index")
}
return parentState, nil
}
func validateDenebBeaconBlock(blk interfaces.ReadOnlyBeaconBlock) error {
if blk.Version() < version.Deneb {
return nil

Some files were not shown because too many files have changed in this diff.