Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 06:18:05 -05:00)

Compare commits: lockAnalyz...e2eProxy (30 commits)
Commit SHA1s (author and date columns were not captured by the mirror):

54ae2b32b2, 58f4ba758c, 64f64f06bf, e70055733f, 36e4f49af0, d98428dec4,
00b92e01d3, b84c1aa3ea, ca5adbf7e4, 621e149dce, a083b7a0a5, dd5995b665,
6903d52dde, ac8d27bcf1, 8d6afb3afd, d51f716675, 0411b7eceb, 8dfc80187d,
3833f78803, 83a83279d4, bdab34fd01, de0143e036, 2a7a09b112, 984575ed57,
927e338f9e, f44c99d92a, 7d669f23ab, 9b64c33bd1, defa602e50, 67c8776f3c
.github/workflows/horusec.yaml (vendored, new file, 22 lines)

@@ -0,0 +1,22 @@
```yaml
name: Horusec Security Scan

on:
  schedule:
    # Runs at 00:00 UTC every Sunday
    - cron: '0 0 * * SUN'

jobs:
  Horusec_Scan:
    name: horusec-Scan
    runs-on: ubuntu-latest
    if: github.ref == 'refs/heads/develop'
    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with: # Required when commit authors is enabled
          fetch-depth: 0

      - name: Running Security Scan
        run: |
          curl -fsSL https://raw.githubusercontent.com/ZupIT/horusec/main/deployments/scripts/install.sh | bash -s latest
          horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"
```
```diff
@@ -48,6 +48,7 @@ go_library(
         "//beacon-chain/core/transition:go_default_library",
         "//beacon-chain/db:go_default_library",
         "//beacon-chain/db/filters:go_default_library",
+        "//beacon-chain/db/kv:go_default_library",
         "//beacon-chain/forkchoice:go_default_library",
         "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
         "//beacon-chain/forkchoice/protoarray:go_default_library",
```
```diff
@@ -331,7 +331,7 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
 // IsOptimisticForRoot takes the root and slot as arguments instead of the current head
 // and returns true if it is optimistic.
 func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
-	optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, root)
+	optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
 	if err == nil {
 		return optimistic, nil
 	}
@@ -358,10 +358,14 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
 		return false, nil
 	}
 
-	lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, bytesutil.ToBytes32(validatedCheckpoint.Root))
+	// The checkpoint root could be zeros before the first finalized epoch. Use the genesis root in that case.
+	lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(validatedCheckpoint.Root)))
 	if err != nil {
 		return false, err
 	}
+	if lastValidated == nil {
+		return false, errInvalidNilSummary
+	}
 
 	if ss.Slot > lastValidated.Slot {
 		return true, nil
```
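The zero-root fallback above leans on the service's `ensureRootNotZeros` helper. A minimal standalone sketch of that guard (hypothetical free-function form; in the diff it is a method on `Service` and the genesis root comes from the service's own state):

```go
// ensureRootNotZeros maps an all-zero checkpoint root to the genesis root:
// before the first finalized epoch, a zero checkpoint root means "genesis".
func ensureRootNotZeros(root, genesisRoot [32]byte) [32]byte {
	if root == ([32]byte{}) {
		return genesisRoot
	}
	return root
}
```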
```diff
@@ -9,6 +9,7 @@ import (
 	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
 	"github.com/prysmaticlabs/prysm/testing/require"
 	"github.com/prysmaticlabs/prysm/testing/util"
 )
 
 func TestHeadSlot_DataRace(t *testing.T) {
@@ -16,10 +17,13 @@ func TestHeadSlot_DataRace(t *testing.T) {
 	s := &Service{
 		cfg: &config{BeaconDB: beaconDB},
 	}
+	b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
+	require.NoError(t, err)
+	st, _ := util.DeterministicGenesisState(t, 1)
 	wait := make(chan struct{})
 	go func() {
 		defer close(wait)
-		require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
+		require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
 	}()
 	s.HeadSlot()
 	<-wait
@@ -31,12 +35,16 @@ func TestHeadRoot_DataRace(t *testing.T) {
 		cfg:  &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
 		head: &head{root: [32]byte{'A'}},
 	}
+	b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
+	require.NoError(t, err)
 	wait := make(chan struct{})
+	st, _ := util.DeterministicGenesisState(t, 1)
 	go func() {
 		defer close(wait)
-		require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
+		require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
+
 	}()
-	_, err := s.HeadRoot(context.Background())
+	_, err = s.HeadRoot(context.Background())
 	require.NoError(t, err)
 	<-wait
 }
@@ -49,10 +57,14 @@ func TestHeadBlock_DataRace(t *testing.T) {
 		cfg:  &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
 		head: &head{block: wsb},
 	}
+	b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
+	require.NoError(t, err)
 	wait := make(chan struct{})
+	st, _ := util.DeterministicGenesisState(t, 1)
 	go func() {
 		defer close(wait)
-		require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
+		require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
+
 	}()
 	_, err = s.HeadBlock(context.Background())
 	require.NoError(t, err)
@@ -64,12 +76,16 @@ func TestHeadState_DataRace(t *testing.T) {
 	s := &Service{
 		cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
 	}
+	b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
+	require.NoError(t, err)
 	wait := make(chan struct{})
+	st, _ := util.DeterministicGenesisState(t, 1)
 	go func() {
 		defer close(wait)
-		require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
+		require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
+
 	}()
-	_, err := s.HeadState(context.Background())
+	_, err = s.HeadState(context.Background())
 	require.NoError(t, err)
 	<-wait
 }
```
```diff
@@ -477,6 +477,9 @@ func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
 	validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
 	require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
 
+	_, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
+	require.ErrorContains(t, "nil summary returned from the DB", err)
+
 	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
 	optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
 	require.NoError(t, err)
@@ -493,6 +496,17 @@ func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
 	validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
 	require.NoError(t, err)
 	require.Equal(t, false, validated)
+
+	// Before the first finalized epoch, the finalized root could be zeros.
+	validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
+	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
+	require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
+
+	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
+	optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
+	require.NoError(t, err)
+	require.Equal(t, true, optimistic)
 }
 
 func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
@@ -528,6 +542,9 @@ func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
 	validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
 	require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
 
+	_, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
+	require.ErrorContains(t, "nil summary returned from the DB", err)
+
 	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
 	optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
 	require.NoError(t, err)
@@ -543,6 +560,17 @@ func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
 	validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
 	require.NoError(t, err)
 	require.Equal(t, false, validated)
+
+	// Before the first finalized epoch, the finalized root could be zeros.
+	validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
+	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
+	require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
+
+	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
+	optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
+	require.NoError(t, err)
+	require.Equal(t, true, optimistic)
 }
 
 func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
```
```diff
@@ -40,7 +40,15 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
 	if err != nil {
 		return errors.Wrap(err, "could not update head")
 	}
-	return s.saveHead(ctx, headRoot)
+	headBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
+	if err != nil {
+		return err
+	}
+	headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
+	if err != nil {
+		return errors.Wrap(err, "could not retrieve head state in DB")
+	}
+	return s.saveHead(ctx, headRoot, headBlock, headState)
 }
 
 // This defines the current chain service's view of head.
@@ -97,7 +105,7 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,
 
 // This saves head info to the local service cache, it also saves the
 // new head root to the DB.
-func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
+func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock block.SignedBeaconBlock, headState state.BeaconState) error {
 	ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
 	defer span.End()
 
@@ -109,6 +117,12 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
 	if headRoot == bytesutil.ToBytes32(r) {
 		return nil
 	}
+	if err := helpers.BeaconBlockIsNil(headBlock); err != nil {
+		return err
+	}
+	if headState == nil || headState.IsNil() {
+		return errors.New("cannot save nil head state")
+	}
 
 	// If the head state is not available, just return nil.
 	// There's nothing to cache
@@ -116,31 +130,13 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
 		return nil
 	}
 
-	// Get the new head block from DB.
-	newHeadBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
-	if err != nil {
-		return err
-	}
-	if err := helpers.BeaconBlockIsNil(newHeadBlock); err != nil {
-		return err
-	}
-
-	// Get the new head state from cached state or DB.
-	newHeadState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
-	if err != nil {
-		return errors.Wrap(err, "could not retrieve head state in DB")
-	}
-	if newHeadState == nil || newHeadState.IsNil() {
-		return errors.New("cannot save nil head state")
-	}
-
 	// A chain re-org occurred, so we fire an event notifying the rest of the services.
 	headSlot := s.HeadSlot()
-	newHeadSlot := newHeadBlock.Block().Slot()
+	newHeadSlot := headBlock.Block().Slot()
 	oldHeadRoot := s.headRoot()
 	oldStateRoot := s.headBlock().Block().StateRoot()
-	newStateRoot := newHeadBlock.Block().StateRoot()
-	if bytesutil.ToBytes32(newHeadBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
+	newStateRoot := headBlock.Block().StateRoot()
+	if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
 		log.WithFields(logrus.Fields{
 			"newSlot": fmt.Sprintf("%d", newHeadSlot),
 			"oldSlot": fmt.Sprintf("%d", headSlot),
@@ -172,7 +168,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
 	}
 
 	// Cache the new head info.
-	s.setHead(headRoot, newHeadBlock, newHeadState)
+	s.setHead(headRoot, headBlock, headState)
 
 	// Save the new head root to DB.
 	if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
@@ -182,7 +178,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
 	// Forward an event capturing a new chain head over a common event feed
 	// done in a goroutine to avoid blocking the critical runtime main routine.
 	go func() {
-		if err := s.notifyNewHeadEvent(ctx, newHeadSlot, newHeadState, newStateRoot, headRoot[:]); err != nil {
+		if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot, headRoot[:]); err != nil {
 			log.WithError(err).Error("Could not notify event feed of new chain head")
 		}
 	}()
```
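The net effect of the signature change: `saveHead` no longer re-reads the head block and state from the DB; callers resolve them once and pass them in. A caller that just processed a block already holds both, so saving the head needs no extra round-trip. A sketch with hypothetical variable names:

```go
// signedBlock and postState are already in hand after block processing,
// so no lookup by root is needed before caching the new head.
if err := s.saveHead(ctx, blockRoot, signedBlock, postState); err != nil {
	return errors.Wrap(err, "could not save head")
}
```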
```diff
@@ -29,8 +29,10 @@ func TestSaveHead_Same(t *testing.T) {
 
 	r := [32]byte{'A'}
 	service.head = &head{slot: 0, root: r}
-
-	require.NoError(t, service.saveHead(context.Background(), r))
+	b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
+	require.NoError(t, err)
+	st, _ := util.DeterministicGenesisState(t, 1)
+	require.NoError(t, service.saveHead(context.Background(), r, b, st))
 	assert.Equal(t, types.Slot(0), service.headSlot(), "Head did not stay the same")
 	assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
 }
@@ -68,7 +70,7 @@ func TestSaveHead_Different(t *testing.T) {
 	require.NoError(t, headState.SetSlot(1))
 	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
 	require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
-	require.NoError(t, service.saveHead(context.Background(), newRoot))
+	require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
 
 	assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
@@ -114,7 +116,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
 	require.NoError(t, headState.SetSlot(1))
 	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
 	require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
-	require.NoError(t, service.saveHead(context.Background(), newRoot))
+	require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
 
 	assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
@@ -158,7 +160,8 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
 	service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
 	headRoot, err := service.updateHead(context.Background(), []uint64{})
 	require.NoError(t, err)
-	require.NoError(t, service.saveHead(context.Background(), headRoot))
+	st, _ := util.DeterministicGenesisState(t, 1)
+	require.NoError(t, service.saveHead(context.Background(), headRoot, wsb, st))
 }
 
 func Test_notifyNewHeadEvent(t *testing.T) {
```
```diff
@@ -138,6 +138,26 @@ var (
 		Name: "state_balance_cache_miss",
 		Help: "Count the number of state balance cache hits.",
 	})
+	newPayloadValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "new_payload_valid_node_count",
+		Help: "Count the number of valid nodes after newPayload EE call",
+	})
+	newPayloadOptimisticNodeCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "new_payload_optimistic_node_count",
+		Help: "Count the number of optimistic nodes after newPayload EE call",
+	})
+	newPayloadInvalidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "new_payload_invalid_node_count",
+		Help: "Count the number of invalid nodes after newPayload EE call",
+	})
+	forkchoiceUpdatedValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "forkchoice_updated_valid_node_count",
+		Help: "Count the number of valid nodes after forkchoiceUpdated EE call",
+	})
+	forkchoiceUpdatedOptimisticNodeCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "forkchoice_updated_optimistic_node_count",
+		Help: "Count the number of optimistic nodes after forkchoiceUpdated EE call",
+	})
 )
 
 // reportSlotMetrics reports slot related metrics.
```
```diff
@@ -5,14 +5,21 @@ import (
 	"fmt"
 
 	"github.com/pkg/errors"
+	types "github.com/prysmaticlabs/eth2-types"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
+	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
+	"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
+	"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
+	"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
 	"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
+	"github.com/prysmaticlabs/prysm/beacon-chain/state"
+	fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
 	"github.com/prysmaticlabs/prysm/config/params"
 	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
 	enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
 	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
 	"github.com/prysmaticlabs/prysm/time/slots"
 	"github.com/sirupsen/logrus"
 	"go.opencensus.io/trace"
 )
@@ -20,7 +27,7 @@ import (
 // notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
 // 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
 // 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
-func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.BeaconBlock, headRoot [32]byte, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
+func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.BeaconState, headBlk block.BeaconBlock, headRoot [32]byte, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
 	ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
 	defer span.End()
 
@@ -43,6 +50,12 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
 	if err != nil {
 		return nil, errors.Wrap(err, "could not get finalized block")
 	}
+	if finalizedBlock == nil || finalizedBlock.IsNil() {
+		finalizedBlock = s.getInitSyncBlock(s.ensureRootNotZeros(finalizedRoot))
+		if finalizedBlock == nil || finalizedBlock.IsNil() {
+			return nil, errors.Errorf("finalized block with root %#x does not exist in the db or our cache", s.ensureRootNotZeros(finalizedRoot))
+		}
+	}
 	var finalizedHash []byte
 	if blocks.IsPreBellatrixVersion(finalizedBlock.Block().Version()) {
 		finalizedHash = params.BeaconConfig().ZeroHash[:]
@@ -60,24 +73,36 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
 		FinalizedBlockHash: finalizedHash,
 	}
 
-	// payload attribute is only required when requesting payload, here we are just updating fork choice, so it is nil.
-	payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, nil /*payload attribute*/)
+	nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
+	hasAttr, attr, proposerId, err := s.getPayloadAttribute(ctx, headState, nextSlot)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not get payload attribute")
+	}
+
+	payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
 	if err != nil {
 		switch err {
 		case powchain.ErrAcceptedSyncingPayloadStatus:
+			forkchoiceUpdatedOptimisticNodeCount.Inc()
 			log.WithFields(logrus.Fields{
-				"headSlot":      headBlk.Slot(),
-				"headHash":      fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
-				"finalizedHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
+				"headSlot":                  headBlk.Slot(),
+				"headPayloadBlockHash":      fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
+				"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
 			}).Info("Called fork choice updated with optimistic block")
 			return payloadID, nil
 		default:
 			return nil, errors.Wrap(err, "could not notify forkchoice update from execution engine")
 		}
 	}
+	forkchoiceUpdatedValidNodeCount.Inc()
 	if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, headRoot); err != nil {
 		return nil, errors.Wrap(err, "could not set block to valid")
 	}
+	if hasAttr { // If the forkchoice update call has an attribute, update the proposer payload ID cache.
+		var pId [8]byte
+		copy(pId[:], payloadID[:])
+		s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId)
+	}
 	return payloadID, nil
 }
```
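Condensed, the new forkchoice-updated flow both updates fork choice and, when the next slot has a known proposer, kicks off payload construction and remembers the returned payload ID. A sketch using the same names as the diff (the nil check on `payloadID` is an extra defensive touch, not in the original):

```go
nextSlot := s.CurrentSlot() + 1 // cache the payload ID for the next slot's proposer
hasAttr, attr, proposerId, err := s.getPayloadAttribute(ctx, headState, nextSlot)
if err != nil {
	return nil, errors.Wrap(err, "could not get payload attribute")
}
// attr is nil when hasAttr is false, so this degrades to a plain fork
// choice update with no payload build, matching the old behavior.
payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
if err == nil && hasAttr && payloadID != nil {
	var pId [8]byte
	copy(pId[:], payloadID[:])
	s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId)
}
```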
```diff
@@ -108,20 +133,35 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postSta
 	if err != nil {
 		return false, errors.Wrap(err, "could not get execution payload")
 	}
-	_, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
+	lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
 	if err != nil {
 		switch err {
 		case powchain.ErrAcceptedSyncingPayloadStatus:
+			newPayloadOptimisticNodeCount.Inc()
 			log.WithFields(logrus.Fields{
-				"slot":      blk.Block().Slot(),
-				"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
+				"slot":             blk.Block().Slot(),
+				"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
 			}).Info("Called new payload with optimistic block")
 			return false, nil
+		case powchain.ErrInvalidPayloadStatus:
+			newPayloadInvalidNodeCount.Inc()
+			root, err := blk.Block().HashTreeRoot()
+			if err != nil {
+				return false, err
+			}
+			invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, root, bytesutil.ToBytes32(lastValidHash))
+			if err != nil {
+				return false, err
+			}
+			if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
+				return false, err
+			}
+			return false, errors.New("could not validate an INVALID payload from execution engine")
 		default:
 			return false, errors.Wrap(err, "could not validate execution payload from execution engine")
 		}
 	}
 
+	newPayloadValidNodeCount.Inc()
 	// During the transition event, the transition block should be verified for sanity.
 	if blocks.IsPreBellatrixVersion(preStateVersion) {
 		// Handle case where pre-state is Altair but block contains payload.
```
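Both engine calls above share one error-handling shape: sentinel errors from the `powchain` package translate the engine's payload status into chain behavior. A schematic sketch (hypothetical function; the real handling lives inline in the two methods):

```go
// classify shows the three-way split used by notifyNewPayload and
// notifyForkchoiceUpdate when inspecting an engine-API error.
func classify(err error) {
	switch err {
	case nil:
		// VALID: bump the valid-node counter and mark the block valid in fork choice.
	case powchain.ErrAcceptedSyncingPayloadStatus:
		// SYNCING/ACCEPTED: import optimistically; validity is unknown for now.
	case powchain.ErrInvalidPayloadStatus:
		// INVALID: prune the branch back to the last valid hash, then surface a hard error.
	default:
		// Transport or unexpected engine failure: wrap and bubble up.
	}
}
```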
```diff
@@ -168,3 +208,69 @@ func (s *Service) optimisticCandidateBlock(ctx context.Context, blk block.Beacon
 	}
 	return parentIsExecutionBlock, nil
 }
+
+// getPayloadAttribute returns the payload attributes for the given state and slot.
+// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
+func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot types.Slot) (bool, *enginev1.PayloadAttributes, types.ValidatorIndex, error) {
+	proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot)
+	if !ok { // There's no need to build an attribute if there is no proposer for the slot.
+		return false, nil, 0, nil
+	}
+
+	// Get previous randao.
+	st = st.Copy()
+	st, err := transition.ProcessSlotsIfPossible(ctx, st, slot)
+	if err != nil {
+		return false, nil, 0, err
+	}
+	prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
+	if err != nil {
+		return false, nil, 0, nil
+	}
+
+	// Get fee recipient.
+	feeRecipient := params.BeaconConfig().DefaultFeeRecipient
+	recipient, err := s.cfg.BeaconDB.FeeRecipientByValidatorID(ctx, proposerID)
+	switch {
+	case errors.Is(err, kv.ErrNotFoundFeeRecipient):
+		if feeRecipient.String() == fieldparams.EthBurnAddressHex {
+			logrus.WithFields(logrus.Fields{
+				"validatorIndex": proposerID,
+				"burnAddress":    fieldparams.EthBurnAddressHex,
+			}).Error("Fee recipient not set. Using burn address")
+		}
+	case err != nil:
+		return false, nil, 0, errors.Wrap(err, "could not get fee recipient in db")
+	default:
+		feeRecipient = recipient
+	}
+
+	// Get timestamp.
+	t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
+	if err != nil {
+		return false, nil, 0, err
+	}
+	attr := &enginev1.PayloadAttributes{
+		Timestamp:             uint64(t.Unix()),
+		PrevRandao:            prevRando,
+		SuggestedFeeRecipient: feeRecipient.Bytes(),
+	}
+	return true, attr, proposerID, nil
+}
+
+// removeInvalidBlockAndState removes the invalid block and its corresponding state from the cache and DB.
+func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32]byte) error {
+	for _, root := range blkRoots {
+		if err := s.cfg.StateGen.DeleteStateFromCaches(ctx, root); err != nil {
+			return err
+		}
+
+		// Deleting the block also deletes its state.
+		if err := s.cfg.BeaconDB.DeleteBlock(ctx, root); err != nil {
+			// TODO(10487): If a caller requests to delete a root that's justified and finalized, we should gracefully shut down.
+			// This is an irreparable condition: it would mean a justified or finalized block has become invalid.
+			return err
+		}
+	}
+	return nil
+}
```
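The attribute timestamp above is pure arithmetic on genesis time. A worked sketch of what `slots.ToTime` computes (ignoring its overflow checks; plain integers stand in for the typed slot):

```go
// slotStart returns the wall-clock start of a slot: genesis + slot * SecondsPerSlot.
// With mainnet's 12-second slots, slot 100 starts 1200 seconds after genesis.
func slotStart(genesisUnix int64, slot, secondsPerSlot uint64) time.Time {
	return time.Unix(genesisUnix, 0).Add(time.Duration(slot*secondsPerSlot) * time.Second)
}
```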
```diff
@@ -5,6 +5,9 @@ import (
 	"testing"
 	"time"
 
+	"github.com/ethereum/go-ethereum/common"
+	types "github.com/prysmaticlabs/eth2-types"
+	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
 	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
 	"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
@@ -24,6 +27,7 @@ import (
 	"github.com/prysmaticlabs/prysm/testing/require"
 	"github.com/prysmaticlabs/prysm/testing/util"
 	"github.com/prysmaticlabs/prysm/time/slots"
+	logTest "github.com/sirupsen/logrus/hooks/test"
 )
 
 func Test_NotifyForkchoiceUpdate(t *testing.T) {
```
```diff
@@ -44,8 +48,13 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
 		WithDatabase(beaconDB),
 		WithStateGen(stategen.New(beaconDB)),
 		WithForkChoiceStore(fcs),
+		WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
 	}
 	service, err := NewService(ctx, opts...)
+	st, _ := util.DeterministicGenesisState(t, 1)
+	service.head = &head{
+		state: st,
+	}
 	require.NoError(t, err)
 	require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
```
```diff
@@ -157,7 +166,8 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: tt.newForkchoiceErr}
-			_, err := service.notifyForkchoiceUpdate(ctx, tt.blk, service.headRoot(), tt.finalizedRoot)
+			st, _ := util.DeterministicGenesisState(t, 1)
+			_, err := service.notifyForkchoiceUpdate(ctx, st, tt.blk, service.headRoot(), tt.finalizedRoot)
 			if tt.errString != "" {
 				require.ErrorContains(t, tt.errString, err)
 			} else {
```
```diff
@@ -183,13 +193,6 @@ func Test_NotifyNewPayload(t *testing.T) {
 	phase0State, _ := util.DeterministicGenesisState(t, 1)
 	altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
 	bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
-	blk := &ethpb.SignedBeaconBlockBellatrix{
-		Block: &ethpb.BeaconBlockBellatrix{
-			Body: &ethpb.BeaconBlockBodyBellatrix{
-				ExecutionPayload: &v1.ExecutionPayload{},
-			},
-		},
-	}
 	a := &ethpb.SignedBeaconBlockAltair{
 		Block: &ethpb.BeaconBlockAltair{
 			Body: &ethpb.BeaconBlockBodyAltair{},
@@ -197,10 +200,32 @@ func Test_NotifyNewPayload(t *testing.T) {
 	}
 	altairBlk, err := wrapper.WrappedSignedBeaconBlock(a)
 	require.NoError(t, err)
-	bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(blk)
+	blk := &ethpb.SignedBeaconBlockBellatrix{
+		Block: &ethpb.BeaconBlockBellatrix{
+			Slot: 1,
+			Body: &ethpb.BeaconBlockBodyBellatrix{
+				ExecutionPayload: &v1.ExecutionPayload{
+					BlockNumber:   1,
+					ParentHash:    make([]byte, fieldparams.RootLength),
+					FeeRecipient:  make([]byte, fieldparams.FeeRecipientLength),
+					StateRoot:     make([]byte, fieldparams.RootLength),
+					ReceiptsRoot:  make([]byte, fieldparams.RootLength),
+					LogsBloom:     make([]byte, fieldparams.LogsBloomLength),
+					PrevRandao:    make([]byte, fieldparams.RootLength),
+					BaseFeePerGas: make([]byte, fieldparams.RootLength),
+					BlockHash:     make([]byte, fieldparams.RootLength),
+				},
+			},
+		},
+	}
+	bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
 	require.NoError(t, err)
 	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
+	r, err := bellatrixBlk.Block().HashTreeRoot()
+	require.NoError(t, err)
 	require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
+	require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
 
 	tests := []struct {
 		name string
```
```diff
@@ -244,7 +269,7 @@ func Test_NotifyNewPayload(t *testing.T) {
 			preState:      bellatrixState,
 			blk:           bellatrixBlk,
 			newPayloadErr: powchain.ErrInvalidPayloadStatus,
-			errString:     "could not validate execution payload from execution engine: payload status is INVALID",
+			errString:     "could not validate an INVALID payload from execution engine",
 			isValidPayload: false,
 		},
 		{
```
```diff
@@ -573,6 +598,47 @@ func Test_IsOptimisticShallowExecutionParent(t *testing.T) {
 	require.Equal(t, true, candidate)
 }
 
+func Test_GetPayloadAttribute(t *testing.T) {
+	ctx := context.Background()
+	beaconDB := testDB.SetupDB(t)
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+		WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
+	}
+
+	// Cache miss
+	service, err := NewService(ctx, opts...)
+	require.NoError(t, err)
+	hasPayload, _, vId, err := service.getPayloadAttribute(ctx, nil, 0)
+	require.NoError(t, err)
+	require.Equal(t, false, hasPayload)
+	require.Equal(t, types.ValidatorIndex(0), vId)
+
+	// Cache hit, advance state, no fee recipient
+	suggestedVid := types.ValidatorIndex(1)
+	slot := types.Slot(1)
+	service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{})
+	st, _ := util.DeterministicGenesisState(t, 1)
+	hook := logTest.NewGlobal()
+	hasPayload, attr, vId, err := service.getPayloadAttribute(ctx, st, slot)
+	require.NoError(t, err)
+	require.Equal(t, true, hasPayload)
+	require.Equal(t, suggestedVid, vId)
+	require.Equal(t, fieldparams.EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
+	require.LogsContain(t, hook, "Fee recipient not set. Using burn address")
+
+	// Cache hit, advance state, has fee recipient
+	suggestedAddr := common.HexToAddress("123")
+	require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []types.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
+	service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{})
+	hasPayload, attr, vId, err = service.getPayloadAttribute(ctx, st, slot)
+	require.NoError(t, err)
+	require.Equal(t, true, hasPayload)
+	require.Equal(t, suggestedVid, vId)
+	require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient))
+}
+
 func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
 	params.OverrideBeaconConfig(params.MainnetConfig())
```
```diff
@@ -588,7 +654,6 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
 	}
 	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-
 	genesisStateRoot := [32]byte{}
 	genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
 	wr, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
@@ -671,3 +736,59 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
 	require.DeepEqual(t, validCheckpoint.Root, cp.Root)
 	require.Equal(t, validCheckpoint.Epoch, cp.Epoch)
 }
+
+func TestService_removeInvalidBlockAndState(t *testing.T) {
+	ctx := context.Background()
+	beaconDB := testDB.SetupDB(t)
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithStateGen(stategen.New(beaconDB)),
+		WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
+	}
+	service, err := NewService(ctx, opts...)
+	require.NoError(t, err)
+
+	// Deleting unknown block should not error.
+	require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{{'a'}, {'b'}, {'c'}}))
+
+	// Happy case
+	b1 := util.NewBeaconBlock()
+	b1.Block.Slot = 1
+	blk1, err := wrapper.WrappedSignedBeaconBlock(b1)
+	require.NoError(t, err)
+	r1, err := blk1.Block().HashTreeRoot()
+	require.NoError(t, err)
+	st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
+	require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk1))
+	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
+		Slot: 1,
+		Root: r1[:],
+	}))
+	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, r1))
+
+	b2 := util.NewBeaconBlock()
+	b2.Block.Slot = 2
+	blk2, err := wrapper.WrappedSignedBeaconBlock(b2)
+	require.NoError(t, err)
+	r2, err := blk2.Block().HashTreeRoot()
+	require.NoError(t, err)
+	require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk2))
+	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
+		Slot: 2,
+		Root: r2[:],
+	}))
+	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, r2))
+
+	require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{r1, r2}))
+
+	require.Equal(t, false, service.hasBlock(ctx, r1))
+	require.Equal(t, false, service.hasBlock(ctx, r2))
+	require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r1))
+	require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r2))
+	has, err := service.cfg.StateGen.HasState(ctx, r1)
+	require.NoError(t, err)
+	require.Equal(t, false, has)
+	has, err = service.cfg.StateGen.HasState(ctx, r2)
+	require.NoError(t, err)
+	require.Equal(t, false, has)
+}
```
```diff
@@ -2,6 +2,7 @@ package blockchain
 
 import (
 	"github.com/prysmaticlabs/prysm/async/event"
+	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
 	"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
 	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
 	"github.com/prysmaticlabs/prysm/beacon-chain/db"
@@ -66,6 +67,14 @@ func WithDepositCache(c *depositcache.DepositCache) Option {
 	}
 }
 
+// WithProposerIdsCache for proposer id cache.
+func WithProposerIdsCache(c *cache.ProposerPayloadIDsCache) Option {
+	return func(s *Service) error {
+		s.cfg.ProposerSlotIndexCache = c
+		return nil
+	}
+}
+
 // WithAttestationPool for attestation lifecycle after chain inclusion.
 func WithAttestationPool(p attestations.Pool) Option {
 	return func(s *Service) error {
```
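Wiring sketch for the new option, mirroring how the tests in this change construct the service:

```go
opts := []Option{
	WithDatabase(beaconDB),
	WithStateGen(stategen.New(beaconDB)),
	WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
```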
```diff
@@ -127,8 +127,8 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
 		}
 	}
 
-	if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
-		return err
+	if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
+		return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
 	}
 	if isValidPayload {
 		if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
@@ -148,6 +148,9 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
 		return err
 	}
 
+	if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
+		return err
+	}
 	// If slasher is configured, forward the attestations in the block via
 	// an event feed for processing.
 	if features.Get().EnableSlasher {
@@ -208,10 +211,18 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
 	if err != nil {
 		log.WithError(err).Warn("Could not update head")
 	}
-	if _, err := s.notifyForkchoiceUpdate(ctx, s.headBlock().Block(), s.headRoot(), bytesutil.ToBytes32(finalized.Root)); err != nil {
+	headBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
+	if err != nil {
+		return err
+	}
-	if err := s.saveHead(ctx, headRoot); err != nil {
+	headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
+	if err != nil {
+		return err
+	}
+	if _, err := s.notifyForkchoiceUpdate(ctx, headState, headBlock.Block(), headRoot, bytesutil.ToBytes32(finalized.Root)); err != nil {
 		return err
 	}
+	if err := s.saveHead(ctx, headRoot, headBlock, headState); err != nil {
 		return errors.Wrap(err, "could not save head")
 	}
@@ -258,7 +269,7 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
 	if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
 		return errors.Wrap(err, "could not prune proto array fork choice nodes")
 	}
-	isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, fRoot)
+	isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
 	if err != nil {
 		return errors.Wrap(err, "could not check if node is optimistically synced")
 	}
@@ -424,7 +435,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
 		}
 	}
 
-	if _, err := s.notifyForkchoiceUpdate(ctx, b.Block(), blockRoots[i], bytesutil.ToBytes32(fCheckpoints[i].Root)); err != nil {
+	if _, err := s.notifyForkchoiceUpdate(ctx, preState, b.Block(), blockRoots[i], bytesutil.ToBytes32(fCheckpoints[i].Root)); err != nil {
 		return nil, nil, err
 	}
 }
@@ -601,9 +612,6 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.Sig
 	if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
 		return errors.Wrap(err, "could not save state")
 	}
-	if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st); err != nil {
-		return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block().Slot())
-	}
 	return nil
 }
```
```diff
@@ -247,7 +247,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
 	}
 
 	fRoot := bytesutil.ToBytes32(cp.Root)
-	optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, fRoot)
+	optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
 	if err != nil {
 		return err
 	}
```
```diff
@@ -181,15 +181,26 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
 		log.WithError(errNilFinalizedInStore).Error("could not get finalized checkpoint")
 		return
 	}
-	_, err := s.notifyForkchoiceUpdate(s.ctx,
-		s.headBlock().Block(),
-		s.headRoot(),
+	newHeadBlock, err := s.cfg.BeaconDB.Block(ctx, newHeadRoot)
+	if err != nil {
+		log.WithError(err).Error("Could not get block from db")
+		return
+	}
+	headState, err := s.cfg.StateGen.StateByRoot(ctx, newHeadRoot)
+	if err != nil {
+		log.WithError(err).Error("Could not get state from db")
+		return
+	}
+	_, err = s.notifyForkchoiceUpdate(s.ctx,
+		headState,
+		newHeadBlock.Block(),
+		newHeadRoot,
 		bytesutil.ToBytes32(finalized.Root),
 	)
 	if err != nil {
 		log.WithError(err).Error("could not notify forkchoice update")
 	}
-	if err := s.saveHead(ctx, newHeadRoot); err != nil {
+	if err := s.saveHead(ctx, newHeadRoot, newHeadBlock, headState); err != nil {
 		log.WithError(err).Error("could not save head")
 	}
 }
```
```diff
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	types "github.com/prysmaticlabs/eth2-types"
+	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
 	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
@@ -133,7 +134,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
 
 	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-
+	service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
 	service.notifyEngineIfChangedHead(ctx, service.headRoot())
 	hookErr := "could not notify forkchoice update"
 	finalizedErr := "could not get finalized checkpoint"
@@ -143,21 +144,33 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
 	require.LogsContain(t, hook, finalizedErr)
 
 	hook.Reset()
+	service.head = &head{
+		root:  [32]byte{'a'},
+		block: nil, /* should not panic if notify head uses correct head */
+	}
+
 	b := util.NewBeaconBlock()
-	b.Block.Slot = 1
+	b.Block.Slot = 2
 	wsb, err := wrapper.WrappedSignedBeaconBlock(b)
 	require.NoError(t, err)
 	require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
-	r, err := b.Block.HashTreeRoot()
+	r1, err := b.Block.HashTreeRoot()
 	require.NoError(t, err)
-	finalized := &ethpb.Checkpoint{Root: r[:], Epoch: 0}
+	finalized := &ethpb.Checkpoint{Root: r1[:], Epoch: 0}
+	st, _ := util.DeterministicGenesisState(t, 1)
 	service.head = &head{
 		slot: 1,
-		root: r,
+		root:  r1,
+		block: wsb,
+		state: st,
 	}
+	service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
 	service.store.SetFinalizedCheckpt(finalized)
-	service.notifyEngineIfChangedHead(ctx, [32]byte{'b'})
+	service.notifyEngineIfChangedHead(ctx, r1)
 	require.LogsDoNotContain(t, hook, finalizedErr)
 	require.LogsDoNotContain(t, hook, hookErr)
+	vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2)
+	require.Equal(t, true, has)
+	require.Equal(t, types.ValidatorIndex(1), vId)
+	require.Equal(t, [8]byte{1}, payloadID)
 }
```
```diff
@@ -74,6 +74,7 @@ type config struct {
 	ChainStartFetcher      powchain.ChainStartFetcher
 	BeaconDB               db.HeadAccessDatabase
 	DepositCache           *depositcache.DepositCache
+	ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
 	AttPool                attestations.Pool
 	ExitPool               voluntaryexits.PoolManager
 	SlashingPool           slashings.PoolManager
```
```diff
@@ -6,7 +6,9 @@ import (
 	"testing"
 
 	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
+	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
 	"github.com/prysmaticlabs/prysm/testing/require"
+	"github.com/prysmaticlabs/prysm/testing/util"
 	"github.com/sirupsen/logrus"
 )
@@ -20,8 +22,11 @@ func TestChainService_SaveHead_DataRace(t *testing.T) {
 	s := &Service{
 		cfg: &config{BeaconDB: beaconDB},
 	}
+	b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
+	st, _ := util.DeterministicGenesisState(t, 1)
+	require.NoError(t, err)
 	go func() {
-		require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
+		require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
 	}()
-	require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
+	require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
 }
```
beacon-chain/cache/BUILD.bazel (vendored, 3 additions)

```diff
@@ -13,6 +13,7 @@ go_library(
         "common.go",
         "doc.go",
         "error.go",
+        "payload_id.go",
         "proposer_indices.go",
         "proposer_indices_disabled.go",  # keep
         "proposer_indices_type.go",
@@ -26,6 +27,7 @@ go_library(
     importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
     visibility = [
         "//beacon-chain:__subpackages__",
+        "//testing/spectest:__subpackages__",
         "//tools:__subpackages__",
     ],
     deps = [
@@ -61,6 +63,7 @@ go_test(
         "checkpoint_state_test.go",
         "committee_fuzz_test.go",
         "committee_test.go",
+        "payload_id_test.go",
         "proposer_indices_test.go",
         "skip_slot_cache_test.go",
        "subnet_ids_test.go",
```
```diff
@@ -36,7 +36,7 @@ type DepositFetcher interface {
 	DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int)
 	DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte)
 	FinalizedDeposits(ctx context.Context) *FinalizedDeposits
-	NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit
+	NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit
 }
 
 // FinalizedDeposits stores the trie of deposits that have been included
@@ -246,7 +246,7 @@ func (dc *DepositCache) FinalizedDeposits(ctx context.Context) *FinalizedDeposit
 
 // NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
 // If no block is specified then this method returns all non-finalized deposits.
-func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
+func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
 	ctx, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
 	defer span.End()
 	dc.depositsLock.RLock()
@@ -256,10 +256,9 @@ func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.
 		return dc.allDeposits(untilBlk)
 	}
 
-	lastFinalizedDepositIndex := dc.finalizedDeposits.MerkleTrieIndex
 	var deposits []*ethpb.Deposit
 	for _, d := range dc.deposits {
-		if (d.Index > lastFinalizedDepositIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
+		if (d.Index > lastFinalizedIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
 			deposits = append(deposits, d.Deposit)
 		}
 	}
@@ -554,7 +554,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
 	})
 	dc.InsertFinalizedDeposits(context.Background(), 1)
 
-	deps := dc.NonFinalizedDeposits(context.Background(), nil)
+	deps := dc.NonFinalizedDeposits(context.Background(), 1, nil)
 	assert.Equal(t, 2, len(deps))
 }
@@ -611,7 +611,7 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
 	})
 	dc.InsertFinalizedDeposits(context.Background(), 1)
 
-	deps := dc.NonFinalizedDeposits(context.Background(), big.NewInt(10))
+	deps := dc.NonFinalizedDeposits(context.Background(), 1, big.NewInt(10))
 	assert.Equal(t, 1, len(deps))
 }
```
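Usage sketch for the new `NonFinalizedDeposits` signature: the caller snapshots the finalized deposit-trie index once and passes it in, so the index and the deposit walk come from a single consistent view instead of the method re-reading `dc.finalizedDeposits` under its own lock. (This assumes `FinalizedDeposits` still exposes `MerkleTrieIndex`, as the removed line suggests.)

```go
fd := dc.FinalizedDeposits(ctx)
// A nil block number means "all non-finalized deposits", as before.
deps := dc.NonFinalizedDeposits(ctx, fd.MerkleTrieIndex, nil)
```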
beacon-chain/cache/payload_id.go (vendored, new file, 73 lines)

@@ -0,0 +1,73 @@
```go
package cache

import (
	"sync"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
)

const vIdLength = 8
const pIdLength = 8
const vpIdsLength = vIdLength + pIdLength

// ProposerPayloadIDsCache is a cache of proposer payload IDs.
// The key is the slot. The value is the concatenation of the proposer and payload IDs, 8 bytes each.
type ProposerPayloadIDsCache struct {
	slotToProposerAndPayloadIDs map[types.Slot][vpIdsLength]byte
	sync.RWMutex
}

// NewProposerPayloadIDsCache creates a new proposer payload IDs cache.
func NewProposerPayloadIDsCache() *ProposerPayloadIDsCache {
	return &ProposerPayloadIDsCache{
		slotToProposerAndPayloadIDs: make(map[types.Slot][vpIdsLength]byte),
	}
}

// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot types.Slot) (types.ValidatorIndex, [8]byte, bool) {
	f.RLock()
	defer f.RUnlock()
	ids, ok := f.slotToProposerAndPayloadIDs[slot]
	if !ok {
		return 0, [8]byte{}, false
	}
	vId := ids[:vIdLength]

	b := ids[vIdLength:]
	var pId [pIdLength]byte
	copy(pId[:], b)

	return types.ValidatorIndex(bytesutil.BytesToUint64BigEndian(vId)), pId, true
}

// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot types.Slot, vId types.ValidatorIndex, pId [8]byte) {
	f.Lock()
	defer f.Unlock()
	var vIdBytes [vIdLength]byte
	copy(vIdBytes[:], bytesutil.Uint64ToBytesBigEndian(uint64(vId)))

	var bytes [vpIdsLength]byte
	copy(bytes[:], append(vIdBytes[:], pId[:]...))

	_, ok := f.slotToProposerAndPayloadIDs[slot]
	// It is OK to overwrite an existing entry when the incoming payload ID is set.
	// This combats the re-org case where the payload assignment for a slot could change within the epoch.
	if !ok || (ok && pId != [pIdLength]byte{}) {
		f.slotToProposerAndPayloadIDs[slot] = bytes
	}
}

// PrunePayloadIDs removes payload ID entries that are older than the given slot.
func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot types.Slot) {
	f.Lock()
	defer f.Unlock()

	for s := range f.slotToProposerAndPayloadIDs {
		if slot > s {
			delete(f.slotToProposerAndPayloadIDs, s)
		}
	}
}
```
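A quick usage sketch of the cache; the test file below exercises the same surface, including the overwrite rule:

```go
c := NewProposerPayloadIDsCache()
c.SetProposerAndPayloadIDs(types.Slot(100), types.ValidatorIndex(7), [8]byte{0xaa})
vIdx, pId, ok := c.GetProposerPayloadIDs(types.Slot(100)) // 7, payload ID 0xaa..., true

// Setting an empty payload ID for an already-cached slot is a no-op,
// so a late fork choice update cannot erase a known payload ID.
c.SetProposerAndPayloadIDs(types.Slot(100), types.ValidatorIndex(9), [8]byte{})
vIdx, _, _ = c.GetProposerPayloadIDs(types.Slot(100)) // still validator 7

c.PrunePayloadIDs(types.Slot(101)) // drops every entry below slot 101
_, _, ok = c.GetProposerPayloadIDs(types.Slot(100)) // false
_, _ = vIdx, pId
```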
beacon-chain/cache/payload_id_test.go (vendored, new file, 60 lines)

@@ -0,0 +1,60 @@
```go
package cache

import (
	"testing"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/testing/require"
)

func TestValidatorPayloadIDsCache_GetAndSaveValidatorPayloadIDs(t *testing.T) {
	cache := NewProposerPayloadIDsCache()
	i, p, ok := cache.GetProposerPayloadIDs(0)
	require.Equal(t, false, ok)
	require.Equal(t, types.ValidatorIndex(0), i)
	require.Equal(t, [pIdLength]byte{}, p)

	slot := types.Slot(1234)
	vid := types.ValidatorIndex(34234324)
	pid := [8]byte{1, 2, 3, 3, 7, 8, 7, 8}
	cache.SetProposerAndPayloadIDs(slot, vid, pid)
	i, p, ok = cache.GetProposerPayloadIDs(slot)
	require.Equal(t, true, ok)
	require.Equal(t, vid, i)
	require.Equal(t, pid, p)

	slot = types.Slot(9456456)
	vid = types.ValidatorIndex(6786745)
	cache.SetProposerAndPayloadIDs(slot, vid, [pIdLength]byte{})
	i, p, ok = cache.GetProposerPayloadIDs(slot)
	require.Equal(t, true, ok)
	require.Equal(t, vid, i)
	require.Equal(t, [pIdLength]byte{}, p)

	// reset cache without pid
	slot = types.Slot(9456456)
	vid = types.ValidatorIndex(11111)
	pid = [8]byte{3, 2, 3, 33, 72, 8, 7, 8}
	cache.SetProposerAndPayloadIDs(slot, vid, pid)
	i, p, ok = cache.GetProposerPayloadIDs(slot)
	require.Equal(t, true, ok)
	require.Equal(t, vid, i)
	require.Equal(t, pid, p)

	// reset cache with existing pid
	slot = types.Slot(9456456)
	vid = types.ValidatorIndex(11111)
	newPid := [8]byte{1, 2, 3, 33, 72, 8, 7, 1}
	cache.SetProposerAndPayloadIDs(slot, vid, newPid)
	i, p, ok = cache.GetProposerPayloadIDs(slot)
	require.Equal(t, true, ok)
	require.Equal(t, vid, i)
	require.Equal(t, newPid, p)

	// remove cache entry
	cache.PrunePayloadIDs(slot + 1)
	i, p, ok = cache.GetProposerPayloadIDs(slot)
	require.Equal(t, false, ok)
	require.Equal(t, types.ValidatorIndex(0), i)
	require.Equal(t, [pIdLength]byte{}, p)
}
```
```diff
@@ -175,9 +175,7 @@ func CommitteeAssignments(
 		return nil, nil, err
 	}
 	proposerIndexToSlots := make(map[types.ValidatorIndex][]types.Slot, params.BeaconConfig().SlotsPerEpoch)
-	// Proposal epochs do not have a look ahead, so we skip them over here.
-	validProposalEpoch := epoch < nextEpoch
-	for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch && validProposalEpoch; slot++ {
+	for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
 		// Skip proposer assignment for genesis slot.
 		if slot == 0 {
 			continue
@@ -192,6 +190,15 @@ func CommitteeAssignments(
 		proposerIndexToSlots[i] = append(proposerIndexToSlots[i], slot)
 	}
 
+	// If the proposer-index computation above ran outside the current proposal epoch range,
+	// reset the state slot back to the start slot so that the correct committees are computed below.
+	currentProposalEpoch := epoch < nextEpoch
+	if !currentProposalEpoch {
+		if err := state.SetSlot(state.Slot() - params.BeaconConfig().SlotsPerEpoch); err != nil {
+			return nil, nil, err
+		}
+	}
+
 	activeValidatorIndices, err := ActiveValidatorIndices(ctx, state, epoch)
 	if err != nil {
 		return nil, nil, err
@@ -232,7 +232,7 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
 
 	_, proposerIndxs, err = CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
 	require.NoError(t, err)
-	require.Equal(t, 0, len(proposerIndxs), "wanted empty proposer index set")
+	require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
 }
 
 func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
```
@@ -94,6 +94,7 @@ go_test(
|
||||
"state_test.go",
|
||||
"utils_test.go",
|
||||
"validated_checkpoint_test.go",
|
||||
"wss_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
@@ -101,6 +102,7 @@ go_test(
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/db/iface:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/genesis:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//beacon-chain/state/v2:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
|
||||
@@ -233,13 +233,17 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
	defer span.End()

	if err := s.DeleteState(ctx, root); err != nil {
		return errDeleteFinalized
		return err
	}

	if err := s.deleteStateSummary(root); err != nil {
		return err
	}

	return s.db.Update(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
		if b := bkt.Get(root[:]); b != nil {
			return errDeleteFinalized
			return ErrDeleteJustifiedAndFinalized
		}

		if err := tx.Bucket(blocksBucket).Delete(root[:]); err != nil {
@@ -191,6 +191,16 @@ func TestStore_DeleteBlock(t *testing.T) {
	require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
	blks := makeBlocks(t, 0, slotsPerEpoch*4, genesisBlockRoot)
	require.NoError(t, db.SaveBlocks(ctx, blks))
	ss := make([]*ethpb.StateSummary, len(blks))
	for i, blk := range blks {
		r, err := blk.Block().HashTreeRoot()
		require.NoError(t, err)
		ss[i] = &ethpb.StateSummary{
			Slot: blk.Block().Slot(),
			Root: r[:],
		}
	}
	require.NoError(t, db.SaveStateSummaries(ctx, ss))

	root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
	require.NoError(t, err)
@@ -216,11 +226,50 @@ func TestStore_DeleteBlock(t *testing.T) {
	b, err = db.Block(ctx, root2)
	require.NoError(t, err)
	require.Equal(t, b, nil)
	require.Equal(t, false, db.HasStateSummary(ctx, root2))

	require.ErrorIs(t, db.DeleteBlock(ctx, root), errDeleteFinalized)
	require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}

func TestStore_DeleteJustifiedBlock(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	b := util.NewBeaconBlock()
	b.Block.Slot = 1
	root, err := b.Block.HashTreeRoot()
	require.NoError(t, err)
	cp := &ethpb.Checkpoint{
		Root: root[:],
	}
	st, err := util.NewBeaconState()
	require.NoError(t, err)
	blk, err := wrapper.WrappedSignedBeaconBlock(b)
	require.NoError(t, err)
	require.NoError(t, db.SaveBlock(ctx, blk))
	require.NoError(t, db.SaveState(ctx, st, root))
	require.NoError(t, db.SaveJustifiedCheckpoint(ctx, cp))
	require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}

func TestStore_DeleteFinalizedBlock(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	b := util.NewBeaconBlock()
	root, err := b.Block.HashTreeRoot()
	require.NoError(t, err)
	cp := &ethpb.Checkpoint{
		Root: root[:],
	}
	st, err := util.NewBeaconState()
	require.NoError(t, err)
	blk, err := wrapper.WrappedSignedBeaconBlock(b)
	require.NoError(t, err)
	require.NoError(t, db.SaveBlock(ctx, blk))
	require.NoError(t, db.SaveState(ctx, st, root))
	require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
	require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
	require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}

func TestStore_GenesisBlock(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
@@ -2,8 +2,8 @@ package kv

import "github.com/pkg/errors"

// errDeleteFinalized is raised when we attempt to delete a finalized block/state
var errDeleteFinalized = errors.New("cannot delete finalized block or state")
// ErrDeleteJustifiedAndFinalized is raised when we attempt to delete a justified or finalized block/state
var ErrDeleteJustifiedAndFinalized = errors.New("cannot delete finalized block or state")

// ErrNotFound can be used directly, or as a wrapped DBError, whenever a db method needs to
// indicate that a value couldn't be found.
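The rename from the unexported errDeleteFinalized to the exported ErrDeleteJustifiedAndFinalized lets callers outside the kv package match the sentinel with errors.Is. A small illustration (the deleteBlock helper is invented for the example):

package main

import (
	"errors"
	"fmt"
)

// Exported sentinel, as in the kv package after the rename.
var ErrDeleteJustifiedAndFinalized = errors.New("cannot delete finalized block or state")

func deleteBlock(isFinalized bool) error {
	if isFinalized {
		return ErrDeleteJustifiedAndFinalized
	}
	return nil
}

func main() {
	// Callers in other packages can now branch on the sentinel.
	if err := deleteBlock(true); errors.Is(err, ErrDeleteJustifiedAndFinalized) {
		fmt.Println("refusing to delete a justified/finalized block")
	}
}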
@@ -346,19 +346,31 @@ func (s *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {

		bkt = tx.Bucket(checkpointBucket)
		enc := bkt.Get(finalizedCheckpointKey)
		checkpoint := &ethpb.Checkpoint{}
		finalized := &ethpb.Checkpoint{}
		if enc == nil {
			checkpoint = &ethpb.Checkpoint{Root: genesisBlockRoot}
		} else if err := decode(ctx, enc, checkpoint); err != nil {
			finalized = &ethpb.Checkpoint{Root: genesisBlockRoot}
		} else if err := decode(ctx, enc, finalized); err != nil {
			return err
		}

		enc = bkt.Get(justifiedCheckpointKey)
		justified := &ethpb.Checkpoint{}
		if enc == nil {
			justified = &ethpb.Checkpoint{Root: genesisBlockRoot}
		} else if err := decode(ctx, enc, justified); err != nil {
			return err
		}

		blockBkt := tx.Bucket(blocksBucket)
		headBlkRoot := blockBkt.Get(headBlockRootKey)
		bkt = tx.Bucket(stateBucket)
		// Safe guard against deleting genesis, finalized, head state.
		if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
			return errors.New("cannot delete genesis, finalized, or head state")
		// Safeguard against deleting the genesis, justified, or finalized state.
		if bytes.Equal(blockRoot[:], finalized.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], justified.Root) {
			return ErrDeleteJustifiedAndFinalized
		}

		// Nothing to delete if state doesn't exist.
		enc = bkt.Get(blockRoot[:])
		if enc == nil {
			return nil
		}

		slot, err := s.slotByBlockRoot(ctx, tx, blockRoot[:])
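Distilled from the hunk above: the protection rule changed from {genesis, finalized, head} to {genesis, finalized, justified}, so a head state alone is no longer protected. A hedged sketch of the new predicate (the name is illustrative):

package sketch

import "bytes"

// isProtectedState reports whether a state may not be deleted under the new
// rule above: genesis, finalized, and justified states are protected.
func isProtectedState(blockRoot, genesisRoot, finalizedRoot, justifiedRoot []byte) bool {
	return bytes.Equal(blockRoot, finalizedRoot) ||
		bytes.Equal(blockRoot, genesisRoot) ||
		bytes.Equal(blockRoot, justifiedRoot)
}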
@@ -110,3 +110,12 @@ func (s *Store) saveCachedStateSummariesDB(ctx context.Context) error {
	s.stateSummaryCache.clear()
	return nil
}

// deleteStateSummary deletes a state summary object from the db using the input block root.
func (s *Store) deleteStateSummary(blockRoot [32]byte) error {
	s.stateSummaryCache.delete(blockRoot)
	return s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(stateSummaryBucket)
		return bucket.Delete(blockRoot[:])
	})
}

@@ -37,6 +37,13 @@ func (c *stateSummaryCache) has(r [32]byte) bool {
	return ok
}

// delete removes a state summary from the cache.
func (c *stateSummaryCache) delete(r [32]byte) {
	c.initSyncStateSummariesLock.Lock()
	defer c.initSyncStateSummariesLock.Unlock()
	delete(c.initSyncStateSummaries, r)
}

// get retrieves a state summary from the initial sync state summaries cache using the root of
// the block.
func (c *stateSummaryCache) get(r [32]byte) *ethpb.StateSummary {

@@ -62,3 +62,17 @@ func TestStateSummary_CacheToDB(t *testing.T) {
		require.Equal(t, true, db.HasStateSummary(context.Background(), bytesutil.ToBytes32(r)))
	}
}

func TestStateSummary_CanDelete(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	r1 := bytesutil.ToBytes32([]byte{'A'})
	s1 := &ethpb.StateSummary{Slot: 1, Root: r1[:]}

	require.Equal(t, false, db.HasStateSummary(ctx, r1), "State summary should not be saved")
	require.NoError(t, db.SaveStateSummary(ctx, s1))
	require.Equal(t, true, db.HasStateSummary(ctx, r1), "State summary should be saved")

	require.NoError(t, db.deleteStateSummary(r1))
	require.Equal(t, false, db.HasStateSummary(ctx, r1), "State summary should not be saved")
}
@@ -412,7 +412,7 @@ func TestStore_DeleteGenesisState(t *testing.T) {
	require.NoError(t, err)
	require.NoError(t, st.SetSlot(100))
	require.NoError(t, db.SaveState(ctx, st, genesisBlockRoot))
	wantedErr := "cannot delete genesis, finalized, or head state"
	wantedErr := "cannot delete finalized block or state"
	assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, genesisBlockRoot))
}

@@ -440,7 +440,7 @@ func TestStore_DeleteFinalizedState(t *testing.T) {
	require.NoError(t, db.SaveState(ctx, finalizedState, finalizedBlockRoot))
	finalizedCheckpoint := &ethpb.Checkpoint{Root: finalizedBlockRoot[:]}
	require.NoError(t, db.SaveFinalizedCheckpoint(ctx, finalizedCheckpoint))
	wantedErr := "cannot delete genesis, finalized, or head state"
	wantedErr := "cannot delete finalized block or state"
	assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, finalizedBlockRoot))
}

@@ -465,8 +465,7 @@ func TestStore_DeleteHeadState(t *testing.T) {
	require.NoError(t, st.SetSlot(100))
	require.NoError(t, db.SaveState(ctx, st, headBlockRoot))
	require.NoError(t, db.SaveHeadBlockRoot(ctx, headBlockRoot))
	wantedErr := "cannot delete genesis, finalized, or head state"
	assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, headBlockRoot))
	require.NoError(t, db.DeleteState(ctx, headBlockRoot)) // Ok to delete head state if it's optimistic.
}

func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
@@ -77,6 +77,12 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
		return errors.Wrap(err, "could not save head block root")
	}

	// save origin block root in a special key, to be used when the canonical
	// origin (start of chain, ie alternative to genesis) block or state is needed
	if err = s.SaveOriginCheckpointBlockRoot(ctx, blockRoot); err != nil {
		return errors.Wrap(err, "could not save origin block root")
	}

	// rebuild the checkpoint from the block
	// use it to mark the block as justified and finalized
	slotEpoch, err := wblk.Block().Slot().SafeDivSlot(params.BeaconConfig().SlotsPerEpoch)
@@ -94,11 +100,5 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
		return errors.Wrap(err, "could not mark checkpoint sync block as finalized")
	}

	// save origin block root in a special key, to be used when the canonical
	// origin (start of chain, ie alternative to genesis) block or state is needed
	if err = s.SaveOriginCheckpointBlockRoot(ctx, blockRoot); err != nil {
		return errors.Wrap(err, "could not save origin block root")
	}

	return nil
}
45
beacon-chain/db/kv/wss_test.go
Normal file
@@ -0,0 +1,45 @@
package kv

import (
	"context"
	"testing"

	"github.com/prysmaticlabs/prysm/beacon-chain/state/genesis"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/testing/util"
)

func TestSaveOrigin(t *testing.T) {
	// Embedded Genesis works with Mainnet config
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig()
	cfg.ConfigName = params.ConfigNames[params.Mainnet]
	params.OverrideBeaconConfig(cfg)

	ctx := context.Background()
	db := setupDB(t)

	st, err := genesis.State(params.Mainnet.String())
	require.NoError(t, err)

	sb, err := st.MarshalSSZ()
	require.NoError(t, err)
	require.NoError(t, db.LoadGenesis(ctx, sb))

	// this is necessary for mainnet, because LoadGenesis is short-circuited by the embedded state,
	// so the genesis root key is never written to the db.
	require.NoError(t, db.EnsureEmbeddedGenesis(ctx))

	cst, err := util.NewBeaconState()
	require.NoError(t, err)
	csb, err := cst.MarshalSSZ()
	require.NoError(t, err)
	cb := util.NewBeaconBlock()
	scb, err := wrapper.WrappedSignedBeaconBlock(cb)
	require.NoError(t, err)
	cbb, err := scb.MarshalSSZ()
	require.NoError(t, err)
	require.NoError(t, db.SaveOrigin(ctx, csb, cbb))
}
@@ -155,7 +155,7 @@ func (_ *Service) FinalizedDeposits(_ context.Context) *depositcache.FinalizedDe
}

// NonFinalizedDeposits mocks out the deposit cache functionality for interop.
func (_ *Service) NonFinalizedDeposits(_ context.Context, _ *big.Int) []*ethpb.Deposit {
func (_ *Service) NonFinalizedDeposits(_ context.Context, _ int64, _ *big.Int) []*ethpb.Deposit {
	return []*ethpb.Deposit{}
}
@@ -22,11 +22,13 @@ go_library(
        "//beacon-chain/forkchoice/types:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
)

@@ -3,8 +3,8 @@ package doublylinkedtree

import "errors"

var ErrNilNode = errors.New("invalid nil or unknown node")
var errInvalidBalance = errors.New("invalid node balance")
var errInvalidProposerBoostRoot = errors.New("invalid proposer boost root")
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
var errUnknownJustifiedRoot = errors.New("unknown justified root")
var errInvalidOptimisticStatus = errors.New("invalid optimistic status")
var errUnknownPayloadHash = errors.New("unknown payload hash")
@@ -2,12 +2,15 @@ package doublylinkedtree

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
	types "github.com/prysmaticlabs/eth2-types"
	fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

@@ -18,6 +21,7 @@ func New(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
		finalizedEpoch:    finalizedEpoch,
		proposerBoostRoot: [32]byte{},
		nodeByRoot:        make(map[[fieldparams.RootLength]byte]*Node),
		nodeByPayload:     make(map[[fieldparams.RootLength]byte]*Node),
		pruneThreshold:    defaultPruneThreshold,
	}

@@ -168,7 +172,7 @@ func (f *ForkChoice) IsCanonical(root [32]byte) bool {
}

// IsOptimistic returns true if the given root has been optimistically synced.
func (f *ForkChoice) IsOptimistic(_ context.Context, root [32]byte) (bool, error) {
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()

@@ -249,9 +253,21 @@ func (f *ForkChoice) updateBalances(newBalances []uint64) error {
				return ErrNilNode
			}
			if currentNode.balance < oldBalance {
				return errInvalidBalance
				f.store.proposerBoostLock.RLock()
				log.WithFields(logrus.Fields{
					"nodeRoot":                   fmt.Sprintf("%#x", bytesutil.Trunc(vote.currentRoot[:])),
					"oldBalance":                 oldBalance,
					"nodeBalance":                currentNode.balance,
					"nodeWeight":                 currentNode.weight,
					"proposerBoostRoot":          fmt.Sprintf("%#x", bytesutil.Trunc(f.store.proposerBoostRoot[:])),
					"previousProposerBoostRoot":  fmt.Sprintf("%#x", bytesutil.Trunc(f.store.previousProposerBoostRoot[:])),
					"previousProposerBoostScore": f.store.previousProposerBoostScore,
				}).Warning("node with invalid balance, setting it to zero")
				f.store.proposerBoostLock.RUnlock()
				currentNode.balance = 0
			} else {
				currentNode.balance -= oldBalance
			}
			currentNode.balance -= oldBalance
		}
	}
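The hunk above replaces a hard errInvalidBalance failure with a warn-and-clamp: a balance that would underflow is logged and set to zero instead of aborting fork choice. The arithmetic in isolation, as a runnable toy:

package main

import "fmt"

// applyBalanceDelta subtracts oldBalance from a node balance, clamping at
// zero instead of underflowing, mirroring the fix above (the real code also
// logs a warning before zeroing).
func applyBalanceDelta(nodeBalance, oldBalance uint64) uint64 {
	if nodeBalance < oldBalance {
		// Underflow would occur on uint64 subtraction; clamp to zero.
		return 0
	}
	return nodeBalance - oldBalance
}

func main() {
	fmt.Println(applyBalanceDelta(100, 125)) // 0, clamped
	fmt.Println(applyBalanceDelta(125, 100)) // 25
}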
@@ -302,6 +318,6 @@ func (f *ForkChoice) ForkChoiceNodes() []*pbrpc.ForkChoiceNode {
}

// SetOptimisticToInvalid removes a block with an invalid execution payload from fork choice store
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root [fieldparams.RootLength]byte) ([][32]byte, error) {
	return f.store.removeNode(ctx, root)
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
	return f.store.setOptimisticToInvalid(ctx, root, payloadHash)
}

@@ -58,6 +58,30 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
	assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
}

func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
	f := setup(0, 0)
	ctx := context.Background()
	require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
	s := f.store
	s.nodeByRoot[indexToHash(1)].balance = 100
	s.nodeByRoot[indexToHash(2)].balance = 100
	s.nodeByRoot[indexToHash(3)].balance = 100

	f.balances = []uint64{125, 125, 125}
	f.votes = []Vote{
		{indexToHash(1), indexToHash(1), 0},
		{indexToHash(2), indexToHash(2), 0},
		{indexToHash(3), indexToHash(3), 0},
	}

	require.NoError(t, f.updateBalances([]uint64{10, 20, 30}))
	assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(1)].balance)
	assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(2)].balance)
	assert.Equal(t, uint64(5), s.nodeByRoot[indexToHash(3)].balance)
}

func TestForkChoice_IsCanonical(t *testing.T) {
	f := setup(1, 1)
	ctx := context.Background()
@@ -3,9 +3,12 @@ package doublylinkedtree
import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/sirupsen/logrus"
)

var (
	log = logrus.WithField("prefix", "forkchoice-doublylinkedtree")

	headSlotNumber = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "doublylinkedtree_head_slot",
@@ -48,10 +51,4 @@
			Help: "The number of times pruning happened.",
		},
	)
	validatedCount = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "doublylinkedtree_validated_count",
			Help: "The number of blocks that have been fully validated.",
		},
	)
)

@@ -109,7 +109,7 @@ func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch types.Epoch) boo
	return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch)
}

// setNodeAndParentValidated sets the current node and the parent as validated (i.e. non-optimistic).
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
	if ctx.Err() != nil {
		return ctx.Err()
@@ -120,7 +120,6 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
	}

	n.optimistic = false
	validatedCount.Inc()
	return n.parent.setNodeAndParentValidated(ctx)
}
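Despite its name, setNodeAndParentValidated now validates the whole ancestor chain, as the updated comment says. A toy equivalent, iterative rather than recursive, with types invented for the example:

package main

import "fmt"

type node struct {
	parent     *node
	optimistic bool
}

// markValidated clears the optimistic flag on n and every ancestor. The real
// method recurses through the parent pointer; this loop is equivalent.
func markValidated(n *node) {
	for cur := n; cur != nil; cur = cur.parent {
		cur.optimistic = false
	}
}

func main() {
	a := &node{optimistic: true}
	b := &node{parent: a, optimistic: true}
	c := &node{parent: b, optimistic: true}
	markValidated(b)
	fmt.Println(a.optimistic, b.optimistic, c.optimistic) // false false true
}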
@@ -180,27 +180,27 @@ func TestNode_SetFullyValidated(t *testing.T) {
	require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))

	opt, err := f.IsOptimistic(ctx, indexToHash(5))
	opt, err := f.IsOptimistic(indexToHash(5))
	require.NoError(t, err)
	require.Equal(t, true, opt)

	opt, err = f.IsOptimistic(ctx, indexToHash(4))
	opt, err = f.IsOptimistic(indexToHash(4))
	require.NoError(t, err)
	require.Equal(t, true, opt)

	require.NoError(t, f.store.nodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))

	// block 5 should still be optimistic
	opt, err = f.IsOptimistic(ctx, indexToHash(5))
	opt, err = f.IsOptimistic(indexToHash(5))
	require.NoError(t, err)
	require.Equal(t, true, opt)

	// blocks 4 and 3 should now be valid
	opt, err = f.IsOptimistic(ctx, indexToHash(4))
	opt, err = f.IsOptimistic(indexToHash(4))
	require.NoError(t, err)
	require.Equal(t, false, opt)

	opt, err = f.IsOptimistic(ctx, indexToHash(3))
	opt, err = f.IsOptimistic(indexToHash(3))
	require.NoError(t, err)
	require.Equal(t, false, opt)
}
@@ -2,22 +2,54 @@ package doublylinkedtree

import (
	"context"

	"github.com/prysmaticlabs/prysm/config/params"
)

func (s *Store) setOptimisticToInvalid(ctx context.Context, root, payloadHash [32]byte) ([][32]byte, error) {
	s.nodesLock.Lock()
	invalidRoots := make([][32]byte, 0)
	node, ok := s.nodeByRoot[root]
	if !ok || node == nil {
		s.nodesLock.Unlock()
		return invalidRoots, ErrNilNode
	}
	// Check if last valid hash is an ancestor of the passed node.
	lastValid, ok := s.nodeByPayload[payloadHash]
	if !ok || lastValid == nil {
		s.nodesLock.Unlock()
		return invalidRoots, errUnknownPayloadHash
	}
	firstInvalid := node
	for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != payloadHash; firstInvalid = firstInvalid.parent {
		if ctx.Err() != nil {
			s.nodesLock.Unlock()
			return invalidRoots, ctx.Err()
		}
	}
	// If the last valid payload is in a different fork, we remove only the
	// passed node.
	if firstInvalid.parent == nil {
		firstInvalid = node
	}
	s.nodesLock.Unlock()
	return s.removeNode(ctx, firstInvalid)
}
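The loop above walks from the invalidated block toward the root until it finds the child of the last valid payload; if the valid payload is on another fork, only the reported block is removed. A standalone toy of that walk (string hashes stand in for 32-byte values):

package main

import "fmt"

type node struct {
	root        string
	payloadHash string
	parent      *node
}

// firstInvalidDescendant returns the highest ancestor of n whose parent
// carries lastValidHash; if no ancestor does (different fork), only n itself
// is considered invalid. This mirrors the loop above.
func firstInvalidDescendant(n *node, lastValidHash string) *node {
	first := n
	for first.parent != nil && first.parent.payloadHash != lastValidHash {
		first = first.parent
	}
	if first.parent == nil {
		return n
	}
	return first
}

func main() {
	a := &node{root: "a", payloadHash: "A"}
	b := &node{root: "b", payloadHash: "B", parent: a}
	c := &node{root: "c", payloadHash: "C", parent: b}
	fmt.Println(firstInvalidDescendant(c, "A").root) // "b": b and its subtree are invalid
	fmt.Println(firstInvalidDescendant(c, "X").root) // "c": unknown ancestor hash, remove only c
}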
// removeNode removes the given node and all of its children
// from the Fork Choice Store.
func (s *Store) removeNode(ctx context.Context, root [32]byte) ([][32]byte, error) {
func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error) {
	s.nodesLock.Lock()
	defer s.nodesLock.Unlock()
	invalidRoots := make([][32]byte, 0)

	node, ok := s.nodeByRoot[root]
	if !ok || node == nil {
	if node == nil {
		return invalidRoots, ErrNilNode
	}
	if !node.optimistic || node.parent == nil {
		return invalidRoots, errInvalidOptimisticStatus
	}

	children := node.parent.children
	if len(children) == 1 {
		node.parent.children = []*Node{}
@@ -47,6 +79,16 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
		}
	}
	invalidRoots = append(invalidRoots, node.root)
	s.proposerBoostLock.Lock()
	if node.root == s.proposerBoostRoot {
		s.proposerBoostRoot = [32]byte{}
	}
	if node.root == s.previousProposerBoostRoot {
		s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
		s.previousProposerBoostScore = 0
	}
	s.proposerBoostLock.Unlock()
	delete(s.nodeByRoot, node.root)
	delete(s.nodeByPayload, node.payloadHash)
	return invalidRoots, nil
}
@@ -24,32 +24,71 @@ import (
func TestPruneInvalid(t *testing.T) {
	tests := []struct {
		root             [32]byte // the root of the new INVALID block
		payload          [32]byte // the last valid hash
		wantedNodeNumber int
		wantedRoots      [][32]byte
	}{
		{
			[32]byte{'j'},
			[32]byte{'B'},
			12,
			[][32]byte{[32]byte{'j'}},
		},
		{
			[32]byte{'c'},
			[32]byte{'B'},
			4,
			[][32]byte{[32]byte{'f'}, [32]byte{'e'}, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'},
				[32]byte{'k'}, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'c'}},
		},
		{
			[32]byte{'i'},
			[32]byte{'H'},
			12,
			[][32]byte{[32]byte{'i'}},
		},
		{
			[32]byte{'h'},
			[32]byte{'G'},
			11,
			[][32]byte{[32]byte{'i'}, [32]byte{'h'}},
		},
		{
			[32]byte{'g'},
			[32]byte{'D'},
			8,
			[][32]byte{[32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'g'}},
		},
		{
			[32]byte{'i'},
			[32]byte{'D'},
			8,
			[][32]byte{[32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'g'}},
		},
		{
			[32]byte{'f'},
			[32]byte{'D'},
			11,
			[][32]byte{[32]byte{'f'}, [32]byte{'e'}},
		},
		{
			[32]byte{'h'},
			[32]byte{'C'},
			5,
			[][32]byte{
				[32]byte{'f'},
				[32]byte{'e'},
				[32]byte{'i'},
				[32]byte{'h'},
				[32]byte{'l'},
				[32]byte{'k'},
				[32]byte{'g'},
				[32]byte{'d'},
			},
		},
		{
			[32]byte{'g'},
			[32]byte{'E'},
			8,
			[][32]byte{[32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'g'}},
		},
@@ -58,22 +97,45 @@
		ctx := context.Background()
		f := setup(1, 1)

		require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))

		roots, err := f.store.removeNode(context.Background(), tc.root)
		roots, err := f.store.setOptimisticToInvalid(context.Background(), tc.root, tc.payload)
		require.NoError(t, err)
		require.DeepEqual(t, tc.wantedRoots, roots)
		require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
	}
}

// This is a regression test (10445)
func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
	ctx := context.Background()
	f := setup(1, 1)

	require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
	f.store.proposerBoostLock.Lock()
	f.store.proposerBoostRoot = [32]byte{'c'}
	f.store.previousProposerBoostScore = 10
	f.store.previousProposerBoostRoot = [32]byte{'b'}
	f.store.proposerBoostLock.Unlock()

	_, err := f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'A'})
	require.NoError(t, err)
	f.store.proposerBoostLock.RLock()
	require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
	require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
	require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
	f.store.proposerBoostLock.RUnlock()
}
@@ -121,6 +121,7 @@ func (s *Store) insert(ctx context.Context,
		payloadHash:    payloadHash,
	}

	s.nodeByPayload[payloadHash] = n
	s.nodeByRoot[root] = n
	if parent != nil {
		parent.children = append(parent.children, n)

@@ -107,7 +107,8 @@ func TestStore_Insert(t *testing.T) {
	// The new node does not have a parent.
	treeRootNode := &Node{slot: 0, root: indexToHash(0)}
	nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
	s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode}
	nodeByPayload := map[[32]byte]*Node{indexToHash(0): treeRootNode}
	s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload}
	payloadHash := [32]byte{'a'}
	require.NoError(t, s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1))
	assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")

@@ -26,6 +26,7 @@ type Store struct {
	treeRootNode      *Node                                  // the root node of the store tree.
	headNode          *Node                                  // last head Node
	nodeByRoot        map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
	nodeByPayload     map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload hash.
	nodesLock         sync.RWMutex
	proposerBoostLock sync.RWMutex
}
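The new nodeByPayload field gives the store a second index over the same nodes, keyed by execution payload hash; the insert and removeNodeAndChildren hunks above show that every mutation must touch both maps. A minimal sketch of that invariant (types invented for the example):

package sketch

type node struct {
	root        [32]byte
	payloadHash [32]byte
}

type store struct {
	nodeByRoot    map[[32]byte]*node
	nodeByPayload map[[32]byte]*node
}

// insert registers n under both indices so lookups by block root and by
// execution payload hash stay in sync.
func (s *store) insert(n *node) {
	s.nodeByRoot[n.root] = n
	s.nodeByPayload[n.payloadHash] = n
}

// remove must delete from both maps, mirroring removeNodeAndChildren above.
func (s *store) remove(n *node) {
	delete(s.nodeByRoot, n.root)
	delete(s.nodeByPayload, n.payloadHash)
}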
@@ -24,7 +24,7 @@ type ForkChoicer interface {
type HeadRetriever interface {
	Head(context.Context, types.Epoch, [32]byte, []uint64, types.Epoch) ([32]byte, error)
	Tips() ([][32]byte, []types.Slot)
	IsOptimistic(ctx context.Context, root [32]byte) (bool, error)
	IsOptimistic(root [32]byte) (bool, error)
}

// BlockProcessor processes the block that's used for accounting fork choice.

@@ -71,5 +71,5 @@ type Getter interface {
// Setter allows to set forkchoice information
type Setter interface {
	SetOptimisticToValid(context.Context, [fieldparams.RootLength]byte) error
	SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte) ([][32]byte, error)
	SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte) ([][32]byte, error)
}
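The Setter change means callers must now supply the last valid payload hash alongside the invalid root. A hedged sketch of a caller reacting to an INVALID payload status (the interface is restated with RootLength fixed at 32; onInvalidPayload is invented for the example):

package sketch

import (
	"context"
	"log"
)

// Setter mirrors the interface above with the root length fixed to 32 bytes.
type Setter interface {
	SetOptimisticToValid(context.Context, [32]byte) error
	SetOptimisticToInvalid(context.Context, [32]byte, [32]byte) ([][32]byte, error)
}

// onInvalidPayload shows the new call shape: the invalid block's root plus
// the last valid payload hash reported by the execution engine. Every
// returned root would also need pruning from any caller-side caches.
func onInvalidPayload(ctx context.Context, fc Setter, blockRoot, lastValidHash [32]byte) error {
	invalidRoots, err := fc.SetOptimisticToInvalid(ctx, blockRoot, lastValidHash)
	if err != nil {
		return err
	}
	for _, r := range invalidRoots {
		log.Printf("pruned invalid block %#x", r)
	}
	return nil
}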
@@ -22,12 +22,14 @@ go_library(
        "//beacon-chain/forkchoice/types:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
)

@@ -5,11 +5,11 @@ import "errors"
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
var errUnknownJustifiedRoot = errors.New("unknown justified root")
var errInvalidNodeIndex = errors.New("node index is invalid")
var errInvalidFinalizedNode = errors.New("invalid finalized block on chain")
var ErrUnknownNodeRoot = errors.New("unknown block root")
var errInvalidJustifiedIndex = errors.New("justified index is invalid")
var errInvalidBestChildIndex = errors.New("best child index is invalid")
var errInvalidBestDescendantIndex = errors.New("best descendant index is invalid")
var errInvalidParentDelta = errors.New("parent delta is invalid")
var errInvalidNodeDelta = errors.New("node delta is invalid")
var errInvalidDeltaLength = errors.New("delta length is invalid")
var errInvalidSyncedTips = errors.New("invalid synced tips")
var errInvalidOptimisticStatus = errors.New("invalid optimistic status")

@@ -85,12 +85,9 @@ func copyNode(node *Node) *Node {
		return &Node{}
	}

	copiedRoot := [32]byte{}
	copy(copiedRoot[:], node.root[:])

	return &Node{
		slot:           node.slot,
		root:           copiedRoot,
		root:           node.root,
		parent:         node.parent,
		payloadHash:    node.payloadHash,
		justifiedEpoch: node.justifiedEpoch,
@@ -98,5 +95,6 @@
		weight:         node.weight,
		bestChild:      node.bestChild,
		bestDescendant: node.bestDescendant,
		status:         node.status,
	}
}
@@ -3,9 +3,12 @@ package protoarray
import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/sirupsen/logrus"
)

var (
	log = logrus.WithField("prefix", "forkchoice-protoarray")

	headSlotNumber = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "proto_array_head_slot",
@@ -48,16 +51,4 @@
			Help: "The number of times pruning happened.",
		},
	)
	lastSyncedTipSlot = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "proto_array_last_synced_tip_slot",
			Help: "The slot of the last fully validated block added to the proto array.",
		},
	)
	syncedTipsCount = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "proto_array_synced_tips_count",
			Help: "The number of elements in the syncedTips structure.",
		},
	)
)

@@ -43,8 +43,3 @@ func (n *Node) BestChild() uint64 {
func (n *Node) BestDescendant() uint64 {
	return n.bestDescendant
}

// Graffiti of the fork choice node.
func (n *Node) Graffiti() [32]byte {
	return n.graffiti
}

@@ -16,7 +16,6 @@ func TestNode_Getters(t *testing.T) {
	weight := uint64(10000)
	bestChild := uint64(5)
	bestDescendant := uint64(4)
	graffiti := [32]byte{'b'}
	n := &Node{
		slot:           slot,
		root:           root,
@@ -26,7 +25,6 @@
		weight:         weight,
		bestChild:      bestChild,
		bestDescendant: bestDescendant,
		graffiti:       graffiti,
	}

	require.Equal(t, slot, n.Slot())
@@ -37,5 +35,4 @@
	require.Equal(t, weight, n.Weight())
	require.Equal(t, bestChild, n.BestChild())
	require.Equal(t, bestDescendant, n.BestDescendant())
	require.Equal(t, graffiti, n.Graffiti())
}
@@ -3,314 +3,159 @@ package protoarray
import (
	"context"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/config/params"
)

// This returns the minimum and maximum slot of the synced_tips tree
func (f *ForkChoice) boundarySyncedTips() (types.Slot, types.Slot) {
	f.syncedTips.RLock()
	defer f.syncedTips.RUnlock()

	min := params.BeaconConfig().FarFutureSlot
	max := types.Slot(0)
	for _, slot := range f.syncedTips.validatedTips {
		if slot > max {
			max = slot
		}
		if slot < min {
			min = slot
		}
	}
	return min, max
}

// IsOptimistic returns true if this node is optimistically synced
// An optimistically synced block is synced as usual, but its
// execution payload is not validated, while the EL is still syncing.
// This function returns an error if the block is not found in the fork choice
// store
func (f *ForkChoice) IsOptimistic(ctx context.Context, root [32]byte) (bool, error) {
	if ctx.Err() != nil {
		return false, ctx.Err()
	}
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()
	index, ok := f.store.nodesIndices[root]
	if !ok {
		f.store.nodesLock.RUnlock()
		return false, ErrUnknownNodeRoot
	}
	node := f.store.nodes[index]
	slot := node.slot

	// If the node is a synced tip, then it's fully validated
	f.syncedTips.RLock()
	_, ok = f.syncedTips.validatedTips[root]
	if ok {
		f.syncedTips.RUnlock()
		f.store.nodesLock.RUnlock()
		return false, nil
	}
	f.syncedTips.RUnlock()

	// If the slot is higher than the max synced tip, it's optimistic
	min, max := f.boundarySyncedTips()
	if slot > max {
		f.store.nodesLock.RUnlock()
		return true, nil
	}

	// If the slot is lower than the min synced tip, it's fully validated
	if slot <= min {
		f.store.nodesLock.RUnlock()
		return false, nil
	}

	// if the node is a leaf of the Fork Choice tree, then it's
	// optimistic
	childIndex := node.BestChild()
	if childIndex == NonExistentNode {
		f.store.nodesLock.RUnlock()
		return true, nil
	}

	// recurse to the child
	child := f.store.nodes[childIndex]
	root = child.root
	f.store.nodesLock.RUnlock()
	return f.IsOptimistic(ctx, root)
}

// This function returns the index of the synced tip node that is an ancestor of the input node.
// In the event of none, `NonExistentNode` is returned.
// This internal method assumes the caller holds a lock on syncedTips and s.nodesLock
func (s *Store) findSyncedTip(ctx context.Context, node *Node, syncedTips *optimisticStore) (uint64, error) {
	for {
		if ctx.Err() != nil {
			return 0, ctx.Err()
		}
		if _, ok := syncedTips.validatedTips[node.root]; ok {
			return s.nodesIndices[node.root], nil
		}
		if node.parent == NonExistentNode {
			return NonExistentNode, nil
		}
		node = s.nodes[node.parent]
	}
	return node.status == syncing, nil
}
// SetOptimisticToValid is called with the root of a block that was returned as
// VALID by the EL. This routine recomputes and updates the synced_tips map to
// account for this new tip.
// WARNING: This method returns an error if the root is not found in forkchoice or
// if the root is not a leaf of the fork choice tree.
// VALID by the EL.
// WARNING: This method returns an error if the root is not found in forkchoice
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) error {
	f.store.nodesLock.RLock()
	f.store.nodesLock.Lock()
	defer f.store.nodesLock.Unlock()
	// We can only update if given root is in Fork Choice
	index, ok := f.store.nodesIndices[root]
	if !ok {
		return errInvalidNodeIndex
		return ErrUnknownNodeRoot
	}
	node := f.store.nodes[index]
	f.store.nodesLock.RUnlock()

	// Stop early if the node is Valid
	optimistic, err := f.IsOptimistic(ctx, root)
	if err != nil {
		return err
	}
	if !optimistic {
		return nil
	}
	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()

	// Cache root and slot to validated tips
	newTips := make(map[[32]byte]types.Slot)
	newValidSlot := node.slot
	newTips[root] = newValidSlot

	// Compute the full valid path from the given node to its previous synced tip
	// This path will now consist of fully validated blocks. Notice that
	// the previous tip may have been outside the Fork Choice store.
	// In this case, only one block can be in syncedTips as the whole
	// Fork Choice would be a descendant of this block.
	validPath := make(map[uint64]bool)
	validPath[index] = true
	for {
	for node := f.store.nodes[index]; node.status == syncing; node = f.store.nodes[index] {
		if ctx.Err() != nil {
			return ctx.Err()
		}

		parentIndex := node.parent
		if parentIndex == NonExistentNode {
		node.status = valid
		index = node.parent
		if index == NonExistentNode {
			break
		}
		if parentIndex >= uint64(len(f.store.nodes)) {
			return errInvalidNodeIndex
		}
		node = f.store.nodes[parentIndex]
		_, ok = f.syncedTips.validatedTips[node.root]
		if ok {
			break
		}
		validPath[parentIndex] = true
	}

	// Retrieve the list of leaves in the Fork Choice
	// These are all the nodes that have NonExistentNode as best child.
	leaves, err := f.store.leaves()
	if err != nil {
		return err
	}

	// For each leaf, recompute the new tip.
	for _, i := range leaves {
		node = f.store.nodes[i]
		j := i
		for {
			if ctx.Err() != nil {
				return ctx.Err()
			}
			// Stop if we reached the previous tip
			_, ok = f.syncedTips.validatedTips[node.root]
			if ok {
				newTips[node.root] = node.slot
				break
			}

			// Stop if we reach valid path
			_, ok = validPath[j]
			if ok {
				newTips[node.root] = node.slot
				break
			}

			j = node.parent
			if j == NonExistentNode {
				break
			}
			if j >= uint64(len(f.store.nodes)) {
				return errInvalidNodeIndex
			}
			node = f.store.nodes[j]
		}
	}

	f.syncedTips.validatedTips = newTips
	lastSyncedTipSlot.Set(float64(newValidSlot))
	syncedTipsCount.Set(float64(len(newTips)))
	return nil
}
// SetOptimisticToInvalid updates the synced_tips map when the block with the given root becomes INVALID.
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root [32]byte) ([][32]byte, error) {
// SetOptimisticToInvalid updates the synced_tips map when the block with the given root becomes INVALID.
// It takes two parameters: the root of the INVALID block and the payload hash
// of the last valid block.
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, payloadHash [32]byte) ([][32]byte, error) {
	f.store.nodesLock.Lock()
	defer f.store.nodesLock.Unlock()
	invalidRoots := make([][32]byte, 0)
	idx, ok := f.store.nodesIndices[root]
	// We only support setting INVALID a node that exists in Forkchoice
	invalidIndex, ok := f.store.nodesIndices[root]
	if !ok {
		return invalidRoots, errInvalidNodeIndex
		return invalidRoots, ErrUnknownNodeRoot
	}
	node := f.store.nodes[idx]
	// We only support changing status for the tips in Fork Choice store.
	if node.bestChild != NonExistentNode {
		return invalidRoots, errInvalidNodeIndex
	node := f.store.nodes[invalidIndex]

	lastValidIndex, ok := f.store.payloadIndices[payloadHash]
	if !ok || lastValidIndex == NonExistentNode {
		return invalidRoots, errInvalidFinalizedNode
	}

	parentIndex := node.parent
	// This should not happen
	if parentIndex == NonExistentNode {
		return invalidRoots, errInvalidNodeIndex
	// Check if the last valid hash is an ancestor of the passed node
	firstInvalidIndex := node.parent
	for ; firstInvalidIndex != NonExistentNode && firstInvalidIndex != lastValidIndex; firstInvalidIndex = node.parent {
		node = f.store.nodes[firstInvalidIndex]
	}
	// Update the weights of the nodes subtracting the INVALID node's weight

	// if the last valid hash is not an ancestor of the invalid block, we
	// just remove the invalid block.
	if node.parent != lastValidIndex {
		node = f.store.nodes[invalidIndex]
		firstInvalidIndex = invalidIndex
		lastValidIndex = node.parent
		if lastValidIndex == NonExistentNode {
			return invalidRoots, errInvalidFinalizedNode
		}
	} else {
		firstInvalidIndex = f.store.nodesIndices[node.root]
	}

	// Update the weights of the nodes subtracting the first INVALID node's weight
	weight := node.weight
	node = f.store.nodes[parentIndex]
	for {
		if ctx.Err() != nil {
			return invalidRoots, ctx.Err()
		}
		node.weight -= weight
		if node.parent == NonExistentNode {
			break
		}
		node = f.store.nodes[node.parent]
	}
	parent := copyNode(f.store.nodes[parentIndex])

	// delete the invalid node, order is important
	f.store.nodes = append(f.store.nodes[:idx], f.store.nodes[idx+1:]...)
	delete(f.store.nodesIndices, root)
	invalidRoots = append(invalidRoots, root)
	// Fix parent and best child for each node
	for _, node := range f.store.nodes {
		if node.parent == NonExistentNode {
			node.parent = NonExistentNode
		} else if node.parent > idx {
			node.parent -= 1
		}
		if node.bestChild == NonExistentNode || node.bestChild == idx {
			node.bestChild = NonExistentNode
		} else if node.bestChild > idx {
			node.bestChild -= 1
		}
		if node.bestDescendant == NonExistentNode || node.bestDescendant == idx {
			node.bestDescendant = NonExistentNode
		} else if node.bestDescendant > idx {
			node.bestDescendant -= 1
		}
	var validNode *Node
	for index := lastValidIndex; index != NonExistentNode; index = validNode.parent {
		validNode = f.store.nodes[index]
		validNode.weight -= weight
	}

	// Update the parent's best child and best descendant if necessary.
	if parent.bestChild == idx || parent.bestDescendant == idx {
		for childIndex, child := range f.store.nodes {
			if child.parent == parentIndex {
				err := f.store.updateBestChildAndDescendant(
					parentIndex, uint64(childIndex))
				if err != nil {
					return invalidRoots, err
				}
				break
	// Find the current proposer boost (it should be set to zero if an
	// INVALID block was boosted)
	f.store.proposerBoostLock.RLock()
	boostRoot := f.store.proposerBoostRoot
	previousBoostRoot := f.store.previousProposerBoostRoot
	f.store.proposerBoostLock.RUnlock()

	// Remove the invalid roots from our store maps and adjust their weight
	// to zero
	boosted := node.root == boostRoot
	previouslyBoosted := node.root == previousBoostRoot

	invalidIndices := map[uint64]bool{firstInvalidIndex: true}
	node.status = invalid
	node.weight = 0
	delete(f.store.nodesIndices, node.root)
	delete(f.store.canonicalNodes, node.root)
	delete(f.store.payloadIndices, node.payloadHash)
	for index := firstInvalidIndex + 1; index < uint64(len(f.store.nodes)); index++ {
		invalidNode := f.store.nodes[index]
		if _, ok := invalidIndices[invalidNode.parent]; !ok {
			continue
		}
		if invalidNode.status == valid {
			return invalidRoots, errInvalidOptimisticStatus
		}
		if !boosted && invalidNode.root == boostRoot {
			boosted = true
		}
		if !previouslyBoosted && invalidNode.root == previousBoostRoot {
			previouslyBoosted = true
		}
		invalidNode.status = invalid
		invalidIndices[index] = true
		invalidNode.weight = 0
		delete(f.store.nodesIndices, invalidNode.root)
		delete(f.store.canonicalNodes, invalidNode.root)
		delete(f.store.payloadIndices, invalidNode.payloadHash)
	}
	if boosted {
		if err := f.ResetBoostedProposerRoot(ctx); err != nil {
			return invalidRoots, err
		}
	}
	if previouslyBoosted {
		f.store.proposerBoostLock.Lock()
		f.store.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
		f.store.previousProposerBoostScore = 0
		f.store.proposerBoostLock.Unlock()
	}

	for index := range invalidIndices {
		invalidRoots = append(invalidRoots, f.store.nodes[index].root)
	}

	// Update the best child and descendant
	for i := len(f.store.nodes) - 1; i >= 0; i-- {
		n := f.store.nodes[i]
		if n.parent != NonExistentNode {
			if err := f.store.updateBestChildAndDescendant(n.parent, uint64(i)); err != nil {
				return invalidRoots, err
			}
		}
	}

	// Return early if the parent is not a synced_tip.
	f.syncedTips.Lock()
	defer f.syncedTips.Unlock()
	parentRoot := parent.root
	_, ok = f.syncedTips.validatedTips[parentRoot]
	if !ok {
		return invalidRoots, nil
	}

	leaves, err := f.store.leaves()
	if err != nil {
		return invalidRoots, err
	}

	for _, i := range leaves {
		node = f.store.nodes[i]
		for {
			if ctx.Err() != nil {
				return invalidRoots, ctx.Err()
			}

			// Return early if the parent is still a synced tip
			if node.root == parentRoot {
				return invalidRoots, nil
			}
			_, ok = f.syncedTips.validatedTips[node.root]
			if ok {
				break
			}
			if node.parent == NonExistentNode {
				break
			}
			node = f.store.nodes[node.parent]
		}
	}
	delete(f.syncedTips.validatedTips, parentRoot)
	syncedTipsCount.Set(float64(len(f.syncedTips.validatedTips)))
	return invalidRoots, nil
}
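One detail of the rewritten routine worth isolating: once the first INVALID node is identified, its weight is subtracted from every remaining ancestor starting at the last valid index. A runnable toy of that ancestor walk, with plain ints playing the role of protoarray's node indices:

package main

import "fmt"

const nonExistentNode = -1

type pnode struct {
	parent int // index of the parent node, nonExistentNode for the tree root
	weight uint64
}

// discountInvalidWeight walks from the last valid node up to the root and
// subtracts the invalidated subtree's weight, mirroring the loop above.
func discountInvalidWeight(nodes []pnode, lastValidIndex int, weight uint64) {
	for i := lastValidIndex; i != nonExistentNode; i = nodes[i].parent {
		nodes[i].weight -= weight
	}
}

func main() {
	// root(0) <- a(1) <- b(2); b's subtree carried weight 10 and became INVALID.
	nodes := []pnode{{nonExistentNode, 100}, {0, 50}, {1, 10}}
	discountInvalidWeight(nodes, 1, 10) // last valid node is a(1)
	fmt.Println(nodes[0].weight, nodes[1].weight) // 90 40
}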
@@ -10,116 +10,38 @@ import (
	"github.com/prysmaticlabs/prysm/testing/require"
)

// We test the algorithm to check the optimistic status of a node. The
// status for this test is the following branching diagram
//
//    -- E -- F
//   /
//    -- C -- D
//   /
// 0 -- 1 -- A -- B -- J -- K
//   \          /
//    -- G -- H -- I
//
// Here nodes 0, 1, A, B, C, D are fully validated and nodes
// E, F, G, H, J, K are optimistic.
// Synced Tips are nodes B, C, D
// nodes 0 and 1 are outside the Fork Choice Store.
func slicesEqual(a, b [][32]byte) bool {
	if len(a) != len(b) {
		return false
	}

func TestOptimistic(t *testing.T) {
	mapA := make(map[[32]byte]bool, len(a))
	for _, root := range a {
		mapA[root] = true
	}
	for _, root := range b {
		_, ok := mapA[root]
		if !ok {
			return false
		}
	}
	return true
}
func TestOptimistic_Outside_ForkChoice(t *testing.T) {
	root0 := bytesutil.ToBytes32([]byte("hello0"))
	root1 := bytesutil.ToBytes32([]byte("hello1"))

	nodeA := &Node{
		slot:      types.Slot(100),
		root:      bytesutil.ToBytes32([]byte("helloA")),
		bestChild: 1,
	}
	nodeB := &Node{
		slot:      types.Slot(101),
		root:      bytesutil.ToBytes32([]byte("helloB")),
		bestChild: 2,
		parent:    0,
	}
	nodeC := &Node{
		slot:      types.Slot(102),
		root:      bytesutil.ToBytes32([]byte("helloC")),
		bestChild: 3,
		parent:    1,
	}
	nodeD := &Node{
		slot:      types.Slot(103),
		root:      bytesutil.ToBytes32([]byte("helloD")),
		bestChild: NonExistentNode,
		parent:    2,
	}
	nodeE := &Node{
		slot:      types.Slot(103),
		root:      bytesutil.ToBytes32([]byte("helloE")),
		bestChild: 5,
		parent:    2,
	}
	nodeF := &Node{
		slot:      types.Slot(104),
		root:      bytesutil.ToBytes32([]byte("helloF")),
		bestChild: NonExistentNode,
		parent:    4,
	}
	nodeG := &Node{
		slot:      types.Slot(102),
		root:      bytesutil.ToBytes32([]byte("helloG")),
		bestChild: 7,
		parent:    1,
	}
	nodeH := &Node{
		slot:      types.Slot(103),
		root:      bytesutil.ToBytes32([]byte("helloH")),
		bestChild: 8,
		parent:    6,
	}
	nodeI := &Node{
		slot:      types.Slot(104),
		root:      bytesutil.ToBytes32([]byte("helloI")),
		bestChild: NonExistentNode,
		parent:    7,
	}
	nodeJ := &Node{
		slot:      types.Slot(103),
		root:      bytesutil.ToBytes32([]byte("helloJ")),
		bestChild: 10,
		parent:    6,
	}
	nodeK := &Node{
		slot:      types.Slot(104),
		root:      bytesutil.ToBytes32([]byte("helloK")),
		bestChild: NonExistentNode,
		parent:    9,
		status:    valid,
	}
	nodes := []*Node{
		nodeA,
		nodeB,
		nodeC,
		nodeD,
		nodeE,
		nodeF,
		nodeG,
		nodeH,
		nodeI,
		nodeJ,
		nodeK,
	}
	ni := map[[32]byte]uint64{
		nodeA.root: 0,
		nodeB.root: 1,
		nodeC.root: 2,
		nodeD.root: 3,
		nodeE.root: 4,
		nodeF.root: 5,
		nodeG.root: 6,
		nodeH.root: 7,
		nodeI.root: 8,
		nodeJ.root: 9,
		nodeK.root: 10,
	}

	s := &Store{
@@ -127,82 +49,14 @@ func TestOptimistic(t *testing.T) {
		nodesIndices: ni,
	}
tips := map[[32]byte]types.Slot{
|
||||
nodeB.root: nodeB.slot,
|
||||
nodeC.root: nodeC.slot,
|
||||
nodeD.root: nodeD.slot,
|
||||
}
|
||||
st := &optimisticStore{
|
||||
validatedTips: tips,
|
||||
}
|
||||
f := &ForkChoice{
|
||||
store: s,
|
||||
syncedTips: st,
|
||||
store: s,
|
||||
}
|
||||
ctx := context.Background()
|
||||
// We test the implementation of boundarySyncedTips
|
||||
min, max := f.boundarySyncedTips()
|
||||
require.Equal(t, min, types.Slot(101), "minimum tip slot is different")
|
||||
require.Equal(t, max, types.Slot(103), "maximum tip slot is different")
|
||||
|
||||
// We test first nodes outside the Fork Choice store
|
||||
_, err := f.IsOptimistic(ctx, root0)
|
||||
_, err := f.IsOptimistic(root0)
|
||||
require.ErrorIs(t, ErrUnknownNodeRoot, err)
|
||||
|
||||
_, err = f.IsOptimistic(ctx, root1)
|
||||
require.ErrorIs(t, ErrUnknownNodeRoot, err)
|
||||
|
||||
// We check all nodes in the Fork Choice store.
|
||||
op, err := f.IsOptimistic(ctx, nodeA.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, false)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeB.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, false)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeC.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, false)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeD.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, false)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeE.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, true)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeF.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, true)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeG.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, true)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeH.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, true)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeI.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, true)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeJ.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, true)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeK.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, true)
|
||||
|
||||
// request a write Lock to synced Tips regression #10289
|
||||
f.syncedTips.Lock()
|
||||
defer f.syncedTips.Unlock()
|
||||
}
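With the per-node status field introduced in this diff, the optimistic check no longer needs a context or a tip walk: it becomes a map lookup plus a status comparison. A minimal sketch of that idea; the committed method may differ in details:

	// Sketch: IsOptimistic reports whether a block is still awaiting
	// execution-layer verification. A node is optimistic while its
	// status is syncing; unknown roots are an error.
	func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
		f.store.nodesLock.RLock()
		defer f.store.nodesLock.RUnlock()

		index, ok := f.store.nodesIndices[root]
		if !ok {
			return false, ErrUnknownNodeRoot
		}
		return f.store.nodes[index].status == syncing, nil
	}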

// This tests the algorithm to update syncedTips
// This tests the algorithm to update optimistic Status
// We start with the following diagram
//
//           E -- F
@@ -213,165 +67,105 @@ func TestOptimistic(t *testing.T) {
//        \        \
//         J -- K -- L
//
// And every block in the Fork choice is optimistic. Synced_Tips contains a
// single block that is outside of Fork choice
// The Chain A -- B -- C -- D -- E is VALID.
//
func TestSetOptimisticToValid(t *testing.T) {
	ctx := context.Background()
	f := setup(1, 1)

	require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
	tests := []struct {
		root      [32]byte                // the root of the new VALID block
		tips      map[[32]byte]types.Slot // the old synced tips
		newTips   map[[32]byte]types.Slot // the updated synced tips
		wantedErr error
		root             [32]byte // the root of the new VALID block
		testRoot         [32]byte // root of the node we will test optimistic status
		wantedOptimistic bool     // wanted optimistic status for tested node
		wantedErr        error    // wanted error message
	}{
		{
			[32]byte{'i'},
			map[[32]byte]types.Slot{[32]byte{'z'}: 90},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'d'}: 103,
				[32]byte{'g'}: 104,
				[32]byte{'i'}: 106,
			},
			[32]byte{'i'},
			false,
			nil,
		},
		{
			[32]byte{'i'},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'d'}: 103,
			},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'d'}: 103,
				[32]byte{'g'}: 104,
				[32]byte{'i'}: 106,
			},
			[32]byte{'f'},
			true,
			nil,
		},
		{
			[32]byte{'i'},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'d'}: 103,
				[32]byte{'e'}: 103,
			},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'e'}: 104,
				[32]byte{'g'}: 104,
				[32]byte{'i'}: 106,
			},
			nil,
		},
		{
			[32]byte{'j'},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'f'}: 105,
				[32]byte{'g'}: 104,
				[32]byte{'i'}: 106,
			},
			map[[32]byte]types.Slot{
				[32]byte{'f'}: 105,
				[32]byte{'g'}: 104,
				[32]byte{'i'}: 106,
				[32]byte{'j'}: 102,
			},
			nil,
		},
		{
			[32]byte{'g'},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'f'}: 105,
				[32]byte{'g'}: 104,
				[32]byte{'i'}: 106,
			},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'f'}: 105,
				[32]byte{'g'}: 104,
				[32]byte{'i'}: 106,
			},
			[32]byte{'b'},
			false,
			nil,
		},
		{
			[32]byte{'i'},
			[32]byte{'h'},
			map[[32]byte]types.Slot{
				[32]byte{'z'}: 90,
			},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'d'}: 103,
				[32]byte{'g'}: 104,
				[32]byte{'h'}: 105,
			},
			false,
			nil,
		},
		{
			[32]byte{'b'},
			[32]byte{'b'},
			false,
			nil,
		},
		{
			[32]byte{'b'},
			[32]byte{'h'},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'d'}: 103,
				[32]byte{'g'}: 104,
				[32]byte{'i'}: 106,
			},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'d'}: 103,
				[32]byte{'g'}: 104,
				[32]byte{'i'}: 106,
			},
			true,
			nil,
		},
		{
			[32]byte{'g'},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'d'}: 103,
				[32]byte{'e'}: 104,
			},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'e'}: 104,
				[32]byte{'g'}: 104,
			},
			[32]byte{'b'},
			[32]byte{'a'},
			false,
			nil,
		},
		{
			[32]byte{'k'},
			[32]byte{'k'},
			false,
			nil,
		},
		{
			[32]byte{'k'},
			[32]byte{'l'},
			true,
			nil,
		},
		{
			[32]byte{'p'},
			map[[32]byte]types.Slot{},
			map[[32]byte]types.Slot{},
			errInvalidNodeIndex,
			[32]byte{},
			false,
			ErrUnknownNodeRoot,
		},
	}
	for _, tc := range tests {
		f.syncedTips.Lock()
		f.syncedTips.validatedTips = tc.tips
		f.syncedTips.Unlock()
		err := f.SetOptimisticToValid(context.Background(), tc.root)
		f := setup(1, 1)

		require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
		require.NoError(t, f.SetOptimisticToValid(context.Background(), [32]byte{'e'}))
		optimistic, err := f.IsOptimistic([32]byte{'b'})
		require.NoError(t, err)
		require.Equal(t, false, optimistic)

		err = f.SetOptimisticToValid(context.Background(), tc.root)
		if tc.wantedErr != nil {
			require.ErrorIs(t, err, tc.wantedErr)
		} else {
			require.NoError(t, err)
			f.syncedTips.RLock()
			require.DeepEqual(t, f.syncedTips.validatedTips, tc.newTips)
			f.syncedTips.RUnlock()
			optimistic, err := f.IsOptimistic(tc.testRoot)
			require.NoError(t, err)
			require.Equal(t, tc.wantedOptimistic, optimistic)
		}
	}
}
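In the reworked store, SetOptimisticToValid only needs to walk the parent chain from the newly validated block and flip each syncing ancestor to valid, since ancestors of a fully validated payload are necessarily valid. A compressed sketch of that idea under the types in this diff; the committed method also handles locking and edge cases:

	// Sketch: mark the given block and all of its ancestors as valid.
	func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) error {
		f.store.nodesLock.Lock()
		defer f.store.nodesLock.Unlock()

		index, ok := f.store.nodesIndices[root]
		if !ok {
			return ErrUnknownNodeRoot
		}
		for index != NonExistentNode {
			node := f.store.nodes[index]
			if node.status != syncing {
				break // already valid; callers must never validate an invalid node
			}
			node.status = valid
			index = node.parent
		}
		return nil
	}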
@@ -387,70 +181,81 @@ func TestSetOptimisticToValid(t *testing.T) {
//        \        \
//         J(1) -- K(1) -- L(0)
//
// And every block in the Fork choice is optimistic. Synced_Tips contains a
// single block that is outside of Fork choice. The numbers in parentheses are
// the weights of the nodes before removal
// And the chain A -- B -- C -- D -- E has been fully validated. The numbers in parentheses are
// the weights of the nodes.
//
func TestSetOptimisticToInvalid(t *testing.T) {
	tests := []struct {
		root [32]byte                // the root of the new INVALID block
		tips map[[32]byte]types.Slot // the old synced tips
		wantedParentTip bool
		name              string   // test description
		root              [32]byte // the root of the new INVALID block
		payload           [32]byte // the payload of the last valid hash
		newBestChild      uint64
		newBestDescendant uint64
		newParentWeight   uint64
		returnedRoots     [][32]byte
	}{
		{
			"Remove tip, parent was valid",
			[32]byte{'j'},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'d'}: 103,
				[32]byte{'g'}: 104,
			},
			false,
			[32]byte{'B'},
			3,
			4,
			8,
			[][32]byte{[32]byte{'j'}},
		},
		{
			[32]byte{'j'},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
			},
			true,
			3,
			4,
			12,
			8,
			[][32]byte{[32]byte{'j'}},
		},
		{
			"Remove tip, parent was optimistic",
			[32]byte{'i'},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'d'}: 103,
				[32]byte{'g'}: 104,
				[32]byte{'h'}: 105,
			},
			true,
			[32]byte{'H'},
			NonExistentNode,
			NonExistentNode,
			1,
			[][32]byte{[32]byte{'i'}},
		},
		{
			"Remove tip, lvh is inner and valid",
			[32]byte{'i'},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'d'}: 103,
				[32]byte{'g'}: 104,
			},
			false,
			[32]byte{'D'},
			6,
			8,
			3,
			[][32]byte{[32]byte{'g'}, [32]byte{'h'}, [32]byte{'k'}, [32]byte{'i'}, [32]byte{'l'}},
		},
		{
			"Remove inner, lvh is inner and optimistic",
			[32]byte{'h'},
			[32]byte{'G'},
			10,
			12,
			2,
			[][32]byte{[32]byte{'h'}, [32]byte{'i'}},
		},
		{
			"Remove tip, lvh is inner and optimistic",
			[32]byte{'l'},
			[32]byte{'G'},
			9,
			11,
			2,
			[][32]byte{[32]byte{'k'}, [32]byte{'l'}},
		},
		{
			"Remove tip, lvh is not an ancestor",
			[32]byte{'j'},
			[32]byte{'C'},
			5,
			12,
			7,
			[][32]byte{[32]byte{'j'}},
		},
		{
			"Remove inner, lvh is not an ancestor",
			[32]byte{'g'},
			[32]byte{'J'},
			NonExistentNode,
			NonExistentNode,
			1,
			[][32]byte{[32]byte{'i'}},
			[][32]byte{[32]byte{'g'}, [32]byte{'h'}, [32]byte{'k'}, [32]byte{'i'}, [32]byte{'l'}},
		},
	}
	for _, tc := range tests {
@@ -458,184 +263,71 @@ func TestSetOptimisticToInvalid(t *testing.T) {
		f := setup(1, 1)

		require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
		weights := []uint64{10, 10, 9, 7, 1, 6, 2, 3, 1, 1, 1, 0, 0}
		f.syncedTips.Lock()
		f.syncedTips.validatedTips = tc.tips
		f.syncedTips.Unlock()
		f.store.nodesLock.Lock()
		for i, node := range f.store.nodes {
			node.weight = weights[i]
		}
		// Make j be the best child and descendant of b
		nodeB := f.store.nodes[2]
		nodeB.bestChild = 4
		nodeB.bestDescendant = 4
		idx := f.store.nodesIndices[tc.root]
		node := f.store.nodes[idx]
		parentIndex := node.parent
		require.NotEqual(t, NonExistentNode, parentIndex)
		parent := f.store.nodes[parentIndex]
		f.store.nodesLock.Unlock()
		roots, err := f.SetOptimisticToInvalid(context.Background(), tc.root)
		require.NoError(t, f.SetOptimisticToValid(ctx, [32]byte{'e'}))
		roots, err := f.SetOptimisticToInvalid(ctx, tc.root, tc.payload)
		require.NoError(t, err)
		require.DeepEqual(t, tc.returnedRoots, roots)
		f.syncedTips.RLock()
		_, parentSyncedTip := f.syncedTips.validatedTips[parent.root]
		f.syncedTips.RUnlock()
		require.Equal(t, tc.wantedParentTip, parentSyncedTip)
		require.Equal(t, tc.newBestChild, parent.bestChild)
		require.Equal(t, tc.newBestDescendant, parent.bestDescendant)
		require.Equal(t, tc.newParentWeight, parent.weight)
	}
}

// This tests the algorithm to find the tip of a given node
// We start with the following diagram
//
//            E -- F
//           /
//          C -- D
//         /      \
//   A -- B        G -- H -- I
//         \        \
//          J -- K -- L
//
//
func TestFindSyncedTip(t *testing.T) {
	ctx := context.Background()
	f := setup(1, 1)

	require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
	tests := []struct {
		root   [32]byte                // the root of the block
		tips   map[[32]byte]types.Slot // the synced tips
		wanted [32]byte                // the root of expected tip
	}{
		{
			[32]byte{'i'},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'d'}: 103,
				[32]byte{'g'}: 104,
			},
			[32]byte{'g'},
		},
		{
			[32]byte{'g'},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'d'}: 103,
				[32]byte{'h'}: 104,
				[32]byte{'k'}: 106,
			},
			[32]byte{'d'},
		},
		{
			[32]byte{'e'},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'d'}: 103,
				[32]byte{'g'}: 103,
			},
			[32]byte{'d'},
		},
		{
			[32]byte{'j'},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'f'}: 105,
				[32]byte{'g'}: 104,
				[32]byte{'i'}: 106,
			},
			[32]byte{'b'},
		},
		{
			[32]byte{'g'},
			map[[32]byte]types.Slot{
				[32]byte{'b'}: 101,
				[32]byte{'f'}: 105,
				[32]byte{'g'}: 104,
				[32]byte{'i'}: 106,
			},
			[32]byte{'g'},
		},
	}
	for _, tc := range tests {
		f.store.nodesLock.RLock()
		node := f.store.nodes[f.store.nodesIndices[tc.root]]
		syncedTips := &optimisticStore{
			validatedTips: tc.tips,
		}
		syncedTips.RLock()
		idx, err := f.store.findSyncedTip(ctx, node, syncedTips)
		require.NoError(t, err)
		require.Equal(t, tc.wanted, f.store.nodes[idx].root)

		_, ok := f.store.nodesIndices[tc.root]
		require.Equal(t, false, ok)
		lvh := f.store.nodes[f.store.payloadIndices[tc.payload]]
		require.Equal(t, true, slicesEqual(tc.returnedRoots, roots))
		require.Equal(t, tc.newBestChild, lvh.bestChild)
		require.Equal(t, tc.newBestDescendant, lvh.bestDescendant)
		require.Equal(t, tc.newParentWeight, lvh.weight)
		require.Equal(t, syncing, f.store.nodes[8].status /* F */)
		require.Equal(t, valid, f.store.nodes[5].status /* E */)
		f.store.nodesLock.RUnlock()
		syncedTips.RUnlock()
	}
}
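Invalidation is subtler than validation: given an invalid block and the payload hash of the last valid block (the "lvh"), the store must collect every node descending from the first invalid ancestor, mark the whole subtree invalid, and return the removed roots so the caller can adjust weights and prune side effects. A compressed sketch of the subtree collection only, not the full Prysm routine; firstInvalid is assumed to have been located by walking up from the invalid root to the child of the lvh node:

	// Sketch: a node is in the invalid subtree if walking its parents
	// reaches firstInvalid before the tree root. Quadratic but clear;
	// the committed store is smarter about this.
	func invalidSubtree(nodes []*Node, firstInvalid uint64) [][32]byte {
		var roots [][32]byte
		for i, n := range nodes {
			for j := uint64(i); j != NonExistentNode; j = nodes[j].parent {
				if j == firstInvalid {
					n.status = invalid
					roots = append(roots, n.root)
					break
				}
			}
		}
		return roots
	}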

// This is a regression test (10341)
func TestIsOptimistic_DeadLock(t *testing.T) {
func TestSetOptimisticToInvalid_InvalidRoots(t *testing.T) {
	ctx := context.Background()
	f := setup(1, 1)

	require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 90, [32]byte{'b'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'d'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'e'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
	tips := map[[32]byte]types.Slot{
		[32]byte{'a'}: 100,
		[32]byte{'d'}: 102,
	}
	f.syncedTips.validatedTips = tips
	_, err := f.IsOptimistic(ctx, [32]byte{'a'})
	require.NoError(t, err)

	// Acquire a write lock, this should not hang
	f.store.nodesLock.Lock()
	f.store.nodesLock.Unlock()
	_, err = f.IsOptimistic(ctx, [32]byte{'e'})
	require.NoError(t, err)

	// Acquire a write lock, this should not hang
	f.store.nodesLock.Lock()
	f.store.nodesLock.Unlock()
	_, err = f.IsOptimistic(ctx, [32]byte{'b'})
	require.NoError(t, err)

	// Acquire a write lock, this should not hang
	f.store.nodesLock.Lock()
	f.store.nodesLock.Unlock()

	_, err = f.IsOptimistic(ctx, [32]byte{'c'})
	require.NoError(t, err)

	// Acquire a write lock, this should not hang
	f.store.nodesLock.Lock()
	f.store.nodesLock.Unlock()

	require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
	_, err := f.SetOptimisticToInvalid(ctx, [32]byte{'p'}, [32]byte{'B'})
	require.ErrorIs(t, ErrUnknownNodeRoot, err)
	_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'a'}, [32]byte{'p'})
	require.ErrorIs(t, errInvalidFinalizedNode, err)
}

// This is a regression test (10445)
func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
	ctx := context.Background()
	f := setup(1, 1)

	require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
	f.store.proposerBoostLock.Lock()
	f.store.proposerBoostRoot = [32]byte{'c'}
	f.store.previousProposerBoostScore = 10
	f.store.previousProposerBoostRoot = [32]byte{'b'}
	f.store.proposerBoostLock.Unlock()

	_, err := f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'A'})
	require.NoError(t, err)
	f.store.proposerBoostLock.RLock()
	require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
	require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
	require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
	f.store.proposerBoostLock.RUnlock()
}
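The regression asserts that invalidating the boosted block also clears proposer boost state; otherwise a block that failed execution validation would keep inflating its branch's score on the next head computation. A hedged sketch of the reset that SetOptimisticToInvalid would perform; the helper name and map argument are illustrative, not the committed code:

	// Sketch: drop any boost that references a block in the invalidated set.
	// invalidRoots holds the roots returned by the subtree collection.
	func (s *Store) resetBoostIfInvalid(invalidRoots map[[32]byte]bool) {
		s.proposerBoostLock.Lock()
		defer s.proposerBoostLock.Unlock()
		if invalidRoots[s.proposerBoostRoot] {
			s.proposerBoostRoot = [32]byte{}
		}
		if invalidRoots[s.previousProposerBoostRoot] {
			s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
			s.previousProposerBoostScore = 0
		}
	}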

@@ -9,8 +9,10 @@ import (
	types "github.com/prysmaticlabs/eth2-types"
	fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	pmath "github.com/prysmaticlabs/prysm/math"
	pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

@@ -30,43 +32,14 @@ func New(justifiedEpoch, finalizedEpoch types.Epoch, finalizedRoot [32]byte) *Fo
		proposerBoostRoot: [32]byte{},
		nodes:             make([]*Node, 0),
		nodesIndices:      make(map[[32]byte]uint64),
		payloadIndices:    make(map[[32]byte]uint64),
		canonicalNodes:    make(map[[32]byte]bool),
		pruneThreshold:    defaultPruneThreshold,
	}

	b := make([]uint64, 0)
	v := make([]Vote, 0)
	st := &optimisticStore{
		validatedTips: make(map[[32]byte]types.Slot),
	}
	return &ForkChoice{store: s, balances: b, votes: v, syncedTips: st}
}

// SetSyncedTips sets the synced and validated tips from the passed map
func (f *ForkChoice) SetSyncedTips(tips map[[32]byte]types.Slot) error {
	if len(tips) == 0 {
		return errInvalidSyncedTips
	}
	newTips := make(map[[32]byte]types.Slot, len(tips))
	for k, v := range tips {
		newTips[k] = v
	}
	f.syncedTips.Lock()
	defer f.syncedTips.Unlock()
	f.syncedTips.validatedTips = newTips
	return nil
}

// SyncedTips returns the synced and validated tips from the fork choice store.
func (f *ForkChoice) SyncedTips() map[[32]byte]types.Slot {
	f.syncedTips.RLock()
	defer f.syncedTips.RUnlock()

	m := make(map[[32]byte]types.Slot)
	for k, v := range f.syncedTips.validatedTips {
		m[k] = v
	}
	return m
	return &ForkChoice{store: s, balances: b, votes: v}
}
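With syncedTips gone, constructing a fork choice store needs nothing beyond checkpoint context. An illustrative snippet; the root and hash variables here are placeholders, not values from this diff:

	// Build a store finalized at epoch 1 and insert an optimistic block on top.
	f := New(1 /* justifiedEpoch */, 1 /* finalizedEpoch */, finalizedRoot)
	err := f.InsertOptimisticBlock(ctx, 100, blockRoot, finalizedRoot, payloadHash, 1, 1)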

// Head returns the head root from fork choice store.
@@ -159,7 +132,7 @@ func (f *ForkChoice) InsertOptimisticBlock(
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
// root is different than the current store finalized root, and the number of nodes in the store has met the prune threshold.
func (f *ForkChoice) Prune(ctx context.Context, finalizedRoot [32]byte) error {
	return f.store.prune(ctx, finalizedRoot, f.syncedTips)
	return f.store.prune(ctx, finalizedRoot)
}

// HasNode returns true if the node exists in fork choice store,
@@ -375,6 +348,7 @@ func (s *Store) insert(ctx context.Context,
	}

	s.nodesIndices[root] = index
	s.payloadIndices[payloadHash] = index
	s.nodes = append(s.nodes, n)

	// Update parent with the best child and descendant only if it's available.
@@ -454,6 +428,16 @@ func (s *Store) applyWeightChanges(
	if nodeDelta < 0 {
		d := uint64(-nodeDelta)
		if n.weight < d {
			s.proposerBoostLock.RLock()
			log.WithFields(logrus.Fields{
				"nodeDelta":                  d,
				"nodeRoot":                   fmt.Sprintf("%#x", bytesutil.Trunc(n.root[:])),
				"nodeWeight":                 n.weight,
				"proposerBoostRoot":          fmt.Sprintf("%#x", bytesutil.Trunc(s.proposerBoostRoot[:])),
				"previousProposerBoostRoot":  fmt.Sprintf("%#x", bytesutil.Trunc(s.previousProposerBoostRoot[:])),
				"previousProposerBoostScore": s.previousProposerBoostScore,
			}).Warning("node with invalid weight, setting it to zero")
			s.proposerBoostLock.RUnlock()
			n.weight = 0
		} else {
			n.weight -= d
@@ -599,7 +583,7 @@ func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) err
// prune prunes the store with the new finalized root. The tree is only
// pruned if the input finalized root is different than the stored one and
// the number of nodes in the store has met the prune threshold.
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *optimisticStore) error {
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
	_, span := trace.StartSpan(ctx, "protoArrayForkChoice.prune")
	defer span.End()

@@ -619,18 +603,9 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *o
		return nil
	}

	// Traverse through the node list starting from the finalized node at index 0.
	// Nodes that are not branching off from the finalized node will be removed.
	syncedTips.Lock()
	defer syncedTips.Unlock()

	canonicalNodesMap := make(map[uint64]uint64, uint64(len(s.nodes))-finalizedIndex)
	canonicalNodes := make([]*Node, 1, uint64(len(s.nodes))-finalizedIndex)
	finalizedNode := s.nodes[finalizedIndex]
	finalizedTipIndex, err := s.findSyncedTip(ctx, finalizedNode, syncedTips)
	if err != nil {
		return err
	}
	finalizedNode.parent = NonExistentNode
	canonicalNodes[0] = finalizedNode
	canonicalNodesMap[finalizedIndex] = uint64(0)
@@ -639,20 +614,22 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *o
		node := copyNode(s.nodes[idx])
		parentIdx, ok := canonicalNodesMap[node.parent]
		if ok {
			s.nodesIndices[node.root] = uint64(len(canonicalNodes))
			canonicalNodesMap[idx] = uint64(len(canonicalNodes))
			currentIndex := uint64(len(canonicalNodes))
			s.nodesIndices[node.root] = currentIndex
			s.payloadIndices[node.payloadHash] = currentIndex
			canonicalNodesMap[idx] = currentIndex
			node.parent = parentIdx
			canonicalNodes = append(canonicalNodes, node)
		} else {
			// Remove node and synced tip that is not part of finalized branch.
			// Remove node that is not part of finalized branch.
			delete(s.nodesIndices, node.root)
			_, ok := syncedTips.validatedTips[node.root]
			if ok && idx != finalizedTipIndex {
				delete(syncedTips.validatedTips, node.root)
			}
			delete(s.canonicalNodes, node.root)
			delete(s.payloadIndices, node.payloadHash)
		}
	}
	s.nodesIndices[finalizedRoot] = uint64(0)
	s.canonicalNodes[finalizedRoot] = true
	s.payloadIndices[finalizedNode.payloadHash] = uint64(0)

	// Recompute the best child and descendant for each canonical nodes.
@@ -666,7 +643,6 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *o

	s.nodes = canonicalNodes
	prunedCount.Inc()
	syncedTipsCount.Set(float64(len(syncedTips.validatedTips)))
	return nil
}
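The remap step is the heart of the new prune: surviving nodes are copied into a fresh slice and every index-valued field is rewritten through canonicalNodesMap, which lets payloadIndices stay consistent without any syncedTips pass. A toy illustration of the remapping, with made-up indices:

	// Toy example: old indices 0..4 form a chain and index 2 is finalized,
	// so only nodes 2, 3, 4 survive and are renumbered 0, 1, 2.
	oldParent := []uint64{NonExistentNode, 0, 1, 2, 3}
	remap := map[uint64]uint64{2: 0, 3: 1, 4: 2}
	newParent := make([]uint64, 0, 3)
	for idx := uint64(2); idx < 5; idx++ {
		p, ok := remap[oldParent[idx]]
		if !ok {
			p = NonExistentNode // the finalized node loses its parent
		}
		newParent = append(newParent, p)
	}
	// newParent == [NonExistentNode, 0, 1]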

@@ -674,6 +650,10 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *o
// Any node with diff finalized or justified epoch than the ones in fork choice store
// should not be viable to head.
func (s *Store) leadsToViableHead(node *Node) (bool, error) {
	if node.status == invalid {
		return false, nil
	}

	var bestDescendantViable bool
	bestDescendantIndex := node.bestDescendant

@@ -705,20 +685,6 @@ func (s *Store) viableForHead(node *Node) bool {
	return justified && finalized
}

// Returns the list of leaves in the Fork Choice store.
// These are all the nodes that have NonExistentNode as best child.
// This internal method assumes that the caller holds a lock in s.nodesLock.
func (s *Store) leaves() ([]uint64, error) {
	var leaves []uint64
	for i := uint64(0); i < uint64(len(s.nodes)); i++ {
		node := s.nodes[i]
		if node.bestChild == NonExistentNode {
			leaves = append(leaves, i)
		}
	}
	return leaves, nil
}

// Tips returns all possible chain heads (leaves of fork choice tree).
// Heads roots and heads slots are returned.
func (f *ForkChoice) Tips() ([][32]byte, []types.Slot) {

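The diff cuts off the body of Tips above. Built on leaves, a plausible shape is the following reconstruction — a sketch of how the two fit together, not necessarily the committed code:

	// Hypothetical body for Tips, derived from leaves() above.
	func (f *ForkChoice) Tips() ([][32]byte, []types.Slot) {
		f.store.nodesLock.RLock()
		defer f.store.nodesLock.RUnlock()

		indices, err := f.store.leaves()
		if err != nil {
			return nil, nil
		}
		roots := make([][32]byte, 0, len(indices))
		slots := make([]types.Slot, 0, len(indices))
		for _, i := range indices {
			roots = append(roots, f.store.nodes[i].root)
			slots = append(slots, f.store.nodes[i].slot)
		}
		return roots, slots
	}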
@@ -100,7 +100,7 @@ func TestStore_Head_ContextCancelled(t *testing.T) {

func TestStore_Insert_UnknownParent(t *testing.T) {
	// The new node does not have a parent.
	s := &Store{nodesIndices: make(map[[32]byte]uint64)}
	s := &Store{nodesIndices: make(map[[32]byte]uint64), payloadIndices: make(map[[32]byte]uint64)}
	require.NoError(t, s.insert(context.Background(), 100, [32]byte{'A'}, [32]byte{'B'}, params.BeaconConfig().ZeroHash, 1, 1))
	assert.Equal(t, 1, len(s.nodes), "Did not insert block")
	assert.Equal(t, 1, len(s.nodesIndices), "Did not insert block")
@@ -113,7 +113,7 @@ func TestStore_Insert_UnknownParent(t *testing.T) {
func TestStore_Insert_KnownParent(t *testing.T) {
	// Similar to UnknownParent test, but this time the new node has a valid parent already in store.
	// The new node builds on top of the parent.
	s := &Store{nodesIndices: make(map[[32]byte]uint64)}
	s := &Store{nodesIndices: make(map[[32]byte]uint64), payloadIndices: make(map[[32]byte]uint64)}
	s.nodes = []*Node{{}}
	p := [32]byte{'B'}
	s.nodesIndices[p] = 0
@@ -336,11 +336,10 @@ func TestStore_Prune_LessThanThreshold(t *testing.T) {
	})

	s := &Store{nodes: nodes, nodesIndices: indices, pruneThreshold: 100}
	syncedTips := &optimisticStore{}

	// Finalized root is at index 99 so everything before 99 should be pruned,
	// but PruneThreshold is at 100 so nothing will be pruned.
	require.NoError(t, s.prune(context.Background(), indexToHash(99), syncedTips))
	require.NoError(t, s.prune(context.Background(), indexToHash(99)))
	assert.Equal(t, 100, len(s.nodes), "Incorrect nodes count")
	assert.Equal(t, 100, len(s.nodesIndices), "Incorrect node indices count")
}
@@ -376,11 +375,10 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
		parent: uint64(numOfNodes - 2),
	})
	indices[indexToHash(uint64(numOfNodes-1))] = uint64(numOfNodes - 1)
	s := &Store{nodes: nodes, nodesIndices: indices}
	syncedTips := &optimisticStore{}
	s := &Store{nodes: nodes, nodesIndices: indices, canonicalNodes: map[[32]byte]bool{}, payloadIndices: map[[32]byte]uint64{}}

	// Finalized root is at index 99 so everything before 99 should be pruned.
	require.NoError(t, s.prune(context.Background(), indexToHash(99), syncedTips))
	require.NoError(t, s.prune(context.Background(), indexToHash(99)))
	assert.Equal(t, 1, len(s.nodes), "Incorrect nodes count")
	assert.Equal(t, 1, len(s.nodesIndices), "Incorrect node indices count")
}
@@ -415,16 +413,15 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
		parent: uint64(numOfNodes - 2),
	})

	s := &Store{nodes: nodes, nodesIndices: indices}
	syncedTips := &optimisticStore{}
	s := &Store{nodes: nodes, nodesIndices: indices, canonicalNodes: map[[32]byte]bool{}, payloadIndices: map[[32]byte]uint64{}}

	// Finalized root is at index 11 so everything before 11 should be pruned.
	require.NoError(t, s.prune(context.Background(), indexToHash(10), syncedTips))
	require.NoError(t, s.prune(context.Background(), indexToHash(10)))
	assert.Equal(t, 90, len(s.nodes), "Incorrect nodes count")
	assert.Equal(t, 90, len(s.nodesIndices), "Incorrect node indices count")

	// One more time.
	require.NoError(t, s.prune(context.Background(), indexToHash(20), syncedTips))
	require.NoError(t, s.prune(context.Background(), indexToHash(20)))
	assert.Equal(t, 80, len(s.nodes), "Incorrect nodes count")
	assert.Equal(t, 80, len(s.nodesIndices), "Incorrect node indices count")
}
@@ -444,6 +441,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
			bestDescendant: 1,
			root:           indexToHash(uint64(0)),
			parent:         NonExistentNode,
			payloadHash:    [32]byte{'A'},
		},
		{
			slot: 101,
@@ -451,6 +449,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
			bestChild:      NonExistentNode,
			bestDescendant: NonExistentNode,
			parent:         0,
			payloadHash:    [32]byte{'B'},
		},
		{
			slot: 101,
@@ -458,9 +457,9 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
			parent:         0,
			bestChild:      NonExistentNode,
			bestDescendant: NonExistentNode,
			payloadHash:    [32]byte{'C'},
		},
	}
	syncedTips := &optimisticStore{}
	s := &Store{
		pruneThreshold: 0,
		nodes:          nodes,
@@ -469,9 +468,22 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
			indexToHash(uint64(1)): 1,
			indexToHash(uint64(2)): 2,
		},
		canonicalNodes: map[[32]byte]bool{
			indexToHash(uint64(0)): true,
			indexToHash(uint64(1)): true,
			indexToHash(uint64(2)): true,
		},
		payloadIndices: map[[32]byte]uint64{
			[32]byte{'A'}: 0,
			[32]byte{'B'}: 1,
			[32]byte{'C'}: 2,
		},
	}
	require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1)), syncedTips))
	require.Equal(t, len(s.nodes), 1)
	require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1))))
	require.Equal(t, 1, len(s.nodes))
	require.Equal(t, 1, len(s.nodesIndices))
	require.Equal(t, 1, len(s.canonicalNodes))
	require.Equal(t, 1, len(s.payloadIndices))
}

// This test starts with the following branching diagram
@@ -486,38 +498,74 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
//          J -- K -- L
//
//
// Synced tips are B, D and E. And we finalize F. All that is left in fork
// choice is F, and the only synced tip left is E which is now away from Fork
// Choice.
func TestStore_PruneSyncedTips(t *testing.T) {
func TestStore_PruneBranched(t *testing.T) {
	ctx := context.Background()
	f := setup(1, 1)

	require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
	require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
	syncedTips := &optimisticStore{
		validatedTips: map[[32]byte]types.Slot{
			[32]byte{'b'}: 101,
			[32]byte{'d'}: 103,
			[32]byte{'e'}: 104,
	tests := []struct {
		finalizedRoot      [32]byte
		wantedCanonical    [32]byte
		wantedNonCanonical [32]byte
		canonicalCount     int
		payloadHash        [32]byte
		payloadIndex       uint64
		nonExistentPayload [32]byte
	}{
		{
			[32]byte{'f'},
			[32]byte{'f'},
			[32]byte{'a'},
			1,
			[32]byte{'F'},
			0,
			[32]byte{'H'},
		},
		{
			[32]byte{'d'},
			[32]byte{'e'},
			[32]byte{'i'},
			3,
			[32]byte{'E'},
			1,
			[32]byte{'C'},
		},
		{
			[32]byte{'b'},
			[32]byte{'f'},
			[32]byte{'h'},
			5,
			[32]byte{'D'},
			3,
			[32]byte{'A'},
		},
	}
	f.syncedTips = syncedTips
	f.store.pruneThreshold = 0
	require.NoError(t, f.Prune(ctx, [32]byte{'f'}))
	require.Equal(t, 1, len(f.syncedTips.validatedTips))
	_, ok := f.syncedTips.validatedTips[[32]byte{'e'}]
	require.Equal(t, true, ok)

	for _, tc := range tests {
		f := setup(1, 1)
		require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
		require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
		f.store.pruneThreshold = 0
		require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'f'}))
		require.Equal(t, true, f.IsCanonical([32]byte{'a'}))
		require.Equal(t, true, f.IsCanonical([32]byte{'f'}))

		require.NoError(t, f.Prune(ctx, tc.finalizedRoot))
		require.Equal(t, tc.canonicalCount, len(f.store.canonicalNodes))
		require.Equal(t, true, f.IsCanonical(tc.wantedCanonical))
		require.Equal(t, false, f.IsCanonical(tc.wantedNonCanonical))
		require.Equal(t, tc.payloadIndex, f.store.payloadIndices[tc.payloadHash])
		_, ok := f.store.payloadIndices[tc.nonExistentPayload]
		require.Equal(t, false, ok)
	}
}

func TestStore_LeadsToViableHead(t *testing.T) {
@@ -546,20 +594,6 @@ func TestStore_LeadsToViableHead(t *testing.T) {
	}
}

func TestStore_SetSyncedTips(t *testing.T) {
	f := setup(1, 1)
	tips := make(map[[32]byte]types.Slot)
	require.ErrorIs(t, errInvalidSyncedTips, f.SetSyncedTips(tips))
	tips[bytesutil.ToBytes32([]byte{'a'})] = 1
	require.NoError(t, f.SetSyncedTips(tips))
	f.syncedTips.RLock()
	defer f.syncedTips.RUnlock()
	require.Equal(t, 1, len(f.syncedTips.validatedTips))
	slot, ok := f.syncedTips.validatedTips[bytesutil.ToBytes32([]byte{'a'})]
	require.Equal(t, true, ok)
	require.Equal(t, types.Slot(1), slot)
}

func TestStore_ViableForHead(t *testing.T) {
	tests := []struct {
		n *Node

@@ -9,11 +9,10 @@ import (

// ForkChoice defines the overall fork choice store which includes all block nodes, validator's latest votes and balances.
type ForkChoice struct {
	store      *Store
	votes      []Vote // tracks individual validator's last vote.
	votesLock  sync.RWMutex
	balances   []uint64 // tracks individual validator's last justified balances.
	syncedTips *optimisticStore
	store     *Store
	votes     []Vote // tracks individual validator's last vote.
	votesLock sync.RWMutex
	balances  []uint64 // tracks individual validator's last justified balances.
}

// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
@@ -28,6 +27,7 @@ type Store struct {
	nodes             []*Node                                 // list of block nodes, each node is a representation of one block.
	nodesIndices      map[[fieldparams.RootLength]byte]uint64 // the root of block node and the nodes index in the list.
	canonicalNodes    map[[fieldparams.RootLength]byte]bool   // the canonical block nodes.
	payloadIndices    map[[fieldparams.RootLength]byte]uint64 // the payload hash of block node and the index in the list
	nodesLock         sync.RWMutex
	proposerBoostLock sync.RWMutex
}
@@ -44,15 +44,17 @@ type Node struct {
	weight         uint64                       // weight of this node.
	bestChild      uint64                       // bestChild index of this node.
	bestDescendant uint64                       // bestDescendant of this node.
	graffiti       [fieldparams.RootLength]byte // graffiti of the block node.
	status         status                       // optimistic status of this node
}

// optimisticStore defines a structure that tracks the tips of the fully
// validated blocks tree.
type optimisticStore struct {
	validatedTips map[[32]byte]types.Slot
	sync.RWMutex
}
// enum used as optimistic status of a node
type status uint8

const (
	syncing status = iota // the node is optimistic
	valid                 // fully validated node
	invalid               // invalid execution payload
)
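Because syncing is declared first, it is the zero value of status: a Node created without an explicit status starts out optimistic, and only SetOptimisticToValid promotes it. A one-line illustration of that default:

	n := &Node{}                        // status field left at its zero value
	isOptimistic := n.status == syncing // true for freshly inserted nodes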

// Vote defines an individual validator's vote.
type Vote struct {

@@ -12,7 +12,6 @@ import (
func TestVotes_CanFindHead(t *testing.T) {
	balances := []uint64{1, 1}
	f := setup(1, 1)
	syncedTips := &optimisticStore{}

	// The head should always start at the finalized block.
	r, err := f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
@@ -249,7 +248,7 @@ func TestVotes_CanFindHead(t *testing.T) {

	// Verify pruning below the prune threshold does not affect head.
	f.store.pruneThreshold = 1000
	require.NoError(t, f.store.prune(context.Background(), indexToHash(5), syncedTips))
	require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
	assert.Equal(t, 11, len(f.store.nodes), "Incorrect nodes length after prune")

	r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
@@ -273,7 +272,7 @@ func TestVotes_CanFindHead(t *testing.T) {
	//         /    \
	//        9     10
	f.store.pruneThreshold = 1
	require.NoError(t, f.store.prune(context.Background(), indexToHash(5), syncedTips))
	require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
	assert.Equal(t, 5, len(f.store.nodes), "Incorrect nodes length after prune")

	r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
@@ -18,6 +18,7 @@ go_library(
        "//api/gateway:go_default_library",
        "//async/event:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/cache/depositcache:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/kv:go_default_library",

@@ -21,6 +21,7 @@ import (
	apigateway "github.com/prysmaticlabs/prysm/api/gateway"
	"github.com/prysmaticlabs/prysm/async/event"
	"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
@@ -94,6 +95,7 @@ type BeaconNode struct {
	slashingsPool           slashings.PoolManager
	syncCommitteePool       synccommittee.Pool
	depositCache            *depositcache.DepositCache
	proposerIdsCache        *cache.ProposerPayloadIDsCache
	stateFeed               *event.Feed
	blockFeed               *event.Feed
	opFeed                  *event.Feed
@@ -152,6 +154,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
		slasherBlockHeadersFeed: new(event.Feed),
		slasherAttestationsFeed: new(event.Feed),
		serviceFlagOpts:         &serviceFlagOpts{},
		proposerIdsCache:        cache.NewProposerPayloadIDsCache(),
	}

	for _, opt := range opts {
@@ -585,6 +588,7 @@ func (b *BeaconNode) registerBlockchainService() error {
		blockchain.WithStateGen(b.stateGen),
		blockchain.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
		blockchain.WithFinalizedStateAtStartUp(b.finalizedStateAtStartUp),
		blockchain.WithProposerIdsCache(b.proposerIdsCache),
	)
	blockchainService, err := blockchain.NewService(b.ctx, opts...)
	if err != nil {
@@ -801,6 +805,7 @@ func (b *BeaconNode) registerRPCService() error {
		StateGen:                b.stateGen,
		EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
		MaxMsgSize:              maxMsgSize,
		ProposerIdsCache:        b.proposerIdsCache,
		ExecutionEngineCaller:   web3Service,
	})

@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "auth.go",
        "block_cache.go",
        "block_reader.go",
        "check_transition_config.go",
@@ -16,6 +15,7 @@ go_library(
        "options.go",
        "prometheus.go",
        "provider.go",
        "rpc_connection.go",
        "service.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/powchain",
@@ -59,7 +59,6 @@ go_library(
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
        "@com_github_ethereum_go_ethereum//ethclient:go_default_library",
        "@com_github_ethereum_go_ethereum//rpc:go_default_library",
        "@com_github_golang_jwt_jwt_v4//:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
@@ -74,7 +73,6 @@ go_test(
    name = "go_default_test",
    size = "medium",
    srcs = [
        "auth_test.go",
        "block_cache_test.go",
        "block_reader_test.go",
        "check_transition_config_test.go",
@@ -125,7 +123,6 @@ go_test(
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
        "@com_github_ethereum_go_ethereum//rpc:go_default_library",
        "@com_github_ethereum_go_ethereum//trie:go_default_library",
        "@com_github_golang_jwt_jwt_v4//:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",

@@ -13,6 +13,7 @@ import (
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/network"
	pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
	"github.com/sirupsen/logrus"
)
@@ -81,7 +82,7 @@ func (s *Service) checkTransitionConfiguration(
			return
		}
	case tm := <-ticker.C:
		ctx, cancel := context.WithDeadline(ctx, tm.Add(DefaultRPCHTTPTimeout))
		ctx, cancel := context.WithDeadline(ctx, tm.Add(network.DefaultRPCHTTPTimeout))
		err = s.ExchangeTransitionConfiguration(ctx, cfg)
		s.handleExchangeConfigurationError(err)
		if !hasTtdReached {

@@ -1,9 +1,6 @@
package powchain

import (
	"net/http"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
@@ -11,11 +8,9 @@ import (
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
	"github.com/prysmaticlabs/prysm/network"
	"github.com/prysmaticlabs/prysm/network/authorization"
)

// DefaultRPCHTTPTimeout for HTTP requests via an RPC connection to an execution node.
const DefaultRPCHTTPTimeout = time.Second * 6

type Option func(s *Service) error

// WithHttpEndpoints deduplicates and parses http endpoints for the powchain service to use,
@@ -38,20 +33,29 @@ func WithHttpEndpoints(endpointStrings []string) Option {
	}
}

// WithJWTSecret for authenticating the execution node JSON-RPC endpoint.
func WithJWTSecret(secret []byte) Option {
	return func(c *Service) error {
// WithHttpEndpointsAndJWTSecret for authenticating the execution node JSON-RPC endpoint.
func WithHttpEndpointsAndJWTSecret(endpointStrings []string, secret []byte) Option {
	return func(s *Service) error {
		if len(secret) == 0 {
			return nil
		}
		authTransport := &jwtTransport{
			underlyingTransport: http.DefaultTransport,
			jwtSecret:           secret,
		stringEndpoints := dedupEndpoints(endpointStrings)
		endpoints := make([]network.Endpoint, len(stringEndpoints))
		// Overwrite authorization type for all endpoints to be of a bearer
		// type.
		for i, e := range stringEndpoints {
			hEndpoint := HttpEndpoint(e)
			hEndpoint.Auth.Method = authorization.Bearer
			hEndpoint.Auth.Value = string(secret)
			endpoints[i] = hEndpoint
		}
		c.cfg.httpRPCClient = &http.Client{
			Timeout:   DefaultRPCHTTPTimeout,
			Transport: authTransport,
		// Select first http endpoint in the provided list.
		var currEndpoint network.Endpoint
		if len(endpointStrings) > 0 {
			currEndpoint = endpoints[0]
		}
		s.cfg.httpEndpoints = endpoints
		s.cfg.currHttpEndpoint = currEndpoint
		return nil
	}
}

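Wiring the replaced option would look roughly like this; the surrounding setup and the jwtSecret variable are assumptions for illustration, while NewService and the option itself come from this diff:

	// Hypothetical wiring: one authenticated engine endpoint plus a JWT
	// secret previously read from disk.
	svc, err := powchain.NewService(ctx,
		powchain.WithHttpEndpointsAndJWTSecret([]string{"http://localhost:8551"}, jwtSecret),
	)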
176
beacon-chain/powchain/rpc_connection.go
Normal file
@@ -0,0 +1,176 @@
package powchain

import (
	"context"
	"fmt"
	"net/url"
	"time"

	"github.com/ethereum/go-ethereum/ethclient"
	gethRPC "github.com/ethereum/go-ethereum/rpc"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/config/params"
	contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
	"github.com/prysmaticlabs/prysm/io/logs"
	"github.com/prysmaticlabs/prysm/network"
	"github.com/prysmaticlabs/prysm/network/authorization"
)

func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpoint network.Endpoint) error {
	client, err := s.newRPCClientWithAuth(ctx, currEndpoint)
	if err != nil {
		return errors.Wrap(err, "could not dial execution node")
	}
	// Attach the clients to the service struct.
	fetcher := ethclient.NewClient(client)
	s.rpcClient = client
	s.httpLogger = fetcher
	s.eth1DataFetcher = fetcher

	depositContractCaller, err := contracts.NewDepositContractCaller(s.cfg.depositContractAddr, fetcher)
	if err != nil {
		client.Close()
		return errors.Wrap(err, "could not initialize deposit contract caller")
	}
	s.depositContractCaller = depositContractCaller

	// Ensure we have the correct chain and deposit IDs.
	if err := ensureCorrectExecutionChain(ctx, fetcher); err != nil {
		client.Close()
		return errors.Wrap(err, "could not make initial request to verify execution chain ID")
	}
	s.updateConnectedETH1(true)
	s.runError = nil
	return nil
}
||||
|
||||
// Every N seconds, defined as a backoffPeriod, attempts to re-establish an execution client
|
||||
// connection and if this does not work, we fallback to the next endpoint if defined.
|
||||
func (s *Service) pollConnectionStatus(ctx context.Context) {
|
||||
// Use a custom logger to only log errors
|
||||
logCounter := 0
|
||||
errorLogger := func(err error, msg string) {
|
||||
if logCounter > logThreshold {
|
||||
log.Errorf("%s: %v", msg, err)
|
||||
logCounter = 0
|
||||
}
|
||||
logCounter++
|
||||
}
|
||||
ticker := time.NewTicker(backOffPeriod)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
log.Debugf("Trying to dial endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
|
||||
if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
|
||||
errorLogger(err, "Could not connect to execution client endpoint")
|
||||
s.runError = err
|
||||
s.fallbackToNextEndpoint()
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Received cancelled context,closing existing powchain service")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Forces to retry an execution client connection.
|
||||
func (s *Service) retryExecutionClientConnection(ctx context.Context, err error) {
|
||||
s.runError = err
|
||||
s.updateConnectedETH1(false)
|
||||
// Back off for a while before redialing.
|
||||
time.Sleep(backOffPeriod)
|
||||
if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
|
||||
s.runError = err
|
||||
return
|
||||
}
|
||||
// Reset run error in the event of a successful connection.
|
||||
s.runError = nil
|
||||
}
|
||||
|
||||
// This performs a health check on our primary endpoint, and if it
|
||||
// is ready to serve we connect to it again. This method is only
|
||||
// relevant if we are on our backup endpoint.
|
||||
func (s *Service) checkDefaultEndpoint(ctx context.Context) {
|
||||
primaryEndpoint := s.cfg.httpEndpoints[0]
|
||||
// Return early if we are running on our primary
|
||||
// endpoint.
|
||||
if s.cfg.currHttpEndpoint.Equals(primaryEndpoint) {
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.setupExecutionClientConnections(ctx, primaryEndpoint); err != nil {
|
||||
log.Debugf("Primary endpoint not ready: %v", err)
|
||||
return
|
||||
}
|
||||
s.updateCurrHttpEndpoint(primaryEndpoint)
|
||||
}
|
||||
|
||||
// This is an inefficient way to search for the next endpoint, but given N is
|
||||
// expected to be small, it is fine to search this way.
|
||||
func (s *Service) fallbackToNextEndpoint() {
|
||||
currEndpoint := s.cfg.currHttpEndpoint
|
||||
currIndex := 0
|
||||
totalEndpoints := len(s.cfg.httpEndpoints)
|
||||
|
||||
for i, endpoint := range s.cfg.httpEndpoints {
|
||||
if endpoint.Equals(currEndpoint) {
|
||||
currIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
nextIndex := currIndex + 1
|
||||
if nextIndex >= totalEndpoints {
|
||||
nextIndex = 0
|
||||
}
|
||||
s.updateCurrHttpEndpoint(s.cfg.httpEndpoints[nextIndex])
|
||||
if nextIndex != currIndex {
|
||||
log.Infof("Falling back to alternative endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
|
||||
}
|
||||
}
|
||||
|
||||
// Initializes an RPC connection with authentication headers.
|
||||
func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.Endpoint) (*gethRPC.Client, error) {
|
||||
// Need to handle ipc and http
|
||||
var client *gethRPC.Client
|
||||
u, err := url.Parse(endpoint.Url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch u.Scheme {
|
||||
case "http", "https":
|
||||
client, err = gethRPC.DialHTTPWithClient(endpoint.Url, endpoint.HttpClient())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case "":
|
||||
client, err = gethRPC.DialIPC(ctx, endpoint.Url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
|
||||
}
|
||||
if endpoint.Auth.Method != authorization.None {
|
||||
header, err := endpoint.Auth.ToHeaderValue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.SetHeader("Authorization", header)
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// Checks the chain ID of the execution client to ensure
|
||||
// it matches local parameters of what Prysm expects.
|
||||
func ensureCorrectExecutionChain(ctx context.Context, client *ethclient.Client) error {
|
||||
cID, err := client.ChainID(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
wantChainID := params.BeaconConfig().DepositChainID
|
||||
if cID.Uint64() != wantChainID {
|
||||
return fmt.Errorf("wanted chain ID %d, got %d", wantChainID, cID.Uint64())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
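To make the fallback rotation above concrete, here is a minimal, self-contained sketch of the same round-robin selection over a plain slice of URLs; the function name and types are illustrative, not the service's own.

package main

import "fmt"

// nextEndpoint returns the endpoint to try after curr, wrapping around
// to the first entry when curr is the last one. It mirrors the
// linear-scan approach used by fallbackToNextEndpoint above.
func nextEndpoint(endpoints []string, curr string) string {
	currIndex := 0
	for i, e := range endpoints {
		if e == curr {
			currIndex = i
			break
		}
	}
	nextIndex := (currIndex + 1) % len(endpoints)
	return endpoints[nextIndex]
}

func main() {
	eps := []string{"http://primary:8551", "http://backup:8551"}
	fmt.Println(nextEndpoint(eps, "http://primary:8551")) // http://backup:8551
	fmt.Println(nextEndpoint(eps, "http://backup:8551"))  // http://primary:8551 (wraps)
}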
@@ -7,7 +7,6 @@ import (
"context"
"fmt"
"math/big"
"net/http"
"reflect"
"runtime/debug"
"sort"
@@ -39,10 +38,8 @@ import (
"github.com/prysmaticlabs/prysm/container/trie"
contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/io/logs"
"github.com/prysmaticlabs/prysm/monitoring/clientstats"
"github.com/prysmaticlabs/prysm/network"
"github.com/prysmaticlabs/prysm/network/authorization"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/time"
"github.com/prysmaticlabs/prysm/time/slots"
@@ -114,6 +111,7 @@ type Chain interface {
// RPCDataFetcher defines a subset of methods conformed to by ETH1.0 RPC clients for
// fetching eth1 data from the clients.
type RPCDataFetcher interface {
Close()
HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error)
HeaderByHash(ctx context.Context, hash common.Hash) (*gethTypes.Header, error)
SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error)
@@ -121,6 +119,7 @@ type RPCDataFetcher interface {

// RPCClient defines the rpc methods required to interact with the eth1 node.
type RPCClient interface {
Close()
BatchCall(b []gethRPC.BatchElem) error
CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error
}
@@ -135,7 +134,6 @@ type config struct {
eth1HeaderReqLimit uint64
beaconNodeStatsUpdater BeaconNodeStatsUpdater
httpEndpoints []network.Endpoint
httpRPCClient *http.Client
currHttpEndpoint network.Endpoint
finalizedStateAtStartup state.BeaconState
}
@@ -228,14 +226,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {

// Start a web3 service's main event loop.
func (s *Service) Start() {

if err := s.connectToPowChain(); err != nil {
log.WithError(err).Fatal("Could not connect to execution endpoint")
if err := s.setupExecutionClientConnections(s.ctx, s.cfg.currHttpEndpoint); err != nil {
log.WithError(err).Error("Could not connect to execution endpoint")
}

log.WithFields(logrus.Fields{
"endpoint": logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url),
}).Info("Connected to Ethereum execution client RPC")
// If the chain has not started already and we don't have access to eth1 nodes, we will not be
// able to generate the genesis state.
if !s.chainStartData.Chainstarted && s.cfg.currHttpEndpoint.Url == "" {
@@ -253,7 +246,7 @@ func (s *Service) Start() {
s.isRunning = true

// Poll the execution client connection and fall back if errors occur.
go s.pollConnectionStatus()
go s.pollConnectionStatus(s.ctx)

// Check transition configuration for the engine API client in the background.
go s.checkTransitionConfiguration(s.ctx, make(chan *feed.Event, 1))
@@ -266,7 +259,12 @@ func (s *Service) Stop() error {
if s.cancel != nil {
defer s.cancel()
}
s.closeClients()
if s.rpcClient != nil {
s.rpcClient.Close()
}
if s.eth1DataFetcher != nil {
s.eth1DataFetcher.Close()
}
return nil
}

@@ -338,10 +336,7 @@ func (s *Service) CurrentETH1Endpoint() string {

// CurrentETH1ConnectionError returns the error (if any) of the current connection.
func (s *Service) CurrentETH1ConnectionError() error {
httpClient, rpcClient, err := s.dialETH1Nodes(s.cfg.currHttpEndpoint)
httpClient.Close()
rpcClient.Close()
return err
return s.runError
}

// ETH1Endpoints returns the slice of HTTP endpoint URLs (default is 0th element).
@@ -358,10 +353,17 @@ func (s *Service) ETH1Endpoints() []string {
func (s *Service) ETH1ConnectionErrors() []error {
var errs []error
for _, ep := range s.cfg.httpEndpoints {
httpClient, rpcClient, err := s.dialETH1Nodes(ep)
httpClient.Close()
rpcClient.Close()
errs = append(errs, err)
client, err := s.newRPCClientWithAuth(s.ctx, ep)
if err != nil {
errs = append(errs, err)
continue
}
if err := ensureCorrectExecutionChain(s.ctx, ethclient.NewClient(client)); err != nil {
client.Close()
errs = append(errs, err)
continue
}
client.Close()
}
return errs
}
@@ -376,146 +378,6 @@ func (s *Service) followBlockHeight(_ context.Context) (uint64, error) {
return latestValidBlock, nil
}

func (s *Service) connectToPowChain() error {
httpClient, rpcClient, err := s.dialETH1Nodes(s.cfg.currHttpEndpoint)
if err != nil {
return errors.Wrap(err, "could not dial execution node")
}

depositContractCaller, err := contracts.NewDepositContractCaller(s.cfg.depositContractAddr, httpClient)
if err != nil {
return errors.Wrap(err, "could not initialize deposit contract caller")
}

if httpClient == nil || rpcClient == nil || depositContractCaller == nil {
return errors.New("execution client RPC is nil")
}
s.httpLogger = httpClient
s.eth1DataFetcher = httpClient
s.depositContractCaller = depositContractCaller
s.rpcClient = rpcClient

s.updateConnectedETH1(true)
s.runError = nil
return nil
}

func (s *Service) dialETH1Nodes(endpoint network.Endpoint) (*ethclient.Client, *gethRPC.Client, error) {
httpRPCClient, err := gethRPC.Dial(endpoint.Url)
if err != nil {
return nil, nil, err
}
if endpoint.Auth.Method != authorization.None {
header, err := endpoint.Auth.ToHeaderValue()
if err != nil {
return nil, nil, err
}
httpRPCClient.SetHeader("Authorization", header)
}
httpClient := ethclient.NewClient(httpRPCClient)
// Add a method to clean up and close clients in the event
// of any connection failure.
closeClients := func() {
httpRPCClient.Close()
httpClient.Close()
}
// Make a simple call to ensure we are actually connected to a working node.
cID, err := httpClient.ChainID(s.ctx)
if err != nil {
closeClients()
return nil, nil, err
}
nID, err := httpClient.NetworkID(s.ctx)
if err != nil {
closeClients()
return nil, nil, err
}
if cID.Uint64() != params.BeaconConfig().DepositChainID {
closeClients()
return nil, nil, fmt.Errorf("eth1 node using incorrect chain id, %d != %d", cID.Uint64(), params.BeaconConfig().DepositChainID)
}
if nID.Uint64() != params.BeaconConfig().DepositNetworkID {
closeClients()
return nil, nil, fmt.Errorf("eth1 node using incorrect network id, %d != %d", nID.Uint64(), params.BeaconConfig().DepositNetworkID)
}

return httpClient, httpRPCClient, nil
}

// Closes down our active eth1 clients.
func (s *Service) closeClients() {
gethClient, ok := s.rpcClient.(*gethRPC.Client)
if ok {
gethClient.Close()
}
httpClient, ok := s.eth1DataFetcher.(*ethclient.Client)
if ok {
httpClient.Close()
}
}

func (s *Service) pollConnectionStatus() {
// Use a custom logger to only log errors.
logCounter := 0
errorLogger := func(err error, msg string) {
if logCounter > logThreshold {
log.Errorf("%s: %v", msg, err)
logCounter = 0
}
logCounter++
}
ticker := time.NewTicker(backOffPeriod)
defer ticker.Stop()
for {
select {
case <-ticker.C:
log.Debugf("Trying to dial endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
errConnect := s.connectToPowChain()
if errConnect != nil {
errorLogger(errConnect, "Could not connect to powchain endpoint")
s.runError = errConnect
s.fallbackToNextEndpoint()
continue
}
case <-s.ctx.Done():
log.Debug("Received cancelled context, closing existing powchain service")
return
}
}
}

// Checks if the eth1 node is healthy and ready to serve before
// fetching data from it.
func (s *Service) isEth1NodeSynced() (bool, error) {
syncProg, err := s.eth1DataFetcher.SyncProgress(s.ctx)
if err != nil {
return false, err
}
if syncProg != nil {
return false, nil
}
head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
if err != nil {
return false, err
}
return !eth1HeadIsBehind(head.Time), nil
}

// Reconnect to the eth1 node in case of any failure.
func (s *Service) retryETH1Node(err error) {
s.runError = err
s.updateConnectedETH1(false)
// Back off for a while before resuming dialing the eth1 node.
time.Sleep(backOffPeriod)
if err := s.connectToPowChain(); err != nil {
s.runError = err
return
}
// Reset the run error in the event of a successful connection.
s.runError = nil
}

func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositContainer) error {
if len(ctrs) == 0 {
return nil
@@ -581,6 +443,7 @@ func (s *Service) processBlockHeader(header *gethTypes.Header) {
log.WithFields(logrus.Fields{
"blockNumber": s.latestEth1Data.BlockHeight,
"blockHash": hexutil.Encode(s.latestEth1Data.BlockHash),
"difficulty": header.Difficulty.String(),
}).Debug("Latest eth1 chain event")
}

@@ -649,7 +512,7 @@ func (s *Service) handleETH1FollowDistance() {
fiveMinutesTimeout := prysmTime.Now().Add(-5 * time.Minute)
// Check that the web3 client is syncing.
if time.Unix(int64(s.latestEth1Data.BlockTime), 0).Before(fiveMinutesTimeout) {
log.Warn("eth1 client is not syncing")
log.Warn("Execution client is not syncing")
}
if !s.chainStartData.Chainstarted {
if err := s.checkBlockNumberForChainStart(ctx, big.NewInt(int64(s.latestEth1Data.LastRequestedBlock))); err != nil {
@@ -679,6 +542,15 @@ func (s *Service) handleETH1FollowDistance() {
}

func (s *Service) initPOWService() {
// Use a custom logger to only log errors.
logCounter := 0
errorLogger := func(err error, msg string) {
if logCounter > logThreshold {
log.Errorf("%s: %v", msg, err)
logCounter = 0
}
logCounter++
}

// Run in a select loop to retry in the event of any failures.
for {
@@ -689,8 +561,8 @@ func (s *Service) initPOWService() {
ctx := s.ctx
header, err := s.eth1DataFetcher.HeaderByNumber(ctx, nil)
if err != nil {
log.Errorf("Unable to retrieve latest ETH1.0 chain header: %v", err)
s.retryETH1Node(err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to retrieve latest execution client header")
continue
}

@@ -699,14 +571,14 @@ func (s *Service) initPOWService() {
s.latestEth1Data.BlockTime = header.Time

if err := s.processPastLogs(ctx); err != nil {
log.Errorf("Unable to process past logs %v", err)
s.retryETH1Node(err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to process past deposit contract logs")
continue
}
// Cache eth1 headers from our voting period.
if err := s.cacheHeadersForEth1DataVote(ctx); err != nil {
log.Errorf("Unable to process past headers %v", err)
s.retryETH1Node(err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to cache headers for execution client votes")
continue
}
// Handle edge case with embedded genesis state by fetching genesis header to determine
@@ -719,15 +591,15 @@ func (s *Service) initPOWService() {
if genHash != [32]byte{} {
genHeader, err := s.eth1DataFetcher.HeaderByHash(ctx, genHash)
if err != nil {
log.Errorf("Unable to retrieve genesis ETH1.0 chain header: %v", err)
s.retryETH1Node(err)
s.retryExecutionClientConnection(ctx, err)
errorLogger(err, "Unable to retrieve proof-of-stake genesis block data")
continue
}
genBlock = genHeader.Number.Uint64()
}
s.chainStartData.GenesisBlock = genBlock
if err := s.savePowchainData(ctx); err != nil {
log.Errorf("Unable to save powchain data: %v", err)
errorLogger(err, "Unable to save execution client data")
}
}
return
@@ -756,17 +628,16 @@ func (s *Service) run(done <-chan struct{}) {
head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
if err != nil {
log.WithError(err).Debug("Could not fetch latest eth1 header")
s.retryETH1Node(err)
continue
}
if eth1HeadIsBehind(head.Time) {
s.retryExecutionClientConnection(s.ctx, err)
log.WithError(errFarBehind).Debug("Could not get an up to date eth1 header")
s.retryETH1Node(errFarBehind)
continue
}
s.processBlockHeader(head)
s.handleETH1FollowDistance()
s.checkDefaultEndpoint()
s.checkDefaultEndpoint(s.ctx)
case <-chainstartTicker.C:
if s.chainStartData.Chainstarted {
chainstartTicker.Stop()
@@ -853,59 +724,6 @@ func (s *Service) determineEarliestVotingBlock(ctx context.Context, followBlock
return hdr.Number.Uint64(), nil
}

// This performs a health check on our primary endpoint, and if it
// is ready to serve we connect to it again. This method is only
// relevant if we are on our backup endpoint.
func (s *Service) checkDefaultEndpoint() {
primaryEndpoint := s.cfg.httpEndpoints[0]
// Return early if we are running on our primary endpoint.
if s.cfg.currHttpEndpoint.Equals(primaryEndpoint) {
return
}

httpClient, rpcClient, err := s.dialETH1Nodes(primaryEndpoint)
if err != nil {
log.Debugf("Primary endpoint not ready: %v", err)
return
}
log.Info("Primary endpoint ready again, switching back to it")
// Close the clients and let our main connection routine
// properly connect with it.
httpClient.Close()
rpcClient.Close()
// Close current active clients.
s.closeClients()

// Switch back to the primary endpoint and try connecting to it again.
s.updateCurrHttpEndpoint(primaryEndpoint)
s.retryETH1Node(nil)
}

// This is an inefficient way to search for the next endpoint, but given N is expected to be
// small ( < 25), it is fine to search this way.
func (s *Service) fallbackToNextEndpoint() {
currEndpoint := s.cfg.currHttpEndpoint
currIndex := 0
totalEndpoints := len(s.cfg.httpEndpoints)

for i, endpoint := range s.cfg.httpEndpoints {
if endpoint.Equals(currEndpoint) {
currIndex = i
break
}
}
nextIndex := currIndex + 1
if nextIndex >= totalEndpoints {
nextIndex = 0
}
s.updateCurrHttpEndpoint(s.cfg.httpEndpoints[nextIndex])
if nextIndex != currIndex {
log.Infof("Falling back to alternative endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
}
}

// Initializes our service from the provided eth1data object by initializing all the relevant
// fields and data.
func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ETH1ChainData) error {
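The retry paths above share one pattern: a ticker drives periodic redial attempts until connection succeeds or the service context is cancelled. A hedged, self-contained sketch of that pattern under stated assumptions (the names and the stub connect function are hypothetical; the real service also rotates endpoints and rate-limits error logs):

package main

import (
	"context"
	"fmt"
	"time"
)

// pollUntilConnected retries connect on a fixed interval until it
// succeeds or ctx is cancelled, mirroring pollConnectionStatus.
func pollUntilConnected(ctx context.Context, interval time.Duration, connect func() error) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err := connect(); err != nil {
				fmt.Println("connect failed, will retry:", err)
				continue
			}
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	attempts := 0
	_ = pollUntilConnected(ctx, 100*time.Millisecond, func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("attempt %d", attempts)
		}
		return nil
	})
	fmt.Println("connected after", attempts, "attempts")
}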
@@ -42,6 +42,8 @@ type goodLogger struct {
backend *backends.SimulatedBackend
}

func (_ *goodLogger) Close() {}

func (g *goodLogger) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- gethTypes.Log) (ethereum.Subscription, error) {
if g.backend == nil {
return new(event.Feed).Subscribe(ch), nil
@@ -80,6 +82,8 @@ type goodFetcher struct {
backend *backends.SimulatedBackend
}

func (_ *goodFetcher) Close() {}

func (g *goodFetcher) HeaderByHash(_ context.Context, hash common.Hash) (*gethTypes.Header, error) {
if bytes.Equal(hash.Bytes(), common.BytesToHash([]byte{0}).Bytes()) {
return nil, fmt.Errorf("expected block hash to be nonzero %v", hash)
@@ -225,10 +229,6 @@ func TestService_Eth1Synced(t *testing.T) {
now := time.Now()
assert.NoError(t, testAcc.Backend.AdjustTime(now.Sub(time.Unix(int64(currTime), 0))))
testAcc.Backend.Commit()

synced, err := web3Service.isEth1NodeSynced()
require.NoError(t, err)
assert.Equal(t, true, synced, "Expected eth1 nodes to be synced")
}

func TestFollowBlock_OK(t *testing.T) {
@@ -480,8 +480,8 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {

s.chainStartData.Chainstarted = true
require.NoError(t, s.initDepositCaches(context.Background(), ctrs))

deps := s.cfg.depositCache.NonFinalizedDeposits(context.Background(), nil)
fDeposits := s.cfg.depositCache.FinalizedDeposits(ctx)
deps := s.cfg.depositCache.NonFinalizedDeposits(context.Background(), fDeposits.MerkleTrieIndex, nil)
assert.Equal(t, 0, len(deps))
}
@@ -144,6 +144,8 @@ type RPCClient struct {
Backend *backends.SimulatedBackend
}

func (_ *RPCClient) Close() {}

func (*RPCClient) CallContext(_ context.Context, _ interface{}, _ string, _ ...interface{}) error {
return nil
}
@@ -424,11 +424,11 @@ type beaconBlockBodyBellatrixJson struct {

type executionPayloadJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
CoinBase string `json:"coinbase" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
ReceiptRoot string `json:"receipt_root" hex:"true"`
ReceiptsRoot string `json:"receipts_root" hex:"true"`
LogsBloom string `json:"logs_bloom" hex:"true"`
Random string `json:"random" hex:"true"`
PrevRandao string `json:"prev_randao" hex:"true"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
@@ -441,11 +441,11 @@ type executionPayloadJson struct {

type executionPayloadHeaderJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
CoinBase string `json:"coinbase" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
ReceiptRoot string `json:"receipt_root" hex:"true"`
ReceiptsRoot string `json:"receipts_root" hex:"true"`
LogsBloom string `json:"logs_bloom" hex:"true"`
Random string `json:"random" hex:"true"`
PrevRandao string `json:"prev_randao" hex:"true"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
@@ -1024,19 +1024,20 @@ func TestProduceBlockV2(t *testing.T) {
TotalDifficulty: "0x1",
},
},
TimeFetcher: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
MockEth1Votes: true,
AttPool: attestations.NewPool(),
SlashingsPool: slashings.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateGen: stategen.New(db),
SyncCommitteePool: synccommittee.NewStore(),
TimeFetcher: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
MockEth1Votes: true,
AttPool: attestations.NewPool(),
SlashingsPool: slashings.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateGen: stategen.New(db),
SyncCommitteePool: synccommittee.NewStore(),
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}

proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
@@ -693,7 +693,7 @@ func (bs *Server) GetValidatorPerformance(
return nil, err
}
validatorSummary = vp
case version.Altair:
case version.Altair, version.Bellatrix:
vp, bp, err := altair.InitializePrecomputeValidators(ctx, headState)
if err != nil {
return nil, err
@@ -871,7 +871,7 @@ func (bs *Server) GetIndividualVotes(
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not pre compute attestations: %v", err)
}
case version.Altair:
case version.Altair, version.Bellatrix:
v, bal, err = altair.InitializePrecomputeValidators(ctx, st)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not set up altair pre compute instance: %v", err)
@@ -2100,6 +2100,76 @@ func TestGetValidatorPerformanceAltair_OK(t *testing.T) {
}
}

func TestGetValidatorPerformanceBellatrix_OK(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())

ctx := context.Background()
epoch := types.Epoch(1)
headState, _ := util.DeterministicGenesisStateBellatrix(t, 32)
require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))

defaultBal := params.BeaconConfig().MaxEffectiveBalance
extraBal := params.BeaconConfig().MaxEffectiveBalance + params.BeaconConfig().GweiPerEth
balances := []uint64{defaultBal, extraBal, extraBal + params.BeaconConfig().GweiPerEth}
require.NoError(t, headState.SetBalances(balances))
publicKey1 := bytesutil.ToBytes48([]byte{1})
publicKey2 := bytesutil.ToBytes48([]byte{2})
publicKey3 := bytesutil.ToBytes48([]byte{3})
validators := []*ethpb.Validator{
{
PublicKey: publicKey1[:],
ActivationEpoch: 5,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{
PublicKey: publicKey2[:],
EffectiveBalance: defaultBal,
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
{
PublicKey: publicKey3[:],
EffectiveBalance: defaultBal,
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
require.NoError(t, headState.SetValidators(validators))
require.NoError(t, headState.SetInactivityScores([]uint64{0, 0, 0}))
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ethpb.ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
InclusionSlots: nil,
InclusionDistances: nil,
CorrectlyVotedSource: []bool{false, false},
CorrectlyVotedTarget: []bool{false, false},
CorrectlyVotedHead: []bool{false, false},
BalancesBeforeEpochTransition: []uint64{101, 102},
BalancesAfterEpochTransition: []uint64{0, 0},
MissingValidators: [][]byte{publicKey1[:]},
InactivityScores: []uint64{0, 0},
}

res, err := bs.GetValidatorPerformance(ctx, &ethpb.ValidatorPerformanceRequest{
PublicKeys: [][]byte{publicKey1[:], publicKey3[:], publicKey2[:]},
})
require.NoError(t, err)
if !proto.Equal(want, res) {
t.Errorf("Wanted %v\nReceived %v", want, res)
}
}

func BenchmarkListValidatorBalances(b *testing.B) {
b.StopTimer()
beaconDB := dbTest.SetupDB(b)
@@ -2475,6 +2545,98 @@ func TestServer_GetIndividualVotes_AltairEndOfEpoch(t *testing.T) {
assert.DeepEqual(t, wanted, res, "Unexpected response")
}

func TestServer_GetIndividualVotes_BellatrixEndOfEpoch(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()

validators := uint64(32)
beaconState, _ := util.DeterministicGenesisStateBellatrix(t, validators)
startSlot, err := slots.EpochStart(1)
assert.NoError(t, err)
require.NoError(t, beaconState.SetSlot(startSlot))

b := util.NewBeaconBlock()
b.Block.Slot = startSlot
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
gen := stategen.New(beaconDB)
require.NoError(t, gen.SaveState(ctx, gRoot, beaconState))
require.NoError(t, beaconDB.SaveState(ctx, beaconState, gRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
// Save the state at the end of the epoch:
endSlot, err := slots.EpochEnd(1)
assert.NoError(t, err)

beaconState, _ = util.DeterministicGenesisStateBellatrix(t, validators)
require.NoError(t, beaconState.SetSlot(endSlot))

pb, err := beaconState.CurrentEpochParticipation()
require.NoError(t, err)
for i := range pb {
pb[i] = 0xff
}
require.NoError(t, beaconState.SetCurrentParticipationBits(pb))
require.NoError(t, beaconState.SetPreviousParticipationBits(pb))

b.Block.Slot = endSlot
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err = b.Block.HashTreeRoot()
require.NoError(t, err)

require.NoError(t, gen.SaveState(ctx, gRoot, beaconState))
require.NoError(t, beaconDB.SaveState(ctx, beaconState, gRoot))
bs := &Server{
StateGen: gen,
GenesisTimeFetcher: &mock.ChainService{},
}
addDefaultReplayerBuilder(bs, beaconDB)

res, err := bs.GetIndividualVotes(ctx, &ethpb.IndividualVotesRequest{
Indices: []types.ValidatorIndex{0, 1},
Epoch: 1,
})
require.NoError(t, err)
wanted := &ethpb.IndividualVotesRespond{
IndividualVotes: []*ethpb.IndividualVotesRespond_IndividualVote{
{
ValidatorIndex: 0,
PublicKey: beaconState.Validators()[0].PublicKey,
IsActiveInCurrentEpoch: true,
IsActiveInPreviousEpoch: true,
IsCurrentEpochTargetAttester: true,
IsCurrentEpochAttester: true,
IsPreviousEpochAttester: true,
IsPreviousEpochHeadAttester: true,
IsPreviousEpochTargetAttester: true,
CurrentEpochEffectiveBalanceGwei: params.BeaconConfig().MaxEffectiveBalance,
Epoch: 1,
},
{
ValidatorIndex: 1,
PublicKey: beaconState.Validators()[1].PublicKey,
IsActiveInCurrentEpoch: true,
IsActiveInPreviousEpoch: true,
IsCurrentEpochTargetAttester: true,
IsCurrentEpochAttester: true,
IsPreviousEpochAttester: true,
IsPreviousEpochHeadAttester: true,
IsPreviousEpochTargetAttester: true,
CurrentEpochEffectiveBalanceGwei: params.BeaconConfig().MaxEffectiveBalance,
Epoch: 1,
},
},
}
assert.DeepEqual(t, wanted, res, "Unexpected response")
}

func Test_validatorStatus(t *testing.T) {
tests := []struct {
name string
@@ -79,6 +79,8 @@ go_library(
"@com_github_ferranbt_fastssz//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
@@ -138,7 +138,7 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
}
// Query the next epoch assignments for committee subnet subscriptions.
nextCommitteeAssignments, _, err := helpers.CommitteeAssignments(ctx, s, req.Epoch+1)
nextCommitteeAssignments, nextProposerIndexToSlots, err := helpers.CommitteeAssignments(ctx, s, req.Epoch+1)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not compute next committee assignments: %v", err)
}
@@ -180,6 +180,16 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
nextAssignment.AttesterSlot = ca.AttesterSlot
nextAssignment.CommitteeIndex = ca.CommitteeIndex
}
// Cache proposer assignment for the current epoch.
for _, slot := range proposerIndexToSlots[idx] {
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, idx, [8]byte{} /* payloadID */)
}
// Cache proposer assignment for the next epoch.
for _, slot := range nextProposerIndexToSlots[idx] {
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, idx, [8]byte{} /* payloadID */)
}
// Prune payload ID cache for any slots before request slot.
vs.ProposerSlotIndexCache.PrunePayloadIDs(epochStartSlot)
} else {
// If the validator isn't in the beacon state, try finding their deposit to determine their status.
vStatus, _ := vs.validatorStatus(ctx, s, pubKey)
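The cache seeded above maps a slot to its expected proposer and, once the engine has prepared one, a payload ID. A minimal sketch of that slot-keyed structure as a hedged illustration (a simplified stand-in, not the cache package's actual implementation):

package main

import (
	"fmt"
	"sync"
)

type slotEntry struct {
	proposerIndex uint64
	payloadID     [8]byte
}

// payloadIDCache is a simplified stand-in for the slot-keyed
// proposer/payload-ID cache seeded by the duties endpoint.
type payloadIDCache struct {
	mu      sync.RWMutex
	entries map[uint64]slotEntry
}

func newPayloadIDCache() *payloadIDCache {
	return &payloadIDCache{entries: make(map[uint64]slotEntry)}
}

// set records the expected proposer for a slot; the payload ID starts
// zeroed and is filled in later when the engine prepares a payload.
func (c *payloadIDCache) set(slot, proposer uint64, pid [8]byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[slot] = slotEntry{proposerIndex: proposer, payloadID: pid}
}

// get returns the cached proposer and payload ID for a slot.
func (c *payloadIDCache) get(slot uint64) (uint64, [8]byte, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	e, ok := c.entries[slot]
	return e.proposerIndex, e.payloadID, ok
}

// prune drops entries for slots before the given slot.
func (c *payloadIDCache) prune(before uint64) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for s := range c.entries {
		if s < before {
			delete(c.entries, s)
		}
	}
}

func main() {
	c := newPayloadIDCache()
	c.set(33, 7, [8]byte{}) // duties: proposer known, payload ID not yet prepared
	c.prune(32)
	idx, pid, ok := c.get(33)
	fmt.Println(idx, pid, ok) // 7 [0 0 0 0 0 0 0 0] true
}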
@@ -60,9 +60,10 @@ func TestGetDuties_OK(t *testing.T) {
State: bs, Root: genesisRoot[:], Genesis: time.Now(),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}

// Test the first validator in registry.
@@ -144,10 +145,11 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) {
State: bs, Root: genesisRoot[:], Genesis: time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}

// Test the first validator in registry.
@@ -181,12 +183,12 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) {
res, err = vs.GetDuties(context.Background(), req)
require.NoError(t, err, "Could not call epoch committee assignment")
for i := 0; i < len(res.CurrentEpochDuties); i++ {
assert.Equal(t, types.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex)
require.Equal(t, types.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex)
}
for i := 0; i < len(res.CurrentEpochDuties); i++ {
assert.Equal(t, true, res.CurrentEpochDuties[i].IsSyncCommittee)
require.Equal(t, true, res.CurrentEpochDuties[i].IsSyncCommittee)
// Current epoch and next epoch duties should be equal before the sync period epoch boundary.
assert.Equal(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
require.Equal(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
}

// Current epoch and next epoch duties should not be equal at the sync period epoch boundary.
@@ -197,7 +199,7 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) {
res, err = vs.GetDuties(context.Background(), req)
require.NoError(t, err, "Could not call epoch committee assignment")
for i := 0; i < len(res.CurrentEpochDuties); i++ {
assert.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
require.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
}
}

@@ -249,10 +251,11 @@ func TestGetBellatrixDuties_SyncCommitteeOK(t *testing.T) {
State: bs, Root: genesisRoot[:], Genesis: time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}

// Test the first validator in registry.
@@ -302,7 +305,7 @@ func TestGetBellatrixDuties_SyncCommitteeOK(t *testing.T) {
res, err = vs.GetDuties(context.Background(), req)
require.NoError(t, err, "Could not call epoch committee assignment")
for i := 0; i < len(res.CurrentEpochDuties); i++ {
assert.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
require.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
}
}

@@ -340,11 +343,12 @@ func TestGetAltairDuties_UnknownPubkey(t *testing.T) {
require.NoError(t, err)

vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
DepositFetcher: depositCache,
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
DepositFetcher: depositCache,
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}

unknownPubkey := bytesutil.PadTo([]byte{'u'}, 48)
@@ -399,9 +403,10 @@ func TestGetDuties_CurrentEpoch_ShouldNotFail(t *testing.T) {
State: bState, Root: genesisRoot[:], Genesis: time.Now(),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}

// Test the first validator in registry.
@@ -437,9 +442,10 @@ func TestGetDuties_MultipleKeys_OK(t *testing.T) {
State: bs, Root: genesisRoot[:], Genesis: time.Now(),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}

pubkey0 := deposits[0].Data.PublicKey
@@ -503,11 +509,12 @@ func TestStreamDuties_OK(t *testing.T) {
Genesis: time.Now(),
}
vs := &Server{
Ctx: ctx,
HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: c,
StateNotifier: &mockChain.MockStateNotifier{},
Ctx: ctx,
HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: c,
StateNotifier: &mockChain.MockStateNotifier{},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}

// Test the first validator in registry.
@@ -560,11 +567,12 @@ func TestStreamDuties_OK_ChainReorg(t *testing.T) {
Genesis: time.Now(),
}
vs := &Server{
Ctx: ctx,
HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: c,
StateNotifier: &mockChain.MockStateNotifier{},
Ctx: ctx,
HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: c,
StateNotifier: &mockChain.MockStateNotifier{},
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}

// Test the first validator in registry.
@@ -37,17 +37,24 @@ func (vs *Server) StreamBlocksAltair(req *ethpb.StreamBlocksRequest, stream ethp
case version.Phase0:
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlock)
if !ok {
log.Warn("Mismatch between version and block type, was expecting *ethpb.SignedBeaconBlock")
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlock")
continue
}
b.Block = &ethpb.StreamBlocksResponse_Phase0Block{Phase0Block: phBlk}
case version.Altair:
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockAltair)
if !ok {
log.Warn("Mismatch between version and block type, was expecting *v2.SignedBeaconBlockAltair")
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockAltair")
continue
}
b.Block = &ethpb.StreamBlocksResponse_AltairBlock{AltairBlock: phBlk}
case version.Bellatrix:
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockBellatrix)
if !ok {
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockBellatrix")
continue
}
b.Block = &ethpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: phBlk}
}

if err := stream.Send(b); err != nil {
@@ -161,7 +161,7 @@ func (vs *Server) depositTrie(ctx context.Context, canonicalEth1Data *ethpb.Eth1

finalizedDeposits := vs.DepositFetcher.FinalizedDeposits(ctx)
depositTrie = finalizedDeposits.Deposits
upToEth1DataDeposits := vs.DepositFetcher.NonFinalizedDeposits(ctx, canonicalEth1DataHeight)
upToEth1DataDeposits := vs.DepositFetcher.NonFinalizedDeposits(ctx, finalizedDeposits.MerkleTrieIndex, canonicalEth1DataHeight)
insertIndex := finalizedDeposits.MerkleTrieIndex + 1

for _, dep := range upToEth1DataDeposits {
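The new NonFinalizedDeposits signature takes the finalized Merkle trie index explicitly, so the caller only walks deposits past what the finalized trie already covers. A toy sketch of that slicing, with hypothetical types standing in for the depositcache API:

package main

import "fmt"

type deposit struct{ index int64 }

// nonFinalized returns deposits whose trie index is strictly greater
// than the last finalized index, mirroring how the caller now passes
// finalizedDeposits.MerkleTrieIndex into NonFinalizedDeposits.
func nonFinalized(all []deposit, finalizedIndex int64) []deposit {
	var out []deposit
	for _, d := range all {
		if d.index > finalizedIndex {
			out = append(out, d)
		}
	}
	return out
}

func main() {
	all := []deposit{{0}, {1}, {2}, {3}}
	fmt.Println(nonFinalized(all, 1)) // [{2} {3}]
}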
@@ -9,6 +9,8 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -24,9 +26,31 @@ import (
"github.com/sirupsen/logrus"
)

var (
// payloadIDCacheMiss tracks the number of payload ID requests that aren't present in the cache.
payloadIDCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "payload_id_cache_miss",
Help: "The number of payload id get requests that aren't present in the cache.",
})
// payloadIDCacheHit tracks the number of payload ID requests that are present in the cache.
payloadIDCacheHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "payload_id_cache_hit",
Help: "The number of payload id get requests that are present in the cache.",
})
)

// This returns the execution payload of a given slot. The function has full awareness of pre- and post-merge.
// The payload is computed given the respected time of merge.
func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx types.ValidatorIndex) (*enginev1.ExecutionPayload, error) {
proposerID, payloadId, ok := vs.ProposerSlotIndexCache.GetProposerPayloadIDs(slot)
if ok && proposerID == vIdx && payloadId != [8]byte{} { // Payload ID is a cache hit. Use it to fetch the payload from the engine.
var pid [8]byte
copy(pid[:], payloadId[:])
payloadIDCacheHit.Inc()
return vs.ExecutionEngineCaller.GetPayload(ctx, pid)
}
payloadIDCacheMiss.Inc()

st, err := vs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, err
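The hit path above only fires when the cached proposer index matches the requesting validator and the payload ID has actually been filled in. A hedged sketch of just that gating predicate (plain values, no engine call; the function name is illustrative):

package main

import "fmt"

// shouldUseCachedPayload mirrors the guard in getExecutionPayload: a
// cache entry counts as a hit only if it belongs to this proposer and
// the payload ID is non-zero, i.e. the engine has prepared a payload.
func shouldUseCachedPayload(ok bool, cachedProposer, requester uint64, pid [8]byte) bool {
	return ok && cachedProposer == requester && pid != [8]byte{}
}

func main() {
	fmt.Println(shouldUseCachedPayload(true, 7, 7, [8]byte{1}))  // true: hit
	fmt.Println(shouldUseCachedPayload(true, 7, 7, [8]byte{}))   // false: ID not prepared yet
	fmt.Println(shouldUseCachedPayload(true, 7, 9, [8]byte{1}))  // false: different proposer
	fmt.Println(shouldUseCachedPayload(false, 0, 7, [8]byte{1})) // false: no entry
}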
@@ -9,6 +9,7 @@ import (
"github.com/holiman/uint256"
types "github.com/prysmaticlabs/eth2-types"
chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
powtesting "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -106,6 +107,11 @@ func TestServer_getExecutionPayload(t *testing.T) {
payloadID: &pb.PayloadIDBytes{0x1},
validatorIndx: 1,
},
{
name: "transition completed, happy case, payload ID cached",
st: transitionSt,
validatorIndx: 100,
},
{
name: "transition completed, could not prepare payload",
st: transitionSt,
@@ -132,10 +138,12 @@ func TestServer_getExecutionPayload(t *testing.T) {
params.OverrideBeaconConfig(cfg)

vs := &Server{
ExecutionEngineCaller: &powtesting.EngineClient{PayloadIDBytes: tt.payloadID, ErrForkchoiceUpdated: tt.forkchoiceErr},
HeadFetcher: &chainMock.ChainService{State: tt.st},
BeaconDB: beaconDB,
ExecutionEngineCaller: &powtesting.EngineClient{PayloadIDBytes: tt.payloadID, ErrForkchoiceUpdated: tt.forkchoiceErr},
HeadFetcher: &chainMock.ChainService{State: tt.st},
BeaconDB: beaconDB,
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(tt.st.Slot(), 100, [8]byte{100})
_, err := vs.getExecutionPayload(context.Background(), tt.st.Slot(), tt.validatorIndx)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
@@ -10,6 +10,7 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -2344,7 +2345,8 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
PayloadIDBytes: &enginev1.PayloadIDBytes{1},
ExecutionPayload: payload,
},
BeaconDB: db,
BeaconDB: db,
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}

randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
@@ -41,6 +41,7 @@ import (
type Server struct {
Ctx context.Context
AttestationCache *cache.AttestationCache
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
HeadFetcher blockchain.HeadFetcher
ForkFetcher blockchain.ForkFetcher
FinalizationFetcher blockchain.FinalizationFetcher

@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
@@ -25,7 +26,7 @@ var errPubkeyDoesNotExist = errors.New("pubkey does not exist")
var errOptimisticMode = errors.New("the node is currently optimistic and cannot serve validators")
var nonExistentIndex = types.ValidatorIndex(^uint64(0))

const numStatesToCheck = 2
var errParticipation = status.Errorf(codes.Internal, "Failed to obtain epoch participation")

// ValidatorStatus returns the validator status of the current epoch.
// The status response can be one of the following:
@@ -110,44 +111,67 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger
|
||||
return nil, status.Error(codes.Internal, "Could not get head state")
|
||||
}
|
||||
|
||||
currEpoch := slots.ToEpoch(headState.Slot())
|
||||
isRecent, resp := checkValidatorsAreRecent(currEpoch, req)
|
||||
// Return early if we are in phase0.
|
||||
if headState.Version() == version.Phase0 {
|
||||
log.Info("Skipping goppelganger check for Phase 0")
|
||||
|
||||
resp := ðpb.DoppelGangerResponse{
|
||||
Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{},
|
||||
}
|
||||
for _, v := range req.ValidatorRequests {
|
||||
resp.Responses = append(resp.Responses,
|
||||
ðpb.DoppelGangerResponse_ValidatorResponse{
|
||||
PublicKey: v.PublicKey,
|
||||
DuplicateExists: false,
|
||||
})
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
headSlot := headState.Slot()
|
||||
currEpoch := slots.ToEpoch(headSlot)
|
||||
|
||||
// If all provided keys are recent we skip this check
|
||||
// as we are unable to effectively determine if a doppelganger
|
||||
// is active.
|
||||
isRecent, resp := checkValidatorsAreRecent(currEpoch, req)
|
||||
if isRecent {
|
||||
return resp, nil
|
||||
}
|
||||
// We walk back from the current head state to the state at the beginning of the previous 2 epochs.
|
||||
// Where S_i , i := 0,1,2. i = 0 would signify the current head state in this epoch.
|
||||
previousEpoch, err := currEpoch.SafeSub(1)
|
||||
if err != nil {
|
||||
previousEpoch = currEpoch
|
||||
}
|
||||
olderEpoch, err := previousEpoch.SafeSub(1)
|
||||
if err != nil {
|
||||
olderEpoch = previousEpoch
|
||||
}
|
||||
prevState, err := vs.retrieveAfterEpochTransition(ctx, previousEpoch)
|
||||
|
||||
// We request a state 32 slots ago. We are guaranteed to have
|
||||
// currentSlot > 32 since we assume that we are in Altair's fork.
|
||||
prevState, err := vs.ReplayerBuilder.ReplayerForSlot(headSlot - params.BeaconConfig().SlotsPerEpoch).ReplayBlocks(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, "Could not get previous state")
|
||||
}
|
||||
olderState, err := vs.retrieveAfterEpochTransition(ctx, olderEpoch)
|
||||
|
||||
headCurrentParticipation, err := headState.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, "Could not get older state")
|
||||
return nil, errParticipation
|
||||
}
|
||||
headPreviousParticipation, err := headState.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, errParticipation
|
||||
}
|
||||
prevCurrentParticipation, err := prevState.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, errParticipation
|
||||
}
|
||||
prevPreviousParticipation, err := prevState.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, errParticipation
|
||||
}
|
||||
|
||||
	resp = &ethpb.DoppelGangerResponse{
		Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{},
	}
	for _, v := range req.ValidatorRequests {
		// If the validator's last recorded epoch was
		// less than or equal to `numStatesToCheck` epochs ago, this method will not
		// be able to catch duplicates. This is due to how attestation
		// inclusion works, where an attestation for the current epoch
		// is able to be included in the current or next epoch. Depending
		// on which epoch it is included in, the balance change will be
		// reflected in the following epoch.
		if v.Epoch+numStatesToCheck >= currEpoch {
		// If the validator's last recorded epoch was less than 1 epoch
		// ago, the current doppelganger check will not be able to
		// identify doppelgangers since an attestation can take up to
		// 31 slots to be included.
		if v.Epoch+1 >= currEpoch {
			resp.Responses = append(resp.Responses,
				&ethpb.DoppelGangerResponse_ValidatorResponse{
					PublicKey: v.PublicKey,
@@ -155,37 +179,15 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger
				})
			continue
		}
		valIndex, ok := olderState.ValidatorIndexByPubkey(bytesutil.ToBytes48(v.PublicKey))
		valIndex, ok := prevState.ValidatorIndexByPubkey(bytesutil.ToBytes48(v.PublicKey))
		if !ok {
			// Ignore if validator pubkey doesn't exist.
			continue
		}
		baseBal, err := olderState.BalanceAtIndex(valIndex)
		if err != nil {
			return nil, status.Error(codes.Internal, "Could not get validator's balance")
		}
		nextBal, err := prevState.BalanceAtIndex(valIndex)
		if err != nil {
			return nil, status.Error(codes.Internal, "Could not get validator's balance")
		}
		// If the next epoch's balance is higher, we mark it as an existing
		// duplicate.
		if nextBal > baseBal {
			log.Infof("current %d with last epoch %d and difference in bal %d gwei", currEpoch, v.Epoch, nextBal-baseBal)
			resp.Responses = append(resp.Responses,
				&ethpb.DoppelGangerResponse_ValidatorResponse{
					PublicKey:       v.PublicKey,
					DuplicateExists: true,
				})
			continue
		}
		currBal, err := headState.BalanceAtIndex(valIndex)
		if err != nil {
			return nil, status.Error(codes.Internal, "Could not get validator's balance")
		}
		// If the current epoch's balance is higher, we mark it as an existing
		// duplicate.
		if currBal > nextBal {

		if (headCurrentParticipation[valIndex] != 0) || (headPreviousParticipation[valIndex] != 0) ||
			(prevCurrentParticipation[valIndex] != 0) || (prevPreviousParticipation[valIndex] != 0) {
			log.WithField("ValidatorIndex", valIndex).Infof("Participation flag found")
			resp.Responses = append(resp.Responses,
				&ethpb.DoppelGangerResponse_ValidatorResponse{
					PublicKey: v.PublicKey,
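A self-contained sketch of the participation-flag heuristic that replaces the balance comparison above: any non-zero participation flag for the validator index in the head or previous states is treated as evidence of a live duplicate. Toy byte slices stand in for Prysm's participation registries:

package main

import "fmt"

// duplicateSuspected reports whether any of the given participation
// registries has a non-zero flag set for the validator index.
func duplicateSuspected(valIndex int, participations ...[]byte) bool {
	for _, p := range participations {
		if valIndex < len(p) && p[valIndex] != 0 {
			return true
		}
	}
	return false
}

func main() {
	headCurr := make([]byte, 64)
	headPrev := make([]byte, 64)
	prevCurr := make([]byte, 64)
	prevPrev := make([]byte, 64)
	headCurr[2] = 1 // validator 2 attested somewhere while our keys were offline
	fmt.Println(duplicateSuspected(2, headCurr, headPrev, prevCurr, prevPrev)) // true
	fmt.Println(duplicateSuspected(3, headCurr, headPrev, prevCurr, prevPrev)) // false
}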
@@ -374,8 +376,8 @@ func checkValidatorsAreRecent(headEpoch types.Epoch, req *ethpb.DoppelGangerRequ
		// Due to how balances are reflected for individual
		// validators, we can only effectively determine if a
		// validator voted or not if we are able to look
		// back more than `numStatesToCheck` epochs into the past.
		if v.Epoch+numStatesToCheck < headEpoch {
		// back more than 1 epoch into the past.
		if v.Epoch+1 < headEpoch {
			validatorsAreRecent = false
			// Zero out response if we encounter non-recent validators to
			// guard against potential misuse.
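A worked example of the tightened recency guard, assuming a head epoch of 10: a validator last seen at epoch 9 satisfies 9+1 >= 10 and is skipped as too recent, while one last seen at epoch 8 satisfies 8+1 < 10 and can be checked. A toy version:

package main

import "fmt"

// isRecent mirrors the guard above: a validator is too recent to check
// unless its last recorded epoch is more than one epoch behind the head.
func isRecent(valEpoch, headEpoch uint64) bool {
	return valEpoch+1 >= headEpoch
}

func main() {
	fmt.Println(isRecent(9, 10)) // true: its attestations may still be pending inclusion
	fmt.Println(isRecent(8, 10)) // false: old enough for the check to be meaningful
}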
@@ -8,7 +8,6 @@ import (

	"github.com/d4l3k/messagediff"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/go-bitfield"
	mockChain "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -17,7 +16,6 @@ import (
	mockstategen "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen/mock"
	v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
	mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
	fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/container/trie"
	"github.com/prysmaticlabs/prysm/crypto/bls"
@@ -961,27 +959,15 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
			name:    "normal doppelganger request",
			wantErr: false,
			svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
				hs, ps, os, keys, builder := createStateSetup(t, 4)
				// Previous Epoch State
				for i := 0; i < 3; i++ {
					bal, err := ps.BalanceAtIndex(types.ValidatorIndex(i))
					assert.NoError(t, err)
					// Add 1e9 gwei, to mock an inactivity leak
					assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+1000000000))
				}
				// Older Epoch State
				for i := 0; i < 3; i++ {
					bal, err := os.BalanceAtIndex(types.ValidatorIndex(i))
					assert.NoError(t, err)
					// Add 2e9 gwei, to mock an inactivity leak
					assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+2000000000))
				}
				hs, ps, keys := createStateSetupAltair(t, 3)
				rb := mockstategen.NewMockReplayerBuilder()
				rb.SetMockStateForSlot(ps, 20)
				vs := &Server{
					HeadFetcher: &mockChain.ChainService{
						State: hs,
					},
					SyncChecker:     &mockSync.Sync{IsSyncing: false},
					ReplayerBuilder: builder,
					ReplayerBuilder: rb,
				}
				request := &ethpb.DoppelGangerRequest{
					ValidatorRequests: make([]*ethpb.DoppelGangerRequest_ValidatorRequest, 0),
@@ -1005,37 +991,19 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
			name:    "doppelganger exists current epoch",
			wantErr: false,
			svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
				hs, ps, os, keys, builder := createStateSetup(t, 4)
				// Previous Epoch State
				for i := 0; i < 2; i++ {
					bal, err := ps.BalanceAtIndex(types.ValidatorIndex(i))
					assert.NoError(t, err)
					// Add 1e9 gwei, to mock an inactivity leak
					assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+1000000000))
				}
				bal, err := ps.BalanceAtIndex(types.ValidatorIndex(2))
				assert.NoError(t, err)
				// Sub 1e9 gwei, to mock an active validator.
				assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(2), bal-1000000000))

				// Older Epoch State
				for i := 0; i < 2; i++ {
					bal, err := os.BalanceAtIndex(types.ValidatorIndex(i))
					assert.NoError(t, err)
					// Add 2e9 gwei, to mock an inactivity leak
					assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+2000000000))
				}
				bal, err = os.BalanceAtIndex(types.ValidatorIndex(2))
				assert.NoError(t, err)
				// Sub 1e9 gwei, to mock an active validator.
				assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(2), bal-1000000000))
				hs, ps, keys := createStateSetupAltair(t, 3)
				rb := mockstategen.NewMockReplayerBuilder()
				rb.SetMockStateForSlot(ps, 20)
				currentIndices := make([]byte, 64)
				currentIndices[2] = 1
				require.NoError(t, hs.SetCurrentParticipationBits(currentIndices))

				vs := &Server{
					HeadFetcher: &mockChain.ChainService{
						State: hs,
					},
					SyncChecker:     &mockSync.Sync{IsSyncing: false},
					ReplayerBuilder: builder,
					ReplayerBuilder: rb,
				}
				request := &ethpb.DoppelGangerRequest{
					ValidatorRequests: make([]*ethpb.DoppelGangerRequest_ValidatorRequest, 0),
@@ -1070,37 +1038,19 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
			name:    "doppelganger exists previous epoch",
			wantErr: false,
			svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
				hs, ps, os, keys, builder := createStateSetup(t, 4)
				// Previous Epoch State
				for i := 0; i < 2; i++ {
					bal, err := ps.BalanceAtIndex(types.ValidatorIndex(i))
					assert.NoError(t, err)
					// Add 1e9 gwei, to mock an inactivity leak
					assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+1000000000))
				}
				bal, err := ps.BalanceAtIndex(types.ValidatorIndex(2))
				assert.NoError(t, err)
				// Sub 1e9 gwei, to mock an active validator.
				assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(2), bal-1000000000))

				// Older Epoch State
				for i := 0; i < 2; i++ {
					bal, err := os.BalanceAtIndex(types.ValidatorIndex(i))
					assert.NoError(t, err)
					// Add 2e9 gwei, to mock an inactivity leak
					assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal+2000000000))
				}
				bal, err = os.BalanceAtIndex(types.ValidatorIndex(2))
				assert.NoError(t, err)
				// Sub 2e9 gwei, to mock an active validator.
				assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(2), bal-2000000000))
				hs, ps, keys := createStateSetupAltair(t, 3)
				prevIndices := make([]byte, 64)
				prevIndices[2] = 1
				require.NoError(t, ps.SetPreviousParticipationBits(prevIndices))
				rb := mockstategen.NewMockReplayerBuilder()
				rb.SetMockStateForSlot(ps, 20)

				vs := &Server{
					HeadFetcher: &mockChain.ChainService{
						State: hs,
					},
					SyncChecker:     &mockSync.Sync{IsSyncing: false},
					ReplayerBuilder: builder,
					ReplayerBuilder: rb,
				}
				request := &ethpb.DoppelGangerRequest{
					ValidatorRequests: make([]*ethpb.DoppelGangerRequest_ValidatorRequest, 0),
@@ -1135,29 +1085,26 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
			name:    "multiple doppelganger exists",
			wantErr: false,
			svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
				hs, ps, os, keys, builder := createStateSetup(t, 4)
				// Previous Epoch State
				for i := 10; i < 15; i++ {
					bal, err := ps.BalanceAtIndex(types.ValidatorIndex(i))
					assert.NoError(t, err)
					// Sub 1e9 gwei, to mock an active validator
					assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal-1000000000))
				}
				hs, ps, keys := createStateSetupAltair(t, 3)
				currentIndices := make([]byte, 64)
				currentIndices[10] = 1
				currentIndices[11] = 2
				require.NoError(t, hs.SetPreviousParticipationBits(currentIndices))
				rb := mockstategen.NewMockReplayerBuilder()
				rb.SetMockStateForSlot(ps, 20)

				// Older Epoch State
				for i := 10; i < 15; i++ {
					bal, err := os.BalanceAtIndex(types.ValidatorIndex(i))
					assert.NoError(t, err)
					// Sub 2e9 gwei, to mock an active validator
					assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal-2000000000))
				prevIndices := make([]byte, 64)
				for i := 12; i < 20; i++ {
					prevIndices[i] = 1
				}
				require.NoError(t, ps.SetCurrentParticipationBits(prevIndices))

				vs := &Server{
					HeadFetcher: &mockChain.ChainService{
						State: hs,
					},
					SyncChecker:     &mockSync.Sync{IsSyncing: false},
					ReplayerBuilder: builder,
					ReplayerBuilder: rb,
				}
				request := &ethpb.DoppelGangerRequest{
					ValidatorRequests: make([]*ethpb.DoppelGangerRequest_ValidatorRequest, 0),
@@ -1175,6 +1122,17 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
						DuplicateExists: true,
					})
				}
				for i := 15; i < 20; i++ {
					request.ValidatorRequests = append(request.ValidatorRequests, &ethpb.DoppelGangerRequest_ValidatorRequest{
						PublicKey:  keys[i].PublicKey().Marshal(),
						Epoch:      3,
						SignedRoot: []byte{'A'},
					})
					response.Responses = append(response.Responses, &ethpb.DoppelGangerResponse_ValidatorResponse{
						PublicKey:       keys[i].PublicKey().Marshal(),
						DuplicateExists: false,
					})
				}

				return vs, request, response
			},
@@ -1183,14 +1141,16 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
			name:    "attesters are too recent",
			wantErr: false,
			svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
				hs, _, _, keys, _ := createStateSetup(t, 4)
				hs, ps, keys := createStateSetupAltair(t, 3)
				rb := mockstategen.NewMockReplayerBuilder()
				rb.SetMockStateForSlot(ps, 20)

				vs := &Server{
					HeadFetcher: &mockChain.ChainService{
						State: hs,
					},
					SyncChecker:     &mockSync.Sync{IsSyncing: false},
					ReplayerBuilder: nil,
					ReplayerBuilder: rb,
				}
				request := &ethpb.DoppelGangerRequest{
					ValidatorRequests: make([]*ethpb.DoppelGangerRequest_ValidatorRequest, 0),
@@ -1208,6 +1168,39 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
					})
				}

				return vs, request, response
			},
		},
		{
			name:    "exit early for Phase 0",
			wantErr: false,
			svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) {
				hs, _, keys := createStateSetupPhase0(t, 3)

				vs := &Server{
					HeadFetcher: &mockChain.ChainService{
						State: hs,
					},
					SyncChecker: &mockSync.Sync{IsSyncing: false},
				}
				request := &ethpb.DoppelGangerRequest{
					ValidatorRequests: []*ethpb.DoppelGangerRequest_ValidatorRequest{
						{
							PublicKey:  keys[0].PublicKey().Marshal(),
							Epoch:      1,
							SignedRoot: []byte{'A'},
						},
					},
				}
				response := &ethpb.DoppelGangerResponse{
					Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{
						{
							PublicKey:       keys[0].PublicKey().Marshal(),
							DuplicateExists: false,
						},
					},
				}

				return vs, request, response
			},
		},
@@ -1228,104 +1221,36 @@ func TestServer_CheckDoppelGanger(t *testing.T) {
		}
	}
func createStateSetup(t *testing.T, head types.Epoch) (state.BeaconState,
	state.BeaconState, state.BeaconState, []bls.SecretKey, *mockstategen.MockReplayerBuilder) {
	rb := &mockstategen.MockReplayerBuilder{}
func createStateSetupPhase0(t *testing.T, head types.Epoch) (state.BeaconState,
	state.BeaconState, []bls.SecretKey) {
	gs, keys := util.DeterministicGenesisState(t, 64)
	hs := gs.Copy()

	// Head State
	headEpoch := head
	headSlot := types.Slot(headEpoch) * params.BeaconConfig().SlotsPerEpoch
	headSlot := types.Slot(head)*params.BeaconConfig().SlotsPerEpoch + params.BeaconConfig().SlotsPerEpoch/2
	assert.NoError(t, hs.SetSlot(headSlot))
	assignments, _, err := helpers.CommitteeAssignments(context.Background(), hs, headEpoch)
	assert.NoError(t, err)
	for _, ctr := range assignments {
		pendingAtt := &ethpb.PendingAttestation{
			AggregationBits: bitfield.NewBitlist64(uint64(len(ctr.Committee))).ToBitlist().Not(),
			Data: &ethpb.AttestationData{
				Slot:            ctr.AttesterSlot,
				CommitteeIndex:  ctr.CommitteeIndex,
				BeaconBlockRoot: make([]byte, fieldparams.RootLength),
				Source: &ethpb.Checkpoint{
					Epoch: 0,
					Root:  make([]byte, fieldparams.RootLength),
				},
				Target: &ethpb.Checkpoint{
					Epoch: 1,
					Root:  make([]byte, fieldparams.RootLength),
				},
			},
			InclusionDelay: 1,
			ProposerIndex:  10,
		}
		assert.NoError(t, hs.AppendCurrentEpochAttestations(pendingAtt))
	}
	rb.SetMockState(hs)

	// Previous Epoch State
	prevEpoch := headEpoch - 1
	prevSlot := headSlot - params.BeaconConfig().SlotsPerEpoch
	ps := gs.Copy()
	prevSlot, err := slots.EpochEnd(prevEpoch)
	assert.NoError(t, err)
	assert.NoError(t, ps.SetSlot(prevSlot))
	assignments, _, err = helpers.CommitteeAssignments(context.Background(), ps, prevEpoch)
	assert.NoError(t, err)
	for _, ctr := range assignments {
		pendingAtt := &ethpb.PendingAttestation{
			AggregationBits: bitfield.NewBitlist64(uint64(len(ctr.Committee))).ToBitlist().Not(),
			Data: &ethpb.AttestationData{
				Slot:            ctr.AttesterSlot,
				CommitteeIndex:  ctr.CommitteeIndex,
				BeaconBlockRoot: make([]byte, fieldparams.RootLength),
				Source: &ethpb.Checkpoint{
					Epoch: 0,
					Root:  make([]byte, fieldparams.RootLength),
				},
				Target: &ethpb.Checkpoint{
					Epoch: 1,
					Root:  make([]byte, fieldparams.RootLength),
				},
			},
			InclusionDelay: 1,
			ProposerIndex:  10,
		}
		assert.NoError(t, ps.AppendCurrentEpochAttestations(pendingAtt))
	}
	rb.SetMockState(ps)

	// Older Epoch State
	olderEpoch := prevEpoch - 1
	os := gs.Copy()
	olderSlot, err := slots.EpochEnd(olderEpoch)
	assert.NoError(t, err)
	assert.NoError(t, os.SetSlot(olderSlot))
	assignments, _, err = helpers.CommitteeAssignments(context.Background(), os, olderEpoch)
	assert.NoError(t, err)
	for _, ctr := range assignments {
		attSlot := ctr.AttesterSlot
		if attSlot == olderSlot {
			continue
		}
		pendingAtt := &ethpb.PendingAttestation{
			AggregationBits: bitfield.NewBitlist64(uint64(len(ctr.Committee))).ToBitlist().Not(),
			Data: &ethpb.AttestationData{
				Slot:            attSlot,
				CommitteeIndex:  ctr.CommitteeIndex,
				BeaconBlockRoot: make([]byte, fieldparams.RootLength),
				Source: &ethpb.Checkpoint{
					Epoch: 0,
					Root:  make([]byte, fieldparams.RootLength),
				},
				Target: &ethpb.Checkpoint{
					Epoch: 1,
					Root:  make([]byte, fieldparams.RootLength),
				},
			},
			InclusionDelay: 1,
			ProposerIndex:  10,
		}
		assert.NoError(t, os.AppendCurrentEpochAttestations(pendingAtt))
	}
	rb.SetMockState(os)
	return hs, ps, os, keys, rb
	return hs, ps, keys
}

func createStateSetupAltair(t *testing.T, head types.Epoch) (state.BeaconState,
	state.BeaconState, []bls.SecretKey) {
	gs, keys := util.DeterministicGenesisStateAltair(t, 64)
	hs := gs.Copy()

	// Head State
	headSlot := types.Slot(head)*params.BeaconConfig().SlotsPerEpoch + params.BeaconConfig().SlotsPerEpoch/2
	assert.NoError(t, hs.SetSlot(headSlot))

	// Previous Epoch State
	prevSlot := headSlot - params.BeaconConfig().SlotsPerEpoch
	ps := gs.Copy()
	assert.NoError(t, ps.SetSlot(prevSlot))

	return hs, ps, keys
}
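Worked example of the slot arithmetic above, assuming the tests run with 8 slots per epoch (the minimal test config): createStateSetupAltair(t, 3) places the head state at slot 3*8 + 8/2 = 28 and the previous-epoch state at 28 - 8 = 20, which is why the test cases above register the previous state with rb.SetMockStateForSlot(ps, 20).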
@@ -109,6 +109,7 @@ type Config struct {
	StateGen              *stategen.State
	MaxMsgSize            int
	ExecutionEngineCaller powchain.EngineCaller
	ProposerIdsCache      *cache.ProposerPayloadIDsCache
}

// NewService instantiates a new RPC service instance that will
@@ -207,6 +208,7 @@ func (s *Service) Start() {
		ReplayerBuilder:        ch,
		ExecutionEngineCaller:  s.cfg.ExecutionEngineCaller,
		BeaconDB:               s.cfg.BeaconDB,
		ProposerSlotIndexCache: s.cfg.ProposerIdsCache,
	}
	validatorServerV1 := &validator.Server{
		HeadFetcher: s.cfg.HeadFetcher,
@@ -1,5 +1,5 @@
// Code generated by fastssz. DO NOT EDIT.
// Hash: 6de36f732d72b5c4c0c967bc0edcc752b7afdd337e829486954eb6affda84da8
// Hash: 2e923b42b8e4fcc278301da6506b212334a78169cb32c70e0d66a636435b8925
package v1

import (

beacon-chain/state/state-native/v2/generated.ssz.go (Executable file → Normal file)
@@ -1,5 +1,5 @@
// Code generated by fastssz. DO NOT EDIT.
// Hash: 6a7886393e8874ccf57ea6c160647da09f5e541234a235ee71f3bf786d56a100
// Hash: ec98b14e43fd11e74e0d9e705a7afe74a77706c3e215d7940b11411859873f4b
package v2

import (

@@ -1,5 +1,5 @@
// Code generated by fastssz. DO NOT EDIT.
// Hash: a71c6e70ae416774612961057f4c96b97b5c3323270a80167d30ea672ea2f5cd
// Hash: aa2156293aac4326afe2b8c0ba985a0291c83f20c8d8b92d148bc810a7f442e9
package v3

import (
@@ -151,6 +151,15 @@ func (e *epochBoundaryState) put(r [32]byte, s state.BeaconState) error {
	return nil
}

// delete the state from the epoch boundary state cache.
func (e *epochBoundaryState) delete(r [32]byte) error {
	e.lock.Lock()
	defer e.lock.Unlock()
	return e.rootStateCache.Delete(&rootStateInfo{
		root: r,
	})
}

// trim the FIFO queue to the maxSize.
func trim(queue *cache.FIFO, maxSize uint64) {
	for s := uint64(len(queue.ListKeys())); s > maxSize; s-- {

@@ -19,7 +19,7 @@ func TestEpochBoundaryStateCache_BadRootKey(t *testing.T) {
	assert.ErrorContains(t, errNotRootStateInfo.Error(), err, "Did not get wanted error")
}

func TestEpochBoundaryStateCache_CanSave(t *testing.T) {
func TestEpochBoundaryStateCache_CanSaveAndDelete(t *testing.T) {
	e := newBoundaryStateCache()
	s, err := util.NewBeaconState()
	require.NoError(t, err)
@@ -46,6 +46,17 @@ func TestEpochBoundaryStateCache_CanSave(t *testing.T) {
	require.NoError(t, err)
	assert.Equal(t, true, exists, "Should exist")
	assert.DeepSSZEqual(t, s.InnerStateUnsafe(), got.state.InnerStateUnsafe(), "Should have the same state")

	require.NoError(t, e.delete(r))
	got, exists, err = e.getByRoot([32]byte{'b'})
	require.NoError(t, err)
	assert.Equal(t, false, exists, "Should not exist")
	assert.Equal(t, (*rootStateInfo)(nil), got, "Should not exist")

	got, exists, err = e.getBySlot(1)
	require.NoError(t, err)
	assert.Equal(t, false, exists, "Should not exist")
	assert.Equal(t, (*rootStateInfo)(nil), got, "Should not exist")
}

func TestEpochBoundaryStateCache_CanTrim(t *testing.T) {
@@ -39,7 +39,7 @@ func (s *State) HasStateInCache(ctx context.Context, blockRoot [32]byte) (bool,
	return has, nil
}

// StateByRootIfCached retrieves a state using the input block root only if the state is already in the cache
// StateByRootIfCachedNoCopy retrieves a state using the input block root only if the state is already in the cache
func (s *State) StateByRootIfCachedNoCopy(blockRoot [32]byte) state.BeaconState {
	if !s.hotStateCache.has(blockRoot) {
		return nil
@@ -146,6 +146,12 @@ func (s *State) RecoverStateSummary(ctx context.Context, blockRoot [32]byte) (*e
	return nil, errors.New("could not find block in DB")
}

// DeleteStateFromCaches deletes the state from the caches.
func (s *State) DeleteStateFromCaches(_ context.Context, blockRoot [32]byte) error {
	s.hotStateCache.delete(blockRoot)
	return s.epochBoundaryStateCache.delete(blockRoot)
}

// This loads a beacon state from either the cache or DB, then replays blocks up to the requested block root.
func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "stateGen.loadStateByRoot")
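A minimal sketch of the two-cache eviction pattern behind DeleteStateFromCaches, with toy map-backed caches standing in for the hot-state and epoch-boundary caches (assumed behavior, not the real types):

package main

import "fmt"

type toyCache struct{ m map[[32]byte]bool }

func (c *toyCache) put(r [32]byte)    { c.m[r] = true }
func (c *toyCache) delete(r [32]byte) { delete(c.m, r) }

// deleteStateFromCaches evicts a root from both caches; in the code
// above, only the epoch-boundary cache's delete can return an error.
func deleteStateFromCaches(hot, boundary *toyCache, root [32]byte) error {
	hot.delete(root)
	boundary.delete(root)
	return nil
}

func main() {
	hot := &toyCache{m: map[[32]byte]bool{}}
	boundary := &toyCache{m: map[[32]byte]bool{}}
	root := [32]byte{'A'}
	hot.put(root)
	boundary.put(root)
	_ = deleteStateFromCaches(hot, boundary, root)
	fmt.Println(len(hot.m), len(boundary.m)) // 0 0
}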
@@ -165,6 +165,35 @@ func TestStateByRoot_HotStateCached(t *testing.T) {
	require.DeepSSZEqual(t, loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe())
}

func TestDeleteStateFromCaches(t *testing.T) {
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)

	service := New(beaconDB)
	beaconState, _ := util.DeterministicGenesisState(t, 32)
	r := [32]byte{'A'}

	require.Equal(t, false, service.hotStateCache.has(r))
	_, has, err := service.epochBoundaryStateCache.getByRoot(r)
	require.NoError(t, err)
	require.Equal(t, false, has)

	service.hotStateCache.put(r, beaconState)
	require.NoError(t, service.epochBoundaryStateCache.put(r, beaconState))

	require.Equal(t, true, service.hotStateCache.has(r))
	_, has, err = service.epochBoundaryStateCache.getByRoot(r)
	require.NoError(t, err)
	require.Equal(t, true, has)

	require.NoError(t, service.DeleteStateFromCaches(ctx, r))

	require.Equal(t, false, service.hotStateCache.has(r))
	_, has, err = service.epochBoundaryStateCache.getByRoot(r)
	require.NoError(t, err)
	require.Equal(t, false, has)
}

func TestStateByRoot_StateByRootInitialSync(t *testing.T) {
	ctx := context.Background()
	beaconDB := testDB.SetupDB(t)
@@ -23,7 +23,7 @@ func NewMockService() *MockStateManager {
	}
}

// StateByRootIfCached
// StateByRootIfCachedNoCopy
func (_ *MockStateManager) StateByRootIfCachedNoCopy(_ [32]byte) state.BeaconState {
	panic("implement me")
}
@@ -124,3 +124,8 @@ func (m *MockStateManager) AddStateForRoot(state state.BeaconState, blockRoot [3
func (m *MockStateManager) AddStateForSlot(state state.BeaconState, slot types.Slot) {
	m.StatesBySlot[slot] = state
}

// DeleteStateFromCaches --
func (m *MockStateManager) DeleteStateFromCaches(context.Context, [32]byte) error {
	return nil
}
@@ -41,6 +41,13 @@ func (b *MockReplayerBuilder) SetMockState(s state.BeaconState) {
	b.forSlot[s.Slot()] = &MockReplayer{State: s}
}

func (b *MockReplayerBuilder) SetMockStateForSlot(s state.BeaconState, slot types.Slot) {
	if b.forSlot == nil {
		b.forSlot = make(map[types.Slot]*MockReplayer)
	}
	b.forSlot[slot] = &MockReplayer{State: s}
}

func (b *MockReplayerBuilder) SetMockSlotError(s types.Slot, e error) {
	if b.forSlot == nil {
		b.forSlot = make(map[types.Slot]*MockReplayer)
@@ -39,6 +39,7 @@ type StateManager interface {
	ForceCheckpoint(ctx context.Context, root []byte) error
	EnableSaveHotStateToDB(_ context.Context)
	DisableSaveHotStateToDB(ctx context.Context) error
	DeleteStateFromCaches(ctx context.Context, blockRoot [32]byte) error
}

// State is a concrete implementation of StateManager.
@@ -152,14 +152,16 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
			continue
		}

		if err := s.validateBeaconBlock(ctx, b, blkRoot); err != nil {
		err = s.validateBeaconBlock(ctx, b, blkRoot)
		switch {
		case errors.Is(err, ErrOptimisticParent): // OK to continue processing a block whose parent is an optimistic candidate.
		case err != nil:
			log.Debugf("Could not validate block from slot %d: %v", b.Block().Slot(), err)
			s.setBadBlock(ctx, blkRoot)
			tracing.AnnotateError(span, err)
			// In the next iteration of the queue, this block will be removed from
			// the pending queue as it has been marked as a 'bad' block.
			span.End()
			continue
		default:
		}

		if err := s.cfg.chain.ReceiveBlock(ctx, b, blkRoot); err != nil {
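The refactor above swaps an if for a switch so one sentinel error can fall through to block processing while every other error marks the block bad. A self-contained sketch of that control flow (names illustrative, not the sync package's):

package main

import (
	"errors"
	"fmt"
)

var errOptimisticParent = errors.New("parent is optimistic candidate")

func validate(n int) error {
	switch n {
	case 1:
		return errOptimisticParent
	case 2:
		return errors.New("bad signature")
	}
	return nil
}

func main() {
	for _, n := range []int{0, 1, 2} {
		err := validate(n)
		switch {
		case errors.Is(err, errOptimisticParent):
			fmt.Println(n, "optimistic parent: keep processing")
		case err != nil:
			fmt.Println(n, "marked bad:", err)
			continue // skip further processing, as in the queue loop above
		}
		fmt.Println(n, "accepted")
	}
}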
@@ -111,6 +111,84 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) {
	assert.Equal(t, 2, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
}

func TestRegularSyncBeaconBlockSubscriber_OptimisticStatus(t *testing.T) {
	db := dbtest.SetupDB(t)

	p1 := p2ptest.NewTestP2P(t)
	r := &Service{
		cfg: &config{
			p2p:      p1,
			beaconDB: db,
			chain: &mock.ChainService{
				Optimistic: true,
				FinalizedCheckPoint: &ethpb.Checkpoint{
					Epoch: 0,
				},
			},
			stateGen: stategen.New(db),
		},
		slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
		seenPendingBlocks:   make(map[[32]byte]bool),
	}
	r.initCaches()

	b0 := util.NewBeaconBlock()
	wsb, err := wrapper.WrappedSignedBeaconBlock(b0)
	require.NoError(t, err)
	require.NoError(t, r.cfg.beaconDB.SaveBlock(context.Background(), wsb))
	b0Root, err := b0.Block.HashTreeRoot()
	require.NoError(t, err)
	b3 := util.NewBeaconBlock()
	b3.Block.Slot = 3
	b3.Block.ParentRoot = b0Root[:]
	wsb, err = wrapper.WrappedSignedBeaconBlock(b3)
	require.NoError(t, err)
	require.NoError(t, r.cfg.beaconDB.SaveBlock(context.Background(), wsb))
	// Incomplete block link
	b1 := util.NewBeaconBlock()
	b1.Block.Slot = 1
	b1.Block.ParentRoot = b0Root[:]
	b1Root, err := b1.Block.HashTreeRoot()
	require.NoError(t, err)
	b2 := util.NewBeaconBlock()
	b2.Block.Slot = 2
	b2.Block.ParentRoot = b1Root[:]
	b2Root, err := b2.Block.HashTreeRoot()
	require.NoError(t, err)

	// Add b2 to the cache
	wsb, err = wrapper.WrappedSignedBeaconBlock(b2)
	require.NoError(t, err)
	require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, wsb, b2Root))

	require.NoError(t, r.processPendingBlocks(context.Background()))
	assert.Equal(t, 1, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
	assert.Equal(t, 1, len(r.seenPendingBlocks), "Incorrect size for seen pending block")

	// Add b1 to the cache
	wsb, err = wrapper.WrappedSignedBeaconBlock(b1)
	require.NoError(t, err)
	require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wsb, b1Root))
	wsb, err = wrapper.WrappedSignedBeaconBlock(b1)
	require.NoError(t, err)
	require.NoError(t, r.cfg.beaconDB.SaveBlock(context.Background(), wsb))

	nBlock := util.NewBeaconBlock()
	nBlock.Block.Slot = b1.Block.Slot
	nRoot, err := nBlock.Block.HashTreeRoot()
	require.NoError(t, err)

	// Insert bad b1 in the cache to verify the good one doesn't get replaced.
	wsb, err = wrapper.WrappedSignedBeaconBlock(nBlock)
	require.NoError(t, err)
	require.NoError(t, r.insertBlockToPendingQueue(nBlock.Block.Slot, wsb, nRoot))
	require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad
	require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run

	assert.Equal(t, 1, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
	assert.Equal(t, 2, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
}

func TestRegularSyncBeaconBlockSubscriber_ExecutionEngineTimesOut(t *testing.T) {
	db := dbtest.SetupDB(t)
@@ -15,7 +15,7 @@ var (
	HTTPWeb3ProviderFlag = &cli.StringFlag{
		Name:  "http-web3provider",
		Usage: "A mainchain web3 provider string http endpoint. Can contain auth header as well in the format --http-web3provider=\"https://goerli.infura.io/v3/xxxx,Basic xxx\" for project secret (base64 encoded) and --http-web3provider=\"https://goerli.infura.io/v3/xxxx,Bearer xxx\" for jwt use",
		Value: "",
		Value: "http://localhost:8545",
	}
	// ExecutionJWTSecretFlag provides a path to a file containing a hex-encoded string representing a 32 byte secret
	// used to authenticate with an execution node via HTTP. This is required if using an HTTP connection, otherwise all requests
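As the usage string documents, an auth header can ride along after a comma in the endpoint value: --http-web3provider="https://goerli.infura.io/v3/xxxx,Bearer xxx" for JWT auth, or ",Basic xxx" for a base64-encoded project secret. With the changed default, omitting the flag now targets a local execution node at http://localhost:8545.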
@@ -27,7 +27,7 @@ func FlagOptions(c *cli.Context) ([]powchain.Option, error) {
		powchain.WithEth1HeaderRequestLimit(c.Uint64(flags.Eth1HeaderReqLimit.Name)),
	}
	if len(jwtSecret) > 0 {
		opts = append(opts, powchain.WithJWTSecret(jwtSecret))
		opts = append(opts, powchain.WithHttpEndpointsAndJWTSecret(endpoints, jwtSecret))
	}
	return opts, nil
}
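A generic sketch of the functional-options wiring above: options accumulate unconditionally, and the endpoint+JWT option is appended only when a secret was loaded. Toy types below; not the powchain package's actual API:

package main

import "fmt"

type config struct {
	endpoints []string
	jwtSecret []byte
}

// Option mutates the service config; mirrors the powchain.Option pattern.
type Option func(*config)

func withHttpEndpointsAndJWTSecret(eps []string, secret []byte) Option {
	return func(c *config) {
		c.endpoints = eps
		c.jwtSecret = secret
	}
}

func main() {
	endpoints := []string{"http://localhost:8545"}
	jwtSecret := []byte("0xdeadbeef") // pretend this was read from the secret file
	opts := []Option{}
	if len(jwtSecret) > 0 {
		opts = append(opts, withHttpEndpointsAndJWTSecret(endpoints, jwtSecret))
	}
	c := &config{}
	for _, o := range opts {
		o(c)
	}
	fmt.Println(c.endpoints, len(c.jwtSecret) > 0) // [http://localhost:8545] true
}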
Some files were not shown because too many files have changed in this diff.