Compare commits

..

12 Commits

Author SHA1 Message Date
terence tsao 9111b69117 Update process_block.go 2022-02-06 10:08:01 -08:00
terence tsao 7b1db6b74a Revert back to nil 2022-02-06 09:33:15 -08:00
terence tsao d611d09a9c Copy map 2022-02-06 08:45:03 -08:00
terence tsao ffca04066d Update process_block_test.go 2022-02-05 19:40:22 -08:00
terence tsao acb6d4e99e Fix build 2022-02-05 19:31:50 -08:00
prylabs-bulldozer[bot] 8ec4a9c2c1 Merge refs/heads/develop into save-sync-tips 2022-02-06 03:15:03 +00:00
terence tsao 2c508e95dd Merge branch 'develop' into save-sync-tips 2022-02-05 19:12:51 -08:00
terence tsao 453cf4bbab Merge branch 'develop' into save-sync-tips 2022-02-03 11:14:26 -08:00
terence tsao 785735e8f5 Merge branch 'develop' into save-sync-tips 2022-02-02 07:31:59 -08:00
terence tsao 977de106e9 Merge branch 'develop' into save-sync-tips 2022-02-01 12:00:14 -08:00
terence tsao cd1ebb0b85 Merge branch 'develop' into save-sync-tips 2022-02-01 11:51:07 -08:00
terence tsao 59bef7a5d9 Save sync tips to DB 2022-02-01 11:42:13 -08:00
336 changed files with 5444 additions and 8363 deletions

View File

@@ -222,7 +222,7 @@ filegroup(
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)
consensus_spec_version = "v1.1.9"
consensus_spec_version = "v1.1.8"
bls_test_version = "v0.1.1"
@@ -238,7 +238,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "207d9c326ba4fa1f34bab7b6169201c32f2611755db030909a3405873445e0ba",
sha256 = "e4d2b7830e85734442d7172887dcd4edc0985d6256bafedb3353ab477a1433c0",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -254,7 +254,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "a3995b39f412db236b2f1db909f288218da53cb53b9923b71dda9d144d68f40a",
sha256 = "4a88d01ad12260220ab5c8efdeec6534bac48a47f29ba4f7977ea14c9d07b0fe",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -270,7 +270,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "76cea7a4c8e32d458ad456b54bfbb30bc772481a91954a4cd97e229aa3023b1d",
sha256 = "0033fe107d9d2adb8d4fcb60dfb1c43fc5a54f0af970525c962124221757c266",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -285,7 +285,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "0fc429684775f943250dce1f9c485ac25e26c6395d7f585c8d1317becec2ace7",
sha256 = "3fc3b8809d140a1ab61350fbd410f33add2851a63829d874dcb620babba603de",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
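
For context, consensus_spec_version feeds the %s placeholder in the download URLs above, so every version bump has to land together with fresh sha256 pins for the general, minimal, mainnet, and source tarballs. As an aside, those digests can be reproduced locally before editing the WORKSPACE; the following is a minimal standalone Go sketch, not part of the repository, and the file name in it is purely illustrative.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"
)

// sha256File returns the hex-encoded SHA-256 digest of a file, which is the
// value Bazel compares against the sha256 attribute of an http_archive rule.
func sha256File(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	// Example: hash a locally downloaded spec-tests tarball (path is illustrative)
	// and compare the output with the pinned sha256 above.
	digest, err := sha256File("general.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(digest)
}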

View File

@@ -7,11 +7,11 @@ go_library(
"error.go",
"head.go",
"head_sync_committee_info.go",
"info.go",
"init_sync_process_block.go",
"log.go",
"metrics.go",
"new_slot.go",
"optimistic_sync.go",
"options.go",
"pow_block.go",
"process_attestation.go",
@@ -38,7 +38,6 @@ go_library(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
@@ -71,6 +70,7 @@ go_library(
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_emicklei_dot//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
@@ -98,11 +98,11 @@ go_test(
"checktags_test.go",
"head_sync_committee_info_test.go",
"head_test.go",
"info_test.go",
"init_test.go",
"log_test.go",
"metrics_test.go",
"mock_test.go",
"optimistic_sync_test.go",
"pow_block_test.go",
"process_attestation_test.go",
"process_block_test.go",

View File

@@ -36,7 +36,7 @@ type TimeFetcher interface {
// GenesisFetcher retrieves the Ethereum consensus data related to its genesis.
type GenesisFetcher interface {
GenesisValidatorsRoot() [32]byte
GenesisValidatorRoot() [32]byte
}
// HeadFetcher defines a common interface for methods in blockchain service which
@@ -48,14 +48,13 @@ type HeadFetcher interface {
HeadState(ctx context.Context) (state.BeaconState, error)
HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error)
HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error)
HeadGenesisValidatorsRoot() [32]byte
HeadGenesisValidatorRoot() [32]byte
HeadETH1Data() *ethpb.Eth1Data
HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool)
HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
ProtoArrayStore() *protoarray.Store
ChainHeads() ([][32]byte, []types.Slot)
IsOptimistic() (bool, error)
IsOptimisticForRoot(root [32]byte) (bool, error)
IsOptimistic(ctx context.Context) (bool, error)
HeadSyncCommitteeFetcher
HeadDomainFetcher
}
@@ -214,8 +213,8 @@ func (s *Service) HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, er
return helpers.Seed(s.headState(ctx), epoch, params.BeaconConfig().DomainBeaconAttester)
}
// HeadGenesisValidatorsRoot returns genesis validators root of the head state.
func (s *Service) HeadGenesisValidatorsRoot() [32]byte {
// HeadGenesisValidatorRoot returns genesis validator root of the head state.
func (s *Service) HeadGenesisValidatorRoot() [32]byte {
s.headLock.RLock()
defer s.headLock.RUnlock()
@@ -223,7 +222,7 @@ func (s *Service) HeadGenesisValidatorsRoot() [32]byte {
return [32]byte{}
}
return s.headGenesisValidatorsRoot()
return s.headGenesisValidatorRoot()
}
// HeadETH1Data returns the eth1data of the current head state.
@@ -247,16 +246,16 @@ func (s *Service) GenesisTime() time.Time {
return s.genesisTime
}
// GenesisValidatorsRoot returns the genesis validator
// GenesisValidatorRoot returns the genesis validator
// root of the chain.
func (s *Service) GenesisValidatorsRoot() [32]byte {
func (s *Service) GenesisValidatorRoot() [32]byte {
s.headLock.RLock()
defer s.headLock.RUnlock()
if !s.hasHeadState() {
return [32]byte{}
}
return bytesutil.ToBytes32(s.head.state.GenesisValidatorsRoot())
return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
}
// CurrentFork retrieves the latest fork information of the beacon chain.
@@ -287,7 +286,23 @@ func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, er
// ChainHeads returns all possible chain heads (leaves of fork choice tree).
// Heads roots and heads slots are returned.
func (s *Service) ChainHeads() ([][32]byte, []types.Slot) {
return s.cfg.ForkChoiceStore.Tips()
nodes := s.ProtoArrayStore().Nodes()
// Deliberate choice not to preallocate the slices below.
// Heads can't be more than 2-3 in the worst case, where pre-allocation would be 64 to begin with.
headsRoots := make([][32]byte, 0)
headsSlots := make([]types.Slot, 0)
nonExistentNode := ^uint64(0)
for _, node := range nodes {
// Possible heads have no children.
if node.BestDescendant() == nonExistentNode && node.BestChild() == nonExistentNode {
headsRoots = append(headsRoots, node.Root())
headsSlots = append(headsSlots, node.Slot())
}
}
return headsRoots, headsSlots
}
// HeadPublicKeyToValidatorIndex returns the validator index of the `pubkey` in current head state.
@@ -315,16 +330,10 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.V
}
// IsOptimistic returns true if the current head is optimistic.
func (s *Service) IsOptimistic() (bool, error) {
func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
return s.cfg.ForkChoiceStore.IsOptimistic(s.head.root)
}
// IsOptimisticForRoot takes the root and slot as arguments instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(root [32]byte) (bool, error) {
return s.cfg.ForkChoiceStore.IsOptimistic(root)
return s.cfg.ForkChoiceStore.Optimistic(ctx, s.head.root, s.head.slot)
}
// SetGenesisTime sets the genesis time of beacon chain.
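
A couple of behavioural changes stand out in this file. One side of the ChainHeads hunk delegates to a Tips() helper on the fork-choice store, while the other walks every proto-array node and treats a node with neither a best child nor a best descendant as a head; IsOptimistic also gains a context argument and defers to ForkChoiceStore.Optimistic. Below is a minimal, self-contained sketch of that leaf-finding rule, using a simplified node type rather than the real proto-array node.

package main

import "fmt"

const nonExistentNode = ^uint64(0) // sentinel proto-array uses for "no index"

// node is a simplified stand-in for a proto-array node.
type node struct {
	root           [32]byte
	slot           uint64
	bestChild      uint64
	bestDescendant uint64
}

// chainHeads returns the roots and slots of all leaves of the fork-choice tree:
// nodes that have neither a best child nor a best descendant.
func chainHeads(nodes []node) ([][32]byte, []uint64) {
	roots := make([][32]byte, 0)
	slots := make([]uint64, 0)
	for _, n := range nodes {
		if n.bestChild == nonExistentNode && n.bestDescendant == nonExistentNode {
			roots = append(roots, n.root)
			slots = append(slots, n.slot)
		}
	}
	return roots, slots
}

func main() {
	nodes := []node{
		{root: [32]byte{'a'}, slot: 100, bestChild: 1, bestDescendant: 1},
		{root: [32]byte{'b'}, slot: 101, bestChild: nonExistentNode, bestDescendant: nonExistentNode},
	}
	roots, slots := chainHeads(nodes)
	fmt.Println(len(roots), slots) // 1 [101]
}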

View File

@@ -185,15 +185,15 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {
}
}
func TestGenesisValidatorsRoot_CanRetrieve(t *testing.T) {
func TestGenesisValidatorRoot_CanRetrieve(t *testing.T) {
// Should not panic if head state is nil.
c := &Service{}
assert.Equal(t, [32]byte{}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
assert.Equal(t, [32]byte{}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
s, err := v1.InitializeFromProto(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
require.NoError(t, err)
c.head = &head{state: s}
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
}
func TestHeadETH1Data_Nil(t *testing.T) {
@@ -265,33 +265,33 @@ func TestService_HeadSeed(t *testing.T) {
require.DeepEqual(t, seed, root)
}
func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
func TestService_HeadGenesisValidatorRoot(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1)
c := &Service{}
c.head = &head{}
root := c.HeadGenesisValidatorsRoot()
root := c.HeadGenesisValidatorRoot()
require.Equal(t, [32]byte{}, root)
c.head = &head{state: s}
root = c.HeadGenesisValidatorsRoot()
require.DeepEqual(t, root[:], s.GenesisValidatorsRoot())
root = c.HeadGenesisValidatorRoot()
require.DeepEqual(t, root[:], s.GenesisValidatorRoot())
}
func TestService_ProtoArrayStore(t *testing.T) {
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}}
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
p := c.ProtoArrayStore()
require.Equal(t, 0, int(p.FinalizedEpoch()))
}
func TestService_ChainHeads(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0, false))
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, [32]byte{}, 0, 0))
roots, slots := c.ChainHeads()
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
@@ -356,35 +356,13 @@ func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
}
func TestService_IsOptimistic_NotOptimistic(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, false))
opt, err := c.IsOptimistic()
require.NoError(t, err)
require.Equal(t, false, opt)
}
func TestService_IsOptimistic(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0))
opt, err := c.IsOptimistic()
require.NoError(t, err)
require.Equal(t, true, opt)
}
func TestService_IsOptimisticForRoot(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
opt, err := c.IsOptimisticForRoot([32]byte{'a'})
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, opt)
}

View File

@@ -77,9 +77,8 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
if err != nil {
return err
}
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch)
// TODO(10261) send optimistic status
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j, false /* optimistic status */); err != nil {
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j); err != nil {
return err
}
}
@@ -266,10 +265,10 @@ func (s *Service) headState(ctx context.Context) state.BeaconState {
return s.head.state.Copy()
}
// This returns the genesis validators root of the head state.
// This returns the genesis validator root of the head state.
// This is a lock free version.
func (s *Service) headGenesisValidatorsRoot() [32]byte {
return bytesutil.ToBytes32(s.head.state.GenesisValidatorsRoot())
func (s *Service) headGenesisValidatorRoot() [32]byte {
return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
}
// This returns the validator referenced by the provided index in

View File

@@ -130,7 +130,7 @@ func (s *Service) domainWithHeadState(ctx context.Context, slot types.Slot, doma
if err != nil {
return nil, err
}
return signing.Domain(headState.Fork(), slots.ToEpoch(headState.Slot()), domain, headState.GenesisValidatorsRoot())
return signing.Domain(headState.Fork(), slots.ToEpoch(headState.Slot()), domain, headState.GenesisValidatorRoot())
}
// returns the head state that is advanced up to `slot`. It utilizes the cache `syncCommitteeHeadState` by retrieving using `slot` as key.
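
The one-line change here swaps GenesisValidatorsRoot() for GenesisValidatorRoot() as the last argument to signing.Domain. Per the consensus spec, the 32-byte signing domain is the 4-byte domain type followed by the first 28 bytes of the fork data root. A standalone sketch of that computation follows; it is not Prysm's signing package, just the spec formula written out.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// computeForkDataRoot hash-tree-roots ForkData{current_version, genesis_validators_root}.
// For a container of two 32-byte chunks this reduces to one SHA-256 over both chunks.
func computeForkDataRoot(version [4]byte, genesisValidatorsRoot [32]byte) [32]byte {
	var chunk [32]byte
	copy(chunk[:4], version[:]) // bytes4 is right-padded to a 32-byte chunk
	return sha256.Sum256(append(chunk[:], genesisValidatorsRoot[:]...))
}

// computeDomain builds the 32-byte signing domain: domain type || fork_data_root[:28].
func computeDomain(domainType, version [4]byte, genesisValidatorsRoot [32]byte) [32]byte {
	forkDataRoot := computeForkDataRoot(version, genesisValidatorsRoot)
	var domain [32]byte
	copy(domain[:4], domainType[:])
	copy(domain[4:], forkDataRoot[:28])
	return domain
}

func main() {
	domainSyncCommittee := [4]byte{0x07, 0x00, 0x00, 0x00} // DOMAIN_SYNC_COMMITTEE
	d := computeDomain(domainSyncCommittee, [4]byte{}, [32]byte{})
	fmt.Println(hex.EncodeToString(d[:]))
}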

View File

@@ -122,7 +122,7 @@ func TestService_HeadSyncCommitteeDomain(t *testing.T) {
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorRoot())
require.NoError(t, err)
d, err := c.HeadSyncCommitteeDomain(context.Background(), 0)
@@ -136,7 +136,7 @@ func TestService_HeadSyncContributionProofDomain(t *testing.T) {
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorsRoot())
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorRoot())
require.NoError(t, err)
d, err := c.HeadSyncContributionProofDomain(context.Background(), 0)
@@ -150,7 +150,7 @@ func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
c := &Service{}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorsRoot())
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorRoot())
require.NoError(t, err)
d, err := c.HeadSyncSelectionProofDomain(context.Background(), 0)

View File

@@ -0,0 +1,99 @@
package blockchain
import (
"encoding/hex"
"fmt"
"net/http"
"github.com/emicklei/dot"
"github.com/prysmaticlabs/prysm/config/params"
)
const template = `<html>
<head>
<script src="//cdnjs.cloudflare.com/ajax/libs/viz.js/2.1.2/viz.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/viz.js/2.1.2/full.render.js"></script>
<body>
<script type="application/javascript">
var graph = ` + "`%s`;" + `
var viz = new Viz();
viz.renderSVGElement(graph) // reading the graph.
.then(function(element) {
document.body.appendChild(element); // appends to document.
})
.catch(error => {
// Create a new Viz instance (@see Caveats page for more info)
viz = new Viz();
// Possibly display the error
console.error(error);
});
</script>
</head>
</body>
</html>`
// TreeHandler is a handler to serve /tree page in metrics.
func (s *Service) TreeHandler(w http.ResponseWriter, r *http.Request) {
headState, err := s.HeadState(r.Context())
if err != nil {
log.WithError(err).Error("Could not get head state")
return
}
if headState == nil || headState.IsNil() {
if _, err := w.Write([]byte("Unavailable during initial syncing")); err != nil {
log.WithError(err).Error("Failed to render p2p info page")
}
}
nodes := s.cfg.ForkChoiceStore.Nodes()
graph := dot.NewGraph(dot.Directed)
graph.Attr("rankdir", "RL")
graph.Attr("labeljust", "l")
dotNodes := make([]*dot.Node, len(nodes))
avgBalance := uint64(averageBalance(headState.Balances()))
for i := len(nodes) - 1; i >= 0; i-- {
// Construct label for each node.
slot := fmt.Sprintf("%d", nodes[i].Slot())
weight := fmt.Sprintf("%d", nodes[i].Weight()/1e9) // Convert unit Gwei to unit ETH.
votes := fmt.Sprintf("%d", nodes[i].Weight()/1e9/avgBalance)
index := fmt.Sprintf("%d", i)
g := nodes[i].Graffiti()
graffiti := hex.EncodeToString(g[:8])
label := "slot: " + slot + "\n votes: " + votes + "\n weight: " + weight + "\n graffiti: " + graffiti
var dotN dot.Node
if nodes[i].Parent() != ^uint64(0) {
dotN = graph.Node(index).Box().Attr("label", label)
}
if nodes[i].Slot() == s.HeadSlot() &&
nodes[i].BestDescendant() == ^uint64(0) &&
nodes[i].Parent() != ^uint64(0) {
dotN = dotN.Attr("color", "green")
}
dotNodes[i] = &dotN
}
for i := len(nodes) - 1; i >= 0; i-- {
if nodes[i].Parent() != ^uint64(0) && nodes[i].Parent() < uint64(len(dotNodes)) {
graph.Edge(*dotNodes[i], *dotNodes[nodes[i].Parent()])
}
}
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "text/html")
if _, err := fmt.Fprintf(w, template, graph.String()); err != nil {
log.WithError(err).Error("Failed to render p2p info page")
}
}
func averageBalance(balances []uint64) float64 {
total := uint64(0)
for i := 0; i < len(balances); i++ {
total += balances[i]
}
return float64(total) / float64(len(balances)) / float64(params.BeaconConfig().GweiPerEth)
}
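
The new info.go renders the proto-array fork-choice tree as a Graphviz DOT string with github.com/emicklei/dot and ships it to the browser, where viz.js turns it into SVG. As a point of reference, here is a tiny standalone sketch of the dot API used above (a two-node graph, independent of the Service type); the labels are illustrative only.

package main

import (
	"fmt"

	"github.com/emicklei/dot"
)

func main() {
	// Build a small directed graph resembling the fork-choice rendering above.
	g := dot.NewGraph(dot.Directed)
	g.Attr("rankdir", "RL")

	parent := g.Node("0").Box().Attr("label", "slot: 100")
	child := g.Node("1").Box().Attr("label", "slot: 101")

	// Edges point from child to parent, matching the handler's orientation.
	g.Edge(child, parent)

	// The DOT source is what gets embedded in the HTML template for viz.js.
	fmt.Println(g.String())
}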

View File

@@ -0,0 +1,50 @@
package blockchain
import (
"context"
"net/http"
"net/http/httptest"
"testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestService_TreeHandler(t *testing.T) {
req, err := http.NewRequest("GET", "/tree", nil)
require.NoError(t, err)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetBalances([]uint64{params.BeaconConfig().GweiPerEth}))
fcs := protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
[32]byte{'a'},
)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
s, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
s.setHead([32]byte{'a'}, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()), headState)
rr := httptest.NewRecorder()
handler := http.HandlerFunc(s.TreeHandler)
handler.ServeHTTP(rr, req)
assert.Equal(t, http.StatusOK, rr.Code)
}

View File

@@ -13,7 +13,7 @@ import (
func testServiceOptsWithDB(t *testing.T) []Option {
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New(0, 0, [32]byte{'a'})
return []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),

View File

@@ -17,7 +17,7 @@ import (
func TestService_newSlot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New(0, 0, [32]byte{})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -25,11 +25,11 @@ func TestService_newSlot(t *testing.T) {
}
ctx := context.Background()
require.NoError(t, fcs.ProcessBlock(ctx, 0, [32]byte{}, [32]byte{}, 0, 0, true)) // genesis
require.NoError(t, fcs.ProcessBlock(ctx, 32, [32]byte{'a'}, [32]byte{}, 0, 0, true)) // finalized
require.NoError(t, fcs.ProcessBlock(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true)) // justified
require.NoError(t, fcs.ProcessBlock(ctx, 96, [32]byte{'c'}, [32]byte{'a'}, 0, 0, true)) // best justified
require.NoError(t, fcs.ProcessBlock(ctx, 97, [32]byte{'d'}, [32]byte{}, 0, 0, true)) // bad
require.NoError(t, fcs.ProcessBlock(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, 0, 0)) // genesis
require.NoError(t, fcs.ProcessBlock(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0)) // finalized
require.NoError(t, fcs.ProcessBlock(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0)) // justified
require.NoError(t, fcs.ProcessBlock(ctx, 96, [32]byte{'c'}, [32]byte{'a'}, [32]byte{}, 0, 0)) // best justified
require.NoError(t, fcs.ProcessBlock(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0)) // bad
type args struct {
slot types.Slot

View File

@@ -1,33 +0,0 @@
package blockchain
import (
"context"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
)
// optimisticCandidateBlock returns true if this block can be optimistically synced.
//
// Spec pseudocode definition:
// def is_optimistic_candidate_block(opt_store: OptimisticStore, current_slot: Slot, block: BeaconBlock) -> bool:
// justified_root = opt_store.block_states[opt_store.head_block_root].current_justified_checkpoint.root
// justified_is_execution_block = is_execution_block(opt_store.blocks[justified_root])
// block_is_deep = block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot
// return justified_is_execution_block or block_is_deep
func (s *Service) optimisticCandidateBlock(ctx context.Context, blk block.BeaconBlock) (bool, error) {
if blk.Slot()+params.BeaconConfig().SafeSlotsToImportOptimistically <= s.CurrentSlot() {
return true, nil
}
j := s.store.JustifiedCheckpt()
if j == nil {
return false, errNilJustifiedInStore
}
jBlock, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(j.Root))
if err != nil {
return false, err
}
return blocks.ExecutionBlock(jBlock.Block().Body())
}
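
This file is removed in this compare, but the spec pseudocode it carried is the easiest way to read the condition: a block may be imported optimistically either when the justified checkpoint block is already an execution block or when the block sits at least SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY slots behind the current slot. A standalone sketch of that two-part check, using plain uint64 slots instead of Prysm's types:

package main

import "fmt"

// optimisticCandidate reports whether a block may be imported optimistically,
// following the spec condition: justified_is_execution_block OR block_is_deep.
func optimisticCandidate(blockSlot, currentSlot, safeSlots uint64, justifiedIsExecutionBlock bool) bool {
	blockIsDeep := blockSlot+safeSlots <= currentSlot
	return justifiedIsExecutionBlock || blockIsDeep
}

func main() {
	// A block far enough behind the current slot is "deep" even when the
	// justified checkpoint is not an execution block yet.
	fmt.Println(optimisticCandidate(1, 256, 128, false))   // true: 1+128 <= 256
	fmt.Println(optimisticCandidate(200, 256, 128, false)) // false: 200+128 > 256
	// With an execution-enabled justified checkpoint, depth no longer matters.
	fmt.Println(optimisticCandidate(200, 256, 128, true)) // true
}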

View File

@@ -1,141 +0,0 @@
package blockchain
import (
"context"
"testing"
"time"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
)
func Test_IsOptimisticCandidateBlock(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
params.BeaconConfig().SafeSlotsToImportOptimistically = 128
service.genesisTime = time.Now().Add(-time.Second * 12 * 2 * 128)
tests := []struct {
name string
blk block.BeaconBlock
justified block.SignedBeaconBlock
want bool
}{
{
name: "deep block",
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 1
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
}(t),
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 32
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
}(t),
want: true,
},
{
name: "shallow block, Altair justified chkpt",
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockAltair()
blk.Block.Slot = 200
wr, err := wrapper.WrappedAltairBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
}(t),
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockAltair()
blk.Block.Slot = 32
wr, err := wrapper.WrappedAltairSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
}(t),
want: false,
},
{
name: "shallow block, Bellatrix justified chkpt without execution",
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 200
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
}(t),
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 32
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
}(t),
want: false,
},
{
name: "shallow block, execution enabled justified chkpt",
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 200
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
}(t),
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 32
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.FeeRecipientLength)
blk.Block.Body.ExecutionPayload.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
blk.Block.Body.ExecutionPayload.Random = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
}(t),
want: true,
},
}
for _, tt := range tests {
jroot, err := tt.justified.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, tt.justified))
service.store.SetJustifiedCheckpt(
&ethpb.Checkpoint{
Root: jroot[:],
Epoch: slots.ToEpoch(tt.justified.Block().Slot()),
})
candidate, err := service.optimisticCandidateBlock(ctx, tt.blk)
require.NoError(t, err)
require.Equal(t, tt.want, candidate, tt.name)
}
}

View File

@@ -1,10 +1,7 @@
package blockchain
import (
"math/big"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
)
@@ -14,16 +11,9 @@ import (
// is_total_difficulty_reached = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// is_parent_total_difficulty_valid = parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY
// return is_total_difficulty_reached and is_parent_total_difficulty_valid
func validTerminalPowBlock(currentDifficulty *uint256.Int, parentDifficulty *uint256.Int) (bool, error) {
b, ok := new(big.Int).SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
if !ok {
return false, errors.New("failed to parse terminal total difficulty")
}
ttd, of := uint256.FromBig(b)
if of {
return false, errors.New("overflow terminal total difficulty")
}
func validTerminalPowBlock(currentDifficulty *uint256.Int, parentDifficulty *uint256.Int) bool {
ttd := uint256.NewInt(params.BeaconConfig().TerminalTotalDifficulty)
totalDifficultyReached := currentDifficulty.Cmp(ttd) >= 0
parentTotalDifficultyValid := ttd.Cmp(parentDifficulty) > 0
return totalDifficultyReached && parentTotalDifficultyValid, nil
return totalDifficultyReached && parentTotalDifficultyValid
}
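
One side of this hunk parses TERMINAL_TOTAL_DIFFICULTY from a decimal string (and can fail on parse or overflow); the other reads it as a uint64 straight into a uint256 and drops the error return. Either way the core check is the same pure comparison: the block's total difficulty has reached TTD while its parent's has not. A standalone worked example with github.com/holiman/uint256, with values chosen only for illustration:

package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

// validTerminalPowBlock reports whether a PoW block is the terminal block:
// its total difficulty reached TTD while its parent's did not.
func validTerminalPowBlock(current, parent, ttd *uint256.Int) bool {
	totalDifficultyReached := current.Cmp(ttd) >= 0
	parentTotalDifficultyValid := ttd.Cmp(parent) > 0
	return totalDifficultyReached && parentTotalDifficultyValid
}

func main() {
	ttd := uint256.NewInt(1000)
	fmt.Println(validTerminalPowBlock(uint256.NewInt(1000), uint256.NewInt(999), ttd))  // true
	fmt.Println(validTerminalPowBlock(uint256.NewInt(999), uint256.NewInt(998), ttd))   // false: TTD not reached
	fmt.Println(validTerminalPowBlock(uint256.NewInt(1001), uint256.NewInt(1000), ttd)) // false: parent already crossed TTD
}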

View File

@@ -1,13 +1,10 @@
package blockchain
import (
"fmt"
"math/big"
"testing"
"github.com/holiman/uint256"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/require"
)
func Test_validTerminalPowBlock(t *testing.T) {
@@ -64,30 +61,11 @@ func Test_validTerminalPowBlock(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = fmt.Sprint(tt.ttd)
cfg.TerminalTotalDifficulty = tt.ttd
params.OverrideBeaconConfig(cfg)
got, err := validTerminalPowBlock(tt.currentDifficulty, tt.parentDifficulty)
require.NoError(t, err)
if got != tt.want {
if got := validTerminalPowBlock(tt.currentDifficulty, tt.parentDifficulty); got != tt.want {
t.Errorf("validTerminalPowBlock() = %v, want %v", got, tt.want)
}
})
}
}
func Test_validTerminalPowBlockSpecConfig(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "115792089237316195423570985008687907853269984665640564039457584007913129638912"
params.OverrideBeaconConfig(cfg)
i, _ := new(big.Int).SetString("115792089237316195423570985008687907853269984665640564039457584007913129638912", 10)
current, of := uint256.FromBig(i)
require.Equal(t, of, false)
i, _ = new(big.Int).SetString("115792089237316195423570985008687907853269984665640564039457584007913129638911", 10)
parent, of := uint256.FromBig(i)
require.Equal(t, of, false)
got, err := validTerminalPowBlock(current, parent)
require.NoError(t, err)
require.Equal(t, true, got)
}

View File

@@ -27,7 +27,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0)),
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
@@ -131,7 +131,7 @@ func TestStore_OnAttestation_Ok(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -149,7 +149,7 @@ func TestStore_OnAttestation_Ok(t *testing.T) {
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, 1, 1, false))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
require.NoError(t, service.OnAttestation(ctx, att[0]))
}
@@ -334,7 +334,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -407,8 +407,8 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, 0, 0, false))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, 0, 0, false))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, [32]byte{}, 0, 0))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, [32]byte{}, 0, 0))
_, err = service.cfg.ForkChoiceStore.Head(ctx, 0, r32, []uint64{}, 0)
require.NoError(t, err)

View File

@@ -111,7 +111,7 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
return err
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */, false /* optimistic */); err != nil {
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
return err
}
@@ -323,11 +323,11 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
// handles a block after the block's batch has been verified, where we can save blocks and
// their state summaries and split them off to their respective hot/cold storage.
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed block.SignedBeaconBlock,
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint, optimistic bool) error {
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
b := signed.Block()
s.saveInitSyncBlock(blockRoot, signed)
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint, optimistic); err != nil {
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint); err != nil {
return err
}
@@ -415,13 +415,13 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
// This feeds in the block and block's attestations to fork choice store. It allows fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk block.BeaconBlock, root [32]byte,
st state.BeaconState, optimistic bool) error {
st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockAndAttestationsToForkChoiceStore")
defer span.End()
fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint, optimistic); err != nil {
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint); err != nil {
return err
}
// Feed in block's attestations to fork choice store.
@@ -440,15 +440,15 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
}
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk block.BeaconBlock,
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint, optimistic bool) error {
//TODO(10261) check if the blocks are optimistic or not when filling fork choice
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}
// Feed in block to fork choice store.
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
blk.Slot(), root, bytesutil.ToBytes32(blk.ParentRoot()),
jCheckpoint.Epoch, fCheckpoint.Epoch, optimistic); err != nil {
blk.Slot(), root, bytesutil.ToBytes32(blk.ParentRoot()), bytesutil.ToBytes32(blk.Body().Graffiti()),
jCheckpoint.Epoch,
fCheckpoint.Epoch); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
}
return nil
@@ -456,7 +456,7 @@ func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk block.Be
// This saves post state info to DB or cache. This also saves post state info to fork choice store.
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState, initSync bool, optimistic bool) error {
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState, initSync bool) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
defer span.End()
if initSync {
@@ -467,7 +467,7 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.Sig
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return errors.Wrap(err, "could not save state")
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st, optimistic); err != nil {
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block().Slot())
}
return nil
@@ -502,3 +502,12 @@ func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b
}
return nil
}
// Saves synced and validated tips to DB.
func (s *Service) saveSyncedTipsDB(ctx context.Context) error {
tips := s.cfg.ForkChoiceStore.SyncedTips()
if len(tips) == 0 {
return errors.New("no tips to save")
}
return s.cfg.BeaconDB.UpdateValidatedTips(ctx, tips)
}
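
The fork-choice insertion path changes shape here: ProcessBlock loses the trailing optimistic flag and gains the block's graffiti root, and the new saveSyncedTipsDB helper persists the store's synced tips through BeaconDB.UpdateValidatedTips. Judging from the test added further down, the tips collection maps block roots to slots; the sketch below shows a defensive copy of such a map before handing it to a writer, echoing the "Copy map" commit in this compare. It is a hedged illustration only — the exact map type is an assumption, not the repository's declared one.

package main

import "fmt"

// Slot is a stand-in for the repository's types.Slot.
type Slot uint64

// copyTips returns an independent copy of a root->slot map so a caller can
// persist or iterate it without racing against later fork-choice updates.
func copyTips(tips map[[32]byte]Slot) map[[32]byte]Slot {
	out := make(map[[32]byte]Slot, len(tips))
	for root, slot := range tips {
		out[root] = slot
	}
	return out
}

func main() {
	tips := map[[32]byte]Slot{{'a'}: 1, {'b'}: 100}
	snapshot := copyTips(tips)
	delete(tips, [32]byte{'a'}) // later mutation does not affect the snapshot
	fmt.Println(len(snapshot), len(tips)) // 2 1
}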

View File

@@ -113,7 +113,7 @@ func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error
}
if !bytes.Equal(bFinalizedRoot, fRoot[:]) {
err := fmt.Errorf("block %#x is not a descendant of the current finalized block slot %d, %#x != %#x",
err := fmt.Errorf("block %#x is not a descendent of the current finalized block slot %d, %#x != %#x",
bytesutil.Trunc(root[:]), finalizedBlk.Slot(), bytesutil.Trunc(bFinalizedRoot),
bytesutil.Trunc(fRoot[:]))
tracing.AnnotateError(span, err)
@@ -367,9 +367,9 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk block.B
b := pendingNodes[i]
r := pendingRoots[i]
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
b.Slot(), r, bytesutil.ToBytes32(b.ParentRoot()),
b.Slot(), r, bytesutil.ToBytes32(b.ParentRoot()), bytesutil.ToBytes32(b.Body().Graffiti()),
jCheckpoint.Epoch,
fCheckpoint.Epoch, false /* optimistic status */); err != nil {
fCheckpoint.Epoch); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
}
}

View File

@@ -37,7 +37,7 @@ func TestStore_OnBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -100,7 +100,7 @@ func TestStore_OnBlock(t *testing.T) {
return b
}(),
s: st.Copy(),
wantErrString: "is not a descendant of the current finalized block",
wantErrString: "is not a descendent of the current finalized block",
},
{
name: "same slot as finalized block",
@@ -148,7 +148,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
st, keys := util.DeterministicGenesisState(t, 64)
@@ -223,7 +223,7 @@ func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
lastJustifiedBlk := util.NewBeaconBlock()
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
@@ -264,7 +264,7 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
b := util.NewBeaconBlock()
@@ -292,7 +292,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
b := util.NewBeaconBlock()
@@ -317,7 +317,7 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(protoarray.New(0, 0)),
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -357,7 +357,7 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
genesisStateRoot := [32]byte{}
@@ -382,7 +382,7 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
assert.Equal(t, 5, len(service.cfg.ForkChoiceStore.Nodes()), "Miss match nodes")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
@@ -398,7 +398,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
genesisStateRoot := [32]byte{}
@@ -423,7 +423,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
assert.Equal(t, 5, len(service.cfg.ForkChoiceStore.Nodes()), "Miss match nodes")
// Ensure all roots and their respective blocks exist.
wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]}
for i, rt := range wantedRoots {
@@ -442,7 +442,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
// Set finalized epoch to 1.
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
@@ -479,7 +479,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
require.NoError(t, err)
// There should be 2 nodes, block 65 and block 64.
assert.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
assert.Equal(t, 2, len(service.cfg.ForkChoiceStore.Nodes()), "Miss match nodes")
// Block with slot 63 should be in fork choice because it's less than finalized epoch 1.
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r63), "Didn't save node")
@@ -595,7 +595,7 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -668,7 +668,7 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), 0, 0, false)) // Saves blocks to fork choice store.
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), [32]byte{}, 0, 0)) // Saves blocks to fork choice store.
}
r, err := service.ancestor(context.Background(), r200[:], 150)
@@ -682,7 +682,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -713,7 +713,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock))) // Saves blocks to DB.
}
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, 0, 0, false))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, [32]byte{}, 0, 0))
r, err := service.ancestor(context.Background(), r200[:], 150)
require.NoError(t, err)
@@ -740,7 +740,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
fcs := protoarray.New(0, 0)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -789,7 +789,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
finalizedRoot: r1,
parentRoot: r,
},
wantedErr: "is not a descendant of the current finalized block slot",
wantedErr: "is not a descendent of the current finalized block slot",
},
{
name: "is descendant",
@@ -870,7 +870,7 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
func TestOnBlock_CanFinalize(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New(0, 0, [32]byte{'a'})
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
@@ -996,3 +996,49 @@ func TestRemoveBlockAttestationsInPool_NonCanonical(t *testing.T) {
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.Equal(t, 1, service.cfg.AttPool.AggregatedAttestationCount())
}
func TestService_saveSyncedTipsDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32)
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
b100 := util.NewBeaconBlock()
b100.Block.Slot = 100
b100.Block.ParentRoot = r1[:]
r100, err := b100.Block.HashTreeRoot()
require.NoError(t, err)
b200 := util.NewBeaconBlock()
b200.Block.Slot = 200
b200.Block.ParentRoot = r1[:]
r200, err := b200.Block.HashTreeRoot()
require.NoError(t, err)
for _, b := range []*ethpb.SignedBeaconBlock{b1, b100, b200} {
beaconBlock := util.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), [32]byte{}, 0, 0))
}
require.NoError(t, service.cfg.ForkChoiceStore.UpdateSyncedTipsWithValidRoot(ctx, r100))
require.NoError(t, service.saveSyncedTipsDB(ctx))
savedTips, err := service.cfg.BeaconDB.ValidatedTips(ctx)
require.NoError(t, err)
require.Equal(t, 2, len(savedTips))
require.Equal(t, types.Slot(1), savedTips[r1])
require.Equal(t, types.Slot(100), savedTips[r100])
// Delete invalid root
require.NoError(t, service.cfg.ForkChoiceStore.UpdateSyncedTipsWithInvalidRoot(ctx, r200))
require.NoError(t, service.saveSyncedTipsDB(ctx))
savedTips, err = service.cfg.BeaconDB.ValidatedTips(ctx)
require.NoError(t, err)
require.Equal(t, 1, len(savedTips))
require.Equal(t, types.Slot(100), savedTips[r100])
}

View File

@@ -111,7 +111,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, 1, 1, false))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
service.processAttestations(ctx)
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))

View File

@@ -86,9 +86,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
for i, b := range blocks {
blockCopy := b.Copy()
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy,
blkRoots[i], fCheckpoints[i],
jCheckpoints[i], false /* optimistic status */); err != nil {
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}

View File

@@ -127,7 +127,7 @@ func TestService_ReceiveBlock(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0)),
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
@@ -164,7 +164,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0)),
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
@@ -192,7 +192,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
// Verify fork choice has processed the block. (Genesis block and the new block)
assert.Equal(t, 2, s.cfg.ForkChoiceStore.NodeCount())
assert.Equal(t, 2, len(s.cfg.ForkChoiceStore.Nodes()))
}
func TestService_ReceiveBlockBatch(t *testing.T) {
@@ -240,9 +240,11 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
beaconDB := testDB.SetupDB(t)
genesisBlockRoot, err := genesis.HashTreeRoot(ctx)
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0)),
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB)),
}

View File

@@ -184,7 +184,7 @@ func (s *Service) startFromSavedState(saved state.BeaconState) error {
}
s.store = store.New(justified, finalized)
store := protoarray.New(justified.Epoch, finalized.Epoch)
store := protoarray.New(justified.Epoch, finalized.Epoch, bytesutil.ToBytes32(finalized.Root))
s.cfg.ForkChoiceStore = store
ss, err := slots.EpochStart(finalized.Epoch)
@@ -213,7 +213,7 @@ func (s *Service) startFromSavedState(saved state.BeaconState) error {
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: s.genesisTime,
GenesisValidatorsRoot: saved.GenesisValidatorsRoot(),
GenesisValidatorsRoot: saved.GenesisValidatorRoot(),
},
})
@@ -375,7 +375,7 @@ func (s *Service) onPowchainStart(ctx context.Context, genesisTime time.Time) {
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: genesisTime,
GenesisValidatorsRoot: initializedState.GenesisValidatorsRoot(),
GenesisValidatorsRoot: initializedState.GenesisValidatorRoot(),
},
})
}
@@ -445,8 +445,9 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
genesisBlk.Block().Slot(),
genesisBlkRoot,
params.BeaconConfig().ZeroHash,
[32]byte{},
genesisCheckpoint.Epoch,
genesisCheckpoint.Epoch, false); err != nil {
genesisCheckpoint.Epoch); err != nil {
log.Fatalf("Could not process genesis block for fork choice: %v", err)
}

View File

@@ -121,7 +121,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
WithAttestationPool(attestations.NewPool()),
WithP2PBroadcaster(&mockBroadcaster{}),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(protoarray.New(0, 0)),
WithForkChoiceStore(protoarray.New(0, 0, params.BeaconConfig().ZeroHash)),
WithAttestationService(attService),
WithStateGen(stateGen),
}
@@ -278,7 +278,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(finalizedSlot))
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
@@ -322,7 +322,7 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(finalizedSlot))
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
@@ -378,7 +378,7 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(headBlock.Block.Slot))
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, finalizedRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
@@ -450,7 +450,7 @@ func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{ForkChoiceStore: protoarray.New(0, 0), BeaconDB: beaconDB},
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
@@ -459,7 +459,7 @@ func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -517,7 +517,7 @@ func BenchmarkHasBlockForkChoiceStore(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
s := &Service{
cfg: &config{ForkChoiceStore: protoarray.New(0, 0), BeaconDB: beaconDB},
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
@@ -527,7 +527,7 @@ func BenchmarkHasBlockForkChoiceStore(b *testing.B) {
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := v1.InitializeFromProto(bs)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
b.ResetTimer()
for i := 0; i < b.N; i++ {

View File

@@ -20,6 +20,7 @@ go_library(
"//beacon-chain/db:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",

View File

@@ -20,6 +20,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
@@ -28,8 +29,6 @@ import (
"github.com/sirupsen/logrus"
)
var ErrNilState = errors.New("nil state")
// ChainService defines the mock interface for testing
type ChainService struct {
State state.BeaconState
@@ -160,7 +159,7 @@ func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
// ReceiveBlockInitialSync mocks ReceiveBlockInitialSync method in chain service.
func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block block.SignedBeaconBlock, _ [32]byte) error {
if s.State == nil {
return ErrNilState
s.State = &v1.BeaconState{}
}
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())
@@ -187,7 +186,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block block.
// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []block.SignedBeaconBlock, _ [][32]byte) error {
if s.State == nil {
return ErrNilState
s.State = &v1.BeaconState{}
}
for _, block := range blks {
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
@@ -216,7 +215,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []block.Signe
// ReceiveBlock mocks ReceiveBlock method in chain service.
func (s *ChainService) ReceiveBlock(ctx context.Context, block block.SignedBeaconBlock, _ [32]byte) error {
if s.State == nil {
return ErrNilState
s.State = &v1.BeaconState{}
}
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())
@@ -329,8 +328,8 @@ func (s *ChainService) GenesisTime() time.Time {
return s.Genesis
}
// GenesisValidatorsRoot mocks the same method in the chain service.
func (s *ChainService) GenesisValidatorsRoot() [32]byte {
// GenesisValidatorRoot mocks the same method in the chain service.
func (s *ChainService) GenesisValidatorRoot() [32]byte {
return s.ValidatorsRoot
}
@@ -370,8 +369,8 @@ func (s *ChainService) HasInitSyncBlock(rt [32]byte) bool {
return s.InitSyncBlockRoots[rt]
}
// HeadGenesisValidatorsRoot mocks HeadGenesisValidatorsRoot method in chain service.
func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte {
// HeadGenesisValidatorRoot mocks HeadGenesisValidatorRoot method in chain service.
func (_ *ChainService) HeadGenesisValidatorRoot() [32]byte {
return [32]byte{}
}
@@ -441,11 +440,6 @@ func (s *ChainService) HeadSyncContributionProofDomain(_ context.Context, _ type
}
// IsOptimistic mocks the same method in the chain service.
func (s *ChainService) IsOptimistic() (bool, error) {
return false, nil
}
// IsOptimisticForRoot mocks the same method in the chain service.
func (s *ChainService) IsOptimisticForRoot(_ [32]byte) (bool, error) {
func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
return false, nil
}

View File

@@ -30,6 +30,7 @@ go_library(
],
deps = [
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//cache/lru:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",

View File

@@ -6,6 +6,7 @@ import (
lru "github.com/hashicorp/golang-lru"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
"github.com/prysmaticlabs/prysm/runtime/version"
)
@@ -32,7 +33,8 @@ func (c *SyncCommitteeHeadStateCache) Put(slot types.Slot, st state.BeaconState)
return ErrNilValueProvided
}
if st.Version() == version.Phase0 {
_, ok := st.(*v1.BeaconState)
if ok {
return ErrIncorrectType
}

View File

@@ -102,7 +102,7 @@ func FilterSyncCommitteeVotes(s state.BeaconStateAltair, sync *ethpb.SyncAggrega
// VerifySyncCommitteeSig verifies sync committee signature `syncSig` is valid with respect to public keys `syncKeys`.
func VerifySyncCommitteeSig(s state.BeaconStateAltair, syncKeys []bls.PublicKey, syncSig []byte) error {
ps := slots.PrevSlot(s.Slot())
d, err := signing.Domain(s.Fork(), slots.ToEpoch(ps), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())
d, err := signing.Domain(s.Fork(), slots.ToEpoch(ps), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorRoot())
if err != nil {
return err
}

View File

@@ -8,9 +8,7 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -20,7 +18,7 @@ import (
)
func TestSyncCommitteeIndices_CanGet(t *testing.T) {
getState := func(t *testing.T, count uint64) state.BeaconStateAltair {
getState := func(t *testing.T, count uint64) *stateAltair.BeaconState {
validators := make([]*ethpb.Validator, count)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
@@ -37,7 +35,7 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
}
type args struct {
state state.BeaconStateAltair
state *stateAltair.BeaconState
epoch types.Epoch
}
tests := []struct {
@@ -47,9 +45,9 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
errString string
}{
{
name: "nil inner state",
name: "nil state",
args: args{
state: &v2.BeaconState{},
state: nil,
},
wantErr: true,
errString: "nil inner state",
@@ -95,7 +93,7 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
helpers.ClearCache()
getState := func(t *testing.T, count uint64) state.BeaconStateAltair {
getState := func(t *testing.T, count uint64) *stateAltair.BeaconState {
validators := make([]*ethpb.Validator, count)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
@@ -129,7 +127,7 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
}
func TestSyncCommittee_CanGet(t *testing.T) {
getState := func(t *testing.T, count uint64) state.BeaconStateAltair {
getState := func(t *testing.T, count uint64) *stateAltair.BeaconState {
validators := make([]*ethpb.Validator, count)
for i := 0; i < len(validators); i++ {
blsKey, err := bls.RandKey()
@@ -149,7 +147,7 @@ func TestSyncCommittee_CanGet(t *testing.T) {
}
type args struct {
state state.BeaconStateAltair
state *stateAltair.BeaconState
epoch types.Epoch
}
tests := []struct {
@@ -159,9 +157,9 @@ func TestSyncCommittee_CanGet(t *testing.T) {
errString string
}{
{
name: "nil inner state",
name: "nil state",
args: args{
state: &v2.BeaconState{},
state: nil,
},
wantErr: true,
errString: "nil inner state",
@@ -384,7 +382,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
}
}
func getState(t *testing.T, count uint64) state.BeaconStateAltair {
func getState(t *testing.T, count uint64) *stateAltair.BeaconState {
validators := make([]*ethpb.Validator, count)
for i := 0; i < len(validators); i++ {
blsKey, err := bls.RandKey()

View File

@@ -68,7 +68,7 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon
numValidators := state.NumValidators()
s := &ethpb.BeaconStateAltair{
GenesisTime: state.GenesisTime(),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
GenesisValidatorsRoot: state.GenesisValidatorRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
@@ -137,7 +137,7 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon
// for index in get_attesting_indices(state, data, attestation.aggregation_bits):
// for flag_index in participation_flag_indices:
// epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
func TranslateParticipation(ctx context.Context, state state.BeaconStateAltair, atts []*ethpb.PendingAttestation) (state.BeaconStateAltair, error) {
func TranslateParticipation(ctx context.Context, state *statealtair.BeaconState, atts []*ethpb.PendingAttestation) (*statealtair.BeaconState, error) {
epochParticipation, err := state.PreviousEpochParticipation()
if err != nil {
return nil, err
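The spec pseudocode quoted above reduces to OR-ing one bit per flag into each attester's participation byte. A standalone sketch of that arithmetic (plain Go for illustration; this is not Prysm's helper):

// addFlag ORs the bit for flagIndex into a validator's participation byte,
// mirroring add_flag(epoch_participation[index], flag_index) from the spec.
func addFlag(participation byte, flagIndex uint8) byte {
    return participation | (1 << flagIndex)
}

Marking, say, flag index 1 for validator i is then epochParticipation[i] = addFlag(epochParticipation[i], 1).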

View File

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
@@ -19,10 +20,12 @@ import (
func TestTranslateParticipation(t *testing.T) {
ctx := context.Background()
s, _ := util.DeterministicGenesisStateAltair(t, 64)
require.NoError(t, s.SetSlot(s.Slot()+params.BeaconConfig().MinAttestationInclusionDelay))
st, ok := s.(*stateAltair.BeaconState)
require.Equal(t, true, ok)
require.NoError(t, st.SetSlot(st.Slot()+params.BeaconConfig().MinAttestationInclusionDelay))
var err error
newState, err := altair.TranslateParticipation(ctx, s, nil)
newState, err := altair.TranslateParticipation(ctx, st, nil)
require.NoError(t, err)
participation, err := newState.PreviousEpochParticipation()
require.NoError(t, err)
@@ -53,7 +56,7 @@ func TestTranslateParticipation(t *testing.T) {
require.NoError(t, err)
require.DeepNotSSZEqual(t, make([]byte, 64), participation)
committee, err := helpers.BeaconCommitteeFromState(ctx, s, pendingAtts[0].Data.Slot, pendingAtts[0].Data.CommitteeIndex)
committee, err := helpers.BeaconCommitteeFromState(ctx, st, pendingAtts[0].Data.Slot, pendingAtts[0].Data.CommitteeIndex)
require.NoError(t, err)
indices, err := attestation.AttestingIndices(pendingAtts[0].AggregationBits, committee)
require.NoError(t, err)
@@ -77,7 +80,7 @@ func TestUpgradeToAltair(t *testing.T) {
require.NoError(t, err)
require.Equal(t, preForkState.GenesisTime(), aState.GenesisTime())
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), aState.GenesisValidatorsRoot())
require.DeepSSZEqual(t, preForkState.GenesisValidatorRoot(), aState.GenesisValidatorRoot())
require.Equal(t, preForkState.Slot(), aState.Slot())
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), aState.LatestBlockHeader())
require.DeepSSZEqual(t, preForkState.BlockRoots(), aState.BlockRoots())

View File

@@ -98,7 +98,6 @@ go_test(
"//proto/prysm/v1alpha1/attestation/aggregation:go_default_library",
"//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",

View File

@@ -208,7 +208,7 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBea
beaconState.Fork(),
indexedAtt.Data.Target.Epoch,
params.BeaconConfig().DomainBeaconAttester,
beaconState.GenesisValidatorsRoot(),
beaconState.GenesisValidatorRoot(),
)
if err != nil {
return err

View File

@@ -337,9 +337,7 @@ func TestValidateIndexedAttestation_AboveMaxLength(t *testing.T) {
}
want := "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE"
st, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
err = blocks.VerifyIndexedAttestation(context.Background(), st, indexedAtt1)
err := blocks.VerifyIndexedAttestation(context.Background(), &v1.BeaconState{}, indexedAtt1)
assert.ErrorContains(t, want, err)
}
@@ -417,7 +415,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
Slot: 1,
},
})
prevDomain, err := signing.Domain(st.Fork(), st.Fork().Epoch-1, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
prevDomain, err := signing.Domain(st.Fork(), st.Fork().Epoch-1, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
require.NoError(t, err)
root, err := signing.ComputeSigningRoot(att1.Data, prevDomain)
require.NoError(t, err)
@@ -437,7 +435,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
CommitteeIndex: 1,
},
})
currDomain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
currDomain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
require.NoError(t, err)
root, err = signing.ComputeSigningRoot(att2.Data, currDomain)
require.NoError(t, err)
@@ -476,7 +474,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
Slot: 1,
},
})
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
require.NoError(t, err)
root, err := signing.ComputeSigningRoot(att1.Data, domain)
require.NoError(t, err)
@@ -540,7 +538,7 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
Slot: 1,
},
})
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
require.NoError(t, err)
root, err := signing.ComputeSigningRoot(att1.Data, domain)
require.NoError(t, err)

View File

@@ -108,7 +108,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
},
AttestingIndices: []uint64{0, 1},
})
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
@@ -177,7 +177,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusAltair(t *testing.T) {
},
AttestingIndices: []uint64{0, 1},
})
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
@@ -246,7 +246,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
},
AttestingIndices: []uint64{0, 1},
})
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")

View File

@@ -74,8 +74,7 @@ func TestFuzzverifyDepositDataSigningRoot_10000(_ *testing.T) {
func TestFuzzProcessEth1DataInBlock_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
e := &ethpb.Eth1Data{}
state, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
state := &v1.BeaconState{}
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(e)

View File

@@ -44,7 +44,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
AttestingIndices: setA,
Signature: make([]byte, 96),
}
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
require.NoError(t, err, "Could not get signing root of beacon block header")

View File

@@ -11,7 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
@@ -176,10 +175,12 @@ func TestProcessEth1Data_SetsCorrectly(t *testing.T) {
}
period := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod)))
var ok bool
for i := uint64(0); i < period; i++ {
processedState, err := blocks.ProcessEth1DataInBlock(context.Background(), beaconState, b.Block.Body.Eth1Data)
require.NoError(t, err)
require.Equal(t, true, processedState.Version() == version.Phase0)
beaconState, ok = processedState.(*v1.BeaconState)
require.Equal(t, true, ok)
}
newETH1DataVotes := beaconState.Eth1DataVotes()
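For scale: with mainnet parameters (32 slots per epoch, 64 epochs per eth1 voting period) the period computed above is 32 * 64 = 2048 slots, so the loop submits one Eth1Data vote per slot for an entire voting period before the accumulated Eth1DataVotes are inspected.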

View File

@@ -57,7 +57,7 @@ func ProcessVoluntaryExits(
if err != nil {
return nil, err
}
if err := VerifyExitAndSignature(val, beaconState.Slot(), beaconState.Fork(), exit, beaconState.GenesisValidatorsRoot()); err != nil {
if err := VerifyExitAndSignature(val, beaconState.Slot(), beaconState.Fork(), exit, beaconState.GenesisValidatorRoot()); err != nil {
return nil, errors.Wrapf(err, "could not verify exit %d", idx)
}
beaconState, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex)

View File

@@ -2,7 +2,6 @@ package blocks
import (
"bytes"
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -17,13 +16,13 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"
)
// MergeTransitionComplete returns true if the transition to Bellatrix has completed.
// MergeComplete returns true if the transition to Bellatrix has completed.
// Meaning the payload header in beacon state is not `ExecutionPayloadHeader()` (i.e. not empty).
//
// Spec code:
// def is_merge_transition_complete(state: BeaconState) -> bool:
// def is_merge_complete(state: BeaconState) -> bool:
// return state.latest_execution_payload_header != ExecutionPayloadHeader()
func MergeTransitionComplete(st state.BeaconState) (bool, error) {
func MergeComplete(st state.BeaconState) (bool, error) {
h, err := st.LatestExecutionPayloadHeader()
if err != nil {
return false, err
@@ -32,15 +31,15 @@ func MergeTransitionComplete(st state.BeaconState) (bool, error) {
return !isEmptyHeader(h), nil
}
// MergeTransitionBlock returns true if the input block is the terminal merge block.
// IsMergeBlock returns true if the input block is the terminal merge block.
// Meaning the header in beacon state is `ExecutionPayloadHeader()` (i.e. empty).
// And the input block has a non-empty header.
//
// Spec code:
// def is_merge_transition_block(state: BeaconState, body: BeaconBlockBody) -> bool:
// return not is_merge_transition_complete(state) and body.execution_payload != ExecutionPayload()
func MergeTransitionBlock(st state.BeaconState, body block.BeaconBlockBody) (bool, error) {
mergeComplete, err := MergeTransitionComplete(st)
// def is_merge_block(state: BeaconState, body: BeaconBlockBody) -> bool:
// return not is_merge_complete(state) and body.execution_payload != ExecutionPayload()
func IsMergeBlock(st state.BeaconState, blk block.BeaconBlockBody) (bool, error) {
mergeComplete, err := MergeComplete(st)
if err != nil {
return false, err
}
@@ -48,20 +47,8 @@ func MergeTransitionBlock(st state.BeaconState, body block.BeaconBlockBody) (boo
return false, err
}
return ExecutionBlock(body)
}
// ExecutionBlock returns whether the block has a non-empty ExecutionPayload.
//
// Spec code:
// def is_execution_block(block: BeaconBlock) -> bool:
// return block.body.execution_payload != ExecutionPayload()
func ExecutionBlock(body block.BeaconBlockBody) (bool, error) {
payload, err := body.ExecutionPayload()
payload, err := blk.ExecutionPayload()
if err != nil {
if strings.HasPrefix(err.Error(), "ExecutionPayload is not supported in") {
return false, nil
}
return false, err
}
return !isEmptyPayload(payload), nil
@@ -73,15 +60,15 @@ func ExecutionBlock(body block.BeaconBlockBody) (bool, error) {
// Spec code:
// def is_execution_enabled(state: BeaconState, body: BeaconBlockBody) -> bool:
// return is_merge_block(state, body) or is_merge_complete(state)
func ExecutionEnabled(st state.BeaconState, body block.BeaconBlockBody) (bool, error) {
mergeBlock, err := MergeTransitionBlock(st, body)
func ExecutionEnabled(st state.BeaconState, blk block.BeaconBlockBody) (bool, error) {
mergeBlock, err := IsMergeBlock(st, blk)
if err != nil {
return false, err
}
if mergeBlock {
return true, nil
}
return MergeTransitionComplete(st)
return MergeComplete(st)
}
// ValidatePayloadWhenMergeCompletes validates if payload is valid versus input beacon state.
@@ -92,7 +79,7 @@ func ExecutionEnabled(st state.BeaconState, body block.BeaconBlockBody) (bool, e
// if is_merge_complete(state):
// assert payload.parent_hash == state.latest_execution_payload_header.block_hash
func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload *enginev1.ExecutionPayload) error {
complete, err := MergeTransitionComplete(st)
complete, err := MergeComplete(st)
if err != nil {
return err
}
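For orientation, a caller-side sketch of how these helpers compose (identifiers as in this hunk; surrounding block processing and error handling are elided, so treat this as illustrative rather than Prysm's actual call site):

// Gate payload validation on execution being enabled for this state/body pair:
// true either when the body carries the first non-empty payload (the merge
// transition block) or when the merge has already completed.
enabled, err := blocks.ExecutionEnabled(st, body)
if err != nil {
    return err
}
if !enabled {
    return nil // pre-merge block: no payload to validate
}
payload, err := body.ExecutionPayload()
if err != nil {
    return err
}
if err := blocks.ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
    return err
}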

View File

@@ -160,7 +160,7 @@ func Test_MergeComplete(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.payload))
got, err := blocks.MergeTransitionComplete(st)
got, err := blocks.MergeComplete(st)
require.NoError(t, err)
if got != tt.want {
t.Errorf("mergeComplete() got = %v, want %v", got, tt.want)
@@ -341,49 +341,15 @@ func Test_MergeBlock(t *testing.T) {
blk.Block.Body.ExecutionPayload = tt.payload
body, err := wrapper.WrappedBellatrixBeaconBlockBody(blk.Block.Body)
require.NoError(t, err)
got, err := blocks.MergeTransitionBlock(st, body)
got, err := blocks.IsMergeBlock(st, body)
require.NoError(t, err)
if got != tt.want {
t.Errorf("MergeTransitionBlock() got = %v, want %v", got, tt.want)
t.Errorf("IsMergeBlock() got = %v, want %v", got, tt.want)
}
})
}
}
func Test_IsExecutionBlock(t *testing.T) {
tests := []struct {
name string
payload *enginev1.ExecutionPayload
want bool
}{
{
name: "empty payload",
payload: emptyPayload(),
want: false,
},
{
name: "non-empty payload",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload = tt.payload
wrappedBlock, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
require.NoError(t, err)
got, err := blocks.ExecutionBlock(wrappedBlock.Body())
require.NoError(t, err)
require.Equal(t, tt.want, got)
})
}
}
func Test_ExecutionEnabled(t *testing.T) {
tests := []struct {
name string
@@ -651,7 +617,7 @@ func BenchmarkBellatrixComplete(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := blocks.MergeTransitionComplete(st)
_, err := blocks.MergeComplete(st)
require.NoError(b, err)
}
}

View File

@@ -378,7 +378,7 @@ func TestVerifyProposerSlashing(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
sk := sks[tt.args.slashing.Header_1.Header.ProposerIndex]
d, err := signing.Domain(tt.args.beaconState.Fork(), slots.ToEpoch(tt.args.slashing.Header_1.Header.Slot), params.BeaconConfig().DomainBeaconProposer, tt.args.beaconState.GenesisValidatorsRoot())
d, err := signing.Domain(tt.args.beaconState.Fork(), slots.ToEpoch(tt.args.slashing.Header_1.Header.Slot), params.BeaconConfig().DomainBeaconProposer, tt.args.beaconState.GenesisValidatorRoot())
require.NoError(t, err)
if tt.args.slashing.Header_1.Signature == nil {
sr, err := signing.ComputeSigningRoot(tt.args.slashing.Header_1.Header, d)

View File

@@ -26,7 +26,7 @@ func TestProcessRandao_IncorrectProposerFailsVerification(t *testing.T) {
epoch := types.Epoch(0)
buf := make([]byte, 32)
binary.LittleEndian.PutUint64(buf, uint64(epoch))
domain, err := signing.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorsRoot())
domain, err := signing.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorRoot())
require.NoError(t, err)
root, err := (&ethpb.SigningData{ObjectRoot: buf, Domain: domain}).HashTreeRoot()
require.NoError(t, err)

View File

@@ -68,7 +68,7 @@ func VerifyBlockSignature(beaconState state.ReadOnlyBeaconState,
sig []byte,
rootFunc func() ([32]byte, error)) error {
currentEpoch := slots.ToEpoch(beaconState.Slot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
if err != nil {
return err
}
@@ -83,7 +83,7 @@ func VerifyBlockSignature(beaconState state.ReadOnlyBeaconState,
// VerifyBlockHeaderSignature verifies the proposer signature of a beacon block header.
func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
currentEpoch := slots.ToEpoch(beaconState.Slot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
if err != nil {
return err
}
@@ -104,7 +104,7 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
if err != nil {
return err
}
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
if err != nil {
return err
}
@@ -122,7 +122,7 @@ func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
sig []byte,
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
currentEpoch := slots.ToEpoch(beaconState.Slot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
if err != nil {
return nil, err
}
@@ -164,7 +164,7 @@ func randaoSigningData(ctx context.Context, beaconState state.ReadOnlyBeaconStat
buf := make([]byte, 32)
binary.LittleEndian.PutUint64(buf, uint64(currentEpoch))
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorsRoot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorRoot())
if err != nil {
return nil, nil, nil, err
}
@@ -231,7 +231,7 @@ func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBe
}
fork := beaconState.Fork()
gvr := beaconState.GenesisValidatorsRoot()
gvr := beaconState.GenesisValidatorRoot()
dt := params.BeaconConfig().DomainBeaconAttester
// Split attestations by fork. Note: the signature domain will differ based on the fork.
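As a conceptual sketch of what the proposer-signature helpers above end up doing (the domain and SigningData steps mirror this hunk; the bls helper names SignatureFromBytes, PublicKeyFromBytes and Verify are assumptions about the bls package, the proposerSig/proposerPubKey names are placeholders, and the genesis-validators-root accessor appears under both spellings on the two sides of this diff):

currentEpoch := slots.ToEpoch(beaconState.Slot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
    return err
}
blockRoot, err := rootFunc() // hash tree root of the block being verified
if err != nil {
    return err
}
sr, err := (&ethpb.SigningData{ObjectRoot: blockRoot[:], Domain: domain}).HashTreeRoot()
if err != nil {
    return err
}
sig, err := bls.SignatureFromBytes(proposerSig) // proposerSig: 96-byte signature (assumed name)
if err != nil {
    return err
}
pub, err := bls.PublicKeyFromBytes(proposerPubKey) // proposerPubKey: 48-byte BLS key (assumed name)
if err != nil {
    return err
}
if !sig.Verify(pub, sr[:]) {
    return signing.ErrSigFailedToVerify
}
return nil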

View File

@@ -41,7 +41,7 @@ func TestVerifyBlockHeaderSignature(t *testing.T) {
beaconState.Fork(),
0,
params.BeaconConfig().DomainBeaconProposer,
beaconState.GenesisValidatorsRoot(),
beaconState.GenesisValidatorRoot(),
)
require.NoError(t, err)
htr, err := blockHeader.Header.HashTreeRoot()
@@ -77,7 +77,7 @@ func TestVerifyBlockSignatureUsingCurrentFork(t *testing.T) {
CurrentVersion: params.BeaconConfig().AltairForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
}
domain, err := signing.Domain(fData, 100, params.BeaconConfig().DomainBeaconProposer, bState.GenesisValidatorsRoot())
domain, err := signing.Domain(fData, 100, params.BeaconConfig().DomainBeaconProposer, bState.GenesisValidatorRoot())
assert.NoError(t, err)
rt, err := signing.ComputeSigningRoot(altairBlk.Block, domain)
assert.NoError(t, err)

View File

@@ -16,7 +16,6 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/math"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
@@ -48,7 +47,8 @@ func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) {
processedState, err := ProcessRewardsAndPenaltiesPrecompute(beaconState, bp, vp, AttestationsDelta, ProposersDelta)
require.NoError(t, err)
require.Equal(t, true, processedState.Version() == version.Phase0)
beaconState, ok := processedState.(*v1.BeaconState)
require.Equal(t, true, ok)
// Indices that voted everything except for head, lost a bit money
wanted := uint64(31999810265)

View File

@@ -38,7 +38,7 @@ func UpgradeToBellatrix(ctx context.Context, state state.BeaconState) (state.Bea
s := &ethpb.BeaconStateBellatrix{
GenesisTime: state.GenesisTime(),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
GenesisValidatorsRoot: state.GenesisValidatorRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,

View File

@@ -19,7 +19,7 @@ func TestUpgradeToBellatrix(t *testing.T) {
require.NoError(t, err)
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), mSt.GenesisValidatorsRoot())
require.DeepSSZEqual(t, preForkState.GenesisValidatorRoot(), mSt.GenesisValidatorRoot())
require.Equal(t, preForkState.Slot(), mSt.Slot())
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())

View File

@@ -23,7 +23,7 @@ var ErrSigFailedToVerify = errors.New("signature did not verify")
// ComputeDomainAndSign computes the domain and signing root and sign it using the passed in private key.
func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch types.Epoch, obj fssz.HashRoot, domain [4]byte, key bls.SecretKey) ([]byte, error) {
d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorsRoot())
d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorRoot())
if err != nil {
return nil, err
}
@@ -69,7 +69,7 @@ func ComputeDomainVerifySigningRoot(st state.ReadOnlyBeaconState, index types.Va
if err != nil {
return err
}
d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorsRoot())
d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorRoot())
if err != nil {
return err
}
@@ -219,7 +219,7 @@ func computeForkDataRoot(version, root []byte) ([32]byte, error) {
return r, nil
}
// ComputeForkDigest returns the fork for the current version and genesis validators root
// ComputeForkDigest returns the fork for the current version and genesis validator root
//
// Spec pseudocode definition:
// def compute_fork_digest(current_version: Version, genesis_validators_root: Root) -> ForkDigest:
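The digest named above is simply the first four bytes of that fork data root. A sketch of the derivation, following the spec's compute_fork_digest (computeForkDataRoot is the unexported helper shown in this hunk; the parameter names are illustrative):

r, err := computeForkDataRoot(currentVersion, genesisValidatorsRoot)
if err != nil {
    return [4]byte{}, err
}
var digest [4]byte
copy(digest[:], r[:])
return digest, nil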

View File

@@ -10,7 +10,6 @@ import (
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
@@ -34,7 +33,8 @@ func TestSkipSlotCache_OK(t *testing.T) {
require.NoError(t, err)
executedState, err := transition.ExecuteStateTransition(context.Background(), originalState, wrapper.WrappedPhase0SignedBeaconBlock(blk))
require.NoError(t, err, "Could not run state transition")
require.Equal(t, true, executedState.Version() == version.Phase0)
originalState, ok := executedState.(*v1.BeaconState)
require.Equal(t, true, ok)
bState, err = transition.ExecuteStateTransition(context.Background(), bState, wrapper.WrappedPhase0SignedBeaconBlock(blk))
require.NoError(t, err, "Could not process state transition")
@@ -59,7 +59,8 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
require.NoError(t, err)
executedState, err := transition.ExecuteStateTransition(context.Background(), originalState, wrapper.WrappedPhase0SignedBeaconBlock(blk))
require.NoError(t, err, "Could not run state transition")
require.Equal(t, true, executedState.Version() == version.Phase0)
originalState, ok := executedState.(*v1.BeaconState)
require.Equal(t, true, ok)
// Create two shallow but different forks
var s1, s0 state.BeaconState

View File

@@ -7,7 +7,6 @@ import (
fuzz "github.com/google/gofuzz"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestGenesisBeaconState_1000(t *testing.T) {
@@ -38,8 +37,7 @@ func TestOptimizedGenesisBeaconState_1000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
var genesisTime uint64
preState, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
preState := &v1.BeaconState{}
eth1Data := &ethpb.Eth1Data{}
for i := 0; i < 1000; i++ {
fuzzer.Fuzz(&genesisTime)

View File

@@ -10,15 +10,13 @@ import (
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestFuzzExecuteStateTransition_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
state := &v1.BeaconState{}
sb := &ethpb.SignedBeaconBlock{}
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
@@ -36,8 +34,7 @@ func TestFuzzCalculateStateRoot_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
state := &v1.BeaconState{}
sb := &ethpb.SignedBeaconBlock{}
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
@@ -55,8 +52,7 @@ func TestFuzzProcessSlot_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
state := &v1.BeaconState{}
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
for i := 0; i < 1000; i++ {
@@ -72,8 +68,7 @@ func TestFuzzProcessSlots_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
state := &v1.BeaconState{}
slot := types.Slot(0)
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
@@ -91,8 +86,7 @@ func TestFuzzprocessOperationsNoVerify_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
state := &v1.BeaconState{}
bb := &ethpb.SignedBeaconBlock{}
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
@@ -106,11 +100,10 @@ func TestFuzzprocessOperationsNoVerify_1000(t *testing.T) {
}
}
func TestFuzzverifyOperationLengths_10000(t *testing.T) {
func TestFuzzverifyOperationLengths_10000(_ *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
state, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
state := &v1.BeaconState{}
bb := &ethpb.SignedBeaconBlock{}
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
@@ -122,11 +115,10 @@ func TestFuzzverifyOperationLengths_10000(t *testing.T) {
}
}
func TestFuzzCanProcessEpoch_10000(t *testing.T) {
func TestFuzzCanProcessEpoch_10000(_ *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
state, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
state := &v1.BeaconState{}
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
for i := 0; i < 10000; i++ {
@@ -139,8 +131,7 @@ func TestFuzzProcessEpochPrecompute_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
state := &v1.BeaconState{}
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)
for i := 0; i < 1000; i++ {
@@ -156,8 +147,7 @@ func TestFuzzProcessBlockForStateRoot_1000(t *testing.T) {
SkipSlotCache.Disable()
defer SkipSlotCache.Enable()
ctx := context.Background()
state, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
state := &v1.BeaconState{}
sb := &ethpb.SignedBeaconBlock{}
fuzzer := fuzz.NewWithSeed(0)
fuzzer.NilChance(0.1)

View File

@@ -253,7 +253,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState,
},
AttestingIndices: []uint64{0, 1},
})
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
require.NoError(t, err)
hashTreeRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
require.NoError(t, err)
@@ -394,9 +394,7 @@ func TestProcessBlock_OverMaxProposerSlashings(t *testing.T) {
}
want := fmt.Sprintf("number of proposer slashings (%d) in block body exceeds allowed threshold of %d",
len(b.Block.Body.ProposerSlashings), params.BeaconConfig().MaxProposerSlashings)
s, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
_, err := transition.VerifyOperationLengths(context.Background(), &v1.BeaconState{}, wrapper.WrappedPhase0SignedBeaconBlock(b))
assert.ErrorContains(t, want, err)
}
@@ -411,9 +409,7 @@ func TestProcessBlock_OverMaxAttesterSlashings(t *testing.T) {
}
want := fmt.Sprintf("number of attester slashings (%d) in block body exceeds allowed threshold of %d",
len(b.Block.Body.AttesterSlashings), params.BeaconConfig().MaxAttesterSlashings)
s, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
_, err := transition.VerifyOperationLengths(context.Background(), &v1.BeaconState{}, wrapper.WrappedPhase0SignedBeaconBlock(b))
assert.ErrorContains(t, want, err)
}
@@ -427,9 +423,7 @@ func TestProcessBlock_OverMaxAttestations(t *testing.T) {
}
want := fmt.Sprintf("number of attestations (%d) in block body exceeds allowed threshold of %d",
len(b.Block.Body.Attestations), params.BeaconConfig().MaxAttestations)
s, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
_, err := transition.VerifyOperationLengths(context.Background(), &v1.BeaconState{}, wrapper.WrappedPhase0SignedBeaconBlock(b))
assert.ErrorContains(t, want, err)
}
@@ -444,9 +438,7 @@ func TestProcessBlock_OverMaxVoluntaryExits(t *testing.T) {
}
want := fmt.Sprintf("number of voluntary exits (%d) in block body exceeds allowed threshold of %d",
len(b.Block.Body.VoluntaryExits), maxExits)
s, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
_, err := transition.VerifyOperationLengths(context.Background(), &v1.BeaconState{}, wrapper.WrappedPhase0SignedBeaconBlock(b))
assert.ErrorContains(t, want, err)
}

View File

@@ -31,7 +31,6 @@ go_test(
"//beacon-chain/state/v1:go_default_library",
"//config/params:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",

View File

@@ -10,7 +10,6 @@ import (
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
@@ -130,7 +129,8 @@ func TestSlashValidator_OK(t *testing.T) {
cfg := params.BeaconConfig()
slashedState, err := SlashValidator(context.Background(), state, slashedIdx, cfg.MinSlashingPenaltyQuotient, cfg.ProposerRewardQuotient)
require.NoError(t, err, "Could not slash validator")
require.Equal(t, true, slashedState.Version() == version.Phase0)
state, ok := slashedState.(*v1.BeaconState)
require.Equal(t, true, ok)
v, err := state.ValidatorAtIndex(slashedIdx)
require.NoError(t, err)

View File

@@ -59,7 +59,6 @@ type NoHeadAccessDatabase interface {
ReadOnlyDatabase
// Block related methods.
DeleteBlock(ctx context.Context, root [32]byte) error
SaveBlock(ctx context.Context, block block.SignedBeaconBlock) error
SaveBlocks(ctx context.Context, blocks []block.SignedBeaconBlock) error
SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error

View File

@@ -204,34 +204,6 @@ func (s *Store) BlockRootsBySlot(ctx context.Context, slot types.Slot) (bool, []
return len(blockRoots) > 0, blockRoots, nil
}
// DeleteBlock from the db
// This deletes the root entry from all buckets in the blocks DB
// If the block is finalized this function returns an error
func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlock")
defer span.End()
if err := s.DeleteState(ctx, root); err != nil {
return errDeleteFinalized
}
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
if b := bkt.Get(root[:]); b != nil {
return errDeleteFinalized
}
if err := tx.Bucket(blocksBucket).Delete(root[:]); err != nil {
return err
}
if err := tx.Bucket(blockParentRootIndicesBucket).Delete(root[:]); err != nil {
return err
}
s.blockCache.Del(string(root[:]))
return nil
})
}
// SaveBlock to the db.
func (s *Store) SaveBlock(ctx context.Context, signed block.SignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlock")
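A small caller-side sketch of the guarantee documented for DeleteBlock above: deleting a finalized root (or one whose state is protected) is refused with errDeleteFinalized. blockRoot is a placeholder, and the fmt import and log field name are assumptions for illustration.

if err := s.DeleteBlock(ctx, blockRoot); err != nil {
    if errors.Is(err, errDeleteFinalized) {
        // Finalized data must not be deleted; surface the refusal instead.
        log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("Refusing to delete finalized block")
    }
    return err
}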

View File

@@ -8,7 +8,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -164,44 +163,6 @@ func TestStore_BlocksHandleInvalidEndSlot(t *testing.T) {
}
}
func TestStore_DeleteBlock(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
db := setupDB(t)
ctx := context.Background()
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
blks := makeBlocks(t, 0, slotsPerEpoch*4, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 1,
Root: root[:],
}
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
root2, err := blks[4*slotsPerEpoch-2].Block().HashTreeRoot()
require.NoError(t, err)
b, err := db.Block(ctx, root2)
require.NoError(t, err)
require.NotNil(t, b)
require.NoError(t, db.DeleteBlock(ctx, root2))
st, err = db.State(ctx, root2)
require.NoError(t, err)
require.Equal(t, st, nil)
b, err = db.Block(ctx, root2)
require.NoError(t, err)
require.Equal(t, b, nil)
require.ErrorIs(t, db.DeleteBlock(ctx, root), errDeleteFinalized)
}
func TestStore_GenesisBlock(t *testing.T) {
db := setupDB(t)
ctx := context.Background()

View File

@@ -2,9 +2,6 @@ package kv
import "errors"
// errDeleteFinalized is raised when we attempt to delete a finalized block/state
var errDeleteFinalized = errors.New("cannot delete finalized block or state")
// ErrNotFound can be used directly, or as a wrapped DBError, whenever a db method needs to
// indicate that a value couldn't be found.
var ErrNotFound = errors.New("not found in db")

View File

@@ -105,8 +105,6 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
}
}
datafile := KVStoreDatafilePath(dirPath)
start := time.Now()
log.Infof("Opening Bolt DB at %s", datafile)
boltDB, err := bolt.Open(
datafile,
params.BeaconIoConfig().ReadWritePermissions,
@@ -116,40 +114,29 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
},
)
if err != nil {
log.WithField("elapsed", time.Since(start)).Error("Failed to open Bolt DB")
if errors.Is(err, bolt.ErrTimeout) {
return nil, errors.New("cannot obtain database lock, database may be in use by another process")
}
return nil, err
}
log.WithField("elapsed", time.Since(start)).Info("Opened Bolt DB")
boltDB.AllocSize = boltAllocSize
start = time.Now()
log.Infof("Creating block cache...")
blockCache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: 1000, // number of keys to track frequency of (1000).
MaxCost: BlockCacheSize, // maximum cost of cache (1000 Blocks).
BufferItems: 64, // number of keys per Get buffer.
})
if err != nil {
log.WithField("elapsed", time.Since(start)).Error("Failed to create block cache")
return nil, err
}
log.WithField("elapsed", time.Since(start)).Info("Created block cache")
start = time.Now()
log.Infof("Creating validator cache...")
validatorCache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: NumOfValidatorEntries, // number of entries in cache (2 Million).
MaxCost: ValidatorEntryMaxCost, // maximum size of the cache (64Mb)
BufferItems: 64, // number of keys per Get buffer.
})
if err != nil {
log.WithField("elapsed", time.Since(start)).Error("Failed to to create validator cache")
return nil, err
}
log.WithField("elapsed", time.Since(start)).Info("Created validator cache")
kv := &Store{
db: boltDB,
@@ -159,8 +146,7 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
stateSummaryCache: newStateSummaryCache(),
ctx: ctx,
}
start = time.Now()
log.Infof("Updating DB and creating buckets...")
if err := kv.db.Update(func(tx *bolt.Tx) error {
return createBuckets(
tx,
@@ -193,10 +179,8 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
migrationsBucket,
)
}); err != nil {
log.WithField("elapsed", time.Since(start)).Error("Failed to update db and create buckets")
return nil, err
}
log.WithField("elapsed", time.Since(start)).Info("Updated db and created buckets")
err = prometheus.Register(createBoltCollector(kv.db))

View File

@@ -6,7 +6,6 @@ import (
"testing"
"github.com/golang/snappy"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
"github.com/prysmaticlabs/prysm/config/features"
@@ -20,12 +19,12 @@ import (
func Test_migrateStateValidators(t *testing.T) {
tests := []struct {
name string
setup func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator)
eval func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator)
setup func(t *testing.T, dbStore *Store, state *v1.BeaconState, vals []*v1alpha1.Validator)
eval func(t *testing.T, dbStore *Store, state *v1.BeaconState, vals []*v1alpha1.Validator)
}{
{
name: "only runs once",
setup: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
setup: func(t *testing.T, dbStore *Store, state *v1.BeaconState, vals []*v1alpha1.Validator) {
// create some new buckets that should be present for this migration
err := dbStore.db.Update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(stateValidatorsBucket)
@@ -36,7 +35,7 @@ func Test_migrateStateValidators(t *testing.T) {
})
assert.NoError(t, err)
},
eval: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
eval: func(t *testing.T, dbStore *Store, state *v1.BeaconState, vals []*v1alpha1.Validator) {
// check if the migration is completed, per migration table.
err := dbStore.db.View(func(tx *bbolt.Tx) error {
migrationCompleteOrNot := tx.Bucket(migrationsBucket).Get(migrationStateValidatorsKey)
@@ -48,7 +47,7 @@ func Test_migrateStateValidators(t *testing.T) {
},
{
name: "once migrated, always enable flag",
setup: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
setup: func(t *testing.T, dbStore *Store, state *v1.BeaconState, vals []*v1alpha1.Validator) {
// create some new buckets that should be present for this migration
err := dbStore.db.Update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(stateValidatorsBucket)
@@ -59,7 +58,7 @@ func Test_migrateStateValidators(t *testing.T) {
})
assert.NoError(t, err)
},
eval: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
eval: func(t *testing.T, dbStore *Store, state *v1.BeaconState, vals []*v1alpha1.Validator) {
// disable the flag and see if the code mandates that flag.
resetCfg := features.InitWithReset(&features.Flags{
EnableHistoricalSpaceRepresentation: false,
@@ -112,7 +111,7 @@ func Test_migrateStateValidators(t *testing.T) {
},
{
name: "migrates validators and adds them to new buckets",
setup: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
setup: func(t *testing.T, dbStore *Store, state *v1.BeaconState, vals []*v1alpha1.Validator) {
// create some new buckets that should be present for this migration
err := dbStore.db.Update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(stateValidatorsBucket)
@@ -123,7 +122,7 @@ func Test_migrateStateValidators(t *testing.T) {
})
assert.NoError(t, err)
},
eval: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
eval: func(t *testing.T, dbStore *Store, state *v1.BeaconState, vals []*v1alpha1.Validator) {
// check whether the new buckets are present
err := dbStore.db.View(func(tx *bbolt.Tx) error {
valBkt := tx.Bucket(stateValidatorsBucket)
@@ -210,12 +209,12 @@ func Test_migrateStateValidators(t *testing.T) {
func Test_migrateAltairStateValidators(t *testing.T) {
tests := []struct {
name string
setup func(t *testing.T, dbStore *Store, state state.BeaconStateAltair, vals []*v1alpha1.Validator)
eval func(t *testing.T, dbStore *Store, state state.BeaconStateAltair, vals []*v1alpha1.Validator)
setup func(t *testing.T, dbStore *Store, state *v2.BeaconState, vals []*v1alpha1.Validator)
eval func(t *testing.T, dbStore *Store, state *v2.BeaconState, vals []*v1alpha1.Validator)
}{
{
name: "migrates validators and adds them to new buckets",
setup: func(t *testing.T, dbStore *Store, state state.BeaconStateAltair, vals []*v1alpha1.Validator) {
setup: func(t *testing.T, dbStore *Store, state *v2.BeaconState, vals []*v1alpha1.Validator) {
// create some new buckets that should be present for this migration
err := dbStore.db.Update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(stateValidatorsBucket)
@@ -226,7 +225,7 @@ func Test_migrateAltairStateValidators(t *testing.T) {
})
assert.NoError(t, err)
},
eval: func(t *testing.T, dbStore *Store, state state.BeaconStateAltair, vals []*v1alpha1.Validator) {
eval: func(t *testing.T, dbStore *Store, state *v2.BeaconState, vals []*v1alpha1.Validator) {
// check whether the new buckets are present
err := dbStore.db.View(func(tx *bbolt.Tx) error {
valBkt := tx.Bucket(stateValidatorsBucket)
@@ -301,9 +300,9 @@ func Test_migrateAltairStateValidators(t *testing.T) {
})
defer resetCfg()
tt.setup(t, dbStore, st, vals)
tt.setup(t, dbStore, st.(*v2.BeaconState), vals)
assert.NoError(t, migrateStateValidators(context.Background(), dbStore.db), "migrateArchivedIndex(tx) error")
tt.eval(t, dbStore, st, vals)
tt.eval(t, dbStore, st.(*v2.BeaconState), vals)
})
}
}
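Sketch of the completion check these migration tests rely on: a migration is treated as done once its key appears in the migrations bucket (bucket and key names as in the hunk above; the exact stored value is not asserted here, and errors refers to the standard library package):

err := dbStore.db.View(func(tx *bbolt.Tx) error {
    if tx.Bucket(migrationsBucket).Get(migrationStateValidatorsKey) == nil {
        return errors.New("state-validator migration not marked complete")
    }
    return nil
})
assert.NoError(t, err)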

View File

@@ -120,6 +120,11 @@ func (_ *Service) AllDeposits(_ context.Context, _ *big.Int) []*ethpb.Deposit {
return []*ethpb.Deposit{}
}
// ChainStartDeposits mocks out the powchain functionality for interop.
func (s *Service) ChainStartDeposits() []*ethpb.Deposit {
return s.chainStartDeposits
}
// ChainStartEth1Data mocks out the powchain functionality for interop.
func (_ *Service) ChainStartEth1Data() *ethpb.Eth1Data {
return &ethpb.Eth1Data{}
@@ -127,11 +132,7 @@ func (_ *Service) ChainStartEth1Data() *ethpb.Eth1Data {
// PreGenesisState returns an empty beacon state.
func (_ *Service) PreGenesisState() state.BeaconState {
s, err := v1.InitializeFromProto(&ethpb.BeaconState{})
if err != nil {
panic("could not initialize state")
}
return s
return &v1.BeaconState{}
}
// ClearPreGenesisData --

View File

@@ -8,7 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
)
// ForkChoicer represents the full fork choice interface composed of all the sub-interfaces.
// ForkChoicer represents the full fork choice interface composed of all of the sub-interfaces.
type ForkChoicer interface {
HeadRetriever // to compute head.
BlockProcessor // to track new block for fork choice.
@@ -16,18 +16,18 @@ type ForkChoicer interface {
Pruner // to clean old data for fork choice.
Getter // to retrieve fork choice information.
ProposerBooster // ability to boost timely-proposed block roots.
SyncTipper // to update and retrieve validated sync tips.
}
// HeadRetriever retrieves head root and optimistic info of the current chain.
type HeadRetriever interface {
Head(context.Context, types.Epoch, [32]byte, []uint64, types.Epoch) ([32]byte, error)
Tips() ([][32]byte, []types.Slot)
IsOptimistic(root [32]byte) (bool, error)
Optimistic(ctx context.Context, root [32]byte, slot types.Slot) (bool, error)
}
// BlockProcessor processes the block that's used for accounting fork choice.
type BlockProcessor interface {
ProcessBlock(context.Context, types.Slot, [32]byte, [32]byte, types.Epoch, types.Epoch, bool) error
ProcessBlock(context.Context, types.Slot, [32]byte, [32]byte, [32]byte, types.Epoch, types.Epoch) error
}
// AttestationProcessor processes the attestation that's used for accounting fork choice.
@@ -48,10 +48,18 @@ type ProposerBooster interface {
// Getter returns fork choice related information.
type Getter interface {
Nodes() []*protoarray.Node
Node([32]byte) *protoarray.Node
HasNode([32]byte) bool
Store() *protoarray.Store
HasParent(root [32]byte) bool
AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([]byte, error)
IsCanonical(root [32]byte) bool
NodeCount() int
}
// SyncTipper returns sync tips related information.
type SyncTipper interface {
SyncedTips() map[[32]byte]types.Slot
UpdateSyncedTipsWithValidRoot(ctx context.Context, root [32]byte) error
UpdateSyncedTipsWithInvalidRoot(ctx context.Context, root [32]byte) error
}
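To make the shape of the SyncTipper contract concrete, here is a minimal in-memory type that satisfies it. Purely illustrative: the zero slot value and copy-on-read behaviour are assumptions, not the protoarray store's actual bookkeeping.

import (
    "context"
    "sync"

    types "github.com/prysmaticlabs/eth2-types"
)

type tipStore struct {
    mu   sync.RWMutex
    tips map[[32]byte]types.Slot
}

// SyncedTips returns a copy so callers cannot mutate internal state.
func (s *tipStore) SyncedTips() map[[32]byte]types.Slot {
    s.mu.RLock()
    defer s.mu.RUnlock()
    out := make(map[[32]byte]types.Slot, len(s.tips))
    for root, slot := range s.tips {
        out[root] = slot
    }
    return out
}

// UpdateSyncedTipsWithValidRoot records a fully validated root as a tip.
// A real implementation would also track the root's slot and prune stale
// ancestors; both are omitted in this sketch.
func (s *tipStore) UpdateSyncedTipsWithValidRoot(_ context.Context, root [32]byte) error {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.tips[root] = 0
    return nil
}

// UpdateSyncedTipsWithInvalidRoot drops a root whose payload was found invalid.
func (s *tipStore) UpdateSyncedTipsWithInvalidRoot(_ context.Context, root [32]byte) error {
    s.mu.Lock()
    defer s.mu.Unlock()
    delete(s.tips, root)
    return nil
}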

View File

@@ -5,7 +5,7 @@ go_library(
srcs = [
"doc.go",
"errors.go",
"forkchoice.go",
"helpers.go",
"metrics.go",
"node.go",
"optimistic_sync.go",
@@ -21,6 +21,7 @@ go_library(
deps = [
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
@@ -34,7 +35,7 @@ go_test(
name = "go_default_test",
srcs = [
"ffg_update_test.go",
"forkchoice_test.go",
"helpers_test.go",
"no_vote_test.go",
"node_test.go",
"optimistic_sync_test.go",

View File

@@ -1,7 +1,7 @@
/*
Package protoarray implements proto array fork choice as outlined:
https://github.com/protolambda/lmd-ghost#array-based-stateful-dag-proto_array
This was motivated by the original implementation by Sigma Prime here:
This was motivated by the original implementation by Sigma Prime here:
https://github.com/sigp/lighthouse/pull/804
*/
package protoarray

View File

@@ -2,9 +2,12 @@ package protoarray
import "errors"
var errNilNode = errors.New("invalid nil or unknown node")
var errInvalidBalance = errors.New("invalid node balance")
var errInvalidProposerBoostRoot = errors.New("invalid proposer boost root")
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
var errUnknownJustifiedRoot = errors.New("unknown justified root")
var errInvalidOptimisticStatus = errors.New("invalid optimistic status")
var errInvalidNodeIndex = errors.New("node index is invalid")
var errInvalidJustifiedIndex = errors.New("justified index is invalid")
var errInvalidBestChildIndex = errors.New("best child index is invalid")
var errInvalidBestDescendantIndex = errors.New("best descendant index is invalid")
var errInvalidParentDelta = errors.New("parent delta is invalid")
var errInvalidNodeDelta = errors.New("node delta is invalid")
var errInvalidDeltaLength = errors.New("delta length is invalid")

View File

@@ -27,9 +27,9 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// 2 <- justified: 1, finalized: 0
// |
// 3 <- justified: 2, finalized: 1
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(3), indexToHash(2), 2, 1, false))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, [32]byte{}, 0, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), [32]byte{}, 1, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(3), indexToHash(2), [32]byte{}, 2, 1))
// With starting justified epoch at 0, the head should be 3:
// 0 <- start
@@ -89,17 +89,17 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
// | |
// justified: 2, finalized: 0 -> 9 10 <- justified: 2, finalized: 0
// Left branch.
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(3), indexToHash(1), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(5), indexToHash(3), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(7), indexToHash(5), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(9), indexToHash(7), 2, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, [32]byte{}, 0, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(3), indexToHash(1), [32]byte{}, 1, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(5), indexToHash(3), [32]byte{}, 1, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(7), indexToHash(5), [32]byte{}, 1, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(9), indexToHash(7), [32]byte{}, 2, 0))
// Right branch.
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(4), indexToHash(2), 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(6), indexToHash(4), 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(8), indexToHash(6), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(10), indexToHash(8), 2, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, [32]byte{}, 0, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(4), indexToHash(2), [32]byte{}, 0, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(6), indexToHash(4), [32]byte{}, 0, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(8), indexToHash(6), [32]byte{}, 1, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(10), indexToHash(8), [32]byte{}, 2, 0))
// With start at 0, the head should be 10:
// 0 <-- start
@@ -183,11 +183,18 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
}
func setup(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
ctx := context.Background()
f := New(justifiedEpoch, finalizedEpoch)
err := f.ProcessBlock(ctx, 0, params.BeaconConfig().ZeroHash, [32]byte{}, justifiedEpoch, finalizedEpoch, false)
if err != nil {
return nil
}
f := New(0, 0, params.BeaconConfig().ZeroHash)
f.store.nodesIndices[params.BeaconConfig().ZeroHash] = 0
f.store.nodes = append(f.store.nodes, &Node{
slot: 0,
root: params.BeaconConfig().ZeroHash,
parent: NonExistentNode,
justifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
bestChild: NonExistentNode,
bestDescendant: NonExistentNode,
weight: 0,
})
return f
}
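The rewritten setup helper has to seed the genesis node by hand because the restored store addresses nodes by position in a slice rather than by pointer: parent, bestChild and bestDescendant are uint64 indices into that slice, and NonExistentNode is the sentinel for "no such node". A reduced sketch of that layout, mirroring what the helper builds; the constant value and field set here are illustrative, not the package's definitions.

package main

import "fmt"

// nonExistentNode plays the role of protoarray.NonExistentNode: an index
// value that can never refer to a real slice element.
const nonExistentNode = ^uint64(0)

// node is a trimmed-down, index-based fork choice node.
type node struct {
	slot           uint64
	root           [32]byte
	parent         uint64
	bestChild      uint64
	bestDescendant uint64
	weight         uint64
}

func main() {
	// Seed the store with a genesis node at index 0, the same shape the
	// test's setup helper constructs explicitly.
	genesis := &node{
		parent:         nonExistentNode,
		bestChild:      nonExistentNode,
		bestDescendant: nonExistentNode,
	}
	nodes := []*node{genesis}
	indices := map[[32]byte]uint64{genesis.root: 0}
	fmt.Println(len(nodes), indices[genesis.root]) // 1 0
}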

View File

@@ -1,274 +0,0 @@
package protoarray
import (
"context"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"go.opencensus.io/trace"
)
// This tracks the last reported head root. Used for metrics.
var lastHeadRoot [32]byte
// New initializes a new fork choice store.
func New(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
s := &Store{
justifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
pruneThreshold: defaultPruneThreshold,
}
b := make([]uint64, 0)
v := make([]Vote, 0)
return &ForkChoice{store: s, balances: b, votes: v}
}
// NodeCount returns the current number of nodes in the Store
func (f *ForkChoice) NodeCount() int {
return len(f.store.nodeByRoot)
}
// Head returns the head root from fork choice store.
// It first computes validators' balance changes, then recalculates the block tree from leaves to root.
func (f *ForkChoice) Head(
ctx context.Context,
justifiedEpoch types.Epoch,
justifiedRoot [32]byte,
justifiedStateBalances []uint64,
finalizedEpoch types.Epoch,
) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.Head")
defer span.End()
f.votesLock.Lock()
defer f.votesLock.Unlock()
calledHeadCount.Inc()
// Using the write lock here because `applyWeightChanges` that gets called subsequently requires a write operation.
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
f.store.updateCheckpoints(justifiedEpoch, finalizedEpoch)
if err := f.updateBalances(justifiedStateBalances); err != nil {
return [32]byte{}, errors.Wrap(err, "could not update balances")
}
if err := f.store.applyProposerBoostScore(justifiedStateBalances); err != nil {
return [32]byte{}, errors.Wrap(err, "could not apply proposer boost score")
}
if err := f.store.treeRootNode.applyWeightChanges(ctx); err != nil {
return [32]byte{}, errors.Wrap(err, "could not apply weight changes")
}
if err := f.store.treeRootNode.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch); err != nil {
return [32]byte{}, errors.Wrap(err, "could not update best descendant")
}
return f.store.head(ctx, justifiedRoot)
}
// ProcessAttestation processes attestations for vote accounting: it iterates over the validator indices
// and updates their votes accordingly.
func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []uint64, blockRoot [32]byte, targetEpoch types.Epoch) {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.ProcessAttestation")
defer span.End()
f.votesLock.Lock()
defer f.votesLock.Unlock()
for _, index := range validatorIndices {
// Validator indices will grow the vote cache.
for index >= uint64(len(f.votes)) {
f.votes = append(f.votes, Vote{currentRoot: params.BeaconConfig().ZeroHash, nextRoot: params.BeaconConfig().ZeroHash})
}
// Newly allocated vote if the root fields are untouched.
newVote := f.votes[index].nextRoot == params.BeaconConfig().ZeroHash &&
f.votes[index].currentRoot == params.BeaconConfig().ZeroHash
// Vote gets updated if it's newly allocated or high target epoch.
if newVote || targetEpoch > f.votes[index].nextEpoch {
f.votes[index].nextEpoch = targetEpoch
f.votes[index].nextRoot = blockRoot
}
}
processedAttestationCount.Inc()
}
// ProcessBlock processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) ProcessBlock(
ctx context.Context,
slot types.Slot,
blockRoot, parentRoot [fieldparams.RootLength]byte,
justifiedEpoch, finalizedEpoch types.Epoch, optimistic bool,
) error {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.ProcessBlock")
defer span.End()
return f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch, optimistic)
}
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
// root is different from the current store finalized root, and the number of nodes in the store has met the prune threshold.
func (f *ForkChoice) Prune(ctx context.Context, finalizedRoot [32]byte) error {
return f.store.prune(ctx, finalizedRoot)
}
// HasNode returns true if the node exists in fork choice store,
// false otherwise.
func (f *ForkChoice) HasNode(root [32]byte) bool {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
_, ok := f.store.nodeByRoot[root]
return ok
}
// HasParent returns true if the node parent exists in fork choice store,
// false otherwise.
func (f *ForkChoice) HasParent(root [32]byte) bool {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return false
}
return node.parent != nil
}
// IsCanonical returns true if the given root is part of the canonical chain.
func (f *ForkChoice) IsCanonical(root [32]byte) bool {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return false
}
if node.bestDescendant == nil {
if f.store.headNode.bestDescendant == nil {
return node == f.store.headNode
}
return node == f.store.headNode.bestDescendant
}
if f.store.headNode.bestDescendant == nil {
return node.bestDescendant == f.store.headNode
}
return node.bestDescendant == f.store.headNode.bestDescendant
}
// IsOptimistic returns true if the given root has been optimistically synced.
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return false, errNilNode
}
return node.optimistic, nil
}
// Store returns the fork choice store
func (f *ForkChoice) Store() *Store {
return f.store
}
// AncestorRoot returns the ancestor root of input block root at a given slot.
func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArray.AncestorRoot")
defer span.End()
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return nil, errNilNode
}
n := node
for n != nil && n.slot > slot {
if ctx.Err() != nil {
return nil, ctx.Err()
}
n = n.parent
}
if n == nil {
return nil, errNilNode
}
return n.root[:], nil
}
// updateBalances updates the balances that directly voted for each block taking into account the
// validators' latest votes.
func (f *ForkChoice) updateBalances(newBalances []uint64) error {
for index, vote := range f.votes {
// Skip if validator has never voted for current root and next root (i.e. if the
// votes are zero hash aka genesis block), there's nothing to compute.
if vote.currentRoot == params.BeaconConfig().ZeroHash && vote.nextRoot == params.BeaconConfig().ZeroHash {
continue
}
oldBalance := uint64(0)
newBalance := uint64(0)
// If the validator index did not exist in `f.balances` or
// `newBalances` list above, the balance is just 0.
if index < len(f.balances) {
oldBalance = f.balances[index]
}
if index < len(newBalances) {
newBalance = newBalances[index]
}
// Update only if the validator's balance or vote has changed.
if vote.currentRoot != vote.nextRoot || oldBalance != newBalance {
// Ignore the vote if the root is not in fork choice
// store, that means we have not seen the block before.
nextNode, ok := f.store.nodeByRoot[vote.nextRoot]
if ok && vote.nextRoot != params.BeaconConfig().ZeroHash {
// Protection against nil node
if nextNode == nil {
return errNilNode
}
nextNode.balance += newBalance
}
currentNode, ok := f.store.nodeByRoot[vote.currentRoot]
if ok && vote.currentRoot != params.BeaconConfig().ZeroHash {
// Protection against nil node
if currentNode == nil {
return errNilNode
}
if currentNode.balance < oldBalance {
return errInvalidBalance
}
currentNode.balance -= oldBalance
}
}
// Rotate the validator vote.
f.votes[index].currentRoot = vote.nextRoot
}
f.balances = newBalances
return nil
}
// Tips returns a list of possible heads from fork choice store, it returns the
// roots and the slots of the leaf nodes.
func (f *ForkChoice) Tips() ([][32]byte, []types.Slot) {
return f.store.tips()
}
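Head recalculates the block tree "from leaves to root": every node's weight is its own attesting balance plus the weight of everything below it, which is the bottom-up pass Head triggers through applyWeightChanges. The following self-contained illustration shows that sum on a three-block chain; the block type and applyWeights function are simplified stand-ins, not the package's own code.

package main

import "fmt"

// block is a reduced stand-in for a fork choice node.
type block struct {
	balance  uint64 // balance attesting directly to this block
	weight   uint64
	children []*block
}

// applyWeights recomputes every weight bottom-up: a block's weight is its own
// attesting balance plus the weight of all of its descendants.
func applyWeights(b *block) uint64 {
	var childrenWeight uint64
	for _, c := range b.children {
		childrenWeight += applyWeights(c)
	}
	b.weight = b.balance + childrenWeight
	return b.weight
}

func main() {
	// Chain a <- b <- c with 100 units of balance attesting to each block.
	c := &block{balance: 100}
	b := &block{balance: 100, children: []*block{c}}
	a := &block{balance: 100, children: []*block{b}}
	applyWeights(a)
	fmt.Println(a.weight, b.weight, c.weight) // 300 200 100
}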

View File

@@ -1,168 +0,0 @@
package protoarray
import (
"context"
"encoding/binary"
"testing"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
f.votes = []Vote{
{indexToHash(1), indexToHash(1), 0},
{indexToHash(2), indexToHash(2), 0},
{indexToHash(3), indexToHash(3), 0},
}
// Each node gets one unique vote. The weight should look like 103 <- 102 <- 101 because
// they get propagated back.
require.NoError(t, f.updateBalances([]uint64{10, 20, 30}))
s := f.store
assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
s := f.store
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
f.balances = []uint64{100, 100, 100}
f.votes = []Vote{
{indexToHash(1), indexToHash(1), 0},
{indexToHash(2), indexToHash(2), 0},
{indexToHash(3), indexToHash(3), 0},
}
require.NoError(t, f.updateBalances([]uint64{10, 20, 30}))
assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_IsCanonical(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(5), indexToHash(4), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 6, indexToHash(6), indexToHash(5), 1, 1, false))
require.Equal(t, true, f.IsCanonical(params.BeaconConfig().ZeroHash))
require.Equal(t, false, f.IsCanonical(indexToHash(1)))
require.Equal(t, true, f.IsCanonical(indexToHash(2)))
require.Equal(t, false, f.IsCanonical(indexToHash(3)))
require.Equal(t, true, f.IsCanonical(indexToHash(4)))
require.Equal(t, true, f.IsCanonical(indexToHash(5)))
require.Equal(t, true, f.IsCanonical(indexToHash(6)))
}
func TestForkChoice_IsCanonicalReorg(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, 1, 1, false))
f.store.nodesLock.Lock()
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
require.NoError(t, f.store.treeRootNode.applyWeightChanges(ctx))
require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1))
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
f.store.nodesLock.Unlock()
h, err := f.store.head(ctx, [32]byte{'1'})
require.NoError(t, err)
require.DeepEqual(t, [32]byte{'3'}, h)
require.DeepEqual(t, h, f.store.headNode.root)
require.Equal(t, true, f.IsCanonical(params.BeaconConfig().ZeroHash))
require.Equal(t, true, f.IsCanonical([32]byte{'1'}))
require.Equal(t, false, f.IsCanonical([32]byte{'2'}))
require.Equal(t, true, f.IsCanonical([32]byte{'3'}))
require.Equal(t, false, f.IsCanonical([32]byte{'4'}))
require.Equal(t, false, f.IsCanonical([32]byte{'5'}))
require.Equal(t, false, f.IsCanonical([32]byte{'6'}))
}
func TestForkChoice_AncestorRoot(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(3), indexToHash(2), 1, 1, false))
f.store.treeRootNode = f.store.nodeByRoot[indexToHash(1)]
f.store.treeRootNode.parent = nil
r, err := f.AncestorRoot(ctx, indexToHash(3), 6)
assert.NoError(t, err)
assert.Equal(t, bytesutil.ToBytes32(r), indexToHash(3))
_, err = f.AncestorRoot(ctx, indexToHash(3), 0)
assert.ErrorContains(t, errNilNode.Error(), err)
root, err := f.AncestorRoot(ctx, indexToHash(3), 5)
require.NoError(t, err)
hash3 := indexToHash(3)
require.DeepEqual(t, hash3[:], root)
root, err = f.AncestorRoot(ctx, indexToHash(3), 1)
require.NoError(t, err)
hash1 := indexToHash(1)
require.DeepEqual(t, hash1[:], root)
}
func TestForkChoice_AncestorEqualSlot(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, 1, 1, false))
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 100)
require.NoError(t, err)
root := bytesutil.ToBytes32(r)
require.Equal(t, root, [32]byte{'1'})
}
func TestForkChoice_AncestorLowerSlot(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, 1, 1, false))
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 150)
require.NoError(t, err)
root := bytesutil.ToBytes32(r)
require.Equal(t, root, [32]byte{'1'})
}
func indexToHash(i uint64) [32]byte {
var b [8]byte
binary.LittleEndian.PutUint64(b[:], i)
return hash.Hash(b[:])
}

View File

@@ -0,0 +1,92 @@
package protoarray
import (
"context"
"github.com/prysmaticlabs/prysm/config/params"
"go.opencensus.io/trace"
)
// This computes validator balance delta from validator votes.
// It returns a list of deltas that represents the difference between old balances and new balances.
func computeDeltas(
ctx context.Context,
blockIndices map[[32]byte]uint64,
votes []Vote,
oldBalances, newBalances []uint64,
) ([]int, []Vote, error) {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.computeDeltas")
defer span.End()
deltas := make([]int, len(blockIndices))
for validatorIndex, vote := range votes {
oldBalance := uint64(0)
newBalance := uint64(0)
// Skip if validator has never voted for current root and next root (ie. if the
// votes are zero hash aka genesis block), there's nothing to compute.
if vote.currentRoot == params.BeaconConfig().ZeroHash && vote.nextRoot == params.BeaconConfig().ZeroHash {
continue
}
// If the validator index did not exist in `oldBalance` or `newBalance` list above, the balance is just 0.
if validatorIndex < len(oldBalances) {
oldBalance = oldBalances[validatorIndex]
}
if validatorIndex < len(newBalances) {
newBalance = newBalances[validatorIndex]
}
// Perform delta only if the validator's balance or vote has changed.
if vote.currentRoot != vote.nextRoot || oldBalance != newBalance {
// Ignore the vote if it's not known in `blockIndices`,
// that means we have not seen the block before.
nextDeltaIndex, ok := blockIndices[vote.nextRoot]
if ok {
// Protection against out of bound, the `nextDeltaIndex` which defines
// the block location in the dag can not exceed the total `delta` length.
if int(nextDeltaIndex) >= len(deltas) {
return nil, nil, errInvalidNodeDelta
}
deltas[nextDeltaIndex] += int(newBalance)
}
currentDeltaIndex, ok := blockIndices[vote.currentRoot]
if ok {
// Protection against out of bound (same as above)
if int(currentDeltaIndex) >= len(deltas) {
return nil, nil, errInvalidNodeDelta
}
deltas[currentDeltaIndex] -= int(oldBalance)
}
}
// Rotate the validator vote.
vote.currentRoot = vote.nextRoot
votes[validatorIndex] = vote
}
return deltas, votes, nil
}
// This returns a copy of the proto array node object.
func copyNode(node *Node) *Node {
if node == nil {
return &Node{}
}
copiedRoot := [32]byte{}
copy(copiedRoot[:], node.root[:])
return &Node{
slot: node.slot,
root: copiedRoot,
parent: node.parent,
justifiedEpoch: node.justifiedEpoch,
finalizedEpoch: node.finalizedEpoch,
weight: node.weight,
bestChild: node.bestChild,
bestDescendant: node.bestDescendant,
}
}
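computeDeltas only credits and debits blocks that are present in blockIndices, and it rotates each vote's currentRoot onto nextRoot as it goes, so the next pass starts from the new position. Here is a self-contained walk-through of that bookkeeping with a simplified re-implementation; it is an illustration, not the helper above, and the zero-hash skip and bounds checks are omitted.

package main

import "fmt"

type vote struct {
	currentRoot [32]byte
	nextRoot    [32]byte
}

// computeDeltas is a stripped-down version of the helper above: for every
// vote that moved or whose balance changed, subtract the old balance from the
// old block's slot in deltas and add the new balance to the new block's slot.
func computeDeltas(indices map[[32]byte]int, votes []vote, oldBal, newBal []uint64) []int {
	deltas := make([]int, len(indices))
	for i := range votes {
		v := &votes[i]
		if v.currentRoot != v.nextRoot || oldBal[i] != newBal[i] {
			if j, ok := indices[v.nextRoot]; ok {
				deltas[j] += int(newBal[i])
			}
			if j, ok := indices[v.currentRoot]; ok {
				deltas[j] -= int(oldBal[i])
			}
		}
		v.currentRoot = v.nextRoot // rotate the vote for the next pass
	}
	return deltas
}

func main() {
	a, b := [32]byte{'a'}, [32]byte{'b'}
	indices := map[[32]byte]int{a: 0, b: 1}
	// A single validator with balance 32 moves its vote from block a to block b.
	votes := []vote{{currentRoot: a, nextRoot: b}}
	fmt.Println(computeDeltas(indices, votes, []uint64{32}, []uint64{32})) // [-32 32]
}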

View File

@@ -0,0 +1,249 @@
package protoarray
import (
"context"
"encoding/binary"
"testing"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestComputeDelta_ZeroHash(t *testing.T) {
validatorCount := uint64(16)
indices := make(map[[32]byte]uint64)
votes := make([]Vote, 0)
oldBalances := make([]uint64, 0)
newBalances := make([]uint64, 0)
for i := uint64(0); i < validatorCount; i++ {
indices[indexToHash(i)] = i
votes = append(votes, Vote{params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0})
oldBalances = append(oldBalances, 0)
newBalances = append(newBalances, 0)
}
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances)
require.NoError(t, err)
assert.Equal(t, int(validatorCount), len(delta))
for _, d := range delta {
assert.Equal(t, 0, d)
}
for _, vote := range votes {
assert.Equal(t, vote.currentRoot, vote.nextRoot, "The vote should not have changed")
}
}
func TestComputeDelta_AllVoteTheSame(t *testing.T) {
validatorCount := uint64(16)
balance := uint64(32)
indices := make(map[[32]byte]uint64)
votes := make([]Vote, 0)
oldBalances := make([]uint64, 0)
newBalances := make([]uint64, 0)
for i := uint64(0); i < validatorCount; i++ {
indices[indexToHash(i)] = i
votes = append(votes, Vote{params.BeaconConfig().ZeroHash, indexToHash(0), 0})
oldBalances = append(oldBalances, balance)
newBalances = append(newBalances, balance)
}
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances)
require.NoError(t, err)
assert.Equal(t, int(validatorCount), len(delta))
for i, d := range delta {
if i == 0 {
assert.Equal(t, balance*validatorCount, uint64(d))
} else {
assert.Equal(t, 0, d)
}
}
for _, vote := range votes {
assert.Equal(t, vote.currentRoot, vote.nextRoot, "The vote should not have changed")
}
}
func TestComputeDelta_DifferentVotes(t *testing.T) {
validatorCount := uint64(16)
balance := uint64(32)
indices := make(map[[32]byte]uint64)
votes := make([]Vote, 0)
oldBalances := make([]uint64, 0)
newBalances := make([]uint64, 0)
for i := uint64(0); i < validatorCount; i++ {
indices[indexToHash(i)] = i
votes = append(votes, Vote{params.BeaconConfig().ZeroHash, indexToHash(i), 0})
oldBalances = append(oldBalances, balance)
newBalances = append(newBalances, balance)
}
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances)
require.NoError(t, err)
assert.Equal(t, int(validatorCount), len(delta))
for _, d := range delta {
assert.Equal(t, balance, uint64(d))
}
for _, vote := range votes {
assert.Equal(t, vote.currentRoot, vote.nextRoot, "The vote should not have changed")
}
}
func TestComputeDelta_MovingVotes(t *testing.T) {
validatorCount := uint64(16)
balance := uint64(32)
indices := make(map[[32]byte]uint64)
votes := make([]Vote, 0)
oldBalances := make([]uint64, 0)
newBalances := make([]uint64, 0)
lastIndex := uint64(len(indices) - 1)
for i := uint64(0); i < validatorCount; i++ {
indices[indexToHash(i)] = i
votes = append(votes, Vote{indexToHash(0), indexToHash(lastIndex), 0})
oldBalances = append(oldBalances, balance)
newBalances = append(newBalances, balance)
}
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances)
require.NoError(t, err)
assert.Equal(t, int(validatorCount), len(delta))
for i, d := range delta {
if i == 0 {
assert.Equal(t, -int(balance*validatorCount), d, "First root should have negative delta")
} else if i == int(lastIndex) {
assert.Equal(t, int(balance*validatorCount), d, "Last root should have positive delta")
} else {
assert.Equal(t, 0, d)
}
}
for _, vote := range votes {
assert.Equal(t, vote.currentRoot, vote.nextRoot, "The vote should not have changed")
}
}
func TestComputeDelta_MoveOutOfTree(t *testing.T) {
balance := uint64(32)
indices := make(map[[32]byte]uint64)
votes := make([]Vote, 0)
oldBalances := []uint64{balance, balance}
newBalances := []uint64{balance, balance}
indices[indexToHash(1)] = 0
votes = append(votes,
Vote{indexToHash(1), params.BeaconConfig().ZeroHash, 0},
Vote{indexToHash(1), [32]byte{'A'}, 0})
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances)
require.NoError(t, err)
assert.Equal(t, 1, len(delta))
assert.Equal(t, 0-2*int(balance), delta[0])
for _, vote := range votes {
assert.Equal(t, vote.currentRoot, vote.nextRoot, "The vote should not have changed")
}
}
func TestComputeDelta_ChangingBalances(t *testing.T) {
oldBalance := uint64(32)
newBalance := oldBalance * 2
validatorCount := uint64(16)
indices := make(map[[32]byte]uint64)
votes := make([]Vote, 0)
oldBalances := make([]uint64, 0)
newBalances := make([]uint64, 0)
indices[indexToHash(1)] = 0
for i := uint64(0); i < validatorCount; i++ {
indices[indexToHash(i)] = i
votes = append(votes, Vote{indexToHash(0), indexToHash(1), 0})
oldBalances = append(oldBalances, oldBalance)
newBalances = append(newBalances, newBalance)
}
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances)
require.NoError(t, err)
assert.Equal(t, 16, len(delta))
for i, d := range delta {
if i == 0 {
assert.Equal(t, -int(oldBalance*validatorCount), d, "First root should have negative delta")
} else if i == 1 {
assert.Equal(t, int(newBalance*validatorCount), d, "Last root should have positive delta")
} else {
assert.Equal(t, 0, d)
}
}
for _, vote := range votes {
assert.Equal(t, vote.currentRoot, vote.nextRoot, "The vote should not have changed")
}
}
func TestComputeDelta_ValidatorAppear(t *testing.T) {
balance := uint64(32)
indices := make(map[[32]byte]uint64)
votes := make([]Vote, 0)
oldBalances := []uint64{balance}
newBalances := []uint64{balance, balance}
indices[indexToHash(1)] = 0
indices[indexToHash(2)] = 1
votes = append(votes,
Vote{indexToHash(1), indexToHash(2), 0},
Vote{indexToHash(1), indexToHash(2), 0})
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances)
require.NoError(t, err)
assert.Equal(t, 2, len(delta))
assert.Equal(t, 0-int(balance), delta[0])
assert.Equal(t, 2*int(balance), delta[1])
for _, vote := range votes {
assert.Equal(t, vote.currentRoot, vote.nextRoot, "The vote should not have changed")
}
}
func TestComputeDelta_ValidatorDisappears(t *testing.T) {
balance := uint64(32)
indices := make(map[[32]byte]uint64)
votes := make([]Vote, 0)
oldBalances := []uint64{balance, balance}
newBalances := []uint64{balance}
indices[indexToHash(1)] = 0
indices[indexToHash(2)] = 1
votes = append(votes,
Vote{indexToHash(1), indexToHash(2), 0},
Vote{indexToHash(1), indexToHash(2), 0})
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances)
require.NoError(t, err)
assert.Equal(t, 2, len(delta))
assert.Equal(t, 0-2*int(balance), delta[0])
assert.Equal(t, int(balance), delta[1])
for _, vote := range votes {
assert.Equal(t, vote.currentRoot, vote.nextRoot, "The vote should not have changed")
}
}
func indexToHash(i uint64) [32]byte {
var b [8]byte
binary.LittleEndian.PutUint64(b[:], i)
return hash.Hash(b[:])
}

View File

@@ -48,10 +48,4 @@ var (
Help: "The number of times pruning happened.",
},
)
optimisticCount = promauto.NewCounter(
prometheus.CounterOpts{
Name: "proto_array_optimistic_count",
Help: "The number of blocks that have been optimistically synced.",
},
)
)

View File

@@ -24,7 +24,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 0
// /
// 2 <- head
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, [32]byte{}, 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -33,7 +33,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 0
// / \
// head -> 2 1
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, [32]byte{}, 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -44,7 +44,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// head -> 2 1
// |
// 3
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), [32]byte{}, 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -55,7 +55,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 2 1
// | |
// head -> 4 3
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(2), 1, 1, false))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(2), [32]byte{}, 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -68,7 +68,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// head -> 4 3
// |
// 5 <- justified epoch = 2
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 1, false))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), [32]byte{}, 2, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -107,7 +107,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 5
// |
// 6 <- head
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(5), 2, 1, false))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(5), [32]byte{}, 2, 1))
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 2")

View File

@@ -1,11 +1,7 @@
package protoarray
import (
"bytes"
"context"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
)
// Slot of the fork choice node.
@@ -13,23 +9,13 @@ func (n *Node) Slot() types.Slot {
return n.slot
}
// Balance returns the current balance of the Node
func (n *Node) Balance() uint64 {
return n.balance
}
// Optimistic returns the optimistic status of the node
func (n *Node) Optimistic() bool {
return n.optimistic
}
// Root of the fork choice node.
func (n *Node) Root() [32]byte {
return n.root
}
// Parent of the fork choice node.
func (n *Node) Parent() *Node {
func (n *Node) Parent() uint64 {
return n.parent
}
@@ -48,126 +34,17 @@ func (n *Node) Weight() uint64 {
return n.weight
}
// Children returns the children of this node
func (n *Node) Children() []*Node {
return n.children
}
// depth returns the length of the path to the root of Fork Choice
func (n *Node) depth() uint64 {
ret := uint64(0)
for node := n.parent; node != nil; node = node.parent {
ret += 1
}
return ret
}
// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node. This function requires a lock
// in Store.nodesLock
func (n *Node) applyWeightChanges(ctx context.Context) error {
// Recursively calling the children to sum their weights.
childrenWeight := uint64(0)
for _, child := range n.children {
if ctx.Err() != nil {
return ctx.Err()
}
if err := child.applyWeightChanges(ctx); err != nil {
return err
}
childrenWeight += child.weight
}
if n.root == params.BeaconConfig().ZeroHash {
return nil
}
n.weight = n.balance + childrenWeight
return nil
}
// updateBestDescendant updates the best descendant of this node and its children.
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch types.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()
}
if len(n.children) == 0 {
n.bestDescendant = nil
return nil
}
var bestChild *Node
bestWeight := uint64(0)
hasViableDescendant := false
for _, child := range n.children {
if child == nil {
return errNilNode
}
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch); err != nil {
return err
}
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, finalizedEpoch)
if childLeadsToViableHead && !hasViableDescendant {
// The child leads to a viable head, but the current
// parent's best child doesn't.
bestWeight = child.weight
bestChild = child
hasViableDescendant = true
} else if childLeadsToViableHead {
// If both are viable, compare their weights.
if child.weight == bestWeight {
// Tie-breaker of equal weights by root.
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
bestChild = child
}
} else if child.weight > bestWeight {
bestChild = child
bestWeight = child.weight
}
}
}
if hasViableDescendant {
if bestChild.bestDescendant == nil {
n.bestDescendant = bestChild
} else {
n.bestDescendant = bestChild.bestDescendant
}
} else {
n.bestDescendant = nil
}
return nil
}
// viableForHead returns true if the node is viable to head.
// Any node with different finalized or justified epoch than
// the ones in fork choice store should not be viable to head.
func (n *Node) viableForHead(justifiedEpoch, finalizedEpoch types.Epoch) bool {
justified := justifiedEpoch == n.justifiedEpoch || justifiedEpoch == 0
finalized := finalizedEpoch == n.finalizedEpoch || finalizedEpoch == 0
return justified && finalized
}
func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch types.Epoch) bool {
if n.bestDescendant == nil {
return n.viableForHead(justifiedEpoch, finalizedEpoch)
}
return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch)
// BestChild of the fork choice node.
func (n *Node) BestChild() uint64 {
return n.bestChild
}
// BestDescendant of the fork choice node.
func (n *Node) BestDescendant() *Node {
func (n *Node) BestDescendant() uint64 {
return n.bestDescendant
}
// setNodeAndParentValidated sets the current node and the parent as validated (i.e. non-optimistic).
func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
if ctx.Err() != nil {
return ctx.Err()
}
if !n.optimistic || n.parent == nil {
return nil
}
n.optimistic = false
return n.parent.setNodeAndParentValidated(ctx)
// Graffiti of the fork choice node.
func (n *Node) Graffiti() [32]byte {
return n.graffiti
}
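The removed updateBestDescendant and viableForHead helpers encode the filter fork choice applies when choosing among children: a node can only be, or lead to, the head when its justified and finalized epochs agree with the checkpoints the store currently knows about, with an epoch of 0 on the store side acting as a wildcard. A compact restatement of that predicate with plain integers, for illustration only:

package main

import "fmt"

// viableForHead restates the predicate from the removed node.go: a node is a
// head candidate only when its checkpoints agree with the store's, where an
// epoch of 0 on the store side acts as a wildcard.
func viableForHead(nodeJustified, nodeFinalized, storeJustified, storeFinalized uint64) bool {
	justified := storeJustified == nodeJustified || storeJustified == 0
	finalized := storeFinalized == nodeFinalized || storeFinalized == 0
	return justified && finalized
}

func main() {
	fmt.Println(viableForHead(1, 1, 1, 1)) // true
	fmt.Println(viableForHead(1, 1, 2, 2)) // false: stale checkpoints
	fmt.Println(viableForHead(5, 4, 0, 0)) // true: store epochs of 0 are wildcards
}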

View File

@@ -1,23 +1,22 @@
package protoarray
import (
"context"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestNode_Getters(t *testing.T) {
slot := types.Slot(100)
root := [32]byte{'a'}
parent := &Node{}
parent := uint64(10)
jEpoch := types.Epoch(20)
fEpoch := types.Epoch(30)
weight := uint64(10000)
balance := uint64(10)
bestChild := uint64(5)
bestDescendant := uint64(4)
graffiti := [32]byte{'b'}
n := &Node{
slot: slot,
root: root,
@@ -25,7 +24,9 @@ func TestNode_Getters(t *testing.T) {
justifiedEpoch: jEpoch,
finalizedEpoch: fEpoch,
weight: weight,
balance: balance,
bestChild: bestChild,
bestDescendant: bestDescendant,
graffiti: graffiti,
}
require.Equal(t, slot, n.Slot())
@@ -34,199 +35,7 @@ func TestNode_Getters(t *testing.T) {
require.Equal(t, jEpoch, n.JustifiedEpoch())
require.Equal(t, fEpoch, n.FinalizedEpoch())
require.Equal(t, weight, n.Weight())
descendantNil := n.bestDescendant == nil
require.Equal(t, true, descendantNil)
}
func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
// The updated balances of each node is 100
s := f.store
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
}
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
// The updated balances of each node is 100
s := f.store
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
s.nodeByRoot[indexToHash(1)].weight = 400
s.nodeByRoot[indexToHash(2)].weight = 400
s.nodeByRoot[indexToHash(3)].weight = 400
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
}
func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is not viable.
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 2, 3, false))
// Verify parent's best child and best descendant are `none`.
s := f.store
assert.Equal(t, 1, len(s.treeRootNode.children))
nilBestDescendant := s.treeRootNode.bestDescendant == nil
assert.Equal(t, true, nilBestDescendant)
}
func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
s := f.store
assert.Equal(t, 1, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
}
func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 100
s.nodeByRoot[indexToHash(2)].weight = 200
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1))
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)
}
func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 200
s.nodeByRoot[indexToHash(2)].weight = 100
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1))
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
}
func TestNode_TestDepth(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1, false))
s := f.store
require.Equal(t, s.nodeByRoot[indexToHash(2)].depth(), uint64(2))
require.Equal(t, s.nodeByRoot[indexToHash(3)].depth(), uint64(1))
}
func TestNode_ViableForHead(t *testing.T) {
tests := []struct {
n *Node
justifiedEpoch types.Epoch
finalizedEpoch types.Epoch
want bool
}{
{&Node{}, 0, 0, true},
{&Node{}, 1, 0, false},
{&Node{}, 0, 1, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 1, 1, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 2, 2, false},
{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, true},
}
for _, tc := range tests {
got := tc.n.viableForHead(tc.justifiedEpoch, tc.finalizedEpoch)
assert.Equal(t, tc.want, got)
}
}
func TestNode_LeadsToViableHead(t *testing.T) {
f := setup(4, 3)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(5), indexToHash(3), 4, 3, false))
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3))
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 3))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 3))
}
func TestNode_SetFullyValidated(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// insert blocks in the fork pattern (optimistic status in parenthesis)
//
// 0 (false) -- 1 (false) -- 2 (false) -- 3 (true) -- 4 (true)
// \
// -- 5 (true)
//
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 4, indexToHash(4), indexToHash(3), 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(5), indexToHash(1), 1, 1, true))
opt, err := f.IsOptimistic(indexToHash(5))
require.NoError(t, err)
require.Equal(t, true, opt)
opt, err = f.IsOptimistic(indexToHash(4))
require.NoError(t, err)
require.Equal(t, true, opt)
require.NoError(t, f.store.nodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))
// block 5 should still be optimistic
opt, err = f.IsOptimistic(indexToHash(5))
require.NoError(t, err)
require.Equal(t, true, opt)
// block 4 and 3 should now be valid
opt, err = f.IsOptimistic(indexToHash(4))
require.NoError(t, err)
require.Equal(t, false, opt)
opt, err = f.IsOptimistic(indexToHash(3))
require.NoError(t, err)
require.Equal(t, false, opt)
require.Equal(t, bestChild, n.BestChild())
require.Equal(t, bestDescendant, n.BestDescendant())
require.Equal(t, graffiti, n.Graffiti())
}

View File

@@ -2,49 +2,314 @@ package protoarray
import (
"context"
"fmt"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
)
// removeNode removes the node with the given root and all of its children
// from the Fork Choice Store.
func (s *Store) removeNode(ctx context.Context, root [32]byte) error {
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
// This returns the minimum and maximum slot of the synced_tips tree
func (f *ForkChoice) boundarySyncedTips() (types.Slot, types.Slot) {
f.syncedTips.RLock()
defer f.syncedTips.RUnlock()
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
return errNilNode
min := params.BeaconConfig().FarFutureSlot
max := types.Slot(0)
for _, slot := range f.syncedTips.validatedTips {
if slot > max {
max = slot
}
if slot < min {
min = slot
}
}
if !node.optimistic || node.parent == nil {
return errInvalidOptimisticStatus
return min, max
}
// Optimistic returns true if this node is optimistically synced.
// An optimistically synced block is synced as usual, but its
// execution payload is not validated while the EL is still syncing.
// WARNING: this function does not check if slot corresponds to the
// block with the given root. An incorrect response may be
// returned when requesting earlier than finalized epoch due
// to pruning of non-canonical branches. A request for a
// combination root/slot of an available block is guaranteed
// to yield the correct result. The caller is responsible for
// checking the block's availability. A consensus bug could be
// a cause of getting this wrong, so think twice before passing
// a wrong pair.
func (f *ForkChoice) Optimistic(ctx context.Context, root [32]byte, slot types.Slot) (bool, error) {
if ctx.Err() != nil {
return false, ctx.Err()
}
children := node.parent.children
if len(children) == 1 {
node.parent.children = []*Node{}
} else {
for i, n := range children {
if n == node {
if i != len(children)-1 {
children[i] = children[len(children)-1]
// If the node is a synced tip, then it's fully validated
f.syncedTips.RLock()
_, ok := f.syncedTips.validatedTips[root]
if ok {
return false, nil
}
f.syncedTips.RUnlock()
// If the slot is higher than the max synced tip, it's optimistic
min, max := f.boundarySyncedTips()
if slot > max {
return true, nil
}
// If the slot is lower than the min synced tip, it's fully validated
if slot <= min {
return false, nil
}
// If we reached this point then the block has to be in the Fork Choice
// Store!
f.store.nodesLock.RLock()
index, ok := f.store.nodesIndices[root]
if !ok {
// This should not happen
f.store.nodesLock.RUnlock()
return false, fmt.Errorf("invalid root, slot combination, got %#x, %d",
bytesutil.Trunc(root[:]), slot)
}
node := f.store.nodes[index]
// if the node is a leaf of the Fork Choice tree, then it's
// optimistic
childIndex := node.BestChild()
if childIndex == NonExistentNode {
return true, nil
}
// recurse to the child
child := f.store.nodes[childIndex]
root = child.root
slot = child.slot
f.store.nodesLock.RUnlock()
return f.Optimistic(ctx, root, slot)
}
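Optimistic applies two shortcuts before it touches the block tree: a slot above the newest synced tip must still be optimistic, and a slot at or below the oldest synced tip is treated as fully validated; only slots in between require locating the node and following its best-child chain until a tip, a leaf, or one of the boundaries decides the answer. A toy version of the boundary shortcut with plain uint64 slots, mirroring boundarySyncedTips above (illustrative only):

package main

import "fmt"

// boundary returns the minimum and maximum slot among the synced tips, the
// same values boundarySyncedTips derives from validatedTips.
func boundary(tips map[[32]byte]uint64) (lo, hi uint64) {
	lo = ^uint64(0)
	for _, s := range tips {
		if s > hi {
			hi = s
		}
		if s < lo {
			lo = s
		}
	}
	return lo, hi
}

func main() {
	b, c, d := [32]byte{'B'}, [32]byte{'C'}, [32]byte{'D'}
	tips := map[[32]byte]uint64{b: 101, c: 102, d: 103}
	lo, hi := boundary(tips)
	fmt.Println(lo, hi)           // 101 103
	fmt.Println(uint64(110) > hi) // true: newer than every tip, still optimistic
	fmt.Println(uint64(99) <= lo) // true: at or below the oldest tip, fully validated
}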
// This function returns the index of the synced tip node that is an ancestor of the input node.
// In the event of none, `NonExistentNode` is returned.
// This internal method assumes the caller holds a lock on syncedTips and s.nodesLock
func (s *Store) findSyncedTip(ctx context.Context, node *Node, syncedTips *optimisticStore) (uint64, error) {
for {
if ctx.Err() != nil {
return 0, ctx.Err()
}
if _, ok := syncedTips.validatedTips[node.root]; ok {
return s.nodesIndices[node.root], nil
}
if node.parent == NonExistentNode {
return NonExistentNode, nil
}
node = s.nodes[node.parent]
}
}
// UpdateSyncedTipsWithValidRoot updates the synced_tips map when the block with the given root becomes VALID
func (f *ForkChoice) UpdateSyncedTipsWithValidRoot(ctx context.Context, root [32]byte) error {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
// We can only update if given root is in Fork Choice
index, ok := f.store.nodesIndices[root]
if !ok {
return errInvalidNodeIndex
}
// We can only update if root is a leaf in Fork Choice
node := f.store.nodes[index]
if node.bestChild != NonExistentNode {
return errInvalidBestChildIndex
}
// Stop early if the root is part of validated tips
f.syncedTips.Lock()
defer f.syncedTips.Unlock()
_, ok = f.syncedTips.validatedTips[root]
if ok {
return nil
}
// Cache root and slot to validated tips
f.syncedTips.validatedTips[root] = node.slot
// Compute the full valid path from the given node to its previous synced tip
// This path will now consist of fully validated blocks. Notice that
// the previous tip may have been outside the Fork Choice store.
// In this case, only one block can be in syncedTips as the whole
// Fork Choice would be a descendant of this block.
validPath := make(map[uint64]bool)
for {
if ctx.Err() != nil {
return ctx.Err()
}
parentIndex := node.parent
if parentIndex == NonExistentNode {
break
}
if parentIndex >= uint64(len(f.store.nodes)) {
return errInvalidNodeIndex
}
node = f.store.nodes[parentIndex]
_, ok = f.syncedTips.validatedTips[node.root]
if ok {
break
}
validPath[parentIndex] = true
}
// Retrieve the list of leaves in the Fork Choice
// These are all the nodes that have NonExistentNode as best child.
leaves, err := f.store.leaves()
if err != nil {
return err
}
// For each leaf, recompute the new tip.
newTips := make(map[[32]byte]types.Slot)
for _, i := range leaves {
node = f.store.nodes[i]
j := i
for {
if ctx.Err() != nil {
return ctx.Err()
}
// Stop if we reached the previous tip
_, ok = f.syncedTips.validatedTips[node.root]
if ok {
newTips[node.root] = node.slot
break
}
// Stop if we reach valid path
_, ok = validPath[j]
if ok {
newTips[node.root] = node.slot
break
}
j = node.parent
if j == NonExistentNode {
break
}
if j >= uint64(len(f.store.nodes)) {
return errInvalidNodeIndex
}
node = f.store.nodes[j]
}
}
f.syncedTips.validatedTips = newTips
return nil
}
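A VALID verdict for one leaf implicitly validates its whole ancestry, since a payload can only be valid if every ancestor payload is valid, so the update first walks from the new tip back to the previous one to collect the now fully validated path and then rebuilds the tip set from the leaves. A reduced sketch of that first walk, with parent links stored as slice indices; the validatedPath helper is hypothetical, not the store's code.

package main

import "fmt"

const none = ^uint64(0)

// validatedPath walks from a newly VALID leaf toward the root and returns the
// indices that become fully validated, stopping at the first previously
// synced tip (or at the tree root).
func validatedPath(parents []uint64, tips map[uint64]bool, leaf uint64) []uint64 {
	path := []uint64{leaf}
	for i := parents[leaf]; i != none; i = parents[i] {
		if tips[i] {
			break
		}
		path = append(path, i)
	}
	return path
}

func main() {
	// A straight chain 0 <- 1 <- 2 <- 3 where node 1 is the previous synced tip.
	parents := []uint64{none, 0, 1, 2}
	tips := map[uint64]bool{1: true}
	// Marking leaf 3 VALID also validates node 2, its ancestor up to tip 1.
	fmt.Println(validatedPath(parents, tips, 3)) // [3 2]
}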
// UpdateSyncedTipsWithInvalidRoot updates the synced_tips map when the block with the given root becomes INVALID.
func (f *ForkChoice) UpdateSyncedTipsWithInvalidRoot(ctx context.Context, root [32]byte) error {
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
idx, ok := f.store.nodesIndices[root]
if !ok {
return errInvalidNodeIndex
}
node := f.store.nodes[idx]
// We only support changing status for the tips in Fork Choice store.
if node.bestChild != NonExistentNode {
return errInvalidNodeIndex
}
parentIndex := node.parent
// This should not happen
if parentIndex == NonExistentNode {
return errInvalidNodeIndex
}
// Update the weights of the nodes subtracting the INVALID node's weight
weight := node.weight
node = f.store.nodes[parentIndex]
for {
if ctx.Err() != nil {
return ctx.Err()
}
node.weight -= weight
if node.parent == NonExistentNode {
break
}
node = f.store.nodes[node.parent]
}
parent := copyNode(f.store.nodes[parentIndex])
// delete the invalid node, order is important
f.store.nodes = append(f.store.nodes[:idx], f.store.nodes[idx+1:]...)
delete(f.store.nodesIndices, root)
// Fix parent and best child for each node
for _, node := range f.store.nodes {
if node.parent == NonExistentNode {
node.parent = NonExistentNode
} else if node.parent > idx {
node.parent -= 1
}
if node.bestChild == NonExistentNode || node.bestChild == idx {
node.bestChild = NonExistentNode
} else if node.bestChild > idx {
node.bestChild -= 1
}
if node.bestDescendant == NonExistentNode || node.bestDescendant == idx {
node.bestDescendant = NonExistentNode
} else if node.bestDescendant > idx {
node.bestDescendant -= 1
}
}
// Update the parent's best child and best descendant if necessary.
if parent.bestChild == idx || parent.bestDescendant == idx {
for childIndex, child := range f.store.nodes {
if child.parent == parentIndex {
err := f.store.updateBestChildAndDescendant(
parentIndex, uint64(childIndex))
if err != nil {
return err
}
node.parent.children = children[:len(children)-2]
break
}
}
}
return s.removeNodeAndChildren(ctx, node)
}
// removeNodeAndChildren removes `node` and all of its descendant from the Store
func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node) error {
for _, child := range node.children {
if ctx.Err() != nil {
return ctx.Err()
}
if err := s.removeNodeAndChildren(ctx, child); err != nil {
return err
}
// Return early if the parent is not a synced_tip.
f.syncedTips.Lock()
defer f.syncedTips.Unlock()
parentRoot := parent.root
_, ok = f.syncedTips.validatedTips[parentRoot]
if !ok {
return nil
}
delete(s.nodeByRoot, node.root)
leaves, err := f.store.leaves()
if err != nil {
return err
}
for _, i := range leaves {
node = f.store.nodes[i]
for {
if ctx.Err() != nil {
return ctx.Err()
}
// Return early if the parent is still a synced tip
if node.root == parentRoot {
return nil
}
_, ok = f.syncedTips.validatedTips[node.root]
if ok {
break
}
if node.parent == NonExistentNode {
break
}
node = f.store.nodes[node.parent]
}
}
delete(f.syncedTips.validatedTips, parentRoot)
return nil
}
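Because the nodes live in a flat slice addressed by index, cutting out the INVALID node shifts every later node down by one, so every stored parent, bestChild and bestDescendant index has to be patched: indices above the removed position are decremented, and bestChild/bestDescendant references to the removed node itself become NonExistentNode, which is what the fix-up loop above does. A minimal sketch of that re-indexing rule (illustrative, using a local sentinel constant):

package main

import "fmt"

const none = ^uint64(0)

// reindex patches a stored bestChild/bestDescendant index after the node at
// position removed has been cut out of the slice, following the fix-up loop
// above: pointers at the hole are cleared, pointers past it shift down.
func reindex(idx, removed uint64) uint64 {
	switch {
	case idx == none || idx == removed:
		return none
	case idx > removed:
		return idx - 1
	default:
		return idx
	}
}

func main() {
	// Suppose the node at index 2 was INVALID and removed.
	fmt.Println(reindex(1, 2), reindex(4, 2))                    // 1 3
	fmt.Println(reindex(2, 2) == none, reindex(none, 2) == none) // true true
}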

View File

@@ -4,12 +4,206 @@ import (
"context"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/testing/require"
)
// We test the algorithm to update a node from SYNCING to INVALID
// We start with the same diagram as above:
// We test the algorithm to check the optimistic status of a node. The
// status for this test is the following branching diagram
//
// -- E -- F
// /
// -- C -- D
// /
// 0 -- 1 -- A -- B -- J -- K
// \ /
// -- G -- H -- I
//
// Here nodes 0, 1, A, B, C, D are fully validated and nodes
// E, F, G, H, I, J, K are optimistic.
// Synced Tips are nodes B, C, D
// nodes 0 and 1 are outside the Fork Choice Store.
func TestOptimistic(t *testing.T) {
root0 := bytesutil.ToBytes32([]byte("hello0"))
slot0 := types.Slot(98)
root1 := bytesutil.ToBytes32([]byte("hello1"))
slot1 := types.Slot(99)
nodeA := &Node{
slot: types.Slot(100),
root: bytesutil.ToBytes32([]byte("helloA")),
bestChild: 1,
}
nodeB := &Node{
slot: types.Slot(101),
root: bytesutil.ToBytes32([]byte("helloB")),
bestChild: 2,
parent: 0,
}
nodeC := &Node{
slot: types.Slot(102),
root: bytesutil.ToBytes32([]byte("helloC")),
bestChild: 3,
parent: 1,
}
nodeD := &Node{
slot: types.Slot(103),
root: bytesutil.ToBytes32([]byte("helloD")),
bestChild: NonExistentNode,
parent: 2,
}
nodeE := &Node{
slot: types.Slot(103),
root: bytesutil.ToBytes32([]byte("helloE")),
bestChild: 5,
parent: 2,
}
nodeF := &Node{
slot: types.Slot(104),
root: bytesutil.ToBytes32([]byte("helloF")),
bestChild: NonExistentNode,
parent: 4,
}
nodeG := &Node{
slot: types.Slot(102),
root: bytesutil.ToBytes32([]byte("helloG")),
bestChild: 7,
parent: 1,
}
nodeH := &Node{
slot: types.Slot(103),
root: bytesutil.ToBytes32([]byte("helloH")),
bestChild: 8,
parent: 6,
}
nodeI := &Node{
slot: types.Slot(104),
root: bytesutil.ToBytes32([]byte("helloI")),
bestChild: NonExistentNode,
parent: 7,
}
nodeJ := &Node{
slot: types.Slot(103),
root: bytesutil.ToBytes32([]byte("helloJ")),
bestChild: 10,
parent: 6,
}
nodeK := &Node{
slot: types.Slot(104),
root: bytesutil.ToBytes32([]byte("helloK")),
bestChild: NonExistentNode,
parent: 9,
}
nodes := []*Node{
nodeA,
nodeB,
nodeC,
nodeD,
nodeE,
nodeF,
nodeG,
nodeH,
nodeI,
nodeJ,
nodeK,
}
ni := map[[32]byte]uint64{
nodeA.root: 0,
nodeB.root: 1,
nodeC.root: 2,
nodeD.root: 3,
nodeE.root: 4,
nodeF.root: 5,
nodeG.root: 6,
nodeH.root: 7,
nodeI.root: 8,
nodeJ.root: 9,
nodeK.root: 10,
}
s := &Store{
nodes: nodes,
nodesIndices: ni,
}
tips := map[[32]byte]types.Slot{
nodeB.root: nodeB.slot,
nodeC.root: nodeC.slot,
nodeD.root: nodeD.slot,
}
st := &optimisticStore{
validatedTips: tips,
}
f := &ForkChoice{
store: s,
syncedTips: st,
}
ctx := context.Background()
// We test the implementation of boundarySyncedTips
min, max := f.boundarySyncedTips()
require.Equal(t, min, types.Slot(101), "minimum tip slot is different")
require.Equal(t, max, types.Slot(103), "maximum tip slot is different")
// We test first nodes outside the Fork Choice store
op, err := f.Optimistic(ctx, root0, slot0)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.Optimistic(ctx, root1, slot1)
require.NoError(t, err)
require.Equal(t, op, false)
// We check all nodes in the Fork Choice store.
op, err = f.Optimistic(ctx, nodeA.root, nodeA.slot)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.Optimistic(ctx, nodeB.root, nodeB.slot)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.Optimistic(ctx, nodeC.root, nodeC.slot)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.Optimistic(ctx, nodeD.root, nodeD.slot)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.Optimistic(ctx, nodeE.root, nodeE.slot)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.Optimistic(ctx, nodeF.root, nodeF.slot)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.Optimistic(ctx, nodeG.root, nodeG.slot)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.Optimistic(ctx, nodeH.root, nodeH.slot)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.Optimistic(ctx, nodeI.root, nodeI.slot)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.Optimistic(ctx, nodeJ.root, nodeJ.slot)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.Optimistic(ctx, nodeK.root, nodeK.slot)
require.NoError(t, err)
require.Equal(t, op, true)
}
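// The boundarySyncedTips check exercised above boils down to a min/max scan
// over the validated tips map. The helper below is an illustrative sketch only
// (its name and existence are assumptions, not this package's implementation);
// it reproduces the arithmetic the assertions rely on: tips at slots 101, 102
// and 103 yield the boundaries (101, 103).
func exampleTipBoundaries(tips map[[32]byte]types.Slot) (types.Slot, types.Slot) {
	first := true
	var min, max types.Slot
	for _, slot := range tips {
		if first {
			min, max = slot, slot
			first = false
			continue
		}
		if slot < min {
			min = slot
		}
		if slot > max {
			max = slot
		}
	}
	return min, max
}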
// This tests the algorithm to update syncedTips
// We start with the following diagram
//
// E -- F
// /
@@ -19,52 +213,337 @@ import (
// \ \
// J -- K -- L
//
// And every block in the Fork choice is optimistic.
// And every block in the Fork choice is optimistic. Synced_Tips contains a
// single block that is outside of Fork choice
//
func TestPruneInvalid(t *testing.T) {
func TestUpdateSyncTipsWithValidRoots(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{}, 1, 1))
tests := []struct {
root [32]byte // the root of the new INVALID block
wantedNodeNumber int
root [32]byte // the root of the new VALID block
tips map[[32]byte]types.Slot // the old synced tips
newtips map[[32]byte]types.Slot // the updated synced tips
wantedErr error
}{
{
[32]byte{'j'},
12,
},
{
[32]byte{'c'},
4,
[32]byte{'i'},
map[[32]byte]types.Slot{[32]byte{'z'}: 90},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
nil,
},
{
[32]byte{'i'},
12,
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
nil,
},
{
[32]byte{'h'},
11,
[32]byte{'i'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'e'}: 103,
},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'e'}: 104,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
nil,
},
{
[32]byte{'j'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
map[[32]byte]types.Slot{
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
[32]byte{'j'}: 102,
},
nil,
},
{
[32]byte{'g'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
map[[32]byte]types.Slot{
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
[32]byte{'j'}: 102,
},
errInvalidBestChildIndex,
},
{
[32]byte{'p'},
map[[32]byte]types.Slot{},
map[[32]byte]types.Slot{},
errInvalidNodeIndex,
},
}
for _, tc := range tests {
f.syncedTips.Lock()
f.syncedTips.validatedTips = tc.tips
f.syncedTips.Unlock()
err := f.UpdateSyncedTipsWithValidRoot(context.Background(), tc.root)
if tc.wantedErr != nil {
require.ErrorIs(t, err, tc.wantedErr)
} else {
require.NoError(t, err)
f.syncedTips.RLock()
require.DeepEqual(t, f.syncedTips.validatedTips, tc.newtips)
f.syncedTips.RUnlock()
}
}
}
// We test the algorithm to update a node from SYNCING to INVALID
// We start with the same diagram as above:
//
// E(2) -- F(1)
// /
// C(7) -- D(6)
// / \
// A(10) -- B(9) G(3) -- H(1) -- I(0)
// \ \
// J(1) -- K(1) -- L(0)
//
// And every block in the Fork choice is optimistic. Synced_Tips contains a
// single block that is outside of Fork choice. The numbers in parentheses are
// the weights of the nodes before removal
//
func TestUpdateSyncTipsWithInvalidRoot(t *testing.T) {
tests := []struct {
root [32]byte // the root of the new INVALID block
tips map[[32]byte]types.Slot // the old synced tips
wantedParentTip bool
newBestChild uint64
newBestDescendant uint64
newParentWeight uint64
}{
{
[32]byte{'j'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
},
false,
3,
4,
8,
},
{
[32]byte{'j'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
},
true,
3,
4,
8,
},
{
[32]byte{'i'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'h'}: 105,
},
true,
NonExistentNode,
NonExistentNode,
1,
},
{
[32]byte{'i'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
},
false,
NonExistentNode,
NonExistentNode,
1,
},
}
for _, tc := range tests {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
require.NoError(t, f.store.removeNode(context.Background(), tc.root))
require.Equal(t, tc.wantedNodeNumber, f.store.NodeNumber())
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{}, 1, 1))
weights := []uint64{10, 10, 9, 7, 1, 6, 2, 3, 1, 1, 1, 0, 0}
f.syncedTips.Lock()
f.syncedTips.validatedTips = tc.tips
f.syncedTips.Unlock()
f.store.nodesLock.Lock()
for i, node := range f.store.nodes {
node.weight = weights[i]
}
// Make j be the best child and descendant of b
nodeB := f.store.nodes[2]
nodeB.bestChild = 4
nodeB.bestDescendant = 4
idx := f.store.nodesIndices[tc.root]
node := f.store.nodes[idx]
parentIndex := node.parent
require.NotEqual(t, NonExistentNode, parentIndex)
parent := f.store.nodes[parentIndex]
f.store.nodesLock.Unlock()
err := f.UpdateSyncedTipsWithInvalidRoot(context.Background(), tc.root)
require.NoError(t, err)
f.syncedTips.RLock()
_, parentSyncedTip := f.syncedTips.validatedTips[parent.root]
f.syncedTips.RUnlock()
require.Equal(t, tc.wantedParentTip, parentSyncedTip)
require.Equal(t, tc.newBestChild, parent.bestChild)
require.Equal(t, tc.newBestDescendant, parent.bestDescendant)
require.Equal(t, tc.newParentWeight, parent.weight)
}
}
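// A worked example of the weight bookkeeping asserted above, read off the
// diagram: removing the INVALID node j (weight 1) subtracts its weight from
// every ancestor, so b drops from 9 to 8 (the newParentWeight checked in the
// first two cases) and a drops from 10 to 9, while removing i (weight 0)
// leaves its parent h unchanged at weight 1.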
// This tests the algorithm to find the tip of a given node
// We start with the following diagram
//
// E -- F
// /
// C -- D
// / \
// A -- B G -- H -- I
// \ \
// J -- K -- L
//
//
func TestFindSyncedTip(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{}, 1, 1))
tests := []struct {
root [32]byte // the root of the block
tips map[[32]byte]types.Slot // the synced tips
wanted [32]byte // the root of expected tip
}{
{
[32]byte{'i'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
},
[32]byte{'g'},
},
{
[32]byte{'g'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'h'}: 104,
[32]byte{'k'}: 106,
},
[32]byte{'d'},
},
{
[32]byte{'e'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 103,
},
[32]byte{'d'},
},
{
[32]byte{'j'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
[32]byte{'b'},
},
{
[32]byte{'g'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
[32]byte{'g'},
},
}
for _, tc := range tests {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
node := f.store.nodes[f.store.nodesIndices[tc.root]]
syncedTips := &optimisticStore{
validatedTips: tc.tips,
}
syncedTips.RLock()
defer syncedTips.RUnlock()
idx, err := f.store.findSyncedTip(ctx, node, syncedTips)
require.NoError(t, err)
require.Equal(t, tc.wanted, f.store.nodes[idx].root)
}
}
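// The behaviour asserted above amounts to walking the parent links, starting at
// the node itself, until a root that is present in validatedTips is found. The
// helper below is a hypothetical, simplified sketch of that walk (it is not the
// store's findSyncedTip, only an illustration of the expected results in the
// cases above):
func exampleWalkToTip(nodes []*Node, idx uint64, tips map[[32]byte]types.Slot) (uint64, bool) {
	for idx != NonExistentNode {
		if _, ok := tips[nodes[idx].root]; ok {
			return idx, true
		}
		idx = nodes[idx].parent
	}
	return NonExistentNode, false
}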

View File

@@ -21,6 +21,7 @@ import (
func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
ctx := context.Background()
zeroHash := params.BeaconConfig().ZeroHash
graffiti := [32]byte{}
balances := make([]uint64, 64) // 64 active validators.
for i := 0; i < len(balances); i++ {
balances[i] = 10
@@ -46,9 +47,9 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
graffiti,
jEpoch,
fEpoch,
true,
),
)
f.ProcessAttestation(ctx, []uint64{0}, newRoot, fEpoch)
@@ -70,9 +71,9 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
graffiti,
jEpoch,
fEpoch,
true,
),
)
f.ProcessAttestation(ctx, []uint64{1}, newRoot, fEpoch)
@@ -88,17 +89,17 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// 2
// |
// 3 <- HEAD
slot = types.Slot(3)
newRoot = indexToHash(3)
slot = types.Slot(2)
newRoot = indexToHash(2)
require.NoError(t,
f.ProcessBlock(
ctx,
slot,
newRoot,
headRoot,
graffiti,
jEpoch,
fEpoch,
true,
),
)
f.ProcessAttestation(ctx, []uint64{2}, newRoot, fEpoch)
@@ -122,9 +123,9 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
graffiti,
jEpoch,
fEpoch,
true,
),
)
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
@@ -136,13 +137,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
// Check the ancestor scores from the store.
require.Equal(t, 5, len(f.store.nodeByRoot))
require.Equal(t, 4, len(f.store.nodes))
// Expect nodes to have a boosted, back-propagated score.
// Ancestors have the added weights of their children. Genesis is a special exception at 0 weight,
require.Equal(t, f.store.treeRootNode.weight, uint64(0))
require.Equal(t, f.store.nodes[0].weight, uint64(0))
// Otherwise, assuming a block, A, that is not-genesis:
// Otherwise assuming a block, A, that is not-genesis:
//
// A -> B -> C
//
@@ -156,17 +157,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
//
// In this case, we have a small fork:
//
// (A: 54) -> (B: 44) -> (C: 34)
// \_->(D: 24)
// (A: 54) -> (B: 44) -> (C: 24)
// \_->(D: 10)
//
// So B has its own weight, 10, and the sum of both C and D. That's why we see weight 54 in the
// So B has its own weight, 10, and the sum of both C and D. That's why we see weight 54 in the
// middle instead of the normal progression of (44 -> 34 -> 24).
node1 := f.store.nodeByRoot[indexToHash(1)]
require.Equal(t, node1.weight, uint64(54))
node2 := f.store.nodeByRoot[indexToHash(2)]
require.Equal(t, node2.weight, uint64(44))
node3 := f.store.nodeByRoot[indexToHash(4)]
require.Equal(t, node3.weight, uint64(24))
require.Equal(t, f.store.nodes[1].weight, uint64(54))
require.Equal(t, f.store.nodes[2].weight, uint64(44))
require.Equal(t, f.store.nodes[3].weight, uint64(24))
})
t.Run("vanilla ex ante attack", func(t *testing.T) {
f := setup(jEpoch, fEpoch)
@@ -192,9 +190,9 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
honestBlockSlot,
honestBlock,
zeroHash,
graffiti,
jEpoch,
fEpoch,
true,
),
)
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
@@ -209,9 +207,9 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
graffiti,
jEpoch,
fEpoch,
true,
),
)
@@ -258,9 +256,9 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
honestBlockSlot,
honestBlock,
zeroHash,
graffiti,
jEpoch,
fEpoch,
true,
),
)
@@ -277,9 +275,9 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
graffiti,
jEpoch,
fEpoch,
true,
),
)
@@ -333,9 +331,9 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
cSlot,
c,
a, // parent
graffiti,
jEpoch,
fEpoch,
true,
),
)
@@ -357,9 +355,9 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
bSlot,
b,
a, // parent
graffiti,
jEpoch,
fEpoch,
true,
),
)
@@ -381,9 +379,9 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
dSlot,
d,
b, // parent
graffiti,
jEpoch,
fEpoch,
true,
),
)

View File

@@ -1,9 +1,11 @@
package protoarray
import (
"bytes"
"context"
"fmt"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
@@ -14,45 +16,220 @@ import (
// before getting pruned upon new finalization.
const defaultPruneThreshold = 256
// This defines the largest staked power that an attacker is willing to
// spend in an attack to "safe head"
const safeHeadAttackersWeight = uint64(4096 * 32000000000)
// This tracks the last reported head root. Used for metrics.
var lastHeadRoot [32]byte
// applyProposerBoostScore applies the current proposer boost scores to the
// relevant nodes
func (s *Store) applyProposerBoostScore(newBalances []uint64) error {
s.proposerBoostLock.Lock()
defer s.proposerBoostLock.Unlock()
proposerScore := uint64(0)
var err error
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
if !ok || previousNode == nil {
return errInvalidProposerBoostRoot
}
previousNode.balance -= s.previousProposerBoostScore
// New initializes a new fork choice store.
func New(justifiedEpoch, finalizedEpoch types.Epoch, finalizedRoot [32]byte) *ForkChoice {
s := &Store{
justifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
finalizedRoot: finalizedRoot,
proposerBoostRoot: [32]byte{},
nodes: make([]*Node, 0),
nodesIndices: make(map[[32]byte]uint64),
canonicalNodes: make(map[[32]byte]bool),
pruneThreshold: defaultPruneThreshold,
}
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
if !ok || currentNode == nil {
return errInvalidProposerBoostRoot
}
proposerScore, err = computeProposerBoostScore(newBalances)
if err != nil {
return err
}
currentNode.balance += proposerScore
b := make([]uint64, 0)
v := make([]Vote, 0)
st := &optimisticStore{
validatedTips: make(map[[32]byte]types.Slot),
}
s.previousProposerBoostRoot = s.proposerBoostRoot
s.previousProposerBoostScore = proposerScore
return nil
return &ForkChoice{store: s, balances: b, votes: v, syncedTips: st}
}
// NodeNumber returns the current number of nodes in the Store
func (s *Store) NodeNumber() int {
return len(s.nodeByRoot)
// SyncedTips returns the synced and validated tips from the fork choice store.
func (f *ForkChoice) SyncedTips() map[[32]byte]types.Slot {
f.syncedTips.RLock()
defer f.syncedTips.RUnlock()
m := make(map[[32]byte]types.Slot)
for k, v := range f.syncedTips.validatedTips {
m[k] = v
}
return m
}
// Head returns the head root from fork choice store.
// It firsts computes validator's balance changes then recalculates block tree from leaves to root.
func (f *ForkChoice) Head(
ctx context.Context,
justifiedEpoch types.Epoch,
justifiedRoot [32]byte,
justifiedStateBalances []uint64,
finalizedEpoch types.Epoch,
) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.Head")
defer span.End()
f.votesLock.Lock()
defer f.votesLock.Unlock()
calledHeadCount.Inc()
newBalances := justifiedStateBalances
// Using the write lock here because `updateCanonicalNodes`, which is called below, requires a write operation.
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
deltas, newVotes, err := computeDeltas(ctx, f.store.nodesIndices, f.votes, f.balances, newBalances)
if err != nil {
return [32]byte{}, errors.Wrap(err, "Could not compute deltas")
}
f.votes = newVotes
if err := f.store.applyWeightChanges(ctx, justifiedEpoch, finalizedEpoch, newBalances, deltas); err != nil {
return [32]byte{}, errors.Wrap(err, "Could not apply score changes")
}
f.balances = newBalances
return f.store.head(ctx, justifiedRoot)
}
// ProcessAttestation processes attestation for vote accounting, it iterates around validator indices
// and update their votes accordingly.
func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []uint64, blockRoot [32]byte, targetEpoch types.Epoch) {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.ProcessAttestation")
defer span.End()
f.votesLock.Lock()
defer f.votesLock.Unlock()
for _, index := range validatorIndices {
// Validator indices will grow the vote cache.
for index >= uint64(len(f.votes)) {
f.votes = append(f.votes, Vote{currentRoot: params.BeaconConfig().ZeroHash, nextRoot: params.BeaconConfig().ZeroHash})
}
// Newly allocated vote if the root fields are untouched.
newVote := f.votes[index].nextRoot == params.BeaconConfig().ZeroHash &&
f.votes[index].currentRoot == params.BeaconConfig().ZeroHash
// Vote gets updated if it's newly allocated or has a higher target epoch.
if newVote || targetEpoch > f.votes[index].nextEpoch {
f.votes[index].nextEpoch = targetEpoch
f.votes[index].nextRoot = blockRoot
}
}
processedAttestationCount.Inc()
}
// ProcessBlock processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) ProcessBlock(
ctx context.Context,
slot types.Slot,
blockRoot, parentRoot, graffiti [32]byte,
justifiedEpoch, finalizedEpoch types.Epoch,
) error {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.ProcessBlock")
defer span.End()
return f.store.insert(ctx, slot, blockRoot, parentRoot, graffiti, justifiedEpoch, finalizedEpoch)
}
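// Taken together, New, ProcessBlock, ProcessAttestation and Head form the main
// flow of this package. The function below is a minimal usage sketch under
// assumed inputs (the roots, slot numbers and the single balance are made up
// for illustration); it is not taken from the repository's callers.
func exampleForkChoiceFlow() ([32]byte, error) {
	ctx := context.Background()
	f := New(0 /* justifiedEpoch */, 0 /* finalizedEpoch */, params.BeaconConfig().ZeroHash)
	// Insert the zero-hash genesis node first so it can serve as the justified root.
	if err := f.ProcessBlock(ctx, 0, params.BeaconConfig().ZeroHash, [32]byte{}, [32]byte{}, 0, 0); err != nil {
		return [32]byte{}, err
	}
	// Insert a block at slot 1 building on the genesis node.
	if err := f.ProcessBlock(ctx, 1, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{}, 0, 0); err != nil {
		return [32]byte{}, err
	}
	// Record a vote from validator 0 for that block with target epoch 0.
	f.ProcessAttestation(ctx, []uint64{0}, [32]byte{'a'}, 0)
	// Recompute the head starting from the justified (genesis) root.
	return f.Head(ctx, 0, params.BeaconConfig().ZeroHash, []uint64{100}, 0)
}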
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
// root is different than the current store finalized root, and the number of the store has met prune threshold.
func (f *ForkChoice) Prune(ctx context.Context, finalizedRoot [32]byte) error {
return f.store.prune(ctx, finalizedRoot, f.syncedTips)
}
// Nodes returns the copied list of block nodes in the fork choice store.
func (f *ForkChoice) Nodes() []*Node {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
cpy := make([]*Node, len(f.store.nodes))
copy(cpy, f.store.nodes)
return cpy
}
// Store returns the fork choice store object which contains all the information regarding proto array fork choice.
func (f *ForkChoice) Store() *Store {
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
return f.store
}
// Node returns the copied node in the fork choice store.
func (f *ForkChoice) Node(root [32]byte) *Node {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
index, ok := f.store.nodesIndices[root]
if !ok {
return nil
}
return copyNode(f.store.nodes[index])
}
// HasNode returns true if the node exists in fork choice store,
// false otherwise.
func (f *ForkChoice) HasNode(root [32]byte) bool {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
_, ok := f.store.nodesIndices[root]
return ok
}
// HasParent returns true if the node parent exists in fork choice store,
// false otherwise.
func (f *ForkChoice) HasParent(root [32]byte) bool {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
i, ok := f.store.nodesIndices[root]
if !ok || i >= uint64(len(f.store.nodes)) {
return false
}
return f.store.nodes[i].parent != NonExistentNode
}
// IsCanonical returns true if the given root is part of the canonical chain.
func (f *ForkChoice) IsCanonical(root [32]byte) bool {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
return f.store.canonicalNodes[root]
}
// AncestorRoot returns the ancestor root of input block root at a given slot.
func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArray.AncestorRoot")
defer span.End()
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
i, ok := f.store.nodesIndices[root]
if !ok {
return nil, errors.New("node does not exist")
}
if i >= uint64(len(f.store.nodes)) {
return nil, errors.New("node index out of range")
}
for f.store.nodes[i].slot > slot {
if ctx.Err() != nil {
return nil, ctx.Err()
}
i = f.store.nodes[i].parent
if i >= uint64(len(f.store.nodes)) {
return nil, errors.New("node index out of range")
}
}
return f.store.nodes[i].root[:], nil
}
// PruneThreshold of fork choice store.
func (s *Store) PruneThreshold() uint64 {
return s.pruneThreshold
}
// JustifiedEpoch of fork choice store.
@@ -72,51 +249,107 @@ func (s *Store) ProposerBoost() [fieldparams.RootLength]byte {
return s.proposerBoostRoot
}
// PruneThreshold of fork choice store.
func (s *Store) PruneThreshold() uint64 {
return s.pruneThreshold
// Nodes of fork choice store.
func (s *Store) Nodes() []*Node {
s.nodesLock.RLock()
defer s.nodesLock.RUnlock()
return s.nodes
}
// NodesIndices of fork choice store.
func (s *Store) NodesIndices() map[[32]byte]uint64 {
s.nodesLock.RLock()
defer s.nodesLock.RUnlock()
return s.nodesIndices
}
// head starts from justified root and then follows the best descendant links
// to find the best block for head. This function assumes a lock on s.nodesLock
// to find the best block for head.
func (s *Store) head(ctx context.Context, justifiedRoot [32]byte) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.head")
defer span.End()
// JustifiedRoot has to be known
justifiedNode, ok := s.nodeByRoot[justifiedRoot]
if !ok || justifiedNode == nil {
// Justified index has to be valid in node indices map, and can not be out of bound.
justifiedIndex, ok := s.nodesIndices[justifiedRoot]
if !ok {
return [32]byte{}, errUnknownJustifiedRoot
}
// If the justified node doesn't have a best descendant,
// the best node is itself.
bestDescendant := justifiedNode.bestDescendant
if bestDescendant == nil {
bestDescendant = justifiedNode
if justifiedIndex >= uint64(len(s.nodes)) {
return [32]byte{}, errInvalidJustifiedIndex
}
if !bestDescendant.viableForHead(s.justifiedEpoch, s.finalizedEpoch) {
justifiedNode := s.nodes[justifiedIndex]
bestDescendantIndex := justifiedNode.bestDescendant
// If the justified node doesn't have a best descendent,
// the best node is itself.
if bestDescendantIndex == NonExistentNode {
bestDescendantIndex = justifiedIndex
}
if bestDescendantIndex >= uint64(len(s.nodes)) {
return [32]byte{}, errInvalidBestDescendantIndex
}
bestNode := s.nodes[bestDescendantIndex]
if !s.viableForHead(bestNode) {
return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch %d != %d, justifiedEpoch %d != %d",
bestDescendant.slot, bestDescendant.weight/10e9, bestDescendant.finalizedEpoch, s.finalizedEpoch, bestDescendant.justifiedEpoch, s.justifiedEpoch)
bestNode.slot, bestNode.weight/10e9, bestNode.finalizedEpoch, s.finalizedEpoch, bestNode.justifiedEpoch, s.justifiedEpoch)
}
// Update metrics.
if bestDescendant != s.headNode {
if bestNode.root != lastHeadRoot {
headChangesCount.Inc()
headSlotNumber.Set(float64(bestDescendant.slot))
s.headNode = bestDescendant
headSlotNumber.Set(float64(bestNode.slot))
lastHeadRoot = bestNode.root
}
return bestDescendant.root, nil
// Update canonical mapping given the head root.
if err := s.updateCanonicalNodes(ctx, bestNode.root); err != nil {
return [32]byte{}, err
}
return bestNode.root, nil
}
// updateCanonicalNodes updates the canonical nodes mapping given the input block root.
func (s *Store) updateCanonicalNodes(ctx context.Context, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.updateCanonicalNodes")
defer span.End()
// Set the input node to canonical.
s.canonicalNodes[root] = true
// Get the input's parent node index.
i := s.nodesIndices[root]
n := s.nodes[i]
p := n.parent
for p != NonExistentNode {
if ctx.Err() != nil {
return ctx.Err()
}
// Get the parent node, if the node is already in canonical mapping,
// we can be sure rest of the ancestors are canonical. Exit early.
n = s.nodes[p]
if s.canonicalNodes[n.root] {
break
}
// Set parent node to canonical. Repeat until parent node index is undefined.
s.canonicalNodes[n.root] = true
p = n.parent
}
return nil
}
// insert registers a new block node to the fork choice store's node list.
// It then updates the new node's parent with best child and descendant node.
func (s *Store) insert(ctx context.Context,
slot types.Slot,
root, parentRoot [fieldparams.RootLength]byte,
justifiedEpoch, finalizedEpoch types.Epoch, optimistic bool) error {
root, parent, graffiti [32]byte,
justifiedEpoch, finalizedEpoch types.Epoch) error {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.insert")
defer span.End()
@@ -124,155 +357,373 @@ func (s *Store) insert(ctx context.Context,
defer s.nodesLock.Unlock()
// Return if the block has been inserted into Store before.
if _, ok := s.nodeByRoot[root]; ok {
if _, ok := s.nodesIndices[root]; ok {
return nil
}
parent := s.nodeByRoot[parentRoot]
index := uint64(len(s.nodes))
parentIndex, ok := s.nodesIndices[parent]
// Mark genesis block's parent as non existent.
if !ok {
parentIndex = NonExistentNode
}
n := &Node{
slot: slot,
root: root,
parent: parent,
graffiti: graffiti,
parent: parentIndex,
justifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
optimistic: optimistic,
bestChild: NonExistentNode,
bestDescendant: NonExistentNode,
weight: 0,
}
s.nodeByRoot[root] = n
if parent != nil {
parent.children = append(parent.children, n)
if err := s.treeRootNode.updateBestDescendant(ctx, s.justifiedEpoch, s.finalizedEpoch); err != nil {
s.nodesIndices[root] = index
s.nodes = append(s.nodes, n)
// Update parent with the best child and descendent only if it's available.
if n.parent != NonExistentNode {
if err := s.updateBestChildAndDescendant(parentIndex, index); err != nil {
return err
}
}
if !optimistic {
if err := n.setNodeAndParentValidated(ctx); err != nil {
return err
}
} else {
optimisticCount.Inc()
}
// Set the node as root if the store was empty
if s.treeRootNode == nil {
s.treeRootNode = n
s.headNode = n
}
// Update metrics.
processedBlockCount.Inc()
nodeCount.Set(float64(len(s.nodeByRoot)))
nodeCount.Set(float64(len(s.nodes)))
return nil
}
// updateCheckpoints Update the justified / finalized epochs in store if necessary.
func (s *Store) updateCheckpoints(justifiedEpoch, finalizedEpoch types.Epoch) {
s.justifiedEpoch = justifiedEpoch
s.finalizedEpoch = finalizedEpoch
}
// applyWeightChanges iterates backwards through the nodes in store. It checks all nodes parent
// and its best child. For each node, it updates the weight with input delta and
// back propagate the nodes delta to its parents delta. After scoring changes,
// the best child is then updated along with best descendant.
func (s *Store) applyWeightChanges(
ctx context.Context, justifiedEpoch, finalizedEpoch types.Epoch, newBalances []uint64, delta []int,
) error {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.applyWeightChanges")
defer span.End()
// pruneMaps prunes the `nodeByRoot` map
// starting from `node` down to the finalized Node or to a leaf of the Fork
// choice store. This method assumes a lock on nodesLock.
func (s *Store) pruneMaps(ctx context.Context, node, finalizedNode *Node) error {
if ctx.Err() != nil {
return ctx.Err()
// The length of the node list cannot differ from the length of the delta list.
if len(s.nodes) != len(delta) {
return errInvalidDeltaLength
}
if node == finalizedNode {
return nil
// Update the justified / finalized epochs in store if necessary.
if s.justifiedEpoch != justifiedEpoch || s.finalizedEpoch != finalizedEpoch {
s.justifiedEpoch = justifiedEpoch
s.finalizedEpoch = finalizedEpoch
}
for _, child := range node.children {
if err := s.pruneMaps(ctx, child, finalizedNode); err != nil {
return err
// Proposer score defaults to 0.
proposerScore := uint64(0)
// Iterate backwards through all index to node in store.
var err error
for i := len(s.nodes) - 1; i >= 0; i-- {
n := s.nodes[i]
// There is no need to adjust the balances or manage parent of the zero hash, it
// is an alias to the genesis block.
if n.root == params.BeaconConfig().ZeroHash {
continue
}
nodeDelta := delta[i]
// If we have a node where the proposer boost was previously applied,
// we then decrease the delta by the required score amount.
s.proposerBoostLock.Lock()
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash && s.previousProposerBoostRoot == n.root {
nodeDelta -= int(s.previousProposerBoostScore)
}
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash && s.proposerBoostRoot == n.root {
proposerScore, err = computeProposerBoostScore(newBalances)
if err != nil {
s.proposerBoostLock.Unlock()
return err
}
nodeDelta = nodeDelta + int(proposerScore)
}
s.proposerBoostLock.Unlock()
if nodeDelta < 0 {
// A node's weight can not be negative but the delta can be negative.
if int(n.weight)+nodeDelta < 0 {
n.weight = 0
} else {
// Absolute value of node delta.
d := nodeDelta
if nodeDelta < 0 {
d *= -1
}
// Subtract node's weight.
n.weight -= uint64(d)
}
} else {
// Add node's weight.
n.weight += uint64(nodeDelta)
}
s.nodes[i] = n
// Update parent's best child and descendent if the node has a known parent.
if n.parent != NonExistentNode {
// Protection against node parent index out of bound. This should not happen.
if int(n.parent) >= len(delta) {
return errInvalidParentDelta
}
// Back propagate the nodes delta to its parent.
delta[n.parent] += nodeDelta
}
}
// Set the previous boosted root and score.
s.proposerBoostLock.Lock()
s.previousProposerBoostRoot = s.proposerBoostRoot
s.previousProposerBoostScore = proposerScore
s.proposerBoostLock.Unlock()
for i := len(s.nodes) - 1; i >= 0; i-- {
n := s.nodes[i]
if n.parent != NonExistentNode {
if int(n.parent) >= len(delta) {
return errInvalidParentDelta
}
if err := s.updateBestChildAndDescendant(n.parent, uint64(i)); err != nil {
return err
}
}
}
delete(s.nodeByRoot, node.root)
return nil
}
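// A worked example of the back propagation above, mirroring the weight tests
// later in this diff: for a three node chain stored as indices 0 <- 1 <- 2,
// each node starting at weight 100 and each receiving a delta of +1, the loop
// runs from index 2 down to 0. Node 2 becomes 101 and adds its delta to node
// 1's entry, node 1 becomes 102 and adds 2 to node 0's entry, and node 0 ends
// at 103, i.e. 103 <- 102 <- 101.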
// prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
// root is different than the current store finalized root, and the number of the store has met prune threshold.
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.Prune")
// updateBestChildAndDescendant updates parent node's best child and descendent.
// It looks at input parent node and input child node and potentially modifies parent's best
// child and best descendent indices.
// There are four outcomes:
// 1.) The child is already the best child but it's now invalid due to an FFG change and should be removed.
// 2.) The child is already the best child and the parent is updated with the new best descendant.
// 3.) The child is not the best child but becomes the best child.
// 4.) The child is not the best child and does not become best child.
func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) error {
// Protection against parent index out of bound, this should not happen.
if parentIndex >= uint64(len(s.nodes)) {
return errInvalidNodeIndex
}
parent := s.nodes[parentIndex]
// Protection against child index out of bound, again this should not happen.
if childIndex >= uint64(len(s.nodes)) {
return errInvalidNodeIndex
}
child := s.nodes[childIndex]
// Is the child viable to become head? Based on justification and finalization rules.
childLeadsToViableHead, err := s.leadsToViableHead(child)
if err != nil {
return err
}
// Define 3 variables for the 3 possible assignments mentioned above. They hold
// the values that `parent.bestChild` and `parent.bestDescendant` may be set to.
// These aliases are to assist readability.
changeToNone := []uint64{NonExistentNode, NonExistentNode}
bestDescendant := child.bestDescendant
if bestDescendant == NonExistentNode {
bestDescendant = childIndex
}
changeToChild := []uint64{childIndex, bestDescendant}
noChange := []uint64{parent.bestChild, parent.bestDescendant}
var newParentChild []uint64
if parent.bestChild != NonExistentNode {
if parent.bestChild == childIndex && !childLeadsToViableHead {
// If the child is already the best child of the parent but it's not viable for head,
// we should remove it. (Outcome 1)
newParentChild = changeToNone
} else if parent.bestChild == childIndex {
// If the child is already the best child of the parent, set it again to ensure best
// descendent of the parent is updated. (Outcome 2)
newParentChild = changeToChild
} else {
// Protection against parent's best child going out of bound.
if parent.bestChild >= uint64(len(s.nodes)) {
return errInvalidBestDescendantIndex
}
bestChild := s.nodes[parent.bestChild]
// Is current parent's best child viable to be head? Based on justification and finalization rules.
bestChildLeadsToViableHead, err := s.leadsToViableHead(bestChild)
if err != nil {
return err
}
if childLeadsToViableHead && !bestChildLeadsToViableHead {
// The child leads to a viable head, but the current parent's best child doesn't.
newParentChild = changeToChild
} else if !childLeadsToViableHead && bestChildLeadsToViableHead {
// The child doesn't lead to a viable head, the current parent's best child does.
newParentChild = noChange
} else if child.weight == bestChild.weight {
// If both are viable, compare their weights.
// Tie-breaker of equal weights by root.
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
newParentChild = changeToChild
} else {
newParentChild = noChange
}
} else {
// Choose winner by weight.
if child.weight > bestChild.weight {
newParentChild = changeToChild
} else {
newParentChild = noChange
}
}
}
} else {
if childLeadsToViableHead {
// If parent doesn't have a best child and the child is viable.
newParentChild = changeToChild
} else {
// If parent doesn't have a best child and the child is not viable.
newParentChild = noChange
}
}
// Update parent with the outcome.
parent.bestChild = newParentChild[0]
parent.bestDescendant = newParentChild[1]
s.nodes[parentIndex] = parent
return nil
}
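// A concrete instance of the outcome selection above (commentary, not package
// code): if the parent's current best child is a node X with weight 5 and the
// input child is a node Y with weight 7, and both lead to viable heads, the
// viability checks cancel out, the weight comparison picks Y, and the parent's
// bestChild and bestDescendant are rewritten to point at Y (outcome 3). Had the
// weights been equal, the child whose root compares higher byte-wise would have
// won the tie instead.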
// prune prunes the store with the new finalized root. The tree is only
// pruned if the input finalized root is different from the stored one and
// the number of nodes in the store has met the prune threshold.
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *optimisticStore) error {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.prune")
defer span.End()
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
finalizedNode, ok := s.nodeByRoot[finalizedRoot]
if !ok || finalizedNode == nil {
// The node would have seen finalized root or else it
// wouldn't be able to prune it.
finalizedIndex, ok := s.nodesIndices[finalizedRoot]
if !ok {
return errUnknownFinalizedRoot
}
// The number of the nodes has not met the prune threshold.
// Pruning at small numbers incurs more cost than benefit.
if finalizedNode.depth() < s.pruneThreshold {
if finalizedIndex < s.pruneThreshold {
return nil
}
// Prune nodeByRoot starting from root
if err := s.pruneMaps(ctx, s.treeRootNode, finalizedNode); err != nil {
// Traverse through the node list starting from the finalized node at index 0.
// Nodes that are not branching off from the finalized node will be removed.
syncedTips.Lock()
defer syncedTips.Unlock()
canonicalNodesMap := make(map[uint64]uint64, uint64(len(s.nodes))-finalizedIndex)
canonicalNodes := make([]*Node, 1, uint64(len(s.nodes))-finalizedIndex)
finalizedNode := s.nodes[finalizedIndex]
finalizedTipIndex, err := s.findSyncedTip(ctx, finalizedNode, syncedTips)
if err != nil {
return err
}
finalizedNode.parent = NonExistentNode
canonicalNodes[0] = finalizedNode
canonicalNodesMap[finalizedIndex] = uint64(0)
finalizedNode.parent = nil
s.treeRootNode = finalizedNode
for idx := uint64(0); idx < uint64(len(s.nodes)); idx++ {
node := copyNode(s.nodes[idx])
parentIdx, ok := canonicalNodesMap[node.parent]
if ok {
s.nodesIndices[node.root] = uint64(len(canonicalNodes))
canonicalNodesMap[idx] = uint64(len(canonicalNodes))
node.parent = parentIdx
canonicalNodes = append(canonicalNodes, node)
} else {
// Remove node and synced tip that is not part of finalized branch.
delete(s.nodesIndices, node.root)
_, ok := syncedTips.validatedTips[node.root]
if ok && idx != finalizedTipIndex {
delete(syncedTips.validatedTips, node.root)
}
}
}
s.nodesIndices[finalizedRoot] = uint64(0)
// Recompute best child and descendant for each canonical node.
for _, node := range canonicalNodes {
if node.bestChild != NonExistentNode {
node.bestChild = canonicalNodesMap[node.bestChild]
}
if node.bestDescendant != NonExistentNode {
node.bestDescendant = canonicalNodesMap[node.bestDescendant]
}
}
s.nodes = canonicalNodes
prunedCount.Inc()
return nil
}
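// A small worked example of the remapping above (commentary only): if the
// finalized root sits at index 3 of a 6 node list, that node becomes index 0
// of the new list with its parent cleared, every surviving descendant is
// appended after it with parent, bestChild and bestDescendant rewritten through
// canonicalNodesMap, and nodes that do not descend from it are dropped from
// nodesIndices together with any synced tip recorded for them, except the tip
// of the finalized branch found by findSyncedTip.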
// tips returns a list of possible heads from fork choice store, it returns the
// roots and the slots of the leaf nodes.
func (s *Store) tips() ([][32]byte, []types.Slot) {
var roots [][32]byte
var slots []types.Slot
for root, node := range s.nodeByRoot {
if len(node.children) == 0 {
roots = append(roots, root)
slots = append(slots, node.slot)
// leadsToViableHead returns true if the node or the best descendant of the node is viable for head.
// Any node with a different finalized or justified epoch than the ones in the fork choice store
// is not viable for head.
func (s *Store) leadsToViableHead(node *Node) (bool, error) {
var bestDescendentViable bool
bestDescendentIndex := node.bestDescendant
// If the best descendant is not part of the leaves.
if bestDescendentIndex != NonExistentNode {
// Protection against out of bound: the best descendant index cannot
// exceed the length of the node list.
if bestDescendentIndex >= uint64(len(s.nodes)) {
return false, errInvalidBestDescendantIndex
}
bestDescendentNode := s.nodes[bestDescendentIndex]
bestDescendentViable = s.viableForHead(bestDescendentNode)
}
// The node is viable as long as the best descendent is viable.
return bestDescendentViable || s.viableForHead(node), nil
}
// viableForHead returns true if the node is viable for head.
// Any node with a different finalized or justified epoch than the ones in the fork choice store
// is not viable for head.
func (s *Store) viableForHead(node *Node) bool {
// `node` is viable if its justified epoch and finalized epoch are the same as the one in `Store`.
// It's also viable if we are in genesis epoch.
justified := s.justifiedEpoch == node.justifiedEpoch || s.justifiedEpoch == 0
finalized := s.finalizedEpoch == node.finalizedEpoch || s.finalizedEpoch == 0
return justified && finalized
}
// Returns the list of leaves in the Fork Choice store.
// These are all the nodes that have NonExistentNode as best child.
// This internal method assumes that the caller holds a lock on s.nodesLock.
func (s *Store) leaves() ([]uint64, error) {
var leaves []uint64
for i := uint64(0); i < uint64(len(s.nodes)); i++ {
node := s.nodes[i]
if node.bestChild == NonExistentNode {
leaves = append(leaves, i)
}
}
return roots, slots
}
// TreeRoot returns the current root Node of the Store
func (s *Store) TreeRoot() *Node {
s.nodesLock.RLock()
defer s.nodesLock.RUnlock()
return s.treeRootNode
}
// Safe returns whether the block is considered safe with respect to
// synchronicity assumptions on the network and how much an attacker is willing
// to spend
func (s *Store) Safe(ctx context.Context, root [32]byte, committeeWeight uint64) (bool, error) {
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
return false, errNilNode
}
if node.bestDescendant != nil && node.bestDescendant != s.headNode {
return false, nil
}
potentialVotes := uint64(s.headNode.slot-node.slot+1) * committeeWeight
actualVotes := node.weight
tailWeight := uint64(0)
for node.parent != nil {
if ctx.Err() != nil {
return false, ctx.Err()
}
node = node.parent
tailWeight += node.balance
}
s.proposerBoostLock.RLock()
defer s.proposerBoostLock.RUnlock()
return tailWeight+2*actualVotes > potentialVotes+safeHeadAttackersWeight+s.previousProposerBoostScore, nil
return leaves, nil
}

View File

@@ -6,6 +6,7 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
@@ -39,159 +40,447 @@ func TestStore_FinalizedEpoch(t *testing.T) {
}
}
func TestStore_NodeNumber(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.Equal(t, 2, f.store.NodeNumber())
func TestStore_Nodes(t *testing.T) {
nodes := []*Node{
{slot: 100},
{slot: 101},
}
s := &Store{
nodes: nodes,
}
require.DeepEqual(t, nodes, s.Nodes())
}
func TestStore_NodeByRoot(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0, false))
node0 := f.store.treeRootNode
node1 := node0.children[0]
node2 := node1.children[0]
expectedRoots := map[[32]byte]*Node{
params.BeaconConfig().ZeroHash: node0,
indexToHash(1): node1,
indexToHash(2): node2,
func TestStore_NodesIndices(t *testing.T) {
nodeIndices := map[[32]byte]uint64{
{'a'}: 1,
{'b'}: 2,
}
require.Equal(t, 3, f.store.NodeNumber())
for root, node := range f.store.nodeByRoot {
v, ok := expectedRoots[root]
require.Equal(t, ok, true)
require.Equal(t, v, node)
s := &Store{
nodesIndices: nodeIndices,
}
require.DeepEqual(t, nodeIndices, s.NodesIndices())
}
func TestForkChoice_HasNode(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.Equal(t, true, f.HasNode(indexToHash(1)))
nodeIndices := map[[32]byte]uint64{
{'a'}: 1,
{'b'}: 2,
}
s := &Store{
nodesIndices: nodeIndices,
}
f := &ForkChoice{store: s}
require.Equal(t, true, f.HasNode([32]byte{'a'}))
}
func TestForkChoice_Store(t *testing.T) {
nodeIndices := map[[32]byte]uint64{
{'a'}: 1,
{'b'}: 2,
}
s := &Store{
nodesIndices: nodeIndices,
}
f := &ForkChoice{store: s}
require.DeepEqual(t, s, f.Store())
}
func TestForkChoice_Nodes(t *testing.T) {
nodes := []*Node{
{slot: 100},
{slot: 101},
}
s := &Store{
nodes: nodes,
}
f := &ForkChoice{store: s}
require.DeepEqual(t, s.nodes, f.Nodes())
}
func TestStore_Head_UnknownJustifiedRoot(t *testing.T) {
f := setup(0, 0)
s := &Store{nodesIndices: make(map[[32]byte]uint64)}
_, err := f.store.head(context.Background(), [32]byte{'a'})
_, err := s.head(context.Background(), [32]byte{})
assert.ErrorContains(t, errUnknownJustifiedRoot.Error(), err)
}
func TestStore_Head_UnknownJustifiedIndex(t *testing.T) {
r := [32]byte{'A'}
indices := make(map[[32]byte]uint64)
indices[r] = 1
s := &Store{nodesIndices: indices}
_, err := s.head(context.Background(), r)
assert.ErrorContains(t, errInvalidJustifiedIndex.Error(), err)
}
func TestStore_Head_Itself(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
r := [32]byte{'A'}
indices := make(map[[32]byte]uint64)
indices[r] = 0
// Since the justified node does not have a best descendant, the best node
// is itself.
h, err := f.store.head(context.Background(), indexToHash(1))
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, bestDescendant: NonExistentNode}}, canonicalNodes: make(map[[32]byte]bool)}
h, err := s.head(context.Background(), r)
require.NoError(t, err)
assert.Equal(t, indexToHash(1), h)
assert.Equal(t, r, h)
}
func TestStore_Head_BestDescendant(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(3), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(4), indexToHash(2), 0, 0, false))
h, err := f.store.head(context.Background(), indexToHash(1))
r := [32]byte{'A'}
best := [32]byte{'B'}
indices := make(map[[32]byte]uint64)
indices[r] = 0
// Since the justified node's best descendant is at index 1 and its root is `best`,
// the head should be `best`.
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, bestDescendant: 1}, {root: best}}, canonicalNodes: make(map[[32]byte]bool)}
h, err := s.head(context.Background(), r)
require.NoError(t, err)
require.Equal(t, h, indexToHash(4))
assert.Equal(t, best, h)
}
func TestStore_UpdateBestDescendant_ContextCancelled(t *testing.T) {
func TestStore_Head_ContextCancelled(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
f := setup(0, 0)
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
r := [32]byte{'A'}
best := [32]byte{'B'}
indices := make(map[[32]byte]uint64)
indices[r] = 0
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, bestDescendant: 1}, {root: best}}, canonicalNodes: make(map[[32]byte]bool)}
cancel()
err := f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false)
_, err := s.head(ctx, r)
require.ErrorContains(t, "context canceled", err)
}
func TestStore_Insert(t *testing.T) {
func TestStore_Insert_UnknownParent(t *testing.T) {
// The new node does not have a parent.
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode}
require.NoError(t, s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), 1, 1, false))
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
assert.Equal(t, (*Node)(nil), treeRootNode.parent, "Incorrect parent")
assert.Equal(t, 1, len(treeRootNode.children), "Incorrect children number")
child := treeRootNode.children[0]
assert.Equal(t, types.Epoch(1), child.justifiedEpoch, "Incorrect justification")
assert.Equal(t, types.Epoch(1), child.finalizedEpoch, "Incorrect finalization")
assert.Equal(t, indexToHash(100), child.root, "Incorrect root")
s := &Store{nodesIndices: make(map[[32]byte]uint64)}
require.NoError(t, s.insert(context.Background(), 100, [32]byte{'A'}, [32]byte{'B'}, [32]byte{}, 1, 1))
assert.Equal(t, 1, len(s.nodes), "Did not insert block")
assert.Equal(t, 1, len(s.nodesIndices), "Did not insert block")
assert.Equal(t, NonExistentNode, s.nodes[0].parent, "Incorrect parent")
assert.Equal(t, types.Epoch(1), s.nodes[0].justifiedEpoch, "Incorrect justification")
assert.Equal(t, types.Epoch(1), s.nodes[0].finalizedEpoch, "Incorrect finalization")
assert.Equal(t, [32]byte{'A'}, s.nodes[0].root, "Incorrect root")
}
func TestStore_updateCheckpoints(t *testing.T) {
f := setup(0, 0)
s := f.store
func TestStore_Insert_KnownParent(t *testing.T) {
// Similar to UnknownParent test, but this time the new node has a valid parent already in store.
// The new node builds on top of the parent.
s := &Store{nodesIndices: make(map[[32]byte]uint64)}
s.nodes = []*Node{{}}
p := [32]byte{'B'}
s.nodesIndices[p] = 0
require.NoError(t, s.insert(context.Background(), 100, [32]byte{'A'}, p, [32]byte{}, 1, 1))
assert.Equal(t, 2, len(s.nodes), "Did not insert block")
assert.Equal(t, 2, len(s.nodesIndices), "Did not insert block")
assert.Equal(t, uint64(0), s.nodes[1].parent, "Incorrect parent")
assert.Equal(t, types.Epoch(1), s.nodes[1].justifiedEpoch, "Incorrect justification")
assert.Equal(t, types.Epoch(1), s.nodes[1].finalizedEpoch, "Incorrect finalization")
assert.Equal(t, [32]byte{'A'}, s.nodes[1].root, "Incorrect root")
}
s.updateCheckpoints(1, 1)
func TestStore_ApplyScoreChanges_InvalidDeltaLength(t *testing.T) {
s := &Store{}
// This will fail because the node list has a length of 0 while the delta list has a length of 1.
err := s.applyWeightChanges(context.Background(), 0, 0, []uint64{}, []int{1})
assert.ErrorContains(t, errInvalidDeltaLength.Error(), err)
}
func TestStore_ApplyScoreChanges_UpdateEpochs(t *testing.T) {
s := &Store{}
// The justified and finalized epochs in Store should be updated to 1 and 1 given the following input.
require.NoError(t, s.applyWeightChanges(context.Background(), 1, 1, []uint64{}, []int{}))
assert.Equal(t, types.Epoch(1), s.justifiedEpoch, "Did not update justified epoch")
assert.Equal(t, types.Epoch(1), s.finalizedEpoch, "Did not update finalized epoch")
}
func TestStore_ApplyScoreChanges_UpdateWeightsPositiveDelta(t *testing.T) {
// Construct 3 nodes with weight 100 on each node. The 3 nodes are linked to each other.
s := &Store{nodes: []*Node{
{root: [32]byte{'A'}, weight: 100},
{root: [32]byte{'A'}, weight: 100},
{parent: 1, root: [32]byte{'A'}, weight: 100}}}
// Each node gets one unique vote. The weight should look like 103 <- 102 <- 101 because
// they get propagated back.
require.NoError(t, s.applyWeightChanges(context.Background(), 0, 0, []uint64{}, []int{1, 1, 1}))
assert.Equal(t, uint64(103), s.nodes[0].weight)
assert.Equal(t, uint64(102), s.nodes[1].weight)
assert.Equal(t, uint64(101), s.nodes[2].weight)
}
func TestStore_ApplyScoreChanges_UpdateWeightsNegativeDelta(t *testing.T) {
// Construct 3 nodes with weight 100 on each node. The 3 nodes are linked to each other.
s := &Store{nodes: []*Node{
{root: [32]byte{'A'}, weight: 100},
{root: [32]byte{'A'}, weight: 100},
{parent: 1, root: [32]byte{'A'}, weight: 100}}}
// Each node gets one unique vote which contributes to negative delta.
// The weight should look like 97 <- 98 <- 99 because they get propagated back.
require.NoError(t, s.applyWeightChanges(context.Background(), 0, 0, []uint64{}, []int{-1, -1, -1}))
assert.Equal(t, uint64(97), s.nodes[0].weight)
assert.Equal(t, uint64(98), s.nodes[1].weight)
assert.Equal(t, uint64(99), s.nodes[2].weight)
}
func TestStore_ApplyScoreChanges_UpdateWeightsMixedDelta(t *testing.T) {
// Construct 3 nodes with weight 100 on each node. The 3 nodes are linked to each other.
s := &Store{nodes: []*Node{
{root: [32]byte{'A'}, weight: 100},
{root: [32]byte{'A'}, weight: 100},
{parent: 1, root: [32]byte{'A'}, weight: 100}}}
// Each node gets one mixed vote. The weight should look like 100 <- 200 <- 250.
require.NoError(t, s.applyWeightChanges(context.Background(), 0, 0, []uint64{}, []int{-100, -50, 150}))
assert.Equal(t, uint64(100), s.nodes[0].weight)
assert.Equal(t, uint64(200), s.nodes[1].weight)
assert.Equal(t, uint64(250), s.nodes[2].weight)
}
func TestStore_UpdateBestChildAndDescendant_RemoveChild(t *testing.T) {
// Make parent's best child equal's to input child index and child is not viable.
s := &Store{nodes: []*Node{{bestChild: 1}, {}}, justifiedEpoch: 1, finalizedEpoch: 1}
require.NoError(t, s.updateBestChildAndDescendant(0, 1))
// Verify parent's best child and best descendant are `none`.
assert.Equal(t, NonExistentNode, s.nodes[0].bestChild, "Did not get correct best child index")
assert.Equal(t, NonExistentNode, s.nodes[0].bestDescendant, "Did not get correct best descendant index")
}
func TestStore_UpdateBestChildAndDescendant_UpdateDescendant(t *testing.T) {
// Make parent's best child equal to child index and child is viable.
s := &Store{nodes: []*Node{{bestChild: 1}, {bestDescendant: NonExistentNode}}}
require.NoError(t, s.updateBestChildAndDescendant(0, 1))
// Verify parent's best child is unchanged and best descendant is now set to the child index.
assert.Equal(t, uint64(1), s.nodes[0].bestChild, "Did not get correct best child index")
assert.Equal(t, uint64(1), s.nodes[0].bestDescendant, "Did not get correct best descendant index")
}
func TestStore_UpdateBestChildAndDescendant_ChangeChildByViability(t *testing.T) {
// Make parent's best child not equal to the child index; the child leads to a viable index and
// the parent's best child doesn't lead to a viable index.
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
nodes: []*Node{{bestChild: 1, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1}}}
require.NoError(t, s.updateBestChildAndDescendant(0, 2))
// Verify parent's best child and best descendant are set to child index.
assert.Equal(t, uint64(2), s.nodes[0].bestChild, "Did not get correct best child index")
assert.Equal(t, uint64(2), s.nodes[0].bestDescendant, "Did not get correct best descendant index")
}
func TestStore_UpdateBestChildAndDescendant_ChangeChildByWeight(t *testing.T) {
// Make parent's best child not equal to the child index; both lead to viable indices,
// but the child has more weight than the parent's best child.
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
nodes: []*Node{{bestChild: 1, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1, weight: 1}}}
require.NoError(t, s.updateBestChildAndDescendant(0, 2))
// Verify parent's best child and best descendant are set to child index.
assert.Equal(t, uint64(2), s.nodes[0].bestChild, "Did not get correct best child index")
assert.Equal(t, uint64(2), s.nodes[0].bestDescendant, "Did not get correct best descendant index")
}
func TestStore_UpdateBestChildAndDescendant_ChangeChildAtLeaf(t *testing.T) {
// Set parent's best child to none while the input child leads to a viable index.
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
nodes: []*Node{{bestChild: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1}}}
require.NoError(t, s.updateBestChildAndDescendant(0, 2))
// Verify parent's best child and best descendant are set to child index.
assert.Equal(t, uint64(2), s.nodes[0].bestChild, "Did not get correct best child index")
assert.Equal(t, uint64(2), s.nodes[0].bestDescendant, "Did not get correct best descendant index")
}
func TestStore_UpdateBestChildAndDescendant_NoChangeByViability(t *testing.T) {
// Make parent's best child not equal to the child index; the child leads to a non-viable index and
// the parent's best child leads to a viable index.
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
nodes: []*Node{{bestChild: 1, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode}}}
require.NoError(t, s.updateBestChildAndDescendant(0, 2))
// Verify parent's best child and best descendant are not changed.
assert.Equal(t, uint64(1), s.nodes[0].bestChild, "Did not get correct best child index")
assert.Equal(t, uint64(0), s.nodes[0].bestDescendant, "Did not get correct best descendant index")
}
func TestStore_UpdateBestChildAndDescendant_NoChangeByWeight(t *testing.T) {
// Make parent's best child not equal to the child index; both lead to viable indices,
// but the parent's best child has more weight.
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
nodes: []*Node{{bestChild: 1, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1, weight: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1}}}
require.NoError(t, s.updateBestChildAndDescendant(0, 2))
// Verify parent's best child and best descendant are not changed.
assert.Equal(t, uint64(1), s.nodes[0].bestChild, "Did not get correct best child index")
assert.Equal(t, uint64(0), s.nodes[0].bestDescendant, "Did not get correct best descendant index")
}
func TestStore_UpdateBestChildAndDescendant_NoChangeAtLeaf(t *testing.T) {
// Set parent's best child to none while the input child does not lead to a viable index.
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
nodes: []*Node{{bestChild: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode}}}
require.NoError(t, s.updateBestChildAndDescendant(0, 2))
// Verify parent's best child and best descendant are not changed.
assert.Equal(t, NonExistentNode, s.nodes[0].bestChild, "Did not get correct best child index")
assert.Equal(t, uint64(0), s.nodes[0].bestDescendant, "Did not get correct best descendant index")
}
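Taken together, the updateBestChildAndDescendant tests above pin down a small decision table: drop the current best child when it is the input child but no longer viable, refresh through the same child when it stays viable, adopt a new child when it alone leads to a viable head or when both do and it is heavier, and otherwise keep the incumbent. An illustrative reduction of that table (a hypothetical helper, not the actual method, with "viable" standing in for "leads to a viable head"):

package main

import "fmt"

// choice names the three outcomes the tests assert on.
type choice int

const (
	keepCurrent choice = iota
	adoptChild
	removeChild
)

// decide condenses the rule exercised by the seven tests above.
func decide(hasCurrent, currentIsChild, childViable, currentViable bool, childWeight, currentWeight uint64) choice {
	switch {
	case !hasCurrent && childViable:
		return adoptChild // leaf case: no best child yet, a viable child fills the slot
	case !hasCurrent:
		return keepCurrent // leaf case: non-viable child, best child stays none
	case currentIsChild && !childViable:
		return removeChild // the incumbent is the input child but is no longer viable
	case currentIsChild:
		return adoptChild // same child, still viable: refresh the best descendant
	case childViable && !currentViable:
		return adoptChild // viability beats a non-viable incumbent
	case childViable && childWeight > currentWeight:
		return adoptChild // both viable: the heavier child wins
	default:
		return keepCurrent
	}
}

func main() {
	fmt.Println(decide(true, false, true, true, 2, 1)) // 1 (adoptChild), as in ChangeChildByWeight
	fmt.Println(decide(true, true, false, true, 0, 0)) // 2 (removeChild), as in RemoveChild
}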
func TestStore_Prune_LessThanThreshold(t *testing.T) {
// Define 100 nodes in store.
numOfNodes := uint64(100)
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
for i := uint64(2); i < numOfNodes; i++ {
require.NoError(t, f.ProcessBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0, false))
numOfNodes := 100
indices := make(map[[32]byte]uint64)
nodes := make([]*Node, 0)
indices[indexToHash(uint64(0))] = uint64(0)
nodes = append(nodes, &Node{
slot: types.Slot(0),
root: indexToHash(uint64(0)),
bestDescendant: uint64(numOfNodes - 1),
bestChild: uint64(1),
parent: NonExistentNode,
})
for i := 1; i < numOfNodes-1; i++ {
indices[indexToHash(uint64(i))] = uint64(i)
nodes = append(nodes, &Node{
slot: types.Slot(i),
root: indexToHash(uint64(i)),
bestDescendant: uint64(numOfNodes - 1),
bestChild: uint64(i + 1),
parent: uint64(i) - 1,
})
}
indices[indexToHash(uint64(numOfNodes-1))] = uint64(numOfNodes - 1)
nodes = append(nodes, &Node{
slot: types.Slot(numOfNodes - 1),
root: indexToHash(uint64(numOfNodes - 1)),
bestDescendant: NonExistentNode,
bestChild: NonExistentNode,
parent: uint64(numOfNodes - 2),
})
s := f.store
s.pruneThreshold = 100
s := &Store{nodes: nodes, nodesIndices: indices, pruneThreshold: 100}
syncedTips := &optimisticStore{}
// Finalized root has depth 99 so everything before it should be pruned,
// Finalized root is at index 99 so everything before 99 should be pruned,
// but PruneThreshold is at 100 so nothing will be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
assert.Equal(t, 100, len(s.nodeByRoot), "Incorrect nodes count")
require.NoError(t, s.prune(context.Background(), indexToHash(99), syncedTips))
assert.Equal(t, 100, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 100, len(s.nodesIndices), "Incorrect node indices count")
}
func TestStore_Prune_MoreThanThreshold(t *testing.T) {
// Define 100 nodes in store.
numOfNodes := uint64(100)
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
for i := uint64(2); i < numOfNodes; i++ {
require.NoError(t, f.ProcessBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0, false))
numOfNodes := 100
indices := make(map[[32]byte]uint64)
nodes := make([]*Node, 0)
indices[indexToHash(uint64(0))] = uint64(0)
nodes = append(nodes, &Node{
slot: types.Slot(0),
root: indexToHash(uint64(0)),
bestDescendant: uint64(numOfNodes - 1),
bestChild: uint64(1),
parent: NonExistentNode,
})
for i := 1; i < numOfNodes-1; i++ {
indices[indexToHash(uint64(i))] = uint64(i)
nodes = append(nodes, &Node{
slot: types.Slot(i),
root: indexToHash(uint64(i)),
bestDescendant: uint64(numOfNodes - 1),
bestChild: uint64(i + 1),
parent: uint64(i) - 1,
})
}
s := f.store
s.pruneThreshold = 0
nodes = append(nodes, &Node{
slot: types.Slot(numOfNodes - 1),
root: indexToHash(uint64(numOfNodes - 1)),
bestDescendant: NonExistentNode,
bestChild: NonExistentNode,
parent: uint64(numOfNodes - 2),
})
indices[indexToHash(uint64(numOfNodes-1))] = uint64(numOfNodes - 1)
s := &Store{nodes: nodes, nodesIndices: indices}
syncedTips := &optimisticStore{}
// Finalized root is at index 99 so everything before 99 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
assert.Equal(t, 1, len(s.nodeByRoot), "Incorrect nodes count")
require.NoError(t, s.prune(context.Background(), indexToHash(99), syncedTips))
assert.Equal(t, 1, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 1, len(s.nodesIndices), "Incorrect node indices count")
}
func TestStore_Prune_MoreThanOnce(t *testing.T) {
// Define 100 nodes in store.
numOfNodes := uint64(100)
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
for i := uint64(2); i < numOfNodes; i++ {
require.NoError(t, f.ProcessBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0, false))
numOfNodes := 100
indices := make(map[[32]byte]uint64)
nodes := make([]*Node, 0)
nodes = append(nodes, &Node{
slot: types.Slot(0),
root: indexToHash(uint64(0)),
bestDescendant: uint64(numOfNodes - 1),
bestChild: uint64(1),
parent: NonExistentNode,
})
for i := 1; i < numOfNodes-1; i++ {
indices[indexToHash(uint64(i))] = uint64(i)
nodes = append(nodes, &Node{
slot: types.Slot(i),
root: indexToHash(uint64(i)),
bestDescendant: uint64(numOfNodes - 1),
bestChild: uint64(i + 1),
parent: uint64(i) - 1,
})
}
nodes = append(nodes, &Node{
slot: types.Slot(numOfNodes - 1),
root: indexToHash(uint64(numOfNodes - 1)),
bestDescendant: NonExistentNode,
bestChild: NonExistentNode,
parent: uint64(numOfNodes - 2),
})
s := f.store
s.pruneThreshold = 0
s := &Store{nodes: nodes, nodesIndices: indices}
syncedTips := &optimisticStore{}
// Finalized root is at index 10 so everything before index 10 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(10)))
assert.Equal(t, 90, len(s.nodeByRoot), "Incorrect nodes count")
require.NoError(t, s.prune(context.Background(), indexToHash(10), syncedTips))
assert.Equal(t, 90, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 90, len(s.nodesIndices), "Incorrect node indices count")
// One more time.
require.NoError(t, s.prune(context.Background(), indexToHash(20)))
assert.Equal(t, 80, len(s.nodeByRoot), "Incorrect nodes count")
require.NoError(t, s.prune(context.Background(), indexToHash(20), syncedTips))
assert.Equal(t, 80, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 80, len(s.nodesIndices), "Incorrect node indices count")
}
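Both prune tests above rest on two facts: nothing is pruned until the finalized node's index exceeds pruneThreshold, and once pruning happens every node before the finalized index is dropped and the survivors are re-indexed from zero. A stripped-down sketch of that behaviour (a hypothetical helper; parent and best-child fix-ups are omitted):

package main

import "fmt"

// node keeps only what this sketch of pruning needs: the block root.
type node struct {
	root [32]byte
}

// pruneBefore drops every node before the finalized root's index once that
// index exceeds the threshold, then rebuilds the root -> index map from zero.
func pruneBefore(nodes []*node, indices map[[32]byte]uint64, finalizedRoot [32]byte, threshold uint64) ([]*node, map[[32]byte]uint64) {
	idx, ok := indices[finalizedRoot]
	if !ok || idx < threshold {
		return nodes, indices // below the prune threshold: nothing happens
	}
	kept := nodes[idx:]
	rebuilt := make(map[[32]byte]uint64, len(kept))
	for i, n := range kept {
		rebuilt[n.root] = uint64(i)
	}
	return kept, rebuilt
}

func main() {
	nodes := make([]*node, 0, 100)
	indices := make(map[[32]byte]uint64, 100)
	for i := 0; i < 100; i++ {
		r := [32]byte{byte(i)}
		indices[r] = uint64(i)
		nodes = append(nodes, &node{root: r})
	}
	nodes, indices = pruneBefore(nodes, indices, [32]byte{10}, 0)
	fmt.Println(len(nodes), len(indices)) // 90 90, matching TestStore_Prune_MoreThanOnce
}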
// This unit test starts with a simple branch like this
@@ -202,15 +491,41 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
//
// And we finalize 1. As a result only 1 should survive
func TestStore_Prune_NoDanglingBranch(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
f.store.pruneThreshold = 0
s := f.store
require.NoError(t, s.prune(context.Background(), indexToHash(1)))
require.Equal(t, len(s.nodeByRoot), 1)
nodes := []*Node{
{
slot: 100,
bestChild: 1,
bestDescendant: 1,
root: indexToHash(uint64(0)),
parent: NonExistentNode,
},
{
slot: 101,
root: indexToHash(uint64(1)),
bestChild: NonExistentNode,
bestDescendant: NonExistentNode,
parent: 0,
},
{
slot: 101,
root: indexToHash(uint64(2)),
parent: 0,
bestChild: NonExistentNode,
bestDescendant: NonExistentNode,
},
}
syncedTips := &optimisticStore{}
s := &Store{
pruneThreshold: 0,
nodes: nodes,
nodesIndices: map[[32]byte]uint64{
indexToHash(uint64(0)): 0,
indexToHash(uint64(1)): 1,
indexToHash(uint64(2)): 2,
},
}
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1)), syncedTips))
require.Equal(t, len(s.nodes), 1)
}
// This test starts with the following branching diagram
@@ -225,58 +540,209 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
// J -- K -- L
//
//
func TestStore_tips(t *testing.T) {
// Synced tips are B, D and E, and we finalize F. All that is left in fork
// choice is F, and the only synced tip left is E, which is now outside of
// fork choice.
func TestStore_PruneSyncedTips(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
expectedMap := map[[32]byte]types.Slot{
[32]byte{'f'}: 105,
[32]byte{'i'}: 106,
[32]byte{'l'}: 106,
[32]byte{'j'}: 102,
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{}, 1, 1))
syncedTips := &optimisticStore{
validatedTips: map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'e'}: 104,
},
}
roots, slots := f.store.tips()
for i, r := range roots {
expectedSlot, ok := expectedMap[r]
require.Equal(t, true, ok)
require.Equal(t, slots[i], expectedSlot)
f.syncedTips = syncedTips
f.store.pruneThreshold = 0
require.NoError(t, f.Prune(ctx, [32]byte{'f'}))
require.Equal(t, 1, len(f.syncedTips.validatedTips))
_, ok := f.syncedTips.validatedTips[[32]byte{'e'}]
require.Equal(t, true, ok)
}
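The syncedTips structure exercised above is a mutex-guarded map from block root to slot; the precise rule for which tips survive a prune lives in the fork choice package itself. The sketch below only illustrates the kind of guarded bookkeeping the type implies, with hypothetical helper names rather than this branch's actual methods:

package main

import (
	"fmt"
	"sync"
)

// tipStore mirrors the shape of optimisticStore: validated tips keyed by root.
type tipStore struct {
	sync.RWMutex
	validatedTips map[[32]byte]uint64 // root -> slot
}

// setTip records a newly validated tip under the write lock.
func (t *tipStore) setTip(root [32]byte, slot uint64) {
	t.Lock()
	defer t.Unlock()
	t.validatedTips[root] = slot
}

// dropTip removes a tip that pruning has made irrelevant.
func (t *tipStore) dropTip(root [32]byte) {
	t.Lock()
	defer t.Unlock()
	delete(t.validatedTips, root)
}

// hasTip answers reads under the shared lock.
func (t *tipStore) hasTip(root [32]byte) bool {
	t.RLock()
	defer t.RUnlock()
	_, ok := t.validatedTips[root]
	return ok
}

func main() {
	s := &tipStore{validatedTips: make(map[[32]byte]uint64)}
	s.setTip([32]byte{'e'}, 104)
	s.dropTip([32]byte{'b'})
	fmt.Println(s.hasTip([32]byte{'e'}), s.hasTip([32]byte{'b'})) // true false
}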
func TestStore_LeadsToViableHead(t *testing.T) {
tests := []struct {
n *Node
justifiedEpoch types.Epoch
finalizedEpoch types.Epoch
want bool
}{
{&Node{}, 0, 0, true},
{&Node{}, 1, 0, false},
{&Node{}, 0, 1, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 1, 1, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 2, 2, false},
{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, true},
}
for _, tc := range tests {
s := &Store{
justifiedEpoch: tc.justifiedEpoch,
finalizedEpoch: tc.finalizedEpoch,
nodes: []*Node{tc.n},
}
got, err := s.leadsToViableHead(tc.n)
require.NoError(t, err)
assert.Equal(t, tc.want, got)
}
}
func TestStore_PruneMapsNodes(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
s := f.store
s.pruneThreshold = 0
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1))))
require.Equal(t, len(s.nodeByRoot), 1)
func TestStore_ViableForHead(t *testing.T) {
tests := []struct {
n *Node
justifiedEpoch types.Epoch
finalizedEpoch types.Epoch
want bool
}{
{&Node{}, 0, 0, true},
{&Node{}, 1, 0, false},
{&Node{}, 0, 1, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 1, 1, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 2, 2, false},
{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, true},
}
for _, tc := range tests {
s := &Store{
justifiedEpoch: tc.justifiedEpoch,
finalizedEpoch: tc.finalizedEpoch,
}
assert.Equal(t, tc.want, s.viableForHead(tc.n))
}
}
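Both table tests above describe the same predicate: a node can serve as head (or lead to a usable head) only when its justified and finalized epochs agree with the store's. Treating a zero epoch on the store side as "no requirement yet" is an assumption consistent with, though not forced by, the rows above. Restated as a tiny standalone check:

package main

import "fmt"

// viableForHead restates the predicate the two tables pin down.
func viableForHead(nodeJustified, nodeFinalized, storeJustified, storeFinalized uint64) bool {
	justifiedOK := storeJustified == 0 || nodeJustified == storeJustified
	finalizedOK := storeFinalized == 0 || nodeFinalized == storeFinalized
	return justifiedOK && finalizedOK
}

func main() {
	fmt.Println(viableForHead(0, 0, 1, 0)) // false, as in the second table row
	fmt.Println(viableForHead(4, 3, 4, 3)) // true, as in the last table row
}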
func TestStore_HasParent(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1, false))
require.Equal(t, false, f.HasParent(params.BeaconConfig().ZeroHash))
require.Equal(t, true, f.HasParent(indexToHash(1)))
require.Equal(t, true, f.HasParent(indexToHash(2)))
require.Equal(t, true, f.HasParent(indexToHash(3)))
require.Equal(t, false, f.HasParent(indexToHash(4)))
tests := []struct {
m map[[32]byte]uint64
n []*Node
r [32]byte
want bool
}{
{r: [32]byte{'a'}, want: false},
{m: map[[32]byte]uint64{{'a'}: 0}, r: [32]byte{'a'}, want: false},
{m: map[[32]byte]uint64{{'a'}: 0}, r: [32]byte{'a'},
n: []*Node{{parent: NonExistentNode}}, want: false},
{m: map[[32]byte]uint64{{'a'}: 0},
n: []*Node{{parent: 0}}, r: [32]byte{'a'},
want: true},
}
for _, tc := range tests {
f := &ForkChoice{store: &Store{
nodesIndices: tc.m,
nodes: tc.n,
}}
assert.Equal(t, tc.want, f.HasParent(tc.r))
}
}
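The HasParent table above reduces to two checks: the root must be indexed within range, and its stored parent must not be the NonExistentNode sentinel. A self-contained restatement with simplified local types:

package main

import "fmt"

const nonExistentNode = ^uint64(0)

// node carries only the parent index needed for this check.
type node struct {
	parent uint64
}

// hasParent reports whether the given root is known and has a real parent.
func hasParent(indices map[[32]byte]uint64, nodes []*node, root [32]byte) bool {
	i, ok := indices[root]
	if !ok || i >= uint64(len(nodes)) {
		return false
	}
	return nodes[i].parent != nonExistentNode
}

func main() {
	indices := map[[32]byte]uint64{{'a'}: 0}
	fmt.Println(hasParent(indices, []*node{{parent: 0}}, [32]byte{'a'}))               // true
	fmt.Println(hasParent(indices, []*node{{parent: nonExistentNode}}, [32]byte{'a'})) // false
}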
func TestStore_AncestorRoot(t *testing.T) {
ctx := context.Background()
f := &ForkChoice{store: &Store{}}
f.store.nodesIndices = map[[32]byte]uint64{}
_, err := f.AncestorRoot(ctx, [32]byte{'a'}, 0)
assert.ErrorContains(t, "node does not exist", err)
f.store.nodesIndices[[32]byte{'a'}] = 0
_, err = f.AncestorRoot(ctx, [32]byte{'a'}, 0)
assert.ErrorContains(t, "node index out of range", err)
f.store.nodesIndices[[32]byte{'b'}] = 1
f.store.nodesIndices[[32]byte{'c'}] = 2
f.store.nodes = []*Node{
{slot: 1, root: [32]byte{'a'}, parent: NonExistentNode},
{slot: 2, root: [32]byte{'b'}, parent: 0},
{slot: 3, root: [32]byte{'c'}, parent: 1},
}
r, err := f.AncestorRoot(ctx, [32]byte{'c'}, 1)
require.NoError(t, err)
assert.Equal(t, bytesutil.ToBytes32(r), [32]byte{'a'})
r, err = f.AncestorRoot(ctx, [32]byte{'c'}, 2)
require.NoError(t, err)
assert.Equal(t, bytesutil.ToBytes32(r), [32]byte{'b'})
}
func TestStore_AncestorRootOutOfBound(t *testing.T) {
ctx := context.Background()
f := &ForkChoice{store: &Store{}}
f.store.nodesIndices = map[[32]byte]uint64{}
_, err := f.AncestorRoot(ctx, [32]byte{'a'}, 0)
assert.ErrorContains(t, "node does not exist", err)
f.store.nodesIndices[[32]byte{'a'}] = 0
_, err = f.AncestorRoot(ctx, [32]byte{'a'}, 0)
assert.ErrorContains(t, "node index out of range", err)
f.store.nodesIndices[[32]byte{'b'}] = 1
f.store.nodesIndices[[32]byte{'c'}] = 2
f.store.nodes = []*Node{
{slot: 1, root: [32]byte{'a'}, parent: NonExistentNode},
{slot: 2, root: [32]byte{'b'}, parent: 100}, // Out of bound parent.
{slot: 3, root: [32]byte{'c'}, parent: 1},
}
_, err = f.AncestorRoot(ctx, [32]byte{'c'}, 1)
require.ErrorContains(t, "node index out of range", err)
}
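The two AncestorRoot tests fix the walk: resolve the starting root through the index map, follow parent indices until the slot is at or below the target, and fail whenever an index escapes the slice. A simplified sketch (no context handling, unlike the real method):

package main

import (
	"errors"
	"fmt"
)

const nonExistentNode = ^uint64(0)

type node struct {
	slot   uint64
	root   [32]byte
	parent uint64
}

// ancestorRoot walks parent indices from the given root down to the requested slot.
func ancestorRoot(nodes []*node, indices map[[32]byte]uint64, root [32]byte, slot uint64) ([32]byte, error) {
	i, ok := indices[root]
	if !ok {
		return [32]byte{}, errors.New("node does not exist")
	}
	for i < uint64(len(nodes)) && nodes[i].slot > slot {
		i = nodes[i].parent
	}
	if i >= uint64(len(nodes)) {
		return [32]byte{}, errors.New("node index out of range")
	}
	return nodes[i].root, nil
}

func main() {
	nodes := []*node{
		{slot: 1, root: [32]byte{'a'}, parent: nonExistentNode},
		{slot: 2, root: [32]byte{'b'}, parent: 0},
		{slot: 3, root: [32]byte{'c'}, parent: 1},
	}
	indices := map[[32]byte]uint64{{'a'}: 0, {'b'}: 1, {'c'}: 2}
	r, err := ancestorRoot(nodes, indices, [32]byte{'c'}, 1)
	fmt.Println(r == [32]byte{'a'}, err) // true <nil>
}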
func TestStore_UpdateCanonicalNodes_WholeList(t *testing.T) {
ctx := context.Background()
f := &ForkChoice{store: &Store{}}
f.store.canonicalNodes = map[[32]byte]bool{}
f.store.nodesIndices = map[[32]byte]uint64{}
f.store.nodes = []*Node{
{slot: 1, root: [32]byte{'a'}, parent: NonExistentNode},
{slot: 2, root: [32]byte{'b'}, parent: 0},
{slot: 3, root: [32]byte{'c'}, parent: 1},
}
f.store.nodesIndices[[32]byte{'c'}] = 2
require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'c'}))
require.Equal(t, len(f.store.nodes), len(f.store.canonicalNodes))
require.Equal(t, true, f.IsCanonical([32]byte{'c'}))
require.Equal(t, true, f.IsCanonical([32]byte{'b'}))
require.Equal(t, true, f.IsCanonical([32]byte{'c'}))
require.DeepEqual(t, f.Node([32]byte{'c'}), f.store.nodes[2])
require.Equal(t, f.Node([32]byte{'d'}), (*Node)(nil))
}
func TestStore_UpdateCanonicalNodes_ParentAlreadyIn(t *testing.T) {
ctx := context.Background()
f := &ForkChoice{store: &Store{}}
f.store.canonicalNodes = map[[32]byte]bool{}
f.store.nodesIndices = map[[32]byte]uint64{}
f.store.nodes = []*Node{
{},
{slot: 2, root: [32]byte{'b'}, parent: 0},
{slot: 3, root: [32]byte{'c'}, parent: 1},
}
f.store.nodesIndices[[32]byte{'c'}] = 2
f.store.canonicalNodes[[32]byte{'b'}] = true
require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'c'}))
require.Equal(t, len(f.store.nodes)-1, len(f.store.canonicalNodes))
require.Equal(t, true, f.IsCanonical([32]byte{'c'}))
require.Equal(t, true, f.IsCanonical([32]byte{'b'}))
}
func TestStore_UpdateCanonicalNodes_ContextCancelled(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
f := &ForkChoice{store: &Store{}}
f.store.canonicalNodes = map[[32]byte]bool{}
f.store.nodesIndices = map[[32]byte]uint64{}
f.store.nodes = []*Node{
{slot: 1, root: [32]byte{'a'}, parent: NonExistentNode},
{slot: 2, root: [32]byte{'b'}, parent: 0},
{slot: 3, root: [32]byte{'c'}, parent: 1},
}
f.store.nodesIndices[[32]byte{'c'}] = 2
cancel()
require.ErrorContains(t, "context canceled", f.store.updateCanonicalNodes(ctx, [32]byte{'c'}))
}
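The three updateCanonicalNodes tests establish the walk: start at the head root, mark each block canonical while following parent links, stop early when an already-canonical ancestor is hit, and bail out if the context is cancelled. A compact sketch under those assumptions:

package main

import (
	"context"
	"errors"
	"fmt"
)

const nonExistentNode = ^uint64(0)

type node struct {
	root   [32]byte
	parent uint64
}

// markCanonical flags the chain from head back to the tree root, or to the
// first block that is already canonical, aborting on context cancellation.
func markCanonical(ctx context.Context, nodes []*node, indices map[[32]byte]uint64, canonical map[[32]byte]bool, head [32]byte) error {
	i, ok := indices[head]
	if !ok {
		return errors.New("node does not exist")
	}
	for i != nonExistentNode {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		n := nodes[i]
		if canonical[n.root] {
			return nil // the remainder of the chain is already marked
		}
		canonical[n.root] = true
		i = n.parent
	}
	return nil
}

func main() {
	nodes := []*node{
		{root: [32]byte{'a'}, parent: nonExistentNode},
		{root: [32]byte{'b'}, parent: 0},
		{root: [32]byte{'c'}, parent: 1},
	}
	canonical := map[[32]byte]bool{}
	err := markCanonical(context.Background(), nodes, map[[32]byte]uint64{{'c'}: 2}, canonical, [32]byte{'c'})
	fmt.Println(len(canonical), err) // 3 <nil>
}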

View File

@@ -9,23 +9,25 @@ import (
// ForkChoice defines the overall fork choice store which includes all block nodes, validator's latest votes and balances.
type ForkChoice struct {
store *Store
votes []Vote // tracks individual validator's last vote.
votesLock sync.RWMutex
balances []uint64 // tracks individual validator's last justified balances.
store *Store
votes []Vote // tracks individual validator's last vote.
votesLock sync.RWMutex
balances []uint64 // tracks individual validator's last justified balances.
syncedTips *optimisticStore
}
// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
type Store struct {
justifiedEpoch types.Epoch // latest justified epoch in store.
finalizedEpoch types.Epoch // latest finalized epoch in store.
pruneThreshold uint64 // do not prune tree unless threshold is reached.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
pruneThreshold uint64 // do not prune tree unless threshold is reached.
justifiedEpoch types.Epoch // latest justified epoch in store.
finalizedEpoch types.Epoch // latest finalized epoch in store.
finalizedRoot [fieldparams.RootLength]byte // latest finalized root in store.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
nodes []*Node // list of block nodes, each node is a representation of one block.
nodesIndices map[[fieldparams.RootLength]byte]uint64 // the root of block node and the nodes index in the list.
canonicalNodes map[[fieldparams.RootLength]byte]bool // the canonical block nodes.
nodesLock sync.RWMutex
proposerBoostLock sync.RWMutex
}
@@ -35,14 +37,20 @@ type Store struct {
type Node struct {
slot types.Slot // slot of the block converted to the node.
root [fieldparams.RootLength]byte // root of the block converted to the node.
parent *Node // parent index of this node.
children []*Node // the list of direct children of this Node
parent uint64 // parent index of this node.
justifiedEpoch types.Epoch // justifiedEpoch of this node.
finalizedEpoch types.Epoch // finalizedEpoch of this node.
balance uint64 // the balance that voted for this node directly
weight uint64 // weight of this node: the total balance including children
bestDescendant *Node // bestDescendant node of this node.
optimistic bool // whether the block has been fully validated or not
weight uint64 // weight of this node.
bestChild uint64 // bestChild index of this node.
bestDescendant uint64 // bestDescendant of this node.
graffiti [fieldparams.RootLength]byte // graffiti of the block node.
}
// optimisticStore defines a structure that tracks the tips of the fully
// validated blocks tree.
type optimisticStore struct {
validatedTips map[[32]byte]types.Slot
sync.RWMutex
}
// Vote defines an individual validator's vote.
@@ -51,3 +59,6 @@ type Vote struct {
nextRoot [fieldparams.RootLength]byte // next voting root.
nextEpoch types.Epoch // epoch of next voting period.
}
// NonExistentNode defines an unknown node which is used for the array based stateful DAG.
const NonExistentNode = ^uint64(0)
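For reference, ^uint64(0) is every bit set, i.e. the maximum uint64, so a real node index can never collide with the sentinel. A one-line check:

package main

import (
	"fmt"
	"math"
)

func main() {
	// ^uint64(0) flips all bits of zero, which equals math.MaxUint64.
	fmt.Println(^uint64(0) == math.MaxUint64) // true
}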

View File

@@ -12,6 +12,7 @@ import (
func TestVotes_CanFindHead(t *testing.T) {
balances := []uint64{1, 1}
f := setup(1, 1)
syncedTips := &optimisticStore{}
// The head should always start at the finalized block.
r, err := f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
@@ -22,7 +23,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 0
// /
// 2 <- head
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, [32]byte{}, 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -32,7 +33,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 0
// / \
// head -> 2 1
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, [32]byte{}, 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -62,7 +63,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// head -> 2 1
// |
// 3
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), [32]byte{}, 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -98,7 +99,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 3
// |
// 4 <- head
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(3), 1, 1, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(3), [32]byte{}, 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -114,7 +115,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 4 <- head
// /
// 5 <- justified epoch = 2
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 2, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), [32]byte{}, 2, 2))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -130,7 +131,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 4 <- head
// / \
// 5 6 <- justified epoch = 0
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), [32]byte{}, 1, 1))
// Moved 2 votes to block 5:
// 0
@@ -142,7 +143,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 4
// / \
// 2 votes-> 5 6
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), [32]byte{}, 1, 1))
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4)
@@ -163,9 +164,9 @@ func TestVotes_CanFindHead(t *testing.T) {
// 8
// |
// 9
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(7), indexToHash(5), 2, 2, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(8), indexToHash(7), 2, 2, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(9), indexToHash(8), 2, 2, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(7), indexToHash(5), [32]byte{}, 2, 2))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(8), indexToHash(7), [32]byte{}, 2, 2))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(9), indexToHash(8), [32]byte{}, 2, 2))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -210,7 +211,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// / \
// 2 votes->9 10
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(9), 5)
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(10), indexToHash(8), 2, 2, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(10), indexToHash(8), [32]byte{}, 2, 2))
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
@@ -248,8 +249,8 @@ func TestVotes_CanFindHead(t *testing.T) {
// Verify pruning below the prune threshold does not affect head.
f.store.pruneThreshold = 1000
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
assert.Equal(t, 11, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
require.NoError(t, f.store.prune(context.Background(), indexToHash(5), syncedTips))
assert.Equal(t, 11, len(f.store.nodes), "Incorrect nodes length after prune")
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
@@ -272,8 +273,8 @@ func TestVotes_CanFindHead(t *testing.T) {
// / \
// 9 10
f.store.pruneThreshold = 1
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
assert.Equal(t, 5, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
require.NoError(t, f.store.prune(context.Background(), indexToHash(5), syncedTips))
assert.Equal(t, 5, len(f.store.nodes), "Incorrect nodes length after prune")
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
@@ -289,7 +290,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 9 10
// |
// head-> 11
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(11), indexToHash(9), 2, 2, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(11), indexToHash(9), [32]byte{}, 2, 2))
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)

View File

@@ -154,80 +154,65 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
if err != nil {
return nil, err
}
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, err
}
log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
return nil, err
}
log.Debugln("Starting State Gen")
if err := beacon.startStateGen(); err != nil {
return nil, err
}
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}
log.Debugln("Registering POW Chain Service")
if err := beacon.registerPOWChainService(); err != nil {
return nil, err
}
log.Debugln("Registering Attestation Pool Service")
if err := beacon.registerAttestationPool(); err != nil {
return nil, err
}
log.Debugln("Registering Determinstic Genesis Service")
if err := beacon.registerDeterminsticGenesisService(); err != nil {
return nil, err
}
log.Debugln("Starting Fork Choice")
beacon.startForkChoice()
log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(); err != nil {
return nil, err
}
log.Debugln("Registering Intial Sync Service")
if err := beacon.registerInitialSyncService(); err != nil {
return nil, err
}
log.Debugln("Registering Sync Service")
if err := beacon.registerSyncService(); err != nil {
return nil, err
}
log.Debugln("Registering Slasher Service")
if err := beacon.registerSlasherService(); err != nil {
return nil, err
}
log.Debugln("Registering RPC Service")
if err := beacon.registerRPCService(); err != nil {
return nil, err
}
log.Debugln("Registering GRPC Gateway Service")
if err := beacon.registerGRPCGateway(); err != nil {
return nil, err
}
log.Debugln("Registering Validator Monitoring Service")
if err := beacon.registerValidatorMonitorService(); err != nil {
return nil, err
}
if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
log.Debugln("Registering Prometheus Service")
if err := beacon.registerPrometheusService(cliCtx); err != nil {
return nil, err
}
@@ -310,7 +295,7 @@ func (b *BeaconNode) Close() {
}
func (b *BeaconNode) startForkChoice() {
f := protoarray.New(0, 0)
f := protoarray.New(0, 0, params.BeaconConfig().ZeroHash)
b.forkChoiceStore = f
}
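The extra argument in the new protoarray.New call lines up with the finalizedRoot field added to Store earlier in this changeset. As an illustration only, with hypothetical local types rather than the package's constructor, the initialization it implies looks roughly like:

package main

import "fmt"

// store is a local stand-in carrying only the fields relevant to the new
// constructor argument; field names follow the Store diff shown earlier.
type store struct {
	justifiedEpoch uint64
	finalizedEpoch uint64
	finalizedRoot  [32]byte
	nodesIndices   map[[32]byte]uint64
	canonicalNodes map[[32]byte]bool
}

// newStore seeds the store with the starting epochs and finalized root.
func newStore(justifiedEpoch, finalizedEpoch uint64, finalizedRoot [32]byte) *store {
	return &store{
		justifiedEpoch: justifiedEpoch,
		finalizedEpoch: finalizedEpoch,
		finalizedRoot:  finalizedRoot,
		nodesIndices:   make(map[[32]byte]uint64),
		canonicalNodes: make(map[[32]byte]bool),
	}
}

func main() {
	s := newStore(0, 0, [32]byte{}) // mirrors protoarray.New(0, 0, params.BeaconConfig().ZeroHash)
	fmt.Println(s.finalizedRoot == [32]byte{}) // true
}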
@@ -808,6 +793,8 @@ func (b *BeaconNode) registerPrometheusService(cliCtx *cli.Context) error {
)
}
additionalHandlers = append(additionalHandlers, prometheus.Handler{Path: "/tree", Handler: c.TreeHandler})
service := prometheus.NewService(
fmt.Sprintf("%s:%d", b.cliCtx.String(cmd.MonitoringHostFlag.Name), b.cliCtx.Int(flags.MonitoringPortFlag.Name)),
b.services,

View File

@@ -98,7 +98,7 @@ func (s *Service) PublishToTopic(ctx context.Context, topic string, data []byte,
// SubscribeToTopic joins (if necessary) and subscribes to PubSub topic.
func (s *Service) SubscribeToTopic(topic string, opts ...pubsub.SubOpt) (*pubsub.Subscription, error) {
s.awaitStateInitialized() // Genesis time and genesis validators root are required to subscribe.
s.awaitStateInitialized() // Genesis time and genesis validator root are required to subscribe.
topicHandle, err := s.JoinTopic(topic)
if err != nil {

View File

@@ -490,7 +490,7 @@ func (s *Service) connectToBootnodes() error {
return nil
}
// Returns true if the service is aware of the genesis time and genesis validators root. This is
// Returns true if the service is aware of the genesis time and genesis validator root. This is
// required for discovery and pubsub validation.
func (s *Service) isInitialized() bool {
return !s.genesisTime.IsZero() && len(s.genesisValidatorsRoot) == 32

View File

@@ -306,7 +306,7 @@ func TestService_JoinLeaveTopic(t *testing.T) {
// digest associated with that genesis event.
func initializeStateWithForkDigest(ctx context.Context, t *testing.T, ef *event.Feed) [4]byte {
gt := prysmTime.Now()
gvr := bytesutil.PadTo([]byte("genesis validators root"), 32)
gvr := bytesutil.PadTo([]byte("genesis validator root"), 32)
for n := 0; n == 0; {
if ctx.Err() != nil {
t.Fatal(ctx.Err())

View File

@@ -27,13 +27,10 @@ go_library(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/powchain/engine-api-client/v1:go_default_library",
"//beacon-chain/powchain/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native/v1:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//container/trie:go_default_library",
"//contracts/deposit:go_default_library",

View File

@@ -1,4 +1,4 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
@@ -10,27 +10,8 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//config/params:go_default_library",
"//proto/engine/v1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["client_test.go"],
embed = [":go_default_library"],
deps = [
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -4,16 +4,12 @@
package v1
import (
"bytes"
"context"
"math/big"
"net/url"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rpc"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
)
@@ -24,12 +20,6 @@ const (
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
// GetPayloadMethod v1 request string for JSON-RPC.
GetPayloadMethod = "engine_getPayloadV1"
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
// ExecutionBlockByHashMethod request string for JSON-RPC.
ExecutionBlockByHashMethod = "eth_getBlockByHash"
// ExecutionBlockByNumberMethod request string for JSON-RPC.
ExecutionBlockByNumberMethod = "eth_getBlockByNumber"
// DefaultTimeout for HTTP.
DefaultTimeout = time.Second * 5
)
@@ -37,23 +27,8 @@ const (
// ForkchoiceUpdatedResponse is the response kind received by the
// engine_forkchoiceUpdatedV1 endpoint.
type ForkchoiceUpdatedResponse struct {
Status *pb.PayloadStatus `json:"payloadStatus"`
PayloadId *pb.PayloadIDBytes `json:"payloadId"`
}
// EngineCaller defines a client that can interact with an Ethereum
// execution node's engine service via JSON-RPC.
type EngineCaller interface {
NewPayload(ctx context.Context, payload *pb.ExecutionPayload) (*pb.PayloadStatus, error)
ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs *pb.PayloadAttributes,
) (*ForkchoiceUpdatedResponse, error)
GetPayload(ctx context.Context, payloadId [8]byte) (*pb.ExecutionPayload, error)
ExchangeTransitionConfiguration(
ctx context.Context, cfg *pb.TransitionConfiguration,
) (*pb.TransitionConfiguration, error)
LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error)
ExecutionBlockByHash(ctx context.Context, hash common.Hash) (*pb.ExecutionBlock, error)
Status *pb.PayloadStatus `json:"status"`
PayloadId [8]byte `json:"payloadId"`
}
// Client defines a new engine API client for the Prysm consensus node
@@ -92,115 +67,19 @@ func New(ctx context.Context, endpoint string, opts ...Option) (*Client, error)
return c, nil
}
// NewPayload calls the engine_newPayloadV1 method via JSON-RPC.
func (c *Client) NewPayload(ctx context.Context, payload *pb.ExecutionPayload) (*pb.PayloadStatus, error) {
result := &pb.PayloadStatus{}
err := c.rpc.CallContext(ctx, result, NewPayloadMethod, payload)
return result, handleRPCError(err)
// NewPayload --
func (*Client) NewPayload(_ context.Context, _ *pb.ExecutionPayload) (*pb.PayloadStatus, error) {
return nil, errors.New("unimplemented")
}
// ForkchoiceUpdated calls the engine_forkchoiceUpdatedV1 method via JSON-RPC.
func (c *Client) ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs *pb.PayloadAttributes,
// ForkchoiceUpdated --
func (*Client) ForkchoiceUpdated(
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
) (*ForkchoiceUpdatedResponse, error) {
result := &ForkchoiceUpdatedResponse{}
err := c.rpc.CallContext(ctx, result, ForkchoiceUpdatedMethod, state, attrs)
return result, handleRPCError(err)
return nil, errors.New("unimplemented")
}
// GetPayload calls the engine_getPayloadV1 method via JSON-RPC.
func (c *Client) GetPayload(ctx context.Context, payloadId [8]byte) (*pb.ExecutionPayload, error) {
result := &pb.ExecutionPayload{}
err := c.rpc.CallContext(ctx, result, GetPayloadMethod, pb.PayloadIDBytes(payloadId))
return result, handleRPCError(err)
}
// ExchangeTransitionConfiguration calls the engine_exchangeTransitionConfigurationV1 method via JSON-RPC.
func (c *Client) ExchangeTransitionConfiguration(
ctx context.Context, cfg *pb.TransitionConfiguration,
) (*pb.TransitionConfiguration, error) {
// Terminal block number should be set to 0
zeroBigNum := big.NewInt(0)
cfg.TerminalBlockNumber = zeroBigNum.Bytes()
result := &pb.TransitionConfiguration{}
if err := c.rpc.CallContext(ctx, result, ExchangeTransitionConfigurationMethod, cfg); err != nil {
return nil, handleRPCError(err)
}
// We surface an error to the user if the local configuration settings do not
// match the response from the execution node.
cfgTerminalHash := params.BeaconConfig().TerminalBlockHash[:]
if !bytes.Equal(cfgTerminalHash, result.TerminalBlockHash) {
return nil, errors.Wrapf(
ErrMismatchTerminalBlockHash,
"got %#x from execution node, wanted %#x",
result.TerminalBlockHash,
cfgTerminalHash,
)
}
ttdCfg := params.BeaconConfig().TerminalTotalDifficulty
if ttdCfg != result.TerminalTotalDifficulty {
return nil, errors.Wrapf(
ErrMismatchTerminalTotalDiff,
"got %s from execution node, wanted %s",
result.TerminalTotalDifficulty,
ttdCfg,
)
}
return result, nil
}
// LatestExecutionBlock fetches the latest execution engine block by calling
// eth_blockByNumber via JSON-RPC.
func (c *Client) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error) {
result := &pb.ExecutionBlock{}
err := c.rpc.CallContext(
ctx,
result,
ExecutionBlockByNumberMethod,
"latest",
false, /* no full transaction objects */
)
return result, handleRPCError(err)
}
// ExecutionBlockByHash fetches an execution engine block by hash by calling
// eth_blockByHash via JSON-RPC.
func (c *Client) ExecutionBlockByHash(ctx context.Context, hash common.Hash) (*pb.ExecutionBlock, error) {
result := &pb.ExecutionBlock{}
err := c.rpc.CallContext(ctx, result, ExecutionBlockByHashMethod, hash, false /* no full transaction objects */)
return result, handleRPCError(err)
}
// Handles errors received from the RPC server according to the specification.
func handleRPCError(err error) error {
if err == nil {
return nil
}
e, ok := err.(rpc.Error)
if !ok {
return errors.Wrap(err, "got an unexpected error")
}
switch e.ErrorCode() {
case -32700:
return ErrParse
case -32600:
return ErrInvalidRequest
case -32601:
return ErrMethodNotFound
case -32602:
return ErrInvalidParams
case -32603:
return ErrInternal
case -32001:
return ErrUnknownPayload
case -32000:
// Only -32000 status codes are data errors in the RPC specification.
errWithData, ok := err.(rpc.DataError)
if !ok {
return errors.Wrap(err, "got an unexpected error")
}
return errors.Wrapf(ErrServer, "%v", errWithData.ErrorData())
default:
return err
}
// GetPayload --
func (*Client) GetPayload(_ context.Context, _ [8]byte) (*pb.ExecutionPayload, error) {
return nil, errors.New("unimplemented")
}
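The handleRPCError shown above (removed in this diff) maps the engine API's JSON-RPC error codes onto sentinel errors. A condensed sketch of the same mapping, using locally defined sentinels rather than the package's exported errors:

package main

import (
	"errors"
	"fmt"
)

// Local sentinels standing in for the package's ErrParse, ErrInvalidRequest, etc.
var (
	errParse          = errors.New("parse error (-32700)")
	errInvalidRequest = errors.New("invalid request (-32600)")
	errMethodNotFound = errors.New("method not found (-32601)")
	errInvalidParams  = errors.New("invalid params (-32602)")
	errInternal       = errors.New("internal error (-32603)")
	errUnknownPayload = errors.New("unknown payload (-32001)")
)

// mapEngineError condenses the switch from handleRPCError: each well-known
// JSON-RPC code becomes a sentinel, anything else passes through unchanged.
func mapEngineError(code int, err error) error {
	switch code {
	case -32700:
		return errParse
	case -32600:
		return errInvalidRequest
	case -32601:
		return errMethodNotFound
	case -32602:
		return errInvalidParams
	case -32603:
		return errInternal
	case -32001:
		return errUnknownPayload
	default:
		return err
	}
}

func main() {
	fmt.Println(mapEngineError(-32601, errors.New("raw"))) // method not found (-32601)
}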

View File

@@ -1,668 +0,0 @@
package v1
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"math/big"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rpc"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
"github.com/prysmaticlabs/prysm/testing/require"
"google.golang.org/protobuf/proto"
)
var _ = EngineCaller(&Client{})
func TestClient_IPC(t *testing.T) {
server := newTestIPCServer(t)
defer server.Stop()
rpcClient := rpc.DialInProc(server)
defer rpcClient.Close()
client := &Client{}
client.rpc = rpcClient
ctx := context.Background()
fix := fixtures()
t.Run(GetPayloadMethod, func(t *testing.T) {
want, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
payloadId := [8]byte{1}
resp, err := client.GetPayload(ctx, payloadId)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(ForkchoiceUpdatedMethod, func(t *testing.T) {
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
resp, err := client.ForkchoiceUpdated(ctx, &pb.ForkchoiceState{}, &pb.PayloadAttributes{})
require.NoError(t, err)
require.DeepEqual(t, want.Status, resp.Status)
require.DeepEqual(t, want.PayloadId, resp.PayloadId)
})
t.Run(NewPayloadMethod, func(t *testing.T) {
want, ok := fix["PayloadStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
req, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
resp, err := client.NewPayload(ctx, req)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(NewPayloadMethod, func(t *testing.T) {
want, ok := fix["PayloadStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
req, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
resp, err := client.NewPayload(ctx, req)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(ExchangeTransitionConfigurationMethod, func(t *testing.T) {
want, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
resp, err := client.ExchangeTransitionConfiguration(ctx, want)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(ExecutionBlockByNumberMethod, func(t *testing.T) {
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
require.Equal(t, true, ok)
resp, err := client.LatestExecutionBlock(ctx)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(ExecutionBlockByHashMethod, func(t *testing.T) {
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
require.Equal(t, true, ok)
arg := common.BytesToHash([]byte("foo"))
resp, err := client.ExecutionBlockByHash(ctx, arg)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
}
func TestClient_HTTP(t *testing.T) {
ctx := context.Background()
fix := fixtures()
t.Run(GetPayloadMethod, func(t *testing.T) {
payloadId := [8]byte{1}
want, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := ioutil.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
reqArg, err := json.Marshal(pb.PayloadIDBytes(payloadId))
require.NoError(t, err)
// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(reqArg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
client := &Client{}
client.rpc = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.GetPayload(ctx, payloadId)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(ForkchoiceUpdatedMethod, func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
SafeBlockHash: []byte("safe"),
FinalizedBlockHash: []byte("finalized"),
}
payloadAttributes := &pb.PayloadAttributes{
Timestamp: 1,
Random: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := ioutil.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
forkChoiceStateReq, err := json.Marshal(forkChoiceState)
require.NoError(t, err)
payloadAttrsReq, err := json.Marshal(payloadAttributes)
require.NoError(t, err)
// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(forkChoiceStateReq),
))
require.Equal(t, true, strings.Contains(
jsonRequestString, string(payloadAttrsReq),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
client := &Client{}
client.rpc = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
require.NoError(t, err)
require.DeepEqual(t, want.Status, resp.Status)
require.DeepEqual(t, want.PayloadId, resp.PayloadId)
})
t.Run(NewPayloadMethod, func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
want, ok := fix["PayloadStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := ioutil.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
reqArg, err := json.Marshal(execPayload)
require.NoError(t, err)
// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(reqArg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
client := &Client{}
client.rpc = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.NewPayload(ctx, execPayload)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(ExecutionBlockByNumberMethod, func(t *testing.T) {
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err := json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
client := &Client{}
client.rpc = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.LatestExecutionBlock(ctx)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(ExchangeTransitionConfigurationMethod, func(t *testing.T) {
want, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
encodedReq, err := json.Marshal(want)
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := ioutil.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(encodedReq),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
client := &Client{}
client.rpc = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.ExchangeTransitionConfiguration(ctx, want)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(ExecutionBlockByHashMethod, func(t *testing.T) {
arg := common.BytesToHash([]byte("foo"))
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := ioutil.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, fmt.Sprintf("%#x", arg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
client := &Client{}
client.rpc = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.ExecutionBlockByHash(ctx, arg)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
}
func TestExchangeTransitionConfiguration(t *testing.T) {
fix := fixtures()
ctx := context.Background()
t.Run("wrong terminal block hash", func(t *testing.T) {
request, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
resp, ok := proto.Clone(request).(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
// Change the terminal block hash.
h := common.BytesToHash([]byte("foo"))
resp.TerminalBlockHash = h[:]
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": resp,
}
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
client := &Client{}
client.rpc = rpcClient
_, err = client.ExchangeTransitionConfiguration(ctx, request)
require.Equal(t, true, errors.Is(err, ErrMismatchTerminalBlockHash))
})
t.Run("wrong terminal total difficulty", func(t *testing.T) {
request, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
resp, ok := proto.Clone(request).(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
// Change the terminal block hash.
resp.TerminalTotalDifficulty = "bar"
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": resp,
}
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
client := &Client{}
client.rpc = rpcClient
_, err = client.ExchangeTransitionConfiguration(ctx, request)
require.Equal(t, true, errors.Is(err, ErrMismatchTerminalTotalDiff))
})
}
type customError struct {
code int
}
func (c *customError) ErrorCode() int {
return c.code
}
func (*customError) Error() string {
return "something went wrong"
}
type dataError struct {
code int
data interface{}
}
func (c *dataError) ErrorCode() int {
return c.code
}
func (*dataError) Error() string {
return "something went wrong"
}
func (c *dataError) ErrorData() interface{} {
return c.data
}
func Test_handleRPCError(t *testing.T) {
got := handleRPCError(nil)
require.Equal(t, true, got == nil)
var tests = []struct {
name string
expected error
expectedContains string
given error
}{
{
name: "not an rpc error",
expectedContains: "got an unexpected error",
given: errors.New("foo"),
},
{
name: "ErrParse",
expectedContains: ErrParse.Error(),
given: &customError{code: -32700},
},
{
name: "ErrInvalidRequest",
expectedContains: ErrInvalidRequest.Error(),
given: &customError{code: -32600},
},
{
name: "ErrMethodNotFound",
expectedContains: ErrMethodNotFound.Error(),
given: &customError{code: -32601},
},
{
name: "ErrInvalidParams",
expectedContains: ErrInvalidParams.Error(),
given: &customError{code: -32602},
},
{
name: "ErrInternal",
expectedContains: ErrInternal.Error(),
given: &customError{code: -32603},
},
{
name: "ErrUnknownPayload",
expectedContains: ErrUnknownPayload.Error(),
given: &customError{code: -32001},
},
{
name: "ErrServer unexpected no data",
expectedContains: "got an unexpected error",
given: &customError{code: -32000},
},
{
name: "ErrServer with data",
expectedContains: ErrServer.Error(),
given: &dataError{code: -32000, data: 5},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := handleRPCError(tt.given)
require.ErrorContains(t, tt.expectedContains, got)
})
}
}
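The table above encodes the engine API's JSON-RPC error-code mapping. Purely as an illustration of that mapping (this is not the repository's actual handleRPCError, which presumably inspects the ErrorCode()/ErrorData() interfaces exercised by customError and dataError above), a switch over the codes would look like:
// Illustrative sketch only; per the test table, the real handler treats
// -32000 as ErrServer only when error data is attached.
func mapRPCErrorCode(code int) error {
	switch code {
	case -32700:
		return ErrParse
	case -32600:
		return ErrInvalidRequest
	case -32601:
		return ErrMethodNotFound
	case -32602:
		return ErrInvalidParams
	case -32603:
		return ErrInternal
	case -32001:
		return ErrUnknownPayload
	case -32000:
		return ErrServer
	default:
		return errors.New("got an unexpected error")
	}
}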
func newTestIPCServer(t *testing.T) *rpc.Server {
server := rpc.NewServer()
err := server.RegisterName("engine", new(testEngineService))
require.NoError(t, err)
err = server.RegisterName("eth", new(testEngineService))
require.NoError(t, err)
return server
}
func fixtures() map[string]interface{} {
foo := bytesutil.ToBytes32([]byte("foo"))
bar := bytesutil.PadTo([]byte("bar"), 20)
baz := bytesutil.PadTo([]byte("baz"), 256)
baseFeePerGas := big.NewInt(6)
executionPayloadFixture := &pb.ExecutionPayload{
ParentHash: foo[:],
FeeRecipient: bar,
StateRoot: foo[:],
ReceiptsRoot: foo[:],
LogsBloom: baz,
Random: foo[:],
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: foo[:],
BaseFeePerGas: bytesutil.PadTo(baseFeePerGas.Bytes(), fieldparams.RootLength),
BlockHash: foo[:],
Transactions: [][]byte{foo[:]},
}
number := bytesutil.PadTo([]byte("100"), fieldparams.RootLength)
hash := bytesutil.PadTo([]byte("hash"), fieldparams.RootLength)
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
miner := bytesutil.PadTo([]byte("miner"), fieldparams.FeeRecipientLength)
stateRoot := bytesutil.PadTo([]byte("stateRoot"), fieldparams.RootLength)
transactionsRoot := bytesutil.PadTo([]byte("transactionsRoot"), fieldparams.RootLength)
receiptsRoot := bytesutil.PadTo([]byte("receiptsRoot"), fieldparams.RootLength)
logsBloom := bytesutil.PadTo([]byte("logs"), fieldparams.LogsBloomLength)
executionBlock := &pb.ExecutionBlock{
Number: number,
Hash: hash,
ParentHash: parent,
Sha3Uncles: sha3Uncles,
Miner: miner,
StateRoot: stateRoot,
TransactionsRoot: transactionsRoot,
ReceiptsRoot: receiptsRoot,
LogsBloom: logsBloom,
Difficulty: bytesutil.PadTo([]byte("1"), fieldparams.RootLength),
TotalDifficulty: bytesutil.PadTo([]byte("2"), fieldparams.RootLength),
GasLimit: 3,
GasUsed: 4,
Timestamp: 5,
Size: bytesutil.PadTo([]byte("6"), fieldparams.RootLength),
ExtraData: bytesutil.PadTo([]byte("extraData"), fieldparams.RootLength),
BaseFeePerGas: bytesutil.PadTo([]byte("baseFeePerGas"), fieldparams.RootLength),
Transactions: [][]byte{foo[:]},
Uncles: [][]byte{foo[:]},
}
status := &pb.PayloadStatus{
Status: pb.PayloadStatus_ACCEPTED,
LatestValidHash: foo[:],
ValidationError: "",
}
id := pb.PayloadIDBytes([8]byte{1, 0, 0, 0, 0, 0, 0, 0})
forkChoiceResp := &ForkchoiceUpdatedResponse{
Status: status,
PayloadId: &id,
}
transitionCfg := &pb.TransitionConfiguration{
TerminalBlockHash: params.BeaconConfig().TerminalBlockHash[:],
TerminalTotalDifficulty: params.BeaconConfig().TerminalTotalDifficulty,
TerminalBlockNumber: big.NewInt(0).Bytes(),
}
return map[string]interface{}{
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"PayloadStatus": status,
"ForkchoiceUpdatedResponse": forkChoiceResp,
"TransitionConfiguration": transitionCfg,
}
}
type testEngineService struct{}
func (*testEngineService) NoArgsRets() {}
func (*testEngineService) GetBlockByHash(
_ context.Context, _ common.Hash, _ bool,
) *pb.ExecutionBlock {
fix := fixtures()
item, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
if !ok {
panic("not found")
}
return item
}
func (*testEngineService) GetBlockByNumber(
_ context.Context, _ string, _ bool,
) *pb.ExecutionBlock {
fix := fixtures()
item, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
if !ok {
panic("not found")
}
return item
}
func (*testEngineService) GetPayloadV1(
_ context.Context, _ pb.PayloadIDBytes,
) *pb.ExecutionPayload {
fix := fixtures()
item, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
if !ok {
panic("not found")
}
return item
}
func (*testEngineService) ExchangeTransitionConfigurationV1(
_ context.Context, _ *pb.TransitionConfiguration,
) *pb.TransitionConfiguration {
fix := fixtures()
item, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
if !ok {
panic("not found")
}
return item
}
func (*testEngineService) ForkchoiceUpdatedV1(
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
) *ForkchoiceUpdatedResponse {
fix := fixtures()
item, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
if !ok {
panic("not found")
}
return item
}
func (*testEngineService) NewPayloadV1(
_ context.Context, _ *pb.ExecutionPayload,
) *pb.PayloadStatus {
fix := fixtures()
item, ok := fix["PayloadStatus"].(*pb.PayloadStatus)
if !ok {
panic("not found")
}
return item
}
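The helpers above (newTestIPCServer, testEngineService, fixtures) can also be driven without a socket via go-ethereum's in-process RPC transport. A hedged sketch under that assumption (runInProcBlockByHash is a hypothetical helper; everything else mirrors this file):
// Illustrative sketch, not part of this diff.
func runInProcBlockByHash(t *testing.T) {
	server := newTestIPCServer(t)
	defer server.Stop()

	rpcClient := rpc.DialInProc(server) // go-ethereum's in-memory transport
	defer rpcClient.Close()

	client := &Client{}
	client.rpc = rpcClient

	want, ok := fixtures()["ExecutionBlock"].(*pb.ExecutionBlock)
	require.Equal(t, true, ok)
	blk, err := client.ExecutionBlockByHash(context.Background(), common.BytesToHash([]byte("foo")))
	require.NoError(t, err)
	require.DeepEqual(t, want, blk)
}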

View File

@@ -19,10 +19,4 @@ var (
ErrUnknownPayload = errors.New("payload does not exist or is not available")
// ErrUnsupportedScheme for unsupported URL schemes.
ErrUnsupportedScheme = errors.New("unsupported url scheme, only http(s) and ipc are supported")
// ErrMismatchTerminalBlockHash when the terminal block hash value received via
// the API mismatches Prysm's configuration value.
ErrMismatchTerminalBlockHash = errors.New("terminal block hash mismatch")
// ErrMismatchTerminalTotalDiff when the terminal total difficulty value received via
// the API mismatches Prysm's configuration value.
ErrMismatchTerminalTotalDiff = errors.New("terminal total difficulty mismatch")
)

View File

@@ -428,27 +428,27 @@ func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
}
func (s *Service) retrieveBlockHashAndTime(ctx context.Context, blkNum *big.Int) ([32]byte, uint64, error) {
bHash, err := s.BlockHashByHeight(ctx, blkNum)
hash, err := s.BlockHashByHeight(ctx, blkNum)
if err != nil {
return [32]byte{}, 0, errors.Wrap(err, "could not get eth1 block hash")
}
if bHash == [32]byte{} {
if hash == [32]byte{} {
return [32]byte{}, 0, errors.Wrap(err, "got empty block hash")
}
timeStamp, err := s.BlockTimeByHeight(ctx, blkNum)
if err != nil {
return [32]byte{}, 0, errors.Wrap(err, "could not get block timestamp")
}
return bHash, timeStamp, nil
return hash, timeStamp, nil
}
// checkBlockNumberForChainStart checks whether chainstart has occurred at the given block number.
func (s *Service) checkBlockNumberForChainStart(ctx context.Context, blkNum *big.Int) error {
bHash, timeStamp, err := s.retrieveBlockHashAndTime(ctx, blkNum)
hash, timeStamp, err := s.retrieveBlockHashAndTime(ctx, blkNum)
if err != nil {
return err
}
s.checkForChainstart(ctx, bHash, blkNum, timeStamp)
s.checkForChainstart(ctx, hash, blkNum, timeStamp)
return nil
}

View File

@@ -333,7 +333,7 @@ func TestProcessETH2GenesisLog(t *testing.T) {
err = web3Service.ProcessETH1Block(context.Background(), big.NewInt(int64(logs[len(logs)-1].BlockNumber)))
require.NoError(t, err)
cachedDeposits := web3Service.chainStartData.ChainstartDeposits
cachedDeposits := web3Service.ChainStartDeposits()
require.Equal(t, depositsReqForChainStart, len(cachedDeposits))
// Receive the chain started event.
@@ -425,7 +425,7 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
err = web3Service.processPastLogs(context.Background())
require.NoError(t, err)
cachedDeposits := web3Service.chainStartData.ChainstartDeposits
cachedDeposits := web3Service.ChainStartDeposits()
requiredDepsForChainstart := depositsReqForChainStart + depositOffset
require.Equal(t, requiredDepsForChainstart, len(cachedDeposits), "Did not cache the chain start deposits correctly")
@@ -529,7 +529,7 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
err = web3Service.processPastLogs(context.Background())
require.NoError(t, err)
cachedDeposits := web3Service.chainStartData.ChainstartDeposits
cachedDeposits := web3Service.ChainStartDeposits()
require.Equal(t, totalNumOfDeposits, len(cachedDeposits), "Did not cache the chain start deposits correctly")
// Receive the chain started event.

View File

@@ -32,14 +32,6 @@ func WithHttpEndpoints(endpointStrings []string) Option {
}
}
// WithExecutionEndpoint for the execution node JSON-RPC endpoint.
func WithExecutionEndpoint(endpoint string) Option {
return func(s *Service) error {
s.cfg.executionEndpoint = endpoint
return nil
}
}
// WithDepositContractAddress for the deposit contract.
func WithDepositContractAddress(addr common.Address) Option {
return func(s *Service) error {

View File

@@ -27,13 +27,10 @@ import (
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
engine "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
nativev1 "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/trie"
contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
@@ -81,6 +78,7 @@ var (
// ChainStartFetcher retrieves information pertaining to the chain start event
// of the beacon chain for usage across various services.
type ChainStartFetcher interface {
ChainStartDeposits() []*ethpb.Deposit
ChainStartEth1Data() *ethpb.Eth1Data
PreGenesisState() state.BeaconState
ClearPreGenesisData()
@@ -135,7 +133,6 @@ type config struct {
eth1HeaderReqLimit uint64
beaconNodeStatsUpdater BeaconNodeStatsUpdater
httpEndpoints []network.Endpoint
executionEndpoint string
currHttpEndpoint network.Endpoint
finalizedStateAtStartup state.BeaconState
}
@@ -156,7 +153,6 @@ type Service struct {
headTicker *time.Ticker
httpLogger bind.ContractFilterer
eth1DataFetcher RPCDataFetcher
engineAPIClient *engine.Client
rpcClient RPCClient
headerCache *headerCache // cache to store block hash/block height.
latestEth1Data *ethpb.LatestETH1Data
@@ -212,10 +208,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
}
}
if err := s.initializeEngineAPIClient(ctx); err != nil {
return nil, errors.Wrap(err, "unable to initialize engine API client")
}
if err := s.ensureValidPowchainData(ctx); err != nil {
return nil, errors.Wrap(err, "unable to validate powchain data")
}
@@ -270,14 +262,16 @@ func (s *Service) Stop() error {
return nil
}
// ChainStartDeposits returns a slice of validator deposit data processed
// by the deposit contract and cached in the powchain service.
func (s *Service) ChainStartDeposits() []*ethpb.Deposit {
return s.chainStartData.ChainstartDeposits
}
// ClearPreGenesisData clears out the stored chainstart deposits and beacon state.
func (s *Service) ClearPreGenesisData() {
s.chainStartData.ChainstartDeposits = []*ethpb.Deposit{}
if features.Get().EnableNativeState {
s.preGenesisState = &nativev1.BeaconState{}
} else {
s.preGenesisState = &v1.BeaconState{}
}
s.preGenesisState = &v1.BeaconState{}
}
// ChainStartEth1Data returns the eth1 data at chainstart.
@@ -304,12 +298,6 @@ func (s *Service) Status() error {
return nil
}
// EngineAPIClient returns the associated engine API client to interact
// with an execution node via JSON-RPC.
func (s *Service) EngineAPIClient() *engine.Client {
return s.engineAPIClient
}
func (s *Service) updateBeaconNodeStats() {
bs := clientstats.BeaconNodeStats{}
if len(s.cfg.httpEndpoints) > 1 {
@@ -375,6 +363,45 @@ func (s *Service) ETH1ConnectionErrors() []error {
return errs
}
// DepositRoot returns the Merkle root of the latest deposit trie
// from the ETH1.0 deposit contract.
func (s *Service) DepositRoot() [32]byte {
return s.depositTrie.HashTreeRoot()
}
// DepositTrie returns the sparse Merkle trie used for storing
// deposits from the ETH1.0 deposit contract.
func (s *Service) DepositTrie() *trie.SparseMerkleTrie {
return s.depositTrie
}
// LatestBlockHeight in the ETH1.0 chain.
func (s *Service) LatestBlockHeight() *big.Int {
return big.NewInt(int64(s.latestEth1Data.BlockHeight))
}
// LatestBlockHash in the ETH1.0 chain.
func (s *Service) LatestBlockHash() common.Hash {
return bytesutil.ToBytes32(s.latestEth1Data.BlockHash)
}
// AreAllDepositsProcessed determines if all the logs from the deposit contract
// are processed.
func (s *Service) AreAllDepositsProcessed() (bool, error) {
s.processingLock.RLock()
defer s.processingLock.RUnlock()
countByte, err := s.depositContractCaller.GetDepositCount(&bind.CallOpts{})
if err != nil {
return false, errors.Wrap(err, "could not get deposit count")
}
count := bytesutil.FromBytes8(countByte)
deposits := s.cfg.depositCache.AllDeposits(s.ctx, nil)
if count != uint64(len(deposits)) {
return false, nil
}
return true, nil
}
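The accessors introduced above give other services read-only access to the cached deposit state. A hedged sketch of a caller combining them (depositsReady is a hypothetical helper, not part of this diff):
// Illustrative sketch, not part of this diff.
func depositsReady(s *Service) ([32]byte, bool, error) {
	done, err := s.AreAllDepositsProcessed()
	if err != nil {
		return [32]byte{}, false, err
	}
	if !done {
		return [32]byte{}, false, nil
	}
	// Only rely on the cached trie once all contract logs are processed.
	return s.DepositRoot(), true, nil
}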
// followBlockHeight refers to the latest eth1 block which satisfies the condition: eth1_timestamp +
// SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= current_unix_time
func (s *Service) followBlockHeight(_ context.Context) (uint64, error) {
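The body of followBlockHeight is elided by the diff. Purely to spell out the condition in the comment above (this is not the function's actual implementation), a block with timestamp ts qualifies once the follow distance has elapsed:
// Illustrative sketch only. SecondsPerETH1Block and Eth1FollowDistance are read
// from params.BeaconConfig(); eligibleFollowBlock is a hypothetical helper.
func eligibleFollowBlock(ts, nowUnix uint64) bool {
	cfg := params.BeaconConfig()
	return ts+cfg.SecondsPerETH1Block*cfg.Eth1FollowDistance <= nowUnix
}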
@@ -771,20 +798,13 @@ func (s *Service) initPOWService() {
// Handle edge case with embedded genesis state by fetching genesis header to determine
// its height.
if s.chainStartData.Chainstarted && s.chainStartData.GenesisBlock == 0 {
genHash := common.BytesToHash(s.chainStartData.Eth1Data.BlockHash)
genBlock := s.chainStartData.GenesisBlock
// In the event our provided chainstart data references a non-existent block hash,
// we assume the genesis block to be 0.
if genHash != [32]byte{} {
genHeader, err := s.eth1DataFetcher.HeaderByHash(ctx, genHash)
if err != nil {
log.Errorf("Unable to retrieve genesis ETH1.0 chain header: %v", err)
s.retryETH1Node(err)
continue
}
genBlock = genHeader.Number.Uint64()
genHeader, err := s.eth1DataFetcher.HeaderByHash(ctx, common.BytesToHash(s.chainStartData.Eth1Data.BlockHash))
if err != nil {
log.Errorf("Unable to retrieve genesis ETH1.0 chain header: %v", err)
s.retryETH1Node(err)
continue
}
s.chainStartData.GenesisBlock = genBlock
s.chainStartData.GenesisBlock = genHeader.Number.Uint64()
if err := s.savePowchainData(ctx); err != nil {
log.Errorf("Unable to save powchain data: %v", err)
}
@@ -1053,19 +1073,6 @@ func (s *Service) ensureValidPowchainData(ctx context.Context) error {
return nil
}
// Initializes a connection to the engine API if an execution provider endpoint is set.
func (s *Service) initializeEngineAPIClient(ctx context.Context) error {
if s.cfg.executionEndpoint == "" {
return nil
}
client, err := engine.New(ctx, s.cfg.executionEndpoint)
if err != nil {
return err
}
s.engineAPIClient = client
return nil
}
func dedupEndpoints(endpoints []string) []string {
selectionMap := make(map[string]bool)
newEndpoints := make([]string, 0, len(endpoints))

View File

@@ -16,6 +16,7 @@ go_library(
"//beacon-chain/powchain/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//accounts/abi/bind/backends:go_default_library",
@@ -23,6 +24,5 @@ go_library(
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -2,14 +2,15 @@ package testing
import (
"context"
"errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/container/trie"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -24,6 +25,11 @@ func (_ *FaultyMockPOWChain) Eth2GenesisPowchainInfo() (uint64, *big.Int) {
return 0, big.NewInt(0)
}
// LatestBlockHeight --
func (_ *FaultyMockPOWChain) LatestBlockHeight() *big.Int {
return big.NewInt(0)
}
// BlockExists --
func (f *FaultyMockPOWChain) BlockExists(_ context.Context, _ common.Hash) (bool, *big.Int, error) {
if f.HashesByHeight == nil {
@@ -48,6 +54,21 @@ func (_ *FaultyMockPOWChain) BlockByTimestamp(_ context.Context, _ uint64) (*typ
return &types.HeaderInfo{Number: big.NewInt(0)}, nil
}
// DepositRoot --
func (_ *FaultyMockPOWChain) DepositRoot() [32]byte {
return [32]byte{}
}
// DepositTrie --
func (_ *FaultyMockPOWChain) DepositTrie() *trie.SparseMerkleTrie {
return &trie.SparseMerkleTrie{}
}
// ChainStartDeposits --
func (_ *FaultyMockPOWChain) ChainStartDeposits() []*ethpb.Deposit {
return []*ethpb.Deposit{}
}
// ChainStartEth1Data --
func (_ *FaultyMockPOWChain) ChainStartEth1Data() *ethpb.Eth1Data {
return &ethpb.Eth1Data{}
@@ -55,11 +76,7 @@ func (_ *FaultyMockPOWChain) ChainStartEth1Data() *ethpb.Eth1Data {
// PreGenesisState --
func (_ *FaultyMockPOWChain) PreGenesisState() state.BeaconState {
s, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
if err != nil {
panic("could not initialize state")
}
return s
return &v1.BeaconState{}
}
// ClearPreGenesisData --

View File

@@ -16,6 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/container/trie"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -57,6 +58,11 @@ func (m *POWChain) Eth2GenesisPowchainInfo() (uint64, *big.Int) {
return uint64(GenesisTime), blk
}
// DepositTrie --
func (_ *POWChain) DepositTrie() *trie.SparseMerkleTrie {
return &trie.SparseMerkleTrie{}
}
// BlockExists --
func (m *POWChain) BlockExists(_ context.Context, hash common.Hash) (bool, *big.Int, error) {
// Reverse the map of heights by hash.
@@ -101,6 +107,17 @@ func (m *POWChain) BlockByTimestamp(_ context.Context, time uint64) (*types.Head
return &types.HeaderInfo{Number: chosenNumber, Time: chosenTime}, nil
}
// DepositRoot --
func (_ *POWChain) DepositRoot() [32]byte {
root := []byte("depositroot")
return bytesutil.ToBytes32(root)
}
// ChainStartDeposits --
func (_ *POWChain) ChainStartDeposits() []*ethpb.Deposit {
return []*ethpb.Deposit{}
}
// ChainStartEth1Data --
func (m *POWChain) ChainStartEth1Data() *ethpb.Eth1Data {
return m.Eth1Data

View File

@@ -52,6 +52,7 @@ func TestGetSpec(t *testing.T) {
config.BellatrixForkEpoch = 101
config.ShardingForkVersion = []byte("ShardingForkVersion")
config.ShardingForkEpoch = 102
config.MinAnchorPowBlockDifficulty = 1000
config.BLSWithdrawalPrefixByte = byte('b')
config.GenesisDelay = 24
config.SecondsPerSlot = 25
@@ -99,7 +100,7 @@ func TestGetSpec(t *testing.T) {
config.MinSyncCommitteeParticipants = 71
config.TerminalBlockHash = common.HexToHash("TerminalBlockHash")
config.TerminalBlockHashActivationEpoch = 72
config.TerminalTotalDifficulty = "73"
config.TerminalTotalDifficulty = 73
config.FeeRecipient = common.HexToAddress("FeeRecipient")
var dbp [4]byte
@@ -130,7 +131,7 @@ func TestGetSpec(t *testing.T) {
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
assert.Equal(t, 98, len(resp.Data))
assert.Equal(t, 100, len(resp.Data))
for k, v := range resp.Data {
switch k {
case "CONFIG_NAME":

Some files were not shown because too many files have changed in this diff.