Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 06:18:05 -05:00)

Compare commits: e2eProxy...consensus- (12 commits)

| SHA1 |
|---|
| c245c55dc5 |
| 7f53700306 |
| 3b69f7a196 |
| 72562dcf3a |
| cc23b8311a |
| cbe54fe3f9 |
| 1b6adca3ca |
| 1651649e5a |
| 56187edb98 |
| ecad5bbffc |
| 407182387b |
| ad0b0b503d |
BUILD.bazel (13 changed lines)
@@ -115,18 +115,19 @@ nogo(
        "@org_golang_x_tools//go/analysis/passes/assign:go_default_library",
        "@org_golang_x_tools//go/analysis/passes/inspect:go_default_library",
        "@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library",
        "//tools/analyzers/maligned:go_default_library",
        "//tools/analyzers/comparesame:go_default_library",
        "//tools/analyzers/cryptorand:go_default_library",
        "//tools/analyzers/errcheck:go_default_library",
        "//tools/analyzers/featureconfig:go_default_library",
        "//tools/analyzers/comparesame:go_default_library",
        "//tools/analyzers/shadowpredecl:go_default_library",
        "//tools/analyzers/nop:go_default_library",
        "//tools/analyzers/slicedirect:go_default_library",
        "//tools/analyzers/interfacechecker:go_default_library",
        "//tools/analyzers/gocognit:go_default_library",
        "//tools/analyzers/ineffassign:go_default_library",
        "//tools/analyzers/interfacechecker:go_default_library",
        "//tools/analyzers/maligned:go_default_library",
        "//tools/analyzers/nop:go_default_library",
        "//tools/analyzers/properpermissions:go_default_library",
        "//tools/analyzers/recursivelock:go_default_library",
        "//tools/analyzers/shadowpredecl:go_default_library",
        "//tools/analyzers/slicedirect:go_default_library",
        "//tools/analyzers/uintcast:go_default_library",
    ] + select({
        # nogo checks that fail with coverage enabled.
@@ -176,7 +176,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe

go_rules_dependencies()

go_register_toolchains(
    go_version = "1.17.6",
    go_version = "1.17.9",
    nogo = "@//:nogo",
)
@@ -66,6 +66,7 @@ go_library(
        "//config/params:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//monitoring/tracing:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/eth/v1:go_default_library",
@@ -273,13 +273,13 @@ func (s *Service) CurrentFork() *ethpb.Fork {

// IsCanonical returns true if the input block root is part of the canonical chain.
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
    // If the block has been finalized, the block will always be part of the canonical chain.
    if s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot) {
        return true, nil
    // If the block has not been finalized, check fork choice store to see if the block is canonical
    if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
        return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
    }

    // If the block has not been finalized, check fork choice store to see if the block is canonical
    return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
    // If the block has been finalized, the block will always be part of the canonical chain.
    return s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot), nil
}

// ChainHeads returns all possible chain heads (leaves of fork choice tree).
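One way to read the interleaved old/new lines of the IsCanonical hunk: the new version asks fork choice first whenever it still tracks the root, and only falls back to the finalized-block lookup otherwise. The sketch below is a self-contained reconstruction with stub dependencies, not the verbatim Prysm code.

package main

import (
	"context"
	"fmt"
)

// Stubs standing in for the service's fork choice store and database; only the
// call order matters for this illustration.
type forkChoice struct{ nodes, canonical map[[32]byte]bool }
type beaconDB struct{ finalized map[[32]byte]bool }

func (f forkChoice) HasNode(r [32]byte) bool                           { return f.nodes[r] }
func (f forkChoice) IsCanonical(r [32]byte) bool                       { return f.canonical[r] }
func (d beaconDB) IsFinalizedBlock(_ context.Context, r [32]byte) bool { return d.finalized[r] }

// isCanonical mirrors the new ordering read from the hunk above (a sketch, not
// the actual Service method): consult fork choice while it still knows the root,
// and treat roots it no longer tracks as canonical only if they are finalized.
func isCanonical(ctx context.Context, fc forkChoice, db beaconDB, root [32]byte) (bool, error) {
	if fc.HasNode(root) {
		return fc.IsCanonical(root), nil
	}
	return db.IsFinalizedBlock(ctx, root), nil
}

func main() {
	root := [32]byte{'a'}
	fc := forkChoice{nodes: map[[32]byte]bool{root: true}, canonical: map[[32]byte]bool{}}
	ok, _ := isCanonical(context.Background(), fc, beaconDB{}, root)
	fmt.Println(ok) // false: fork choice knows the root but it is not on the canonical chain
}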
@@ -11,6 +11,7 @@ import (
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    mathutil "github.com/prysmaticlabs/prysm/math"
    "github.com/prysmaticlabs/prysm/monitoring/tracing"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"

@@ -402,10 +403,17 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) e
    // We update the cache up to the last deposit index in the finalized block's state.
    // We can be confident that these deposits will be included in some block
    // because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
    eth1DepositIndex := int64(finalizedState.Eth1Data().DepositCount - 1)
    s.cfg.DepositCache.InsertFinalizedDeposits(ctx, eth1DepositIndex)
    eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
    if err != nil {
        return errors.Wrap(err, "could not cast eth1 deposit index")
    }
    // The deposit index in the state is always the index of the next deposit
    // to be included (rather than the last one to be processed). This was most likely
    // done as the state cannot represent signed integers.
    eth1DepositIndex -= 1
    s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex))
    // Deposit proofs are only used during state transition and can be safely removed to save space.
    if err = s.cfg.DepositCache.PruneProofs(ctx, eth1DepositIndex); err != nil {
    if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
        return errors.Wrap(err, "could not prune deposit proofs")
    }
    return nil
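The key behavioral change in this hunk: the cache is now finalized from the state's Eth1DepositIndex rather than from Eth1Data().DepositCount. Because Eth1DepositIndex points at the next deposit to be included, the code subtracts one before handing the value to the deposit cache. A minimal standalone sketch of that adjustment follows (variable names are illustrative, not Prysm APIs); it reproduces the expectation in the updated test below, where SetEth1DepositIndex(8) leads to a finalized Merkle trie index of 7.

package main

import "fmt"

func main() {
	// The beacon state's Eth1DepositIndex is the index of the NEXT deposit to
	// be processed, so deposits 0..eth1DepositIndex-1 are already included.
	eth1DepositIndex := uint64(8)

	// The deposit cache wants the index of the LAST finalized deposit.
	lastFinalizedIndex := int64(eth1DepositIndex) - 1

	fmt.Println(lastFinalizedIndex) // 7
}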
@@ -1516,6 +1516,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
    service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
    gs = gs.Copy()
    assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
    assert.NoError(t, gs.SetEth1DepositIndex(8))
    assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
    zeroSig := [96]byte{}
    for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
@@ -1529,8 +1530,64 @@ func TestInsertFinalizedDeposits(t *testing.T) {
    }
    assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
    fDeposits := depositCache.FinalizedDeposits(ctx)
    assert.Equal(t, 9, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
    deps := depositCache.AllDeposits(ctx, big.NewInt(109))
    assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
    deps := depositCache.AllDeposits(ctx, big.NewInt(107))
    for _, d := range deps {
        assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
    }
}
func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
opts := testServiceOptsWithDB(t)
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
opts = append(opts, WithDepositCache(depositCache))
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
gs, _ := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckpt(ðpb.Checkpoint{Root: gRoot[:]})
|
||||
gs = gs.Copy()
|
||||
assert.NoError(t, gs.SetEth1Data(ðpb.Eth1Data{DepositCount: 7}))
|
||||
assert.NoError(t, gs.SetEth1DepositIndex(6))
|
||||
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
|
||||
gs2 := gs.Copy()
|
||||
assert.NoError(t, gs2.SetEth1Data(ðpb.Eth1Data{DepositCount: 15}))
|
||||
assert.NoError(t, gs2.SetEth1DepositIndex(13))
|
||||
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}, gs2))
|
||||
zeroSig := [96]byte{}
|
||||
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
|
||||
root := []byte(strconv.Itoa(int(i)))
|
||||
assert.NoError(t, depositCache.InsertDeposit(ctx, ðpb.Deposit{Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}),
|
||||
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
|
||||
Amount: 0,
|
||||
Signature: zeroSig[:],
|
||||
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
|
||||
}
|
||||
// Insert 3 deposits before hand.
|
||||
depositCache.InsertFinalizedDeposits(ctx, 2)
|
||||
|
||||
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
|
||||
fDeposits := depositCache.FinalizedDeposits(ctx)
|
||||
assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
|
||||
|
||||
deps := depositCache.AllDeposits(ctx, big.NewInt(105))
|
||||
for _, d := range deps {
|
||||
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
|
||||
}
|
||||
|
||||
// Insert New Finalized State with higher deposit count.
|
||||
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}))
|
||||
fDeposits = depositCache.FinalizedDeposits(ctx)
|
||||
assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
|
||||
deps = depositCache.AllDeposits(ctx, big.NewInt(112))
|
||||
for _, d := range deps {
|
||||
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
|
||||
}
|
||||
|
||||
@@ -83,7 +83,8 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) error {
    // A canonical root implies the root has an ancestor that aligns with the finalized checkpoint.
    // In this case, we could exit early to save on additional computation.
    if s.cfg.ForkChoiceStore.IsCanonical(bytesutil.ToBytes32(root)) {
    blockRoot := bytesutil.ToBytes32(root)
    if s.cfg.ForkChoiceStore.HasNode(blockRoot) && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
        return nil
    }
@@ -137,6 +137,22 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit

    depositTrie := dc.finalizedDeposits.Deposits
    insertIndex := int(dc.finalizedDeposits.MerkleTrieIndex + 1)

    // Don't insert into the finalized trie if there is no deposit to insert.
    if len(dc.deposits) == 0 {
        return
    }
    // If we have fewer deposits than the requested index, finalize only up to
    // the last deposit we actually have.
    if len(dc.deposits) <= int(eth1DepositIndex) {
        eth1DepositIndex = int64(len(dc.deposits)) - 1
    }
    // If we are asked to finalize to a lower deposit index than what is already
    // finalized, ignore it.
    if int(eth1DepositIndex) < insertIndex {
        return
    }
    for _, d := range dc.deposits {
        if d.Index <= dc.finalizedDeposits.MerkleTrieIndex {
            continue
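The three guards above clamp and reject out-of-range finalization requests. Below is a standalone sketch of that decision logic under the same assumptions (clampFinalizedIndex is an illustrative helper, not a DepositCache method); the printed results line up with the new HandleZeroDeposits, HandleSmallerThanExpectedDeposits, and HandleLowerEth1DepositIndex tests added further down.

package main

import "fmt"

// clampFinalizedIndex mirrors the guard logic in the hunk above: given the number
// of cached deposits, the currently finalized trie index, and a requested
// eth1DepositIndex, it returns the index to finalize to and whether to proceed.
func clampFinalizedIndex(numDeposits int, prevTrieIndex, eth1DepositIndex int64) (int64, bool) {
	insertIndex := prevTrieIndex + 1
	if numDeposits == 0 {
		return prevTrieIndex, false // nothing to insert
	}
	if numDeposits <= int(eth1DepositIndex) {
		eth1DepositIndex = int64(numDeposits) - 1 // fewer deposits than requested: clamp
	}
	if eth1DepositIndex < insertIndex {
		return prevTrieIndex, false // lower than what is already finalized: ignore
	}
	return eth1DepositIndex, true
}

func main() {
	fmt.Println(clampFinalizedIndex(0, -1, 2)) // -1 false, cf. TestFinalizedDeposits_HandleZeroDeposits
	fmt.Println(clampFinalizedIndex(3, -1, 5)) // 2 true,  cf. ...HandleSmallerThanExpectedDeposits
	fmt.Println(clampFinalizedIndex(6, 5, 2))  // 5 false, cf. ...HandleLowerEth1DepositIndex
}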
@@ -459,7 +459,7 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
|
||||
Index: 1,
|
||||
},
|
||||
}
|
||||
newFinalizedDeposit := ethpb.DepositContainer{
|
||||
newFinalizedDeposit := ðpb.DepositContainer{
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte{2}, 48),
|
||||
@@ -471,17 +471,17 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
|
||||
}
|
||||
dc.deposits = oldFinalizedDeposits
|
||||
dc.InsertFinalizedDeposits(context.Background(), 1)
|
||||
// Artificially exclude old deposits so that they can only be retrieved from previously finalized deposits.
|
||||
dc.deposits = []*ethpb.DepositContainer{&newFinalizedDeposit}
|
||||
|
||||
dc.InsertFinalizedDeposits(context.Background(), 2)
|
||||
|
||||
dc.deposits = append(dc.deposits, []*ethpb.DepositContainer{newFinalizedDeposit}...)
|
||||
|
||||
cachedDeposits := dc.FinalizedDeposits(context.Background())
|
||||
require.NotNil(t, cachedDeposits, "Deposits not cached")
|
||||
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex)
|
||||
assert.Equal(t, int64(1), cachedDeposits.MerkleTrieIndex)
|
||||
|
||||
var deps [][]byte
|
||||
for _, d := range append(oldFinalizedDeposits, &newFinalizedDeposit) {
|
||||
for _, d := range oldFinalizedDeposits {
|
||||
hash, err := d.Deposit.Data.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not hash deposit data")
|
||||
deps = append(deps, hash[:])
|
||||
@@ -491,6 +491,140 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
|
||||
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
|
||||
}
|
||||
|
||||
func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
dc.InsertFinalizedDeposits(context.Background(), 2)
|
||||
|
||||
cachedDeposits := dc.FinalizedDeposits(context.Background())
|
||||
require.NotNil(t, cachedDeposits, "Deposits not cached")
|
||||
assert.Equal(t, int64(-1), cachedDeposits.MerkleTrieIndex)
|
||||
}
|
||||
|
||||
func TestFinalizedDeposits_HandleSmallerThanExpectedDeposits(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
finalizedDeposits := []*ethpb.DepositContainer{
|
||||
{
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte{0}, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
},
|
||||
Index: 0,
|
||||
},
|
||||
{
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte{1}, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
},
|
||||
Index: 1,
|
||||
},
|
||||
{
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte{2}, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
},
|
||||
Index: 2,
|
||||
},
|
||||
}
|
||||
dc.deposits = finalizedDeposits
|
||||
|
||||
dc.InsertFinalizedDeposits(context.Background(), 5)
|
||||
|
||||
cachedDeposits := dc.FinalizedDeposits(context.Background())
|
||||
require.NotNil(t, cachedDeposits, "Deposits not cached")
|
||||
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex)
|
||||
}
|
||||
|
||||
func TestFinalizedDeposits_HandleLowerEth1DepositIndex(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
finalizedDeposits := []*ethpb.DepositContainer{
|
||||
{
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte{0}, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
},
|
||||
Index: 0,
|
||||
},
|
||||
{
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte{1}, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
},
|
||||
Index: 1,
|
||||
},
|
||||
{
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte{2}, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
},
|
||||
Index: 2,
|
||||
},
|
||||
{
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte{3}, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
},
|
||||
Index: 3,
|
||||
},
|
||||
{
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte{4}, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
},
|
||||
Index: 4,
|
||||
},
|
||||
{
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte{5}, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
},
|
||||
Index: 5,
|
||||
},
|
||||
}
|
||||
dc.deposits = finalizedDeposits
|
||||
|
||||
dc.InsertFinalizedDeposits(context.Background(), 5)
|
||||
|
||||
// Reinsert finalized deposits with a lower index.
|
||||
dc.InsertFinalizedDeposits(context.Background(), 2)
|
||||
|
||||
cachedDeposits := dc.FinalizedDeposits(context.Background())
|
||||
require.NotNil(t, cachedDeposits, "Deposits not cached")
|
||||
assert.Equal(t, int64(5), cachedDeposits.MerkleTrieIndex)
|
||||
}
|
||||
|
||||
func TestFinalizedDeposits_InitializedCorrectly(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
@@ -615,6 +749,85 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
|
||||
assert.Equal(t, 1, len(deps))
|
||||
}
|
||||
|
||||
func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
generateCtr := func(height uint64, index int64) *ethpb.DepositContainer {
|
||||
return ðpb.DepositContainer{
|
||||
Eth1BlockHeight: height,
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte{uint8(index)}, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
},
|
||||
Index: index,
|
||||
}
|
||||
}
|
||||
|
||||
finalizedDeposits := []*ethpb.DepositContainer{
|
||||
generateCtr(10, 0),
|
||||
generateCtr(11, 1),
|
||||
generateCtr(12, 2),
|
||||
generateCtr(12, 3),
|
||||
generateCtr(13, 4),
|
||||
generateCtr(13, 5),
|
||||
generateCtr(13, 6),
|
||||
generateCtr(14, 7),
|
||||
}
|
||||
dc.deposits = append(finalizedDeposits,
|
||||
generateCtr(15, 8),
|
||||
generateCtr(15, 9),
|
||||
generateCtr(30, 10))
|
||||
trieItems := make([][]byte, 0, len(dc.deposits))
|
||||
for _, dep := range dc.allDeposits(big.NewInt(30)) {
|
||||
depHash, err := dep.Data.HashTreeRoot()
|
||||
assert.NoError(t, err)
|
||||
trieItems = append(trieItems, depHash[:])
|
||||
}
|
||||
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Perform this in a non-sensical ordering
|
||||
dc.InsertFinalizedDeposits(context.Background(), 10)
|
||||
dc.InsertFinalizedDeposits(context.Background(), 2)
|
||||
dc.InsertFinalizedDeposits(context.Background(), 3)
|
||||
dc.InsertFinalizedDeposits(context.Background(), 4)
|
||||
|
||||
// Mimick finalized deposit trie fetch.
|
||||
fd := dc.FinalizedDeposits(context.Background())
|
||||
deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(14))
|
||||
insertIndex := fd.MerkleTrieIndex + 1
|
||||
|
||||
for _, dep := range deps {
|
||||
depHash, err := dep.Data.HashTreeRoot()
|
||||
assert.NoError(t, err)
|
||||
if err = fd.Deposits.Insert(depHash[:], int(insertIndex)); err != nil {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
insertIndex++
|
||||
}
|
||||
dc.InsertFinalizedDeposits(context.Background(), 15)
|
||||
dc.InsertFinalizedDeposits(context.Background(), 15)
|
||||
dc.InsertFinalizedDeposits(context.Background(), 14)
|
||||
|
||||
fd = dc.FinalizedDeposits(context.Background())
|
||||
deps = dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(30))
|
||||
insertIndex = fd.MerkleTrieIndex + 1
|
||||
|
||||
for _, dep := range deps {
|
||||
depHash, err := dep.Data.HashTreeRoot()
|
||||
assert.NoError(t, err)
|
||||
if err = fd.Deposits.Insert(depHash[:], int(insertIndex)); err != nil {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
insertIndex++
|
||||
}
|
||||
assert.Equal(t, fd.Deposits.NumOfItems(), depositTrie.NumOfItems())
|
||||
}
|
||||
|
||||
func TestPruneProofs_Ok(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -270,6 +270,7 @@ func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, v
    // Modified in Altair and Bellatrix.
    var inactivityDenominator uint64
    bias := cfg.InactivityScoreBias
    inactivityDenominator := bias * beaconState.InactivityPenaltyQuotient()
    switch beaconState.Version() {
    case version.Altair:
        inactivityDenominator = bias * cfg.InactivityPenaltyQuotientAltair
@@ -120,11 +120,16 @@ func (s *Service) handleExchangeConfigurationError(err error) {
// Logs the terminal total difficulty status.
func (s *Service) logTtdStatus(ctx context.Context, ttd *uint256.Int) (bool, error) {
    latest, err := s.LatestExecutionBlock(ctx)
    if err != nil {
    switch {
    case errors.Is(err, hexutil.ErrEmptyString):
        return false, nil
    case err != nil:
        return false, err
    }
    if latest == nil {
    case latest == nil:
        return false, errors.New("latest block is nil")
    case latest.TotalDifficulty == "":
        return false, nil
    default:
    }
    latestTtd, err := hexutil.DecodeBig(latest.TotalDifficulty)
    if err != nil {
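Reading the interleaved lines, the refactor collapses the separate nil/error checks into a single switch so that an empty RPC result from a not-yet-synced client is treated as "TTD not reached" rather than as an error, which is what the new TestService_logTtdStatus_NotSyncedClient below exercises. The following is a self-contained sketch of that guard, under the assumption that this is the resulting shape; ttdReachable and execBlock are illustrative stand-ins, not Prysm types.

package main

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// execBlock stands in for pb.ExecutionBlock; only TotalDifficulty matters here.
type execBlock struct{ TotalDifficulty string }

// ttdReachable applies the post-refactor guard switch: empty responses from a
// still-syncing client yield (false, nil) rather than an error.
func ttdReachable(latest *execBlock, err error) (bool, error) {
	switch {
	case errors.Is(err, hexutil.ErrEmptyString):
		return false, nil
	case err != nil:
		return false, err
	case latest == nil:
		return false, errors.New("latest block is nil")
	case latest.TotalDifficulty == "":
		return false, nil
	}
	latestTtd, err := hexutil.DecodeBig(latest.TotalDifficulty)
	if err != nil {
		return false, err
	}
	// The real method goes on to compare latestTtd with the configured TTD;
	// decoding successfully is enough for this illustration.
	return latestTtd.Sign() >= 0, nil
}

func main() {
	fmt.Println(ttdReachable(nil, hexutil.ErrEmptyString))             // false <nil>
	fmt.Println(ttdReachable(&execBlock{TotalDifficulty: ""}, nil))    // false <nil>
	fmt.Println(ttdReachable(&execBlock{TotalDifficulty: "0x2"}, nil)) // true <nil>
}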
@@ -190,6 +190,38 @@ func TestService_logTtdStatus(t *testing.T) {
|
||||
require.Equal(t, false, reached)
|
||||
}
|
||||
|
||||
func TestService_logTtdStatus_NotSyncedClient(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
resp := (*pb.ExecutionBlock)(nil) // Nil response when a client is not synced
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": resp,
|
||||
}
|
||||
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
service := &Service{
|
||||
cfg: &config{},
|
||||
}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
ttd := new(uint256.Int)
|
||||
reached, err := service.logTtdStatus(context.Background(), ttd.SetUint64(24343))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, reached)
|
||||
}
|
||||
|
||||
func emptyPayload() *pb.ExecutionPayload {
|
||||
return &pb.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
|
||||
@@ -10,9 +10,12 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -49,8 +52,8 @@ type EngineCaller interface {
|
||||
ExchangeTransitionConfiguration(
|
||||
ctx context.Context, cfg *pb.TransitionConfiguration,
|
||||
) error
|
||||
LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error)
|
||||
ExecutionBlockByHash(ctx context.Context, hash common.Hash) (*pb.ExecutionBlock, error)
|
||||
GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error)
|
||||
}
|
||||
|
||||
// NewPayload calls the engine_newPayloadV1 method via JSON-RPC.
|
||||
@@ -174,6 +177,78 @@ func (s *Service) ExchangeTransitionConfiguration(
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTerminalBlockHash returns the valid terminal block hash based on total difficulty.
|
||||
//
|
||||
// Spec code:
|
||||
// def get_pow_block_at_terminal_total_difficulty(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]:
|
||||
// # `pow_chain` abstractly represents all blocks in the PoW chain
|
||||
// for block in pow_chain:
|
||||
// parent = pow_chain[block.parent_hash]
|
||||
// block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
|
||||
// parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
|
||||
// if block_reached_ttd and not parent_reached_ttd:
|
||||
// return block
|
||||
//
|
||||
// return None
|
||||
func (s *Service) GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error) {
|
||||
ttd := new(big.Int)
|
||||
ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
|
||||
terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
|
||||
if overflows {
|
||||
return nil, false, errors.New("could not convert terminal total difficulty to uint256")
|
||||
}
|
||||
blk, err := s.LatestExecutionBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get latest execution block")
|
||||
}
|
||||
if blk == nil {
|
||||
return nil, false, errors.New("latest execution block is nil")
|
||||
}
|
||||
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return nil, false, ctx.Err()
|
||||
}
|
||||
currentTotalDifficulty, err := tDStringToUint256(blk.TotalDifficulty)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
|
||||
}
|
||||
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
|
||||
|
||||
parentHash := bytesutil.ToBytes32(blk.ParentHash)
|
||||
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
|
||||
return nil, false, nil
|
||||
}
|
||||
parentBlk, err := s.ExecutionBlockByHash(ctx, parentHash)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get parent execution block")
|
||||
}
|
||||
if parentBlk == nil {
|
||||
return nil, false, errors.New("parent execution block is nil")
|
||||
}
|
||||
if blockReachedTTD {
|
||||
parentTotalDifficulty, err := tDStringToUint256(parentBlk.TotalDifficulty)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
|
||||
}
|
||||
parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
|
||||
if !parentReachedTTD {
|
||||
log.WithFields(logrus.Fields{
|
||||
"number": blk.Number,
|
||||
"hash": fmt.Sprintf("%#x", bytesutil.Trunc(blk.Hash)),
|
||||
"td": blk.TotalDifficulty,
|
||||
"parentTd": parentBlk.TotalDifficulty,
|
||||
"ttd": terminalTotalDifficulty,
|
||||
}).Info("Retrieved terminal block hash")
|
||||
return blk.Hash, true, nil
|
||||
}
|
||||
} else {
|
||||
return nil, false, nil
|
||||
}
|
||||
blk = parentBlk
|
||||
}
|
||||
}
|
||||
|
||||
// LatestExecutionBlock fetches the latest execution engine block by calling
|
||||
// eth_blockByNumber via JSON-RPC.
|
||||
func (s *Service) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error) {
|
||||
@@ -251,3 +326,15 @@ func isTimeout(e error) bool {
|
||||
t, ok := e.(httpTimeoutError)
|
||||
return ok && t.Timeout()
|
||||
}
|
||||
|
||||
func tDStringToUint256(td string) (*uint256.Int, error) {
|
||||
b, err := hexutil.DecodeBig(td)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
i, overflows := uint256.FromBig(b)
|
||||
if overflows {
|
||||
return nil, errors.New("total difficulty overflowed")
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
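The spec snippet quoted in the GetTerminalBlockHash comment above walks the PoW chain backwards until it finds the first block whose total difficulty crosses TTD while its parent's does not. Here is a toy, self-contained version of that walk over an in-memory map (it mirrors the quoted spec pseudocode, not the Prysm implementation; the block and hash types are simplified, and the example numbers match the "happy case" test further down: block TD 3, parent TD 1, TTD 2).

package main

import "fmt"

type powBlock struct {
	parent          string
	totalDifficulty uint64
}

// terminalBlock returns the hash of the first block (walking back from head)
// whose total difficulty reached ttd while its parent's did not, mirroring
// get_pow_block_at_terminal_total_difficulty from the quoted spec.
func terminalBlock(chain map[string]powBlock, head string, ttd uint64) (string, bool) {
	for hash := head; ; {
		blk, ok := chain[hash]
		if !ok || blk.parent == "" {
			return "", false // ran out of ancestors without finding a terminal block
		}
		parent, ok := chain[blk.parent]
		if !ok {
			return "", false
		}
		if blk.totalDifficulty >= ttd && parent.totalDifficulty < ttd {
			return hash, true
		}
		if blk.totalDifficulty < ttd {
			return "", false // TTD not reached yet on this branch
		}
		hash = blk.parent
	}
}

func main() {
	chain := map[string]powBlock{
		"c": {parent: "", totalDifficulty: 1},
		"b": {parent: "c", totalDifficulty: 1},
		"a": {parent: "b", totalDifficulty: 3}, // first block at or above TTD=2
	}
	fmt.Println(terminalBlock(chain, "a", 2)) // a true
}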
@@ -418,6 +418,155 @@ func TestClient_HTTP(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
paramsTd string
|
||||
currentPowBlock *pb.ExecutionBlock
|
||||
parentPowBlock *pb.ExecutionBlock
|
||||
errLatestExecutionBlk error
|
||||
wantTerminalBlockHash []byte
|
||||
wantExists bool
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
name: "config td overflows",
|
||||
paramsTd: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
|
||||
errString: "could not convert terminal total difficulty to uint256",
|
||||
},
|
||||
{
|
||||
name: "could not get latest execution block",
|
||||
paramsTd: "1",
|
||||
errLatestExecutionBlk: errors.New("blah"),
|
||||
errString: "could not get latest execution block",
|
||||
},
|
||||
{
|
||||
name: "nil latest execution block",
|
||||
paramsTd: "1",
|
||||
errString: "latest execution block is nil",
|
||||
},
|
||||
{
|
||||
name: "current execution block invalid TD",
|
||||
paramsTd: "1",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
TotalDifficulty: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
|
||||
},
|
||||
errString: "could not convert total difficulty to uint256",
|
||||
},
|
||||
{
|
||||
name: "current execution block has zero hash parent",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: params.BeaconConfig().ZeroHash[:],
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "could not get parent block",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
errString: "could not get parent execution block",
|
||||
},
|
||||
{
|
||||
name: "parent execution block invalid TD",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'b'},
|
||||
ParentHash: []byte{'c'},
|
||||
TotalDifficulty: "1",
|
||||
},
|
||||
errString: "could not convert total difficulty to uint256",
|
||||
},
|
||||
{
|
||||
name: "happy case",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'b'},
|
||||
ParentHash: []byte{'c'},
|
||||
TotalDifficulty: "0x1",
|
||||
},
|
||||
wantExists: true,
|
||||
wantTerminalBlockHash: []byte{'a'},
|
||||
},
|
||||
{
|
||||
name: "ttd not reached",
|
||||
paramsTd: "3",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x2",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'b'},
|
||||
ParentHash: []byte{'c'},
|
||||
TotalDifficulty: "0x1",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.TerminalTotalDifficulty = tt.paramsTd
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
var m map[[32]byte]*pb.ExecutionBlock
|
||||
if tt.parentPowBlock != nil {
|
||||
m = map[[32]byte]*pb.ExecutionBlock{
|
||||
bytesutil.ToBytes32(tt.parentPowBlock.Hash): tt.parentPowBlock,
|
||||
}
|
||||
}
|
||||
client := mocks.EngineClient{
|
||||
ErrLatestExecBlock: tt.errLatestExecutionBlk,
|
||||
ExecutionBlock: tt.currentPowBlock,
|
||||
BlockByHashMap: m,
|
||||
}
|
||||
b, e, err := client.GetTerminalBlockHash(context.Background())
|
||||
if tt.errString != "" {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, tt.wantExists, e)
|
||||
require.DeepEqual(t, tt.wantTerminalBlockHash, b)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_tDStringToUint256(t *testing.T) {
|
||||
i, err := tDStringToUint256("0x0")
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, uint256.NewInt(0), i)
|
||||
|
||||
i, err = tDStringToUint256("0x10000")
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, uint256.NewInt(65536), i)
|
||||
|
||||
_, err = tDStringToUint256("100")
|
||||
require.ErrorContains(t, "hex string without 0x prefix", err)
|
||||
|
||||
_, err = tDStringToUint256("0xzzzzzz")
|
||||
require.ErrorContains(t, "invalid hex string", err)
|
||||
|
||||
_, err = tDStringToUint256("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" +
|
||||
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
|
||||
require.ErrorContains(t, "hex number > 256 bits", err)
|
||||
}
|
||||
|
||||
func TestExchangeTransitionConfiguration(t *testing.T) {
|
||||
fix := fixtures()
|
||||
ctx := context.Background()
|
||||
|
||||
@@ -413,11 +413,14 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
    // accumulates. we finalize them here before we are ready to receive a block.
    // Otherwise, the first few blocks will be slower to compute as we will
    // hold the lock and be busy finalizing the deposits.
    s.cfg.depositCache.InsertFinalizedDeposits(ctx, int64(currIndex)) // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
    // The deposit index in the state is always the index of the next deposit
    // to be included (rather than the last one to be processed). This was most likely
    // done as the state cannot represent signed integers.
    actualIndex := int64(currIndex) - 1 // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
    s.cfg.depositCache.InsertFinalizedDeposits(ctx, actualIndex)
    // Deposit proofs are only used during state transition and can be safely removed to save space.

    // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
    if err = s.cfg.depositCache.PruneProofs(ctx, int64(currIndex)); err != nil {
    if err = s.cfg.depositCache.PruneProofs(ctx, actualIndex); err != nil {
        return errors.Wrap(err, "could not prune deposit proofs")
    }
}
@@ -472,7 +472,7 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {
|
||||
require.NoError(t, s.cfg.beaconDB.SaveState(context.Background(), emptyState, headRoot))
|
||||
require.NoError(t, stateGen.SaveState(context.Background(), headRoot, emptyState))
|
||||
s.cfg.stateGen = stateGen
|
||||
require.NoError(t, emptyState.SetEth1DepositIndex(2))
|
||||
require.NoError(t, emptyState.SetEth1DepositIndex(3))
|
||||
|
||||
ctx := context.Background()
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(0), Root: headRoot[:]}))
|
||||
|
||||
@@ -26,6 +26,7 @@ go_library(
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -2,25 +2,32 @@ package testing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
)
|
||||
|
||||
// EngineClient --
|
||||
type EngineClient struct {
|
||||
NewPayloadResp []byte
|
||||
PayloadIDBytes *pb.PayloadIDBytes
|
||||
ForkChoiceUpdatedResp []byte
|
||||
ExecutionPayload *pb.ExecutionPayload
|
||||
ExecutionBlock *pb.ExecutionBlock
|
||||
Err error
|
||||
ErrLatestExecBlock error
|
||||
ErrExecBlockByHash error
|
||||
ErrForkchoiceUpdated error
|
||||
ErrNewPayload error
|
||||
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
|
||||
NewPayloadResp []byte
|
||||
PayloadIDBytes *pb.PayloadIDBytes
|
||||
ForkChoiceUpdatedResp []byte
|
||||
ExecutionPayload *pb.ExecutionPayload
|
||||
ExecutionBlock *pb.ExecutionBlock
|
||||
Err error
|
||||
ErrLatestExecBlock error
|
||||
ErrExecBlockByHash error
|
||||
ErrForkchoiceUpdated error
|
||||
ErrNewPayload error
|
||||
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
|
||||
TerminalBlockHash []byte
|
||||
TerminalBlockHashExists bool
|
||||
}
|
||||
|
||||
// NewPayload --
|
||||
@@ -58,3 +65,52 @@ func (e *EngineClient) ExecutionBlockByHash(_ context.Context, h common.Hash) (*
|
||||
}
|
||||
return b, e.ErrExecBlockByHash
|
||||
}
|
||||
|
||||
// GetTerminalBlockHash --
|
||||
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error) {
|
||||
ttd := new(big.Int)
|
||||
ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
|
||||
terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
|
||||
if overflows {
|
||||
return nil, false, errors.New("could not convert terminal total difficulty to uint256")
|
||||
}
|
||||
blk, err := e.LatestExecutionBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get latest execution block")
|
||||
}
|
||||
if blk == nil {
|
||||
return nil, false, errors.New("latest execution block is nil")
|
||||
}
|
||||
|
||||
for {
|
||||
b, err := hexutil.DecodeBig(blk.TotalDifficulty)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
|
||||
}
|
||||
currentTotalDifficulty, _ := uint256.FromBig(b)
|
||||
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
|
||||
|
||||
parentHash := bytesutil.ToBytes32(blk.ParentHash)
|
||||
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
|
||||
return nil, false, nil
|
||||
}
|
||||
parentBlk, err := e.ExecutionBlockByHash(ctx, parentHash)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get parent execution block")
|
||||
}
|
||||
if blockReachedTTD {
|
||||
b, err := hexutil.DecodeBig(parentBlk.TotalDifficulty)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
|
||||
}
|
||||
parentTotalDifficulty, _ := uint256.FromBig(b)
|
||||
parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
|
||||
if !parentReachedTTD {
|
||||
return blk.Hash, true, nil
|
||||
}
|
||||
} else {
|
||||
return nil, false, nil
|
||||
}
|
||||
blk = parentBlk
|
||||
}
|
||||
}
|
||||
|
||||
@@ -77,7 +77,6 @@ go_library(
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ferranbt_fastssz//:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
@@ -162,7 +161,6 @@ go_test(
|
||||
"@com_github_d4l3k_messagediff//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_golang_mock//gomock:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
|
||||
@@ -3,11 +3,7 @@ package validator
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
@@ -181,79 +177,7 @@ func (vs *Server) getTerminalBlockHashIfExists(ctx context.Context) ([]byte, boo
|
||||
return terminalBlockHash.Bytes(), true, nil
|
||||
}
|
||||
|
||||
return vs.getPowBlockHashAtTerminalTotalDifficulty(ctx)
|
||||
}
|
||||
|
||||
// This returns the valid terminal block hash based on total difficulty.
|
||||
//
|
||||
// Spec code:
|
||||
// def get_pow_block_at_terminal_total_difficulty(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]:
|
||||
// # `pow_chain` abstractly represents all blocks in the PoW chain
|
||||
// for block in pow_chain:
|
||||
// parent = pow_chain[block.parent_hash]
|
||||
// block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
|
||||
// parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
|
||||
// if block_reached_ttd and not parent_reached_ttd:
|
||||
// return block
|
||||
//
|
||||
// return None
|
||||
func (vs *Server) getPowBlockHashAtTerminalTotalDifficulty(ctx context.Context) ([]byte, bool, error) {
|
||||
ttd := new(big.Int)
|
||||
ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
|
||||
terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
|
||||
if overflows {
|
||||
return nil, false, errors.New("could not convert terminal total difficulty to uint256")
|
||||
}
|
||||
blk, err := vs.ExecutionEngineCaller.LatestExecutionBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get latest execution block")
|
||||
}
|
||||
if blk == nil {
|
||||
return nil, false, errors.New("latest execution block is nil")
|
||||
}
|
||||
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return nil, false, ctx.Err()
|
||||
}
|
||||
currentTotalDifficulty, err := tDStringToUint256(blk.TotalDifficulty)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
|
||||
}
|
||||
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
|
||||
|
||||
parentHash := bytesutil.ToBytes32(blk.ParentHash)
|
||||
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
|
||||
return nil, false, nil
|
||||
}
|
||||
parentBlk, err := vs.ExecutionEngineCaller.ExecutionBlockByHash(ctx, parentHash)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get parent execution block")
|
||||
}
|
||||
if parentBlk == nil {
|
||||
return nil, false, errors.New("parent execution block is nil")
|
||||
}
|
||||
if blockReachedTTD {
|
||||
parentTotalDifficulty, err := tDStringToUint256(parentBlk.TotalDifficulty)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
|
||||
}
|
||||
parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
|
||||
if !parentReachedTTD {
|
||||
log.WithFields(logrus.Fields{
|
||||
"number": blk.Number,
|
||||
"hash": fmt.Sprintf("%#x", bytesutil.Trunc(blk.Hash)),
|
||||
"td": blk.TotalDifficulty,
|
||||
"parentTd": parentBlk.TotalDifficulty,
|
||||
"ttd": terminalTotalDifficulty,
|
||||
}).Info("Retrieved terminal block hash")
|
||||
return blk.Hash, true, nil
|
||||
}
|
||||
} else {
|
||||
return nil, false, nil
|
||||
}
|
||||
blk = parentBlk
|
||||
}
|
||||
return vs.ExecutionEngineCaller.GetTerminalBlockHash(ctx)
|
||||
}
|
||||
|
||||
// activationEpochNotReached returns true if activation epoch has not been reach.
|
||||
@@ -270,18 +194,6 @@ func activationEpochNotReached(slot types.Slot) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func tDStringToUint256(td string) (*uint256.Int, error) {
|
||||
b, err := hexutil.DecodeBig(td)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
i, overflows := uint256.FromBig(b)
|
||||
if overflows {
|
||||
return nil, errors.New("total difficulty overflowed")
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func emptyPayload() *enginev1.ExecutionPayload {
|
||||
return &enginev1.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/holiman/uint256"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
@@ -22,26 +21,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
)
|
||||
|
||||
func Test_tDStringToUint256(t *testing.T) {
|
||||
i, err := tDStringToUint256("0x0")
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, uint256.NewInt(0), i)
|
||||
|
||||
i, err = tDStringToUint256("0x10000")
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, uint256.NewInt(65536), i)
|
||||
|
||||
_, err = tDStringToUint256("100")
|
||||
require.ErrorContains(t, "hex string without 0x prefix", err)
|
||||
|
||||
_, err = tDStringToUint256("0xzzzzzz")
|
||||
require.ErrorContains(t, "invalid hex string", err)
|
||||
|
||||
_, err = tDStringToUint256("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" +
|
||||
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
|
||||
require.ErrorContains(t, "hex number > 256 bits", err)
|
||||
}
|
||||
|
||||
func TestServer_activationEpochNotReached(t *testing.T) {
|
||||
require.Equal(t, false, activationEpochNotReached(0))
|
||||
|
||||
@@ -154,137 +133,6 @@ func TestServer_getExecutionPayload(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
paramsTd string
|
||||
currentPowBlock *pb.ExecutionBlock
|
||||
parentPowBlock *pb.ExecutionBlock
|
||||
errLatestExecutionBlk error
|
||||
wantTerminalBlockHash []byte
|
||||
wantExists bool
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
name: "config td overflows",
|
||||
paramsTd: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
|
||||
errString: "could not convert terminal total difficulty to uint256",
|
||||
},
|
||||
{
|
||||
name: "could not get latest execution block",
|
||||
paramsTd: "1",
|
||||
errLatestExecutionBlk: errors.New("blah"),
|
||||
errString: "could not get latest execution block",
|
||||
},
|
||||
{
|
||||
name: "nil latest execution block",
|
||||
paramsTd: "1",
|
||||
errString: "latest execution block is nil",
|
||||
},
|
||||
{
|
||||
name: "current execution block invalid TD",
|
||||
paramsTd: "1",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
TotalDifficulty: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
|
||||
},
|
||||
errString: "could not convert total difficulty to uint256",
|
||||
},
|
||||
{
|
||||
name: "current execution block has zero hash parent",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: params.BeaconConfig().ZeroHash[:],
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "could not get parent block",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
errString: "could not get parent execution block",
|
||||
},
|
||||
{
|
||||
name: "parent execution block invalid TD",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'b'},
|
||||
ParentHash: []byte{'c'},
|
||||
TotalDifficulty: "1",
|
||||
},
|
||||
errString: "could not convert total difficulty to uint256",
|
||||
},
|
||||
{
|
||||
name: "happy case",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'b'},
|
||||
ParentHash: []byte{'c'},
|
||||
TotalDifficulty: "0x1",
|
||||
},
|
||||
wantExists: true,
|
||||
wantTerminalBlockHash: []byte{'a'},
|
||||
},
|
||||
{
|
||||
name: "ttd not reached",
|
||||
paramsTd: "3",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x2",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'b'},
|
||||
ParentHash: []byte{'c'},
|
||||
TotalDifficulty: "0x1",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.TerminalTotalDifficulty = tt.paramsTd
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
var m map[[32]byte]*pb.ExecutionBlock
|
||||
if tt.parentPowBlock != nil {
|
||||
m = map[[32]byte]*pb.ExecutionBlock{
|
||||
bytesutil.ToBytes32(tt.parentPowBlock.Hash): tt.parentPowBlock,
|
||||
}
|
||||
}
|
||||
vs := &Server{
|
||||
ExecutionEngineCaller: &powtesting.EngineClient{
|
||||
ErrLatestExecBlock: tt.errLatestExecutionBlk,
|
||||
ExecutionBlock: tt.currentPowBlock,
|
||||
BlockByHashMap: m,
|
||||
},
|
||||
}
|
||||
b, e, err := vs.getPowBlockHashAtTerminalTotalDifficulty(context.Background())
|
||||
if tt.errString != "" {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, tt.wantExists, e)
|
||||
require.DeepEqual(t, tt.wantTerminalBlockHash, b)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestServer_getTerminalBlockHashIfExists(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -16,12 +16,19 @@ import (
type BeaconState interface {
    ReadOnlyBeaconState
    WriteOnlyBeaconState
    SpecConstantsProvider
    Copy() BeaconState
    HashTreeRoot(ctx context.Context) ([32]byte, error)
    FutureForkStub
    StateProver
}

// SpecConstantsProvider defines a struct which can provide varying configuration
// values depending on fork versions, such as the beacon state.
type SpecConstantsProvider interface {
    InactivityPenaltyQuotient() (uint64, error)
}

// StateProver defines the ability to create Merkle proofs for beacon state fields.
type StateProver interface {
    FinalizedRootProof(ctx context.Context) ([][]byte, error)
beacon-chain/state/state-native/v1/spec_constants.go (new file, 7 lines)
@@ -0,0 +1,7 @@
package v1

import "github.com/prysmaticlabs/prysm/config/params"

func (b *BeaconState) InactivityPenaltyQuotient() uint64 {
    return params.BeaconConfig().InactivityPenaltyQuotient
}

beacon-chain/state/state-native/v2/spec_constants.go (new file, 7 lines)
@@ -0,0 +1,7 @@
package v2

import "github.com/prysmaticlabs/prysm/config/params"

func (b *BeaconState) InactivityPenaltyQuotient() uint64 {
    return params.BeaconConfig().InactivityPenaltyQuotientAltair
}

beacon-chain/state/state-native/v3/spec_constants.go (new file, 7 lines)
@@ -0,0 +1,7 @@
package v3

import "github.com/prysmaticlabs/prysm/config/params"

func (b *BeaconState) InactivityPenaltyQuotient() uint64 {
    return params.BeaconConfig().InactivityPenaltyQuotientBellatrix
}

beacon-chain/state/v1/spec_constants.go (new file, 7 lines)
@@ -0,0 +1,7 @@
package v1

import "github.com/prysmaticlabs/prysm/config/params"

func (b *BeaconState) InactivityPenaltyQuotient() uint64 {
    return params.BeaconConfig().InactivityPenaltyQuotient
}

beacon-chain/state/v2/spec_constants.go (new file, 7 lines)
@@ -0,0 +1,7 @@
package v1

import "github.com/prysmaticlabs/prysm/config/params"

func (b *BeaconState) InactivityPenaltyQuotient() uint64 {
    return params.BeaconConfig().InactivityPenaltyQuotientAltair
}

beacon-chain/state/v3/spec_constants.go (new file, 7 lines)
@@ -0,0 +1,7 @@
package v1

import "github.com/prysmaticlabs/prysm/config/params"

func (b *BeaconState) InactivityPenaltyQuotient() uint64 {
    return params.BeaconConfig().InactivityPenaltyQuotientBellatrix
}
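Taken together with the SpecConstantsProvider addition to the BeaconState interface above, each state version now reports its own inactivity penalty quotient, which is what lets AttestationsDelta drop its per-fork switch. A small self-contained sketch of that pattern follows; the concrete types and quotient values are illustrative assumptions, not the Prysm implementations or config constants.

package main

import "fmt"

// specConstantsProvider mirrors the SpecConstantsProvider idea introduced above.
type specConstantsProvider interface {
	InactivityPenaltyQuotient() uint64
}

// Illustrative per-fork states; the values stand in for the fork-specific config
// constants returned by the new spec_constants.go files.
type altairState struct{}
type bellatrixState struct{}

func (altairState) InactivityPenaltyQuotient() uint64    { return 50_331_648 } // assumed Altair value (3 * 2^24)
func (bellatrixState) InactivityPenaltyQuotient() uint64 { return 16_777_216 } // assumed Bellatrix value (2^24)

// inactivityDenominator shows how a caller no longer needs a version switch:
// it multiplies the score bias by whatever quotient the state reports.
func inactivityDenominator(st specConstantsProvider, scoreBias uint64) uint64 {
	return scoreBias * st.InactivityPenaltyQuotient()
}

func main() {
	fmt.Println(inactivityDenominator(altairState{}, 4))
	fmt.Println(inactivityDenominator(bellatrixState{}, 4))
}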
@@ -49,30 +49,29 @@ type Service struct {
    synced       *abool.AtomicBool
    chainStarted *abool.AtomicBool
    counter      *ratecounter.RateCounter
    genesisChan  chan time.Time
}

// NewService configures the initial sync service responsible for bringing the node up to the
// latest head of the blockchain.
func NewService(ctx context.Context, cfg *Config) *Service {
    ctx, cancel := context.WithCancel(ctx)
    s := &Service{
    return &Service{
        cfg:          cfg,
        ctx:          ctx,
        cancel:       cancel,
        synced:       abool.New(),
        chainStarted: abool.New(),
        counter:      ratecounter.NewRateCounter(counterSeconds * time.Second),
        genesisChan:  make(chan time.Time),
    }
    go s.waitForStateInitialization()
    return s
}

// Start the initial sync service.
func (s *Service) Start() {
    // Wait for state initialized event.
    genesis := <-s.genesisChan
    genesis, err := s.waitForStateInitialization()
    if err != nil {
        log.WithError(err).Fatal("Failed to wait for state initialization.")
        return
    }
    if genesis.IsZero() {
        log.Debug("Exiting Initial Sync Service")
        return
@@ -180,9 +179,10 @@ func (s *Service) waitForMinimumPeers() {
    }
}

// TODO: Return error
// waitForStateInitialization makes sure that beacon node is ready to be accessed: it is either
// already properly configured or system waits up until state initialized event is triggered.
func (s *Service) waitForStateInitialization() {
func (s *Service) waitForStateInitialization() (time.Time, error) {
    // Wait for state to be initialized.
    stateChannel := make(chan *feed.Event, 1)
    stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
@@ -198,19 +198,14 @@ func (s *Service) waitForStateInitialization() {
                continue
            }
            log.WithField("starttime", data.StartTime).Debug("Received state initialized event")
            s.genesisChan <- data.StartTime
            return
            return data.StartTime, nil
        }
    case <-s.ctx.Done():
        log.Debug("Context closed, exiting goroutine")
        // Send a zero time in the event we are exiting.
        s.genesisChan <- time.Time{}
        return
        return time.Time{}, errors.New("context closed, exiting goroutine")
    case err := <-stateSub.Err():
        log.WithError(err).Error("Subscription to state notifier failed")
        // Send a zero time in the event we are exiting.
        s.genesisChan <- time.Time{}
        return
        return time.Time{}, errors.Wrap(err, "subscription to state notifier failed")
    }
}
}
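The net effect of this hunk: waitForStateInitialization no longer runs as a background goroutine that pushes the genesis time through genesisChan; Start() now calls it directly and receives (time.Time, error). A stripped-down, self-contained sketch of that blocking-call shape is shown below (waitForGenesis and the events channel are illustrative, not the Prysm types).

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForGenesis blocks until a genesis time arrives or the context is closed,
// returning an explicit error instead of signaling through a channel, in the
// spirit of the refactored waitForStateInitialization above.
func waitForGenesis(ctx context.Context, events <-chan time.Time) (time.Time, error) {
	select {
	case genesis := <-events:
		return genesis, nil
	case <-ctx.Done():
		return time.Time{}, errors.New("context closed, exiting goroutine")
	}
}

func main() {
	events := make(chan time.Time, 1)
	events <- time.Unix(1606824023, 0) // illustrative genesis time
	genesis, err := waitForGenesis(context.Background(), events)
	fmt.Println(genesis, err)

	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	_, err = waitForGenesis(ctx, make(chan time.Time))
	fmt.Println(err) // context closed, exiting goroutine
}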
@@ -168,11 +168,7 @@ func TestService_InitStartStop(t *testing.T) {
|
||||
Chain: mc,
|
||||
StateNotifier: notifier,
|
||||
})
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
assert.NotNil(t, s)
|
||||
if tt.methodRuns != nil {
|
||||
tt.methodRuns(notifier.StateFeed())
|
||||
}
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
@@ -181,6 +177,11 @@ func TestService_InitStartStop(t *testing.T) {
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
if tt.methodRuns != nil {
|
||||
tt.methodRuns(notifier.StateFeed())
|
||||
}
|
||||
|
||||
go func() {
|
||||
// Allow to exit from test (on no head loop waiting for head is started).
|
||||
// In most tests, this is redundant, as Start() already exited.
|
||||
@@ -207,7 +208,6 @@ func TestService_waitForStateInitialization(t *testing.T) {
|
||||
synced: abool.New(),
|
||||
chainStarted: abool.New(),
|
||||
counter: ratecounter.NewRateCounter(counterSeconds * time.Second),
|
||||
genesisChan: make(chan time.Time),
|
||||
}
|
||||
return s
|
||||
}
|
||||
@@ -221,9 +221,8 @@ func TestService_waitForStateInitialization(t *testing.T) {
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
go s.waitForStateInitialization()
|
||||
currTime := <-s.genesisChan
|
||||
assert.Equal(t, true, currTime.IsZero())
|
||||
_, err := s.waitForStateInitialization()
|
||||
assert.ErrorContains(t, "context closed", err)
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
@@ -236,8 +235,6 @@ func TestService_waitForStateInitialization(t *testing.T) {
|
||||
t.Fatalf("Test should have exited by now, timed out")
|
||||
}
|
||||
assert.LogsContain(t, hook, "Waiting for state to be initialized")
|
||||
assert.LogsContain(t, hook, "Context closed, exiting goroutine")
|
||||
assert.LogsDoNotContain(t, hook, "Subscription to state notifier failed")
|
||||
})
|
||||
|
||||
t.Run("no state and state init event received", func(t *testing.T) {
|
||||
@@ -251,8 +248,9 @@ func TestService_waitForStateInitialization(t *testing.T) {
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
go s.waitForStateInitialization()
|
||||
receivedGenesisTime = <-s.genesisChan
|
||||
var err error
|
||||
receivedGenesisTime, err = s.waitForStateInitialization()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, receivedGenesisTime.IsZero())
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -281,7 +279,6 @@ func TestService_waitForStateInitialization(t *testing.T) {
|
||||
assert.LogsContain(t, hook, "Event feed data is not type *statefeed.InitializedData")
|
||||
assert.LogsContain(t, hook, "Waiting for state to be initialized")
|
||||
assert.LogsContain(t, hook, "Received state initialized event")
|
||||
assert.LogsDoNotContain(t, hook, "Context closed, exiting goroutine")
|
||||
})
|
||||
|
||||
t.Run("no state and state init event received and service start", func(t *testing.T) {
|
||||
@@ -296,7 +293,8 @@ func TestService_waitForStateInitialization(t *testing.T) {
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
s.waitForStateInitialization()
|
||||
_, err := s.waitForStateInitialization()
|
||||
require.NoError(t, err)
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
@@ -321,7 +319,6 @@ func TestService_waitForStateInitialization(t *testing.T) {
|
||||
}
|
||||
assert.LogsContain(t, hook, "Waiting for state to be initialized")
|
||||
assert.LogsContain(t, hook, "Received state initialized event")
|
||||
assert.LogsDoNotContain(t, hook, "Context closed, exiting goroutine")
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -28,7 +28,6 @@ var Commands = &cli.Command{
|
||||
flags.WalletPasswordFileFlag,
|
||||
flags.DeletePublicKeysFlag,
|
||||
features.Mainnet,
|
||||
features.PyrmontTestnet,
|
||||
features.PraterTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
@@ -62,7 +61,6 @@ var Commands = &cli.Command{
|
||||
flags.GrpcRetriesFlag,
|
||||
flags.GrpcRetryDelayFlag,
|
||||
features.Mainnet,
|
||||
features.PyrmontTestnet,
|
||||
features.PraterTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
@@ -93,7 +91,6 @@ var Commands = &cli.Command{
|
||||
flags.BackupPublicKeysFlag,
|
||||
flags.BackupPasswordFile,
|
||||
features.Mainnet,
|
||||
features.PyrmontTestnet,
|
||||
features.PraterTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
@@ -121,7 +118,6 @@ var Commands = &cli.Command{
|
||||
flags.AccountPasswordFileFlag,
|
||||
flags.ImportPrivateKeyFileFlag,
|
||||
features.Mainnet,
|
||||
features.PyrmontTestnet,
|
||||
features.PraterTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
@@ -155,7 +151,6 @@ var Commands = &cli.Command{
|
||||
flags.GrpcRetryDelayFlag,
|
||||
flags.ExitAllFlag,
|
||||
features.Mainnet,
|
||||
features.PyrmontTestnet,
|
||||
features.PraterTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
|
||||
@@ -22,7 +22,6 @@ var Commands = &cli.Command{
|
||||
cmd.DataDirFlag,
|
||||
flags.SlashingProtectionExportDirFlag,
|
||||
features.Mainnet,
|
||||
features.PyrmontTestnet,
|
||||
features.PraterTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
@@ -47,7 +46,6 @@ var Commands = &cli.Command{
|
||||
cmd.DataDirFlag,
|
||||
flags.SlashingProtectionJSONFileFlag,
|
||||
features.Mainnet,
|
||||
features.PyrmontTestnet,
|
||||
features.PraterTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
|
||||
@@ -34,7 +34,6 @@ var Commands = &cli.Command{
|
||||
flags.Mnemonic25thWordFileFlag,
|
||||
flags.SkipMnemonic25thWordCheckFlag,
|
||||
features.Mainnet,
|
||||
features.PyrmontTestnet,
|
||||
features.PraterTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
@@ -64,7 +63,6 @@ var Commands = &cli.Command{
|
||||
flags.RemoteSignerKeyPathFlag,
|
||||
flags.RemoteSignerCACertPathFlag,
|
||||
features.Mainnet,
|
||||
features.PyrmontTestnet,
|
||||
features.PraterTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
@@ -93,7 +91,6 @@ var Commands = &cli.Command{
|
||||
flags.Mnemonic25thWordFileFlag,
|
||||
flags.SkipMnemonic25thWordCheckFlag,
|
||||
features.Mainnet,
|
||||
features.PyrmontTestnet,
|
||||
features.PraterTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
|
||||
@@ -35,9 +35,6 @@ const disabledFeatureFlag = "Disabled feature flag"
|
||||
|
||||
// Flags is a struct to represent which features the client will perform on runtime.
|
||||
type Flags struct {
|
||||
// Testnet Flags.
|
||||
PyrmontTestnet bool // PyrmontTestnet defines the flag through which we can enable the node to run on the Pyrmont testnet.
|
||||
|
||||
// Feature related flags.
|
||||
RemoteSlasherProtection bool // RemoteSlasherProtection utilizes a beacon node with --slasher mode for validator slashing protection.
|
||||
WriteSSZStateTransitions bool // WriteSSZStateTransitions to tmp directory.
|
||||
@@ -121,13 +118,8 @@ func InitWithReset(c *Flags) func() {
|
||||
}
|
||||
|
||||
// configureTestnet sets the config according to specified testnet flag
|
||||
func configureTestnet(ctx *cli.Context, cfg *Flags) {
|
||||
if ctx.Bool(PyrmontTestnet.Name) {
|
||||
log.Warn("Running on Pyrmont Testnet")
|
||||
params.UsePyrmontConfig()
|
||||
params.UsePyrmontNetworkConfig()
|
||||
cfg.PyrmontTestnet = true
|
||||
} else if ctx.Bool(PraterTestnet.Name) {
|
||||
func configureTestnet(ctx *cli.Context) {
|
||||
if ctx.Bool(PraterTestnet.Name) {
|
||||
log.Warn("Running on the Prater Testnet")
|
||||
params.UsePraterConfig()
|
||||
params.UsePraterNetworkConfig()
|
||||
@@ -145,7 +137,7 @@ func ConfigureBeaconChain(ctx *cli.Context) {
|
||||
if ctx.Bool(devModeFlag.Name) {
|
||||
enableDevModeFlags(ctx)
|
||||
}
|
||||
configureTestnet(ctx, cfg)
|
||||
configureTestnet(ctx)
|
||||
|
||||
if ctx.Bool(writeSSZStateTransitionsFlag.Name) {
|
||||
logEnabled(writeSSZStateTransitionsFlag)
|
||||
@@ -240,7 +232,7 @@ func ConfigureBeaconChain(ctx *cli.Context) {
|
||||
func ConfigureValidator(ctx *cli.Context) {
|
||||
complainOnDeprecatedFlags(ctx)
|
||||
cfg := &Flags{}
|
||||
configureTestnet(ctx, cfg)
|
||||
configureTestnet(ctx)
|
||||
if ctx.Bool(enableExternalSlasherProtectionFlag.Name) {
|
||||
log.Fatal(
|
||||
"Remote slashing protection has currently been disabled in Prysm due to safety concerns. " +
|
||||
|
||||
@@ -11,41 +11,41 @@ import (
|
||||
func TestInitFeatureConfig(t *testing.T) {
|
||||
defer Init(&Flags{})
|
||||
cfg := &Flags{
|
||||
PyrmontTestnet: true,
|
||||
EnablePeerScorer: true,
|
||||
}
|
||||
Init(cfg)
|
||||
c := Get()
|
||||
assert.Equal(t, true, c.PyrmontTestnet)
|
||||
assert.Equal(t, true, c.EnablePeerScorer)
|
||||
|
||||
// Reset back to false for the follow up tests.
|
||||
cfg = &Flags{PyrmontTestnet: false}
|
||||
cfg = &Flags{RemoteSlasherProtection: false}
|
||||
Init(cfg)
|
||||
}
|
||||
|
||||
func TestInitWithReset(t *testing.T) {
|
||||
defer Init(&Flags{})
|
||||
Init(&Flags{
|
||||
PyrmontTestnet: true,
|
||||
EnablePeerScorer: true,
|
||||
})
|
||||
assert.Equal(t, true, Get().PyrmontTestnet)
|
||||
assert.Equal(t, true, Get().EnablePeerScorer)
|
||||
|
||||
// Overwrite previously set value (value that didn't come by default).
|
||||
resetCfg := InitWithReset(&Flags{
|
||||
PyrmontTestnet: false,
|
||||
EnablePeerScorer: false,
|
||||
})
|
||||
assert.Equal(t, false, Get().PyrmontTestnet)
|
||||
assert.Equal(t, false, Get().EnablePeerScorer)
|
||||
|
||||
// Reset must get to previously set configuration (not to default config values).
|
||||
resetCfg()
|
||||
assert.Equal(t, true, Get().PyrmontTestnet)
|
||||
assert.Equal(t, true, Get().EnablePeerScorer)
|
||||
}
|
||||
|
||||
func TestConfigureBeaconConfig(t *testing.T) {
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.Bool(PyrmontTestnet.Name, true, "test")
|
||||
set.Bool(enablePeerScorer.Name, true, "test")
|
||||
context := cli.NewContext(&app, set, nil)
|
||||
ConfigureBeaconChain(context)
|
||||
c := Get()
|
||||
assert.Equal(t, true, c.PyrmontTestnet)
|
||||
assert.Equal(t, true, c.EnablePeerScorer)
|
||||
}
|
||||
|
||||
@@ -70,6 +70,11 @@ var (
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedPyrmontTestnet = &cli.BoolFlag{
|
||||
Name: "pyrmont",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
)
|
||||
|
||||
var deprecatedFlags = []cli.Flag{
|
||||
@@ -84,4 +89,5 @@ var deprecatedFlags = []cli.Flag{
|
||||
deprecatedDisableNextSlotStateCache,
|
||||
deprecatedAttestationAggregationStrategy,
|
||||
deprecatedForceOptMaxCoverAggregationStategy,
|
||||
deprecatedPyrmontTestnet,
|
||||
}
|
||||
|
||||
@@ -7,11 +7,6 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
// PyrmontTestnet flag for the multiclient Ethereum consensus testnet.
|
||||
PyrmontTestnet = &cli.BoolFlag{
|
||||
Name: "pyrmont",
|
||||
Usage: "This defines the flag through which we can run on the Pyrmont Multiclient Testnet",
|
||||
}
|
||||
// PraterTestnet flag for the multiclient Ethereum consensus testnet.
|
||||
PraterTestnet = &cli.BoolFlag{
|
||||
Name: "prater",
|
||||
@@ -156,7 +151,6 @@ var ValidatorFlags = append(deprecatedFlags, []cli.Flag{
|
||||
writeWalletPasswordOnWebOnboarding,
|
||||
enableExternalSlasherProtectionFlag,
|
||||
disableAttestingHistoryDBCache,
|
||||
PyrmontTestnet,
|
||||
PraterTestnet,
|
||||
Mainnet,
|
||||
dynamicKeyReloadDebounceInterval,
|
||||
@@ -175,7 +169,6 @@ var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{
|
||||
devModeFlag,
|
||||
writeSSZStateTransitionsFlag,
|
||||
disableGRPCConnectionLogging,
|
||||
PyrmontTestnet,
|
||||
PraterTestnet,
|
||||
Mainnet,
|
||||
enablePeerScorer,
|
||||
|
||||
@@ -13,7 +13,6 @@ go_library(
|
||||
"network_config.go",
|
||||
"testnet_e2e_config.go",
|
||||
"testnet_prater_config.go",
|
||||
"testnet_pyrmont_config.go",
|
||||
"testutils.go",
|
||||
"values.go",
|
||||
],
|
||||
@@ -48,6 +47,7 @@ go_test(
|
||||
"@consensus_spec_tests_mainnet//:test_data",
|
||||
"@consensus_spec_tests_minimal//:test_data",
|
||||
"@eth2_networks//:configs",
|
||||
"testdata/e2e_config.yaml",
|
||||
],
|
||||
gotags = ["develop"],
|
||||
race = "on",
|
||||
@@ -61,3 +61,9 @@ go_test(
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "custom_configs",
|
||||
srcs = glob(["testdata/*.yaml"]),
|
||||
visibility = ["//testing:__subpackages__"],
|
||||
)
|
||||
|
||||
@@ -18,94 +18,94 @@ import (
|
||||
|
||||
var placeholderFields = []string{"UPDATE_TIMEOUT", "INTERVALS_PER_SLOT"}
|
||||
|
||||
func TestLoadConfigFileMainnet(t *testing.T) {
|
||||
func TestLoadConfigFile(t *testing.T) {
|
||||
// See https://media.githubusercontent.com/media/ethereum/consensus-spec-tests/master/tests/minimal/config/phase0.yaml
|
||||
assertVals := func(name string, fields []string, c1, c2 *params.BeaconChainConfig) {
|
||||
assertVals := func(name string, fields []string, expected, actual *params.BeaconChainConfig) {
|
||||
// Misc params.
|
||||
assert.Equal(t, c1.MaxCommitteesPerSlot, c2.MaxCommitteesPerSlot, "%s: MaxCommitteesPerSlot", name)
|
||||
assert.Equal(t, c1.TargetCommitteeSize, c2.TargetCommitteeSize, "%s: TargetCommitteeSize", name)
|
||||
assert.Equal(t, c1.MaxValidatorsPerCommittee, c2.MaxValidatorsPerCommittee, "%s: MaxValidatorsPerCommittee", name)
|
||||
assert.Equal(t, c1.MinPerEpochChurnLimit, c2.MinPerEpochChurnLimit, "%s: MinPerEpochChurnLimit", name)
|
||||
assert.Equal(t, c1.ChurnLimitQuotient, c2.ChurnLimitQuotient, "%s: ChurnLimitQuotient", name)
|
||||
assert.Equal(t, c1.ShuffleRoundCount, c2.ShuffleRoundCount, "%s: ShuffleRoundCount", name)
|
||||
assert.Equal(t, c1.MinGenesisActiveValidatorCount, c2.MinGenesisActiveValidatorCount, "%s: MinGenesisActiveValidatorCount", name)
|
||||
assert.Equal(t, c1.MinGenesisTime, c2.MinGenesisTime, "%s: MinGenesisTime", name)
|
||||
assert.Equal(t, c1.HysteresisQuotient, c2.HysteresisQuotient, "%s: HysteresisQuotient", name)
|
||||
assert.Equal(t, c1.HysteresisDownwardMultiplier, c2.HysteresisDownwardMultiplier, "%s: HysteresisDownwardMultiplier", name)
|
||||
assert.Equal(t, c1.HysteresisUpwardMultiplier, c2.HysteresisUpwardMultiplier, "%s: HysteresisUpwardMultiplier", name)
|
||||
assert.Equal(t, expected.MaxCommitteesPerSlot, actual.MaxCommitteesPerSlot, "%s: MaxCommitteesPerSlot", name)
|
||||
assert.Equal(t, expected.TargetCommitteeSize, actual.TargetCommitteeSize, "%s: TargetCommitteeSize", name)
|
||||
assert.Equal(t, expected.MaxValidatorsPerCommittee, actual.MaxValidatorsPerCommittee, "%s: MaxValidatorsPerCommittee", name)
|
||||
assert.Equal(t, expected.MinPerEpochChurnLimit, actual.MinPerEpochChurnLimit, "%s: MinPerEpochChurnLimit", name)
|
||||
assert.Equal(t, expected.ChurnLimitQuotient, actual.ChurnLimitQuotient, "%s: ChurnLimitQuotient", name)
|
||||
assert.Equal(t, expected.ShuffleRoundCount, actual.ShuffleRoundCount, "%s: ShuffleRoundCount", name)
|
||||
assert.Equal(t, expected.MinGenesisActiveValidatorCount, actual.MinGenesisActiveValidatorCount, "%s: MinGenesisActiveValidatorCount", name)
|
||||
assert.Equal(t, expected.MinGenesisTime, actual.MinGenesisTime, "%s: MinGenesisTime", name)
|
||||
assert.Equal(t, expected.HysteresisQuotient, actual.HysteresisQuotient, "%s: HysteresisQuotient", name)
|
||||
assert.Equal(t, expected.HysteresisDownwardMultiplier, actual.HysteresisDownwardMultiplier, "%s: HysteresisDownwardMultiplier", name)
|
||||
assert.Equal(t, expected.HysteresisUpwardMultiplier, actual.HysteresisUpwardMultiplier, "%s: HysteresisUpwardMultiplier", name)
|
||||
|
||||
// Fork Choice params.
|
||||
assert.Equal(t, c1.SafeSlotsToUpdateJustified, c2.SafeSlotsToUpdateJustified, "%s: SafeSlotsToUpdateJustified", name)
|
||||
assert.Equal(t, expected.SafeSlotsToUpdateJustified, actual.SafeSlotsToUpdateJustified, "%s: SafeSlotsToUpdateJustified", name)
|
||||
|
||||
// Validator params.
|
||||
assert.Equal(t, c1.Eth1FollowDistance, c2.Eth1FollowDistance, "%s: Eth1FollowDistance", name)
|
||||
assert.Equal(t, c1.TargetAggregatorsPerCommittee, c2.TargetAggregatorsPerCommittee, "%s: TargetAggregatorsPerCommittee", name)
|
||||
assert.Equal(t, c1.RandomSubnetsPerValidator, c2.RandomSubnetsPerValidator, "%s: RandomSubnetsPerValidator", name)
|
||||
assert.Equal(t, c1.EpochsPerRandomSubnetSubscription, c2.EpochsPerRandomSubnetSubscription, "%s: EpochsPerRandomSubnetSubscription", name)
|
||||
assert.Equal(t, c1.SecondsPerETH1Block, c2.SecondsPerETH1Block, "%s: SecondsPerETH1Block", name)
|
||||
assert.Equal(t, expected.Eth1FollowDistance, actual.Eth1FollowDistance, "%s: Eth1FollowDistance", name)
|
||||
assert.Equal(t, expected.TargetAggregatorsPerCommittee, actual.TargetAggregatorsPerCommittee, "%s: TargetAggregatorsPerCommittee", name)
|
||||
assert.Equal(t, expected.RandomSubnetsPerValidator, actual.RandomSubnetsPerValidator, "%s: RandomSubnetsPerValidator", name)
|
||||
assert.Equal(t, expected.EpochsPerRandomSubnetSubscription, actual.EpochsPerRandomSubnetSubscription, "%s: EpochsPerRandomSubnetSubscription", name)
|
||||
assert.Equal(t, expected.SecondsPerETH1Block, actual.SecondsPerETH1Block, "%s: SecondsPerETH1Block", name)
|
||||
|
||||
// Deposit contract.
|
||||
assert.Equal(t, c1.DepositChainID, c2.DepositChainID, "%s: DepositChainID", name)
|
||||
assert.Equal(t, c1.DepositNetworkID, c2.DepositNetworkID, "%s: DepositNetworkID", name)
|
||||
assert.Equal(t, c1.DepositContractAddress, c2.DepositContractAddress, "%s: DepositContractAddress", name)
|
||||
assert.Equal(t, expected.DepositChainID, actual.DepositChainID, "%s: DepositChainID", name)
|
||||
assert.Equal(t, expected.DepositNetworkID, actual.DepositNetworkID, "%s: DepositNetworkID", name)
|
||||
assert.Equal(t, expected.DepositContractAddress, actual.DepositContractAddress, "%s: DepositContractAddress", name)
|
||||
|
||||
// Gwei values.
|
||||
assert.Equal(t, c1.MinDepositAmount, c2.MinDepositAmount, "%s: MinDepositAmount", name)
|
||||
assert.Equal(t, c1.MaxEffectiveBalance, c2.MaxEffectiveBalance, "%s: MaxEffectiveBalance", name)
|
||||
assert.Equal(t, c1.EjectionBalance, c2.EjectionBalance, "%s: EjectionBalance", name)
|
||||
assert.Equal(t, c1.EffectiveBalanceIncrement, c2.EffectiveBalanceIncrement, "%s: EffectiveBalanceIncrement", name)
|
||||
assert.Equal(t, expected.MinDepositAmount, actual.MinDepositAmount, "%s: MinDepositAmount", name)
|
||||
assert.Equal(t, expected.MaxEffectiveBalance, actual.MaxEffectiveBalance, "%s: MaxEffectiveBalance", name)
|
||||
assert.Equal(t, expected.EjectionBalance, actual.EjectionBalance, "%s: EjectionBalance", name)
|
||||
assert.Equal(t, expected.EffectiveBalanceIncrement, actual.EffectiveBalanceIncrement, "%s: EffectiveBalanceIncrement", name)
|
||||
|
||||
// Initial values.
|
||||
assert.DeepEqual(t, c1.GenesisForkVersion, c2.GenesisForkVersion, "%s: GenesisForkVersion", name)
|
||||
assert.DeepEqual(t, c1.BLSWithdrawalPrefixByte, c2.BLSWithdrawalPrefixByte, "%s: BLSWithdrawalPrefixByte", name)
|
||||
assert.DeepEqual(t, expected.GenesisForkVersion, actual.GenesisForkVersion, "%s: GenesisForkVersion", name)
|
||||
assert.DeepEqual(t, expected.BLSWithdrawalPrefixByte, actual.BLSWithdrawalPrefixByte, "%s: BLSWithdrawalPrefixByte", name)
|
||||
|
||||
// Time parameters.
|
||||
assert.Equal(t, c1.GenesisDelay, c2.GenesisDelay, "%s: GenesisDelay", name)
|
||||
assert.Equal(t, c1.SecondsPerSlot, c2.SecondsPerSlot, "%s: SecondsPerSlot", name)
|
||||
assert.Equal(t, c1.MinAttestationInclusionDelay, c2.MinAttestationInclusionDelay, "%s: MinAttestationInclusionDelay", name)
|
||||
assert.Equal(t, c1.SlotsPerEpoch, c2.SlotsPerEpoch, "%s: SlotsPerEpoch", name)
|
||||
assert.Equal(t, c1.MinSeedLookahead, c2.MinSeedLookahead, "%s: MinSeedLookahead", name)
|
||||
assert.Equal(t, c1.MaxSeedLookahead, c2.MaxSeedLookahead, "%s: MaxSeedLookahead", name)
|
||||
assert.Equal(t, c1.EpochsPerEth1VotingPeriod, c2.EpochsPerEth1VotingPeriod, "%s: EpochsPerEth1VotingPeriod", name)
|
||||
assert.Equal(t, c1.SlotsPerHistoricalRoot, c2.SlotsPerHistoricalRoot, "%s: SlotsPerHistoricalRoot", name)
|
||||
assert.Equal(t, c1.MinValidatorWithdrawabilityDelay, c2.MinValidatorWithdrawabilityDelay, "%s: MinValidatorWithdrawabilityDelay", name)
|
||||
assert.Equal(t, c1.ShardCommitteePeriod, c2.ShardCommitteePeriod, "%s: ShardCommitteePeriod", name)
|
||||
assert.Equal(t, c1.MinEpochsToInactivityPenalty, c2.MinEpochsToInactivityPenalty, "%s: MinEpochsToInactivityPenalty", name)
|
||||
assert.Equal(t, expected.GenesisDelay, actual.GenesisDelay, "%s: GenesisDelay", name)
|
||||
assert.Equal(t, expected.SecondsPerSlot, actual.SecondsPerSlot, "%s: SecondsPerSlot", name)
|
||||
assert.Equal(t, expected.MinAttestationInclusionDelay, actual.MinAttestationInclusionDelay, "%s: MinAttestationInclusionDelay", name)
|
||||
assert.Equal(t, expected.SlotsPerEpoch, actual.SlotsPerEpoch, "%s: SlotsPerEpoch", name)
|
||||
assert.Equal(t, expected.MinSeedLookahead, actual.MinSeedLookahead, "%s: MinSeedLookahead", name)
|
||||
assert.Equal(t, expected.MaxSeedLookahead, actual.MaxSeedLookahead, "%s: MaxSeedLookahead", name)
|
||||
assert.Equal(t, expected.EpochsPerEth1VotingPeriod, actual.EpochsPerEth1VotingPeriod, "%s: EpochsPerEth1VotingPeriod", name)
|
||||
assert.Equal(t, expected.SlotsPerHistoricalRoot, actual.SlotsPerHistoricalRoot, "%s: SlotsPerHistoricalRoot", name)
|
||||
assert.Equal(t, expected.MinValidatorWithdrawabilityDelay, actual.MinValidatorWithdrawabilityDelay, "%s: MinValidatorWithdrawabilityDelay", name)
|
||||
assert.Equal(t, expected.ShardCommitteePeriod, actual.ShardCommitteePeriod, "%s: ShardCommitteePeriod", name)
|
||||
assert.Equal(t, expected.MinEpochsToInactivityPenalty, actual.MinEpochsToInactivityPenalty, "%s: MinEpochsToInactivityPenalty", name)
|
||||
|
||||
// State vector lengths.
|
||||
assert.Equal(t, c1.EpochsPerHistoricalVector, c2.EpochsPerHistoricalVector, "%s: EpochsPerHistoricalVector", name)
|
||||
assert.Equal(t, c1.EpochsPerSlashingsVector, c2.EpochsPerSlashingsVector, "%s: EpochsPerSlashingsVector", name)
|
||||
assert.Equal(t, c1.HistoricalRootsLimit, c2.HistoricalRootsLimit, "%s: HistoricalRootsLimit", name)
|
||||
assert.Equal(t, c1.ValidatorRegistryLimit, c2.ValidatorRegistryLimit, "%s: ValidatorRegistryLimit", name)
|
||||
assert.Equal(t, expected.EpochsPerHistoricalVector, actual.EpochsPerHistoricalVector, "%s: EpochsPerHistoricalVector", name)
|
||||
assert.Equal(t, expected.EpochsPerSlashingsVector, actual.EpochsPerSlashingsVector, "%s: EpochsPerSlashingsVector", name)
|
||||
assert.Equal(t, expected.HistoricalRootsLimit, actual.HistoricalRootsLimit, "%s: HistoricalRootsLimit", name)
|
||||
assert.Equal(t, expected.ValidatorRegistryLimit, actual.ValidatorRegistryLimit, "%s: ValidatorRegistryLimit", name)
|
||||
|
||||
// Reward and penalty quotients.
|
||||
assert.Equal(t, c1.BaseRewardFactor, c2.BaseRewardFactor, "%s: BaseRewardFactor", name)
|
||||
assert.Equal(t, c1.WhistleBlowerRewardQuotient, c2.WhistleBlowerRewardQuotient, "%s: WhistleBlowerRewardQuotient", name)
|
||||
assert.Equal(t, c1.ProposerRewardQuotient, c2.ProposerRewardQuotient, "%s: ProposerRewardQuotient", name)
|
||||
assert.Equal(t, c1.InactivityPenaltyQuotient, c2.InactivityPenaltyQuotient, "%s: InactivityPenaltyQuotient", name)
|
||||
assert.Equal(t, c1.InactivityPenaltyQuotientAltair, c2.InactivityPenaltyQuotientAltair, "%s: InactivityPenaltyQuotientAltair", name)
|
||||
assert.Equal(t, c1.MinSlashingPenaltyQuotient, c2.MinSlashingPenaltyQuotient, "%s: MinSlashingPenaltyQuotient", name)
|
||||
assert.Equal(t, c1.MinSlashingPenaltyQuotientAltair, c2.MinSlashingPenaltyQuotientAltair, "%s: MinSlashingPenaltyQuotientAltair", name)
|
||||
assert.Equal(t, c1.ProportionalSlashingMultiplier, c2.ProportionalSlashingMultiplier, "%s: ProportionalSlashingMultiplier", name)
|
||||
assert.Equal(t, c1.ProportionalSlashingMultiplierAltair, c2.ProportionalSlashingMultiplierAltair, "%s: ProportionalSlashingMultiplierAltair", name)
|
||||
assert.Equal(t, expected.BaseRewardFactor, actual.BaseRewardFactor, "%s: BaseRewardFactor", name)
|
||||
assert.Equal(t, expected.WhistleBlowerRewardQuotient, actual.WhistleBlowerRewardQuotient, "%s: WhistleBlowerRewardQuotient", name)
|
||||
assert.Equal(t, expected.ProposerRewardQuotient, actual.ProposerRewardQuotient, "%s: ProposerRewardQuotient", name)
|
||||
assert.Equal(t, expected.InactivityPenaltyQuotient, actual.InactivityPenaltyQuotient, "%s: InactivityPenaltyQuotient", name)
|
||||
assert.Equal(t, expected.InactivityPenaltyQuotientAltair, actual.InactivityPenaltyQuotientAltair, "%s: InactivityPenaltyQuotientAltair", name)
|
||||
assert.Equal(t, expected.MinSlashingPenaltyQuotient, actual.MinSlashingPenaltyQuotient, "%s: MinSlashingPenaltyQuotient", name)
|
||||
assert.Equal(t, expected.MinSlashingPenaltyQuotientAltair, actual.MinSlashingPenaltyQuotientAltair, "%s: MinSlashingPenaltyQuotientAltair", name)
|
||||
assert.Equal(t, expected.ProportionalSlashingMultiplier, actual.ProportionalSlashingMultiplier, "%s: ProportionalSlashingMultiplier", name)
|
||||
assert.Equal(t, expected.ProportionalSlashingMultiplierAltair, actual.ProportionalSlashingMultiplierAltair, "%s: ProportionalSlashingMultiplierAltair", name)
|
||||
|
||||
// Max operations per block.
|
||||
assert.Equal(t, c1.MaxProposerSlashings, c2.MaxProposerSlashings, "%s: MaxProposerSlashings", name)
|
||||
assert.Equal(t, c1.MaxAttesterSlashings, c2.MaxAttesterSlashings, "%s: MaxAttesterSlashings", name)
|
||||
assert.Equal(t, c1.MaxAttestations, c2.MaxAttestations, "%s: MaxAttestations", name)
|
||||
assert.Equal(t, c1.MaxDeposits, c2.MaxDeposits, "%s: MaxDeposits", name)
|
||||
assert.Equal(t, c1.MaxVoluntaryExits, c2.MaxVoluntaryExits, "%s: MaxVoluntaryExits", name)
|
||||
assert.Equal(t, expected.MaxProposerSlashings, actual.MaxProposerSlashings, "%s: MaxProposerSlashings", name)
|
||||
assert.Equal(t, expected.MaxAttesterSlashings, actual.MaxAttesterSlashings, "%s: MaxAttesterSlashings", name)
|
||||
assert.Equal(t, expected.MaxAttestations, actual.MaxAttestations, "%s: MaxAttestations", name)
|
||||
assert.Equal(t, expected.MaxDeposits, actual.MaxDeposits, "%s: MaxDeposits", name)
|
||||
assert.Equal(t, expected.MaxVoluntaryExits, actual.MaxVoluntaryExits, "%s: MaxVoluntaryExits", name)
|
||||
|
||||
// Signature domains.
|
||||
assert.Equal(t, c1.DomainBeaconProposer, c2.DomainBeaconProposer, "%s: DomainBeaconProposer", name)
|
||||
assert.Equal(t, c1.DomainBeaconAttester, c2.DomainBeaconAttester, "%s: DomainBeaconAttester", name)
|
||||
assert.Equal(t, c1.DomainRandao, c2.DomainRandao, "%s: DomainRandao", name)
|
||||
assert.Equal(t, c1.DomainDeposit, c2.DomainDeposit, "%s: DomainDeposit", name)
|
||||
assert.Equal(t, c1.DomainVoluntaryExit, c2.DomainVoluntaryExit, "%s: DomainVoluntaryExit", name)
|
||||
assert.Equal(t, c1.DomainSelectionProof, c2.DomainSelectionProof, "%s: DomainSelectionProof", name)
|
||||
assert.Equal(t, c1.DomainAggregateAndProof, c2.DomainAggregateAndProof, "%s: DomainAggregateAndProof", name)
|
||||
assert.Equal(t, expected.DomainBeaconProposer, actual.DomainBeaconProposer, "%s: DomainBeaconProposer", name)
|
||||
assert.Equal(t, expected.DomainBeaconAttester, actual.DomainBeaconAttester, "%s: DomainBeaconAttester", name)
|
||||
assert.Equal(t, expected.DomainRandao, actual.DomainRandao, "%s: DomainRandao", name)
|
||||
assert.Equal(t, expected.DomainDeposit, actual.DomainDeposit, "%s: DomainDeposit", name)
|
||||
assert.Equal(t, expected.DomainVoluntaryExit, actual.DomainVoluntaryExit, "%s: DomainVoluntaryExit", name)
|
||||
assert.Equal(t, expected.DomainSelectionProof, actual.DomainSelectionProof, "%s: DomainSelectionProof", name)
|
||||
assert.Equal(t, expected.DomainAggregateAndProof, actual.DomainAggregateAndProof, "%s: DomainAggregateAndProof", name)
|
||||
|
||||
assertYamlFieldsMatch(t, name, fields, c1, c2)
|
||||
assertYamlFieldsMatch(t, name, fields, expected, actual)
|
||||
}
|
||||
|
||||
t.Run("mainnet", func(t *testing.T) {
|
||||
@@ -129,6 +129,17 @@ func TestLoadConfigFileMainnet(t *testing.T) {
|
||||
fields := fieldsFromYamls(t, append(minimalPresetsFiles, minimalConfigFile))
|
||||
assertVals("minimal", fields, params.MinimalSpecConfig(), params.BeaconConfig())
|
||||
})
|
||||
|
||||
t.Run("e2e", func(t *testing.T) {
|
||||
minimalPresetsFiles := presetsFilePath(t, "minimal")
|
||||
for _, fp := range minimalPresetsFiles {
|
||||
params.LoadChainConfigFile(fp, nil)
|
||||
}
|
||||
configFile := "testdata/e2e_config.yaml"
|
||||
params.LoadChainConfigFile(configFile, nil)
|
||||
fields := fieldsFromYamls(t, append(minimalPresetsFiles, configFile))
|
||||
assertVals("e2e", fields, params.E2ETestConfig(), params.BeaconConfig())
|
||||
})
|
||||
}
|
||||
|
||||
func TestLoadConfigFile_OverwriteCorrectly(t *testing.T) {
|
||||
|
||||
config/params/testdata/e2e_config.yaml (new vendored file, 118 lines)
@@ -0,0 +1,118 @@
|
||||
# e2e config
|
||||
|
||||
# Extends the minimal preset
|
||||
PRESET_BASE: 'minimal'
|
||||
|
||||
|
||||
|
||||
# Transition
|
||||
# ---------------------------------------------------------------
|
||||
# TBD, 2**256-2**10 is a placeholder, e2e is 600
|
||||
TERMINAL_TOTAL_DIFFICULTY: 600
|
||||
# By default, don't use these params
|
||||
#TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000
|
||||
#TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
|
||||
|
||||
|
||||
|
||||
# Genesis
|
||||
# ---------------------------------------------------------------
|
||||
# [customized]
|
||||
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 256 # Override for e2e tests
|
||||
# Jan 3, 2020
|
||||
MIN_GENESIS_TIME: 1578009600
|
||||
# Highest byte set to 0x01 to avoid collisions with mainnet versioning
|
||||
GENESIS_FORK_VERSION: 0x000000fd
|
||||
# [customized] Faster to spin up testnets, but does not give validator reasonable warning time for genesis
|
||||
GENESIS_DELAY: 10 # Override for e2e tests
|
||||
|
||||
|
||||
# Forking
|
||||
# ---------------------------------------------------------------
|
||||
# Values provided for illustrative purposes.
|
||||
# Individual tests/testnets may set different values.
|
||||
|
||||
# Altair
|
||||
ALTAIR_FORK_VERSION: 0x010000fd
|
||||
ALTAIR_FORK_EPOCH: 6 # Override for e2e
|
||||
# Bellatrix
|
||||
BELLATRIX_FORK_VERSION: 0x020000fd
|
||||
BELLATRIX_FORK_EPOCH: 8
|
||||
# Sharding
|
||||
SHARDING_FORK_VERSION: 0x030000fd
|
||||
SHARDING_FORK_EPOCH: 18446744073709551615
|
||||
|
||||
|
||||
# Time parameters
|
||||
# ---------------------------------------------------------------
|
||||
# [customized] Faster for testing purposes
|
||||
SECONDS_PER_SLOT: 10 # Override for e2e tests
|
||||
# 14 (estimate from Eth1 mainnet)
|
||||
SECONDS_PER_ETH1_BLOCK: 2 # Override for e2e tests
|
||||
# 2**8 (= 256) epochs
|
||||
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
|
||||
# [customized] higher frequency of committee turnover and faster time to acceptable voluntary exit
|
||||
SHARD_COMMITTEE_PERIOD: 4 # Override for e2e tests
|
||||
# [customized] process deposits more quickly, but insecure
|
||||
ETH1_FOLLOW_DISTANCE: 4 # Override for e2e tests
|
||||
|
||||
|
||||
# Validator cycle
|
||||
# ---------------------------------------------------------------
|
||||
# 2**2 (= 4)
|
||||
INACTIVITY_SCORE_BIAS: 4
|
||||
# 2**4 (= 16)
|
||||
INACTIVITY_SCORE_RECOVERY_RATE: 16
|
||||
# 2**4 * 10**9 (= 16,000,000,000) Gwei
|
||||
EJECTION_BALANCE: 16000000000
|
||||
# 2**2 (= 4)
|
||||
MIN_PER_EPOCH_CHURN_LIMIT: 4
|
||||
# [customized] scale queue churn at much lower validator counts for testing
|
||||
CHURN_LIMIT_QUOTIENT: 65536
|
||||
|
||||
|
||||
# Fork choice
|
||||
# ---------------------------------------------------------------
|
||||
# 70%
|
||||
PROPOSER_SCORE_BOOST: 70
|
||||
|
||||
|
||||
# Deposit contract
|
||||
# ---------------------------------------------------------------
|
||||
# Ethereum Goerli testnet
|
||||
DEPOSIT_CHAIN_ID: 1337 # Override for e2e tests
|
||||
DEPOSIT_NETWORK_ID: 1337 # Override for e2e tests
|
||||
# Configured on a per testnet basis
|
||||
DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
|
||||
|
||||
# Updated penalty values
|
||||
# ---------------------------------------------------------------
|
||||
# 3 * 2**24 (= 50,331,648)
|
||||
INACTIVITY_PENALTY_QUOTIENT_ALTAIR: 50331648
|
||||
# 2**6 (= 64)
|
||||
MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: 64
|
||||
# 2
|
||||
PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2
|
||||
|
||||
|
||||
# Sync committee
|
||||
# ---------------------------------------------------------------
|
||||
# [customized]
|
||||
SYNC_COMMITTEE_SIZE: 32
|
||||
# [customized]
|
||||
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8
|
||||
|
||||
|
||||
# Sync protocol
|
||||
# ---------------------------------------------------------------
|
||||
# 1
|
||||
MIN_SYNC_COMMITTEE_PARTICIPANTS: 1
|
||||
|
||||
# Other e2e overrides
|
||||
# ---------------------------------------------------------------
|
||||
CONFIG_NAME: "end-to-end"
|
||||
SLOTS_PER_EPOCH: 6
|
||||
EPOCHS_PER_ETH1_VOTING_PERIOD: 2
|
||||
MAX_SEED_LOOKAHEAD: 1
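A short sketch of how a test might load this file and check that the overrides took effect. The `LoadChainConfigFile(path, nil)` call mirrors the test diff above; the test name and the specific assertions are illustrative, and it assumes the minimal preset files were loaded first, as in the e2e sub-test.

```go
package params_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/testing/assert"
)

// Loads the e2e override config on top of the already-loaded minimal preset
// and checks a couple of the overridden values from the YAML above.
func TestE2EConfigOverrides(t *testing.T) {
	params.LoadChainConfigFile("testdata/e2e_config.yaml", nil)
	cfg := params.BeaconConfig()
	assert.Equal(t, "end-to-end", cfg.ConfigName)   // CONFIG_NAME override
	assert.Equal(t, uint64(10), cfg.SecondsPerSlot) // SECONDS_PER_SLOT override
}
```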
|
||||
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
package params
|
||||
|
||||
import "math"
|
||||
|
||||
// UsePyrmontNetworkConfig uses the Pyrmont specific
|
||||
// network config.
|
||||
func UsePyrmontNetworkConfig() {
|
||||
cfg := BeaconNetworkConfig().Copy()
|
||||
cfg.ContractDeploymentBlock = 3743587
|
||||
cfg.BootstrapNodes = []string{
|
||||
"enr:-Ku4QOA5OGWObY8ep_x35NlGBEj7IuQULTjkgxC_0G1AszqGEA0Wn2RNlyLFx9zGTNB1gdFBA6ZDYxCgIza1uJUUOj4Dh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDVTPWXAAAgCf__________gmlkgnY0gmlwhDQPSjiJc2VjcDI1NmsxoQM6yTQB6XGWYJbI7NZFBjp4Yb9AYKQPBhVrfUclQUobb4N1ZHCCIyg",
|
||||
"enr:-Ku4QOksdA2tabOGrfOOr6NynThMoio6Ggka2oDPqUuFeWCqcRM2alNb8778O_5bK95p3EFt0cngTUXm2H7o1jkSJ_8Dh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDVTPWXAAAgCf__________gmlkgnY0gmlwhDaa13aJc2VjcDI1NmsxoQKdNQJvnohpf0VO0ZYCAJxGjT0uwJoAHbAiBMujGjK0SoN1ZHCCIyg",
|
||||
}
|
||||
OverrideBeaconNetworkConfig(cfg)
|
||||
}
|
||||
|
||||
// UsePyrmontConfig sets the main beacon chain
|
||||
// config for Pyrmont.
|
||||
func UsePyrmontConfig() {
|
||||
beaconConfig = PyrmontConfig()
|
||||
}
|
||||
|
||||
// PyrmontConfig defines the config for the
|
||||
// Pyrmont testnet.
|
||||
func PyrmontConfig() *BeaconChainConfig {
|
||||
cfg := MainnetConfig().Copy()
|
||||
cfg.MinGenesisTime = 1605700800
|
||||
cfg.GenesisDelay = 432000
|
||||
cfg.ConfigName = ConfigNames[Pyrmont]
|
||||
cfg.GenesisForkVersion = []byte{0x00, 0x00, 0x20, 0x09}
|
||||
cfg.AltairForkVersion = []byte{0x01, 0x00, 0x20, 0x09}
|
||||
cfg.AltairForkEpoch = 61650
|
||||
cfg.BellatrixForkVersion = []byte{0x02, 0x00, 0x20, 0x09}
|
||||
cfg.BellatrixForkEpoch = math.MaxUint64
|
||||
cfg.ShardingForkVersion = []byte{0x03, 0x00, 0x20, 0x09}
|
||||
cfg.ShardingForkEpoch = math.MaxUint64
|
||||
cfg.SecondsPerETH1Block = 14
|
||||
cfg.DepositChainID = 5
|
||||
cfg.DepositNetworkID = 5
|
||||
cfg.DepositContractAddress = "0x8c5fecdC472E27Bc447696F431E425D02dd46a8c"
|
||||
cfg.InitializeForkSchedule()
|
||||
return cfg
|
||||
}
|
||||
@@ -12,7 +12,6 @@ const (
|
||||
Mainnet ConfigName = iota
|
||||
Minimal
|
||||
EndToEnd
|
||||
Pyrmont
|
||||
Prater
|
||||
EndToEndMainnet
|
||||
)
|
||||
@@ -33,7 +32,6 @@ var ConfigNames = map[ConfigName]string{
|
||||
Mainnet: "mainnet",
|
||||
Minimal: "minimal",
|
||||
EndToEnd: "end-to-end",
|
||||
Pyrmont: "pyrmont",
|
||||
Prater: "prater",
|
||||
EndToEndMainnet: "end-to-end-mainnet",
|
||||
}
|
||||
@@ -42,7 +40,6 @@ var ConfigNames = map[ConfigName]string{
|
||||
var KnownConfigs = map[ConfigName]func() *BeaconChainConfig{
|
||||
Mainnet: MainnetConfig,
|
||||
Prater: PraterConfig,
|
||||
Pyrmont: PyrmontConfig,
|
||||
Minimal: MinimalSpecConfig,
|
||||
EndToEnd: E2ETestConfig,
|
||||
EndToEndMainnet: E2EMainnetTestConfig,
|
||||
|
||||
consensus-types/interfaces.go (new file, 72 lines)
@@ -0,0 +1,72 @@
|
||||
package consensus_types
|
||||
|
||||
import (
|
||||
ssz "github.com/ferranbt/fastssz"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
validatorpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/validator-client"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// SSZItem defines a struct which provides Marshal,
|
||||
// Unmarshal, and HashTreeRoot SSZ operations.
|
||||
type SSZItem interface {
|
||||
ssz.Marshaler
|
||||
ssz.Unmarshaler
|
||||
ssz.HashRoot
|
||||
}
|
||||
|
||||
// Container defines the base methods required for a consensus
|
||||
// data structure used in Prysm, containing utilities for SSZ
|
||||
// as well as conversion methods to a protobuf representation for use
|
||||
// with Prysm's gRPC API.
|
||||
type Container interface {
|
||||
SSZItem
|
||||
IsNil() bool
|
||||
Proto() proto.Message
|
||||
FromProto(m proto.Message)
|
||||
}
|
||||
|
||||
// SignedBeaconBlock describes the method set of a signed beacon block.
|
||||
type SignedBeaconBlock interface {
|
||||
Container
|
||||
Block() BeaconBlock
|
||||
Signature() []byte
|
||||
Copy() SignedBeaconBlock
|
||||
PbGenericBlock() (*ethpb.GenericSignedBeaconBlock, error)
|
||||
PbPhase0Block() (*ethpb.SignedBeaconBlock, error)
|
||||
PbAltairBlock() (*ethpb.SignedBeaconBlockAltair, error)
|
||||
PbBellatrixBlock() (*ethpb.SignedBeaconBlockBellatrix, error)
|
||||
PbBlindedBellatrixBlock() (*ethpb.SignedBlindedBeaconBlockBellatrix, error)
|
||||
Header() (*ethpb.SignedBeaconBlockHeader, error)
|
||||
}
|
||||
|
||||
// BeaconBlock describes an interface which states the methods
|
||||
// employed by an object that is a beacon block.
|
||||
type BeaconBlock interface {
|
||||
Container
|
||||
Slot() types.Slot
|
||||
ProposerIndex() types.ValidatorIndex
|
||||
ParentRoot() []byte
|
||||
StateRoot() []byte
|
||||
Body() BeaconBlockBody
|
||||
AsSignRequestObject() validatorpb.SignRequestObject
|
||||
}
|
||||
|
||||
// BeaconBlockBody describes the method set employed by an object
|
||||
// that is a beacon block body.
|
||||
type BeaconBlockBody interface {
|
||||
Container
|
||||
RandaoReveal() []byte
|
||||
Eth1Data() *ethpb.Eth1Data
|
||||
Graffiti() []byte
|
||||
ProposerSlashings() []*ethpb.ProposerSlashing
|
||||
AttesterSlashings() []*ethpb.AttesterSlashing
|
||||
Attestations() []*ethpb.Attestation
|
||||
Deposits() []*ethpb.Deposit
|
||||
VoluntaryExits() []*ethpb.SignedVoluntaryExit
|
||||
SyncAggregate() (*ethpb.SyncAggregate, error)
|
||||
ExecutionPayload() (*enginev1.ExecutionPayload, error)
|
||||
ExecutionPayloadHeader() (*ethpb.ExecutionPayloadHeader, error)
|
||||
}
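A brief, hypothetical consumer of these interfaces, to show the intent: callers that only need fork-agnostic data can accept `SignedBeaconBlock` instead of a concrete phase0/Altair/Bellatrix type. The helper below is illustrative, and the import path is assumed from the file's location in the tree.

```go
package example

import (
	"errors"

	types "github.com/prysmaticlabs/eth2-types"
	consensustypes "github.com/prysmaticlabs/prysm/consensus-types"
)

// blockSummary relies only on the version-agnostic method set, so it works
// for any fork whose block type satisfies the SignedBeaconBlock interface.
func blockSummary(sb consensustypes.SignedBeaconBlock) (types.Slot, [32]byte, error) {
	if sb == nil || sb.IsNil() {
		return 0, [32]byte{}, errors.New("nil signed beacon block")
	}
	root, err := sb.Block().HashTreeRoot() // from the embedded SSZItem
	if err != nil {
		return 0, [32]byte{}, err
	}
	return sb.Block().Slot(), root, nil
}
```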
|
||||
deps.bzl
@@ -3756,6 +3756,13 @@ def prysm_deps():
|
||||
sum = "h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=",
|
||||
version = "v2.3.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_uudashr_gocognit",
|
||||
importpath = "github.com/uudashr/gocognit",
|
||||
sum = "h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4=",
|
||||
version = "v1.0.5",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_valyala_bytebufferpool",
|
||||
importpath = "github.com/valyala/bytebufferpool",
|
||||
|
||||
go.mod
@@ -77,6 +77,7 @@ require (
|
||||
github.com/trailofbits/go-mutexasserts v0.0.0-20200708152505-19999e7d3cef
|
||||
github.com/tyler-smith/go-bip39 v1.1.0
|
||||
github.com/urfave/cli/v2 v2.3.0
|
||||
github.com/uudashr/gocognit v1.0.5
|
||||
github.com/wealdtech/go-bytesutil v1.1.1
|
||||
github.com/wealdtech/go-eth2-util v1.6.3
|
||||
github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.1.3
|
||||
|
||||
go.sum
@@ -1358,6 +1358,8 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
|
||||
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
|
||||
github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4=
|
||||
github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
|
||||
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
@@ -1804,6 +1806,7 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.8 h1:P1HhGGuLW4aAclzjtmJdf0mJOjVUZUzOTqkAkWL+l6w=
|
||||
golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
@@ -170,5 +170,13 @@
|
||||
"rules_go_work-.*": "Third party code",
|
||||
".*_test\\.go": "Tests are ok"
|
||||
}
|
||||
},
|
||||
"gocognit": {
|
||||
"exclude_files": {
|
||||
"external/.*": "Third party code",
|
||||
"rules_go_work-.*": "Third party code",
|
||||
".*\\.pb.*.go": "Generated code is ok",
|
||||
".*generated\\.ssz\\.go": "Generated code is ok"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
@@ -173,6 +174,13 @@ func TestJsonMarshalUnmarshal(t *testing.T) {
|
||||
require.DeepEqual(t, [][]byte{[]byte("hi")}, payloadPb.Transactions)
|
||||
require.DeepEqual(t, [][]byte{[]byte("bye")}, payloadPb.Uncles)
|
||||
})
|
||||
t.Run("nil execution block", func(t *testing.T) {
|
||||
jsonPayload := (*enginev1.ExecutionBlock)(nil)
|
||||
enc, err := json.Marshal(jsonPayload)
|
||||
require.NoError(t, err)
|
||||
payloadPb := &enginev1.ExecutionBlock{}
|
||||
require.ErrorIs(t, hexutil.ErrEmptyString, json.Unmarshal(enc, payloadPb))
|
||||
})
|
||||
}
|
||||
|
||||
func TestPayloadIDBytes_MarshalUnmarshalJSON(t *testing.T) {
|
||||
|
||||
@@ -17,6 +17,7 @@ go_test(
|
||||
"//:prysm_sh",
|
||||
"//cmd/beacon-chain",
|
||||
"//cmd/validator",
|
||||
"//config/params:custom_configs",
|
||||
"//tools/bootnode",
|
||||
"@com_github_ethereum_go_ethereum//cmd/geth",
|
||||
"@web3signer",
|
||||
@@ -35,6 +36,7 @@ go_test(
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/operations/slashings/mock:go_default_library",
|
||||
"//beacon-chain/state/stategen/mock:go_default_library",
|
||||
"//build/bazel:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
@@ -71,6 +73,7 @@ go_test(
|
||||
"//:prysm_sh",
|
||||
"//cmd/beacon-chain",
|
||||
"//cmd/validator",
|
||||
"//config/params:custom_configs",
|
||||
"//tools/bootnode",
|
||||
"@com_github_ethereum_go_ethereum//cmd/geth",
|
||||
"@web3signer",
|
||||
@@ -89,6 +92,7 @@ go_test(
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//build/bazel:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -54,11 +54,15 @@ go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = ["web3remotesigner_test.go"],
|
||||
data = ["@web3signer"],
|
||||
data = [
|
||||
"//config/params:custom_configs",
|
||||
"@web3signer",
|
||||
],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//testing/endtoend/params:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -151,7 +151,7 @@ func (v *ValidatorNode) Start(ctx context.Context) error {
|
||||
args = append(args, features.E2EValidatorFlags...)
|
||||
}
|
||||
if v.config.UseWeb3RemoteSigner {
|
||||
args = append(args, fmt.Sprintf("--%s=localhost:%d", flags.Web3SignerURLFlag.Name, Web3RemoteSignerPort))
|
||||
args = append(args, fmt.Sprintf("--%s=http://localhost:%d", flags.Web3SignerURLFlag.Name, Web3RemoteSignerPort))
|
||||
// Write the pubkeys as comma-separated hex strings with a 0x prefix.
|
||||
// See: https://docs.teku.consensys.net/en/latest/HowTo/External-Signer/Use-External-Signer/
|
||||
_, pubs, err := interop.DeterministicallyGenerateKeys(uint64(offset), uint64(validatorNum))
|
||||
|
||||
@@ -37,13 +37,16 @@ type rawKeyFile struct {
|
||||
}
|
||||
|
||||
type Web3RemoteSigner struct {
|
||||
ctx context.Context
|
||||
started chan struct{}
|
||||
ctx context.Context
|
||||
started chan struct{}
|
||||
configFilePath string
|
||||
cmd *exec.Cmd
|
||||
}
|
||||
|
||||
func NewWeb3RemoteSigner() *Web3RemoteSigner {
|
||||
func NewWeb3RemoteSigner(configFilePath string) *Web3RemoteSigner {
|
||||
return &Web3RemoteSigner{
|
||||
started: make(chan struct{}, 1),
|
||||
started: make(chan struct{}, 1),
|
||||
configFilePath: configFilePath,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -66,6 +69,12 @@ func (w *Web3RemoteSigner) Start(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
network := "minimal"
|
||||
if len(w.configFilePath) > 0 {
|
||||
// A file path to a YAML config file is an acceptable network argument.
|
||||
network = w.configFilePath
|
||||
}
|
||||
|
||||
args := []string{
|
||||
// Global flags
|
||||
fmt.Sprintf("--key-store-path=%s", keystorePath),
|
||||
@@ -75,13 +84,13 @@ func (w *Web3RemoteSigner) Start(ctx context.Context) error {
|
||||
// Command
|
||||
"eth2",
|
||||
// Command flags
|
||||
"--network=minimal",
|
||||
"--network=" + network,
|
||||
"--slashing-protection-enabled=false", // Otherwise, a postgres DB is required.
|
||||
"--enable-key-manager-api=true",
|
||||
"--key-manager-api-enabled=true",
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(ctx, binaryPath, args...) // #nosec G204 -- Test code is safe to do this.
|
||||
|
||||
w.cmd = cmd
|
||||
// Write stdout and stderr to log files.
|
||||
stdout, err := os.Create(path.Join(e2e.TestParams.LogPath, "web3signer.stdout.log"))
|
||||
if err != nil {
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/bazelbuild/rules_go/go/tools/bazel"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/testing/endtoend/components"
|
||||
e2eparams "github.com/prysmaticlabs/prysm/testing/endtoend/params"
|
||||
@@ -13,7 +14,11 @@ import (
|
||||
|
||||
func TestWeb3RemoteSigner_StartsAndReturnsPublicKeys(t *testing.T) {
|
||||
require.NoError(t, e2eparams.Init(0))
|
||||
wsc := components.NewWeb3RemoteSigner()
|
||||
fp, err := bazel.Runfile("config/params/testdata/e2e_config.yaml")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wsc := components.NewWeb3RemoteSigner(fp)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
@@ -6,10 +6,11 @@ lighthouse_archive_name = "lighthouse-%s-x86_64-unknown-linux-gnu-portable.tar.g
|
||||
def e2e_deps():
|
||||
http_archive(
|
||||
name = "web3signer",
|
||||
urls = ["https://artifacts.consensys.net/public/web3signer/raw/names/web3signer.tar.gz/versions/21.10.5/web3signer-21.10.5.tar.gz"],
|
||||
sha256 = "d122429f6a310bc555d1281e0b3f4e3ac43a7beec5e5dcf0a0d2416a5984f461",
|
||||
# Built from commit 17d253b which has important unreleased changes.
|
||||
urls = ["https://prysmaticlabs.com/uploads/web3signer-17d253b.tar.gz"],
|
||||
sha256 = "bf450a59a0845c1ce8100b3192c7fec021b565efe8b1ab46bed9f71cb994a6d7",
|
||||
build_file = "@prysm//testing/endtoend:web3signer.BUILD",
|
||||
strip_prefix = "web3signer-21.10.5",
|
||||
strip_prefix = "web3signer-develop",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/build/bazel"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
@@ -89,6 +90,21 @@ func (r *testRunner) run() {
|
||||
})
|
||||
}
|
||||
|
||||
var web3RemoteSigner *components.Web3RemoteSigner
|
||||
if config.UseWeb3RemoteSigner {
|
||||
cfg, err := bazel.Runfile("config/params/testdata/e2e_config.yaml")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
web3RemoteSigner = components.NewWeb3RemoteSigner(cfg)
|
||||
g.Go(func() error {
|
||||
if err := web3RemoteSigner.Start(ctx); err != nil {
|
||||
return errors.Wrap(err, "failed to start web3 remote signer")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Boot node.
|
||||
bootNode := components.NewBootNode()
|
||||
g.Go(func() error {
|
||||
@@ -146,18 +162,6 @@ func (r *testRunner) run() {
|
||||
return nil
|
||||
})
|
||||
|
||||
// Web3 remote signer.
|
||||
var web3RemoteSigner *components.Web3RemoteSigner
|
||||
if config.UseWeb3RemoteSigner {
|
||||
web3RemoteSigner = components.NewWeb3RemoteSigner()
|
||||
g.Go(func() error {
|
||||
if err := web3RemoteSigner.Start(ctx); err != nil {
|
||||
return errors.Wrap(err, "failed to start web3 remote signer")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if multiClientActive {
|
||||
lighthouseNodes = components.NewLighthouseBeaconNodes(config)
|
||||
g.Go(func() error {
|
||||
@@ -332,7 +336,6 @@ func (r *testRunner) runEvaluators(conns []*grpc.ClientConn, tickingStartTime ti
|
||||
func (r *testRunner) testDepositsAndTx(ctx context.Context, g *errgroup.Group,
|
||||
keystorePath string, requiredNodes []e2etypes.ComponentRunner) {
|
||||
minGenesisActiveCount := int(params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
|
||||
depositCheckValidator := components.NewValidatorNode(r.config, int(e2e.DepositCount), e2e.TestParams.BeaconNodeCount, minGenesisActiveCount)
|
||||
g.Go(func() error {
|
||||
if err := helpers.ComponentsStarted(ctx, requiredNodes); err != nil {
|
||||
|
||||
@@ -27,7 +27,6 @@ func TestEndToEnd_MinimalConfig(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestEndToEnd_MinimalConfig_Web3Signer(t *testing.T) {
|
||||
t.Skip("TODO(9994): Complete web3signer client implementation, currently blocked by https://github.com/ConsenSys/web3signer/issues/494")
|
||||
e2eMinimal(t, &testArgs{
|
||||
usePrysmSh: false,
|
||||
useWeb3RemoteSigner: true,
|
||||
@@ -54,6 +53,10 @@ func e2eMinimal(t *testing.T, args *testArgs) {
|
||||
epochsToRun, err = strconv.Atoi(epochStr)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// TODO(#10053): Web3signer does not support bellatrix yet.
|
||||
if args.useWeb3RemoteSigner {
|
||||
epochsToRun = helpers.BellatrixE2EForkEpoch - 1
|
||||
}
|
||||
if args.usePrysmSh {
|
||||
// If using prysm.sh, run for only 6 epochs.
|
||||
// TODO(#9166): remove this block once v2 changes are live.
|
||||
|
||||
@@ -102,3 +102,7 @@ func (m *engineMock) ExecutionBlockByHash(_ context.Context, hash common.Hash) (
|
||||
Hash: b.BlockHash,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *engineMock) GetTerminalBlockHash(context.Context) ([]byte, bool, error) {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
tools/analyzers/gocognit/BUILD.bazel (new file, 14 lines)
@@ -0,0 +1,14 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["analyzer.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/tools/analyzers/gocognit",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@com_github_uudashr_gocognit//:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library",
|
||||
"@org_golang_x_tools//go/ast/inspector:go_default_library",
|
||||
],
|
||||
)
|
||||
tools/analyzers/gocognit/CognitiveComplexity.pdf (new binary file, not shown)
tools/analyzers/gocognit/README.md (new file, 195 lines)
@@ -0,0 +1,195 @@
|
||||
> Copied from https://github.com/uudashr/gocognit/blob/5bf67146515e79acd2a8d5728deafa9d91ad48db/README.md
|
||||
> License: MIT
|
||||
|
||||
[](https://godoc.org/github.com/uudashr/gocognit)
|
||||
# Gocognit
|
||||
Gocognit calculates the cognitive complexity of functions in Go source code: a measure of how hard the code is to understand intuitively.

## Understanding the complexity

Given code using an `if` statement:
|
||||
```go
|
||||
func GetWords(number int) string {
|
||||
if number == 1 { // +1
|
||||
return "one"
|
||||
} else if number == 2 { // +1
|
||||
return "a couple"
|
||||
} else if number == 3 { // +1
|
||||
return "a few"
|
||||
} else { // +1
|
||||
return "lots"
|
||||
}
|
||||
} // Cognitive complexity = 4
|
||||
```
|
||||
|
||||
The code above can be refactored using a `switch` statement:
|
||||
```go
|
||||
func GetWords(number int) string {
|
||||
switch number { // +1
|
||||
case 1:
|
||||
return "one"
|
||||
case 2:
|
||||
return "a couple"
|
||||
case 3:
|
||||
return "a few"
|
||||
default:
|
||||
return "lots"
|
||||
}
|
||||
} // Cognitive complexity = 1
|
||||
```
|
||||
|
||||
As you can see, both snippets do the same thing, but the second one is easier to understand, which is why its cognitive complexity score is lower than the first.

## Comparison with cyclomatic complexity

### Example 1
#### Cyclomatic complexity
|
||||
```go
|
||||
func GetWords(number int) string { // +1
|
||||
switch number {
|
||||
case 1: // +1
|
||||
return "one"
|
||||
case 2: // +1
|
||||
return "a couple"
|
||||
case 3: // +1
|
||||
return "a few"
|
||||
default:
|
||||
return "lots"
|
||||
}
|
||||
} // Cyclomatic complexity = 4
|
||||
```
|
||||
|
||||
#### Cognitive complexity
|
||||
```go
|
||||
func GetWords(number int) string {
|
||||
switch number { // +1
|
||||
case 1:
|
||||
return "one"
|
||||
case 2:
|
||||
return "a couple"
|
||||
case 3:
|
||||
return "a few"
|
||||
default:
|
||||
return "lots"
|
||||
}
|
||||
} // Cognitive complexity = 1
|
||||
```
|
||||
|
||||
Cognitive complexity gives a lower score than cyclomatic complexity for this example.
|
||||
|
||||
### Example 2
|
||||
#### Cyclomatic complexity
|
||||
```go
|
||||
func SumOfPrimes(max int) int { // +1
|
||||
var total int
|
||||
|
||||
OUT:
|
||||
for i := 1; i < max; i++ { // +1
|
||||
for j := 2; j < i; j++ { // +1
|
||||
if i%j == 0 { // +1
|
||||
continue OUT
|
||||
}
|
||||
}
|
||||
total += i
|
||||
}
|
||||
|
||||
return total
|
||||
} // Cyclomatic complexity = 4
|
||||
```
|
||||
|
||||
#### Cognitive complexity
|
||||
```go
|
||||
func SumOfPrimes(max int) int {
|
||||
var total int
|
||||
|
||||
OUT:
|
||||
for i := 1; i < max; i++ { // +1
|
||||
for j := 2; j < i; j++ { // +2 (nesting = 1)
|
||||
if i%j == 0 { // +3 (nesting = 2)
|
||||
continue OUT // +1
|
||||
}
|
||||
}
|
||||
total += i
|
||||
}
|
||||
|
||||
return total
|
||||
} // Cognitive complexity = 7
|
||||
```
|
||||
|
||||
Cognitive complexity gives a higher score than cyclomatic complexity for this example.
|
||||
|
||||
## Rules
|
||||
|
||||
The cognitive complexity of a function is calculated according to the
|
||||
following rules:
|
||||
> Note: these rules are specific to Go; please see the [original whitepaper](./CognitiveComplexity.pdf) for a more complete reference.
|
||||
|
||||
### Increments
|
||||
There is an increment for each of the following:
|
||||
1. `if`, `else if`, `else`
|
||||
2. `switch`, `select`
|
||||
3. `for`
|
||||
4. `goto` LABEL, `break` LABEL, `continue` LABEL
|
||||
5. sequence of binary logical operators
|
||||
6. each method in a recursion cycle
|
||||
|
||||
### Nesting level
|
||||
The following structures increment the nesting level:
|
||||
1. `if`, `else if`, `else`
|
||||
2. `switch`, `select`
|
||||
3. `for`
|
||||
4. function literal or lambda
|
||||
|
||||
### Nesting increments
|
||||
The following structures receive a nesting increment commensurate with their nested depth inside nesting structures:
|
||||
1. `if`
|
||||
2. `switch`, `select`
|
||||
3. `for`
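Putting the rules together, here is a small illustrative function (not from the upstream README) scored according to the increments and nesting increments listed above:

```go
func Classify(a, b int, strict bool) string {
	if a > 0 && b > 0 { // +1 for `if`, +1 for the sequence of binary logical operators
		if strict { // +2 (+1 increment, +1 nesting penalty at depth 1)
			return "both strictly positive"
		}
		return "both positive"
	}
	return "other"
} // Cognitive complexity = 4
```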
|
||||
|
||||
## Installation
|
||||
|
||||
```
|
||||
$ go install github.com/uudashr/gocognit/cmd/gocognit@latest
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```
|
||||
$ go get github.com/uudashr/gocognit/cmd/gocognit
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
$ gocognit
|
||||
Calculate cognitive complexities of Go functions.
|
||||
Usage:
|
||||
gocognit [flags] <Go file or directory> ...
|
||||
Flags:
|
||||
-over N show functions with complexity > N only and
|
||||
return exit code 1 if the set is non-empty
|
||||
-top N show the top N most complex functions only
|
||||
-avg show the average complexity over all functions,
|
||||
not depending on whether -over or -top are set
|
||||
The output fields for each line are:
|
||||
<complexity> <package> <function> <file:row:column>
|
||||
```
|
||||
|
||||
Examples:
|
||||
|
||||
```
|
||||
$ gocognit .
|
||||
$ gocognit main.go
|
||||
$ gocognit -top 10 src/
|
||||
$ gocognit -over 25 docker
|
||||
$ gocognit -avg .
|
||||
```
|
||||
|
||||
The output fields for each line are:
|
||||
```
|
||||
<complexity> <package> <function> <file:row:column>
|
||||
```
|
||||
|
||||
## Related project
|
||||
- [Gocyclo](https://github.com/fzipp/gocyclo), which this code is based on.
|
||||
- [Cognitive Complexity: A new way of measuring understandability](./CognitiveComplexity.pdf) white paper by G. Ann Campbell.
|
||||
tools/analyzers/gocognit/analyzer.go (new file, 90 lines)
@@ -0,0 +1,90 @@
package gocognit

import (
	"errors"
	"fmt"
	"go/ast"

	"github.com/uudashr/gocognit"
	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/passes/inspect"
	"golang.org/x/tools/go/ast/inspector"
)

// Doc explaining the tool.
const Doc = "Tool to ensure go code does not have high cognitive complexity."

// Analyzer runs static analysis.
var Analyzer = &analysis.Analyzer{
	Name:     "gocognit",
	Doc:      Doc,
	Requires: []*analysis.Analyzer{inspect.Analyzer},
	Run:      run,
}

// Recommended thresholds according to the 2008 presentation titled
// "Software Quality Metrics to Identify Risk" by Thomas McCabe Jr.
//
// 1 - 10 Simple procedure, little risk
// 11 - 20 More complex, moderate risk
// 21 - 50 Complex, high risk
// > 50 Untestable code, very high risk
//
// This threshold should be lowered to 50 over time.
const over = 130

func run(pass *analysis.Pass) (interface{}, error) {
	inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
	if !ok {
		return nil, errors.New("analyzer is not type *inspector.Inspector")
	}

	nodeFilter := []ast.Node{
		(*ast.FuncDecl)(nil),
	}
	inspect.Preorder(nodeFilter, func(n ast.Node) {
		fnDecl, ok := n.(*ast.FuncDecl)
		if !ok {
			return
		}

		fnName := funcName(fnDecl)
		fnComplexity := gocognit.Complexity(fnDecl)

		if fnComplexity > over {
			pass.Reportf(fnDecl.Pos(), "cognitive complexity %d of func %s is high (> %d)", fnComplexity, fnName, over)
		}
	})

	return nil, nil
}

// funcName returns the name representation of a function or method:
// "(Type).Name" for methods or simply "Name" for functions.
//
// Copied from https://github.com/uudashr/gocognit/blob/5bf67146515e79acd2a8d5728deafa9d91ad48db/gocognit.go
// License: MIT
func funcName(fn *ast.FuncDecl) string {
	if fn.Recv != nil {
		if fn.Recv.NumFields() > 0 {
			typ := fn.Recv.List[0].Type
			return fmt.Sprintf("(%s).%s", recvString(typ), fn.Name)
		}
	}
	return fn.Name.Name
}

// recvString returns a string representation of recv of the
// form "T", "*T", or "BADRECV" (if not a proper receiver type).
//
// Copied from https://github.com/uudashr/gocognit/blob/5bf67146515e79acd2a8d5728deafa9d91ad48db/gocognit.go
// License: MIT
func recvString(recv ast.Expr) string {
	switch t := recv.(type) {
	case *ast.Ident:
		return t.Name
	case *ast.StarExpr:
		return "*" + recvString(t.X)
	}
	return "BADRECV"
}

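For local experimentation, an *analysis.Analyzer defined this way can also be run outside of Bazel/nogo. The sketch below is our own illustration, not part of this change; the prysm import path is assumed from the repository layout, and the driver name is hypothetical. It wraps the Analyzer in a standalone command using golang.org/x/tools/go/analysis/singlechecker.

```go
// Hypothetical standalone driver for the gocognit analyzer; not part of this commit.
package main

import (
	// Import path assumed from the repository layout.
	"github.com/prysmaticlabs/prysm/tools/analyzers/gocognit"

	"golang.org/x/tools/go/analysis/singlechecker"
)

func main() {
	// singlechecker.Main turns a single *analysis.Analyzer into a CLI that
	// accepts package patterns, e.g. `go run ./cmd/gocognitcheck ./...`.
	singlechecker.Main(gocognit.Analyzer)
}
```
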
@@ -7,7 +7,7 @@ Flags:
  -beacon string
    	gRPC address of the Prysm beacon node (default "127.0.0.1:4000")
  -genesis uint
    	Genesis time. mainnet=1606824023, prater=1616508000, pyrmont=1605722407 (default 1606824023)
    	Genesis time. mainnet=1606824023, prater=1616508000 (default 1606824023)
```

Usage:

@@ -18,7 +18,7 @@ import (

var (
	beacon = flag.String("beacon", "127.0.0.1:4000", "gRPC address of the Prysm beacon node")
	genesis = flag.Uint64("genesis", 1606824023, "Genesis time. mainnet=1606824023, prater=1616508000, pyrmont=1605722407")
	genesis = flag.Uint64("genesis", 1606824023, "Genesis time. mainnet=1606824023, prater=1616508000")
)

func main() {

@@ -276,9 +276,7 @@ func displayExitInfo(rawExitedKeys [][]byte, trimmedExitedKeys []string) {
	urlFormattedPubKeys := make([]string, len(rawExitedKeys))
	for i, key := range rawExitedKeys {
		var baseUrl string
		if params.BeaconConfig().ConfigName == params.ConfigNames[params.Pyrmont] {
			baseUrl = "https://pyrmont.beaconcha.in/validator/"
		} else if params.BeaconConfig().ConfigName == params.ConfigNames[params.Prater] {
		if params.BeaconConfig().ConfigName == params.ConfigNames[params.Prater] {
			baseUrl = "https://prater.beaconcha.in/validator/"
		} else {
			baseUrl = "https://beaconcha.in/validator/"