Compare commits

..

3 Commits

Author SHA1 Message Date
nisdas
54ae2b32b2 Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into e2eProxy 2022-04-12 18:04:39 +08:00
nisdas
b84c1aa3ea hack it in 2022-04-10 16:31:15 +08:00
nisdas
621e149dce add all this stuff 2022-04-09 21:09:06 +08:00
87 changed files with 1368 additions and 1959 deletions

View File

@@ -115,19 +115,18 @@ nogo(
"@org_golang_x_tools//go/analysis/passes/assign:go_default_library",
"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library",
"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library",
"//tools/analyzers/comparesame:go_default_library",
"//tools/analyzers/maligned:go_default_library",
"//tools/analyzers/cryptorand:go_default_library",
"//tools/analyzers/errcheck:go_default_library",
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/maligned:go_default_library",
"//tools/analyzers/comparesame:go_default_library",
"//tools/analyzers/shadowpredecl:go_default_library",
"//tools/analyzers/nop:go_default_library",
"//tools/analyzers/slicedirect:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/properpermissions:go_default_library",
"//tools/analyzers/recursivelock:go_default_library",
"//tools/analyzers/shadowpredecl:go_default_library",
"//tools/analyzers/slicedirect:go_default_library",
"//tools/analyzers/uintcast:go_default_library",
] + select({
# nogo checks that fail with coverage enabled.

View File

@@ -176,7 +176,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.17.9",
go_version = "1.17.6",
nogo = "@//:nogo",
)

View File

@@ -66,7 +66,6 @@ go_library(
"//config/params:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",

View File

@@ -273,13 +273,13 @@ func (s *Service) CurrentFork() *ethpb.Fork {
// IsCanonical returns true if the input block root is part of the canonical chain.
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
// If the block has not been finalized, check fork choice store to see if the block is canonical
if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
// If the block has been finalized, the block will always be part of the canonical chain.
if s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot) {
return true, nil
}
// If the block has been finalized, the block will always be part of the canonical chain.
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot), nil
// If the block has not been finalized, check fork choice store to see if the block is canonical
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
}
// ChainHeads returns all possible chain heads (leaves of fork choice tree).

View File

@@ -11,7 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
mathutil "github.com/prysmaticlabs/prysm/math"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
@@ -403,17 +402,10 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) e
// We update the cache up to the last deposit index in the finalized block's state.
// We can be confident that these deposits will be included in some block
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
if err != nil {
return errors.Wrap(err, "could not cast eth1 deposit index")
}
// The deposit index in the state is always the index of the next deposit
// to be included(rather than the last one to be processed). This was most likely
// done as the state cannot represent signed integers.
eth1DepositIndex -= 1
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex))
eth1DepositIndex := int64(finalizedState.Eth1Data().DepositCount - 1)
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, eth1DepositIndex)
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
if err = s.cfg.DepositCache.PruneProofs(ctx, eth1DepositIndex); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
return nil

View File

@@ -1516,7 +1516,6 @@ func TestInsertFinalizedDeposits(t *testing.T) {
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
assert.NoError(t, gs.SetEth1DepositIndex(8))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
zeroSig := [96]byte{}
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
@@ -1530,64 +1529,8 @@ func TestInsertFinalizedDeposits(t *testing.T) {
}
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(107))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}
}
func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
opts = append(opts, WithDepositCache(depositCache))
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 7}))
assert.NoError(t, gs.SetEth1DepositIndex(6))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
gs2 := gs.Copy()
assert.NoError(t, gs2.SetEth1Data(&ethpb.Eth1Data{DepositCount: 15}))
assert.NoError(t, gs2.SetEth1DepositIndex(13))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}, gs2))
zeroSig := [96]byte{}
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
root := []byte(strconv.Itoa(int(i)))
assert.NoError(t, depositCache.InsertDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}),
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
Amount: 0,
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
}
// Insert 3 deposits before hand.
depositCache.InsertFinalizedDeposits(ctx, 2)
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(105))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}
// Insert New Finalized State with higher deposit count.
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}))
fDeposits = depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps = depositCache.AllDeposits(ctx, big.NewInt(112))
assert.Equal(t, 9, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(109))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}

View File

@@ -83,8 +83,7 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) error {
// A canonical root implies the root to has an ancestor that aligns with finalized check point.
// In this case, we could exit early to save on additional computation.
blockRoot := bytesutil.ToBytes32(root)
if s.cfg.ForkChoiceStore.HasNode(blockRoot) && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
if s.cfg.ForkChoiceStore.IsCanonical(bytesutil.ToBytes32(root)) {
return nil
}

View File

@@ -137,22 +137,6 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
depositTrie := dc.finalizedDeposits.Deposits
insertIndex := int(dc.finalizedDeposits.MerkleTrieIndex + 1)
// Don't insert into finalized trie if there is no deposit to
// insert.
if len(dc.deposits) == 0 {
return
}
// In the event we have less deposits than we need to
// finalize we finalize till the index on which we do have it.
if len(dc.deposits) <= int(eth1DepositIndex) {
eth1DepositIndex = int64(len(dc.deposits)) - 1
}
// If we finalize to some lower deposit index, we
// ignore it.
if int(eth1DepositIndex) < insertIndex {
return
}
for _, d := range dc.deposits {
if d.Index <= dc.finalizedDeposits.MerkleTrieIndex {
continue

View File

@@ -459,7 +459,7 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
Index: 1,
},
}
newFinalizedDeposit := &ethpb.DepositContainer{
newFinalizedDeposit := ethpb.DepositContainer{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{2}, 48),
@@ -471,17 +471,17 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
}
dc.deposits = oldFinalizedDeposits
dc.InsertFinalizedDeposits(context.Background(), 1)
// Artificially exclude old deposits so that they can only be retrieved from previously finalized deposits.
dc.deposits = []*ethpb.DepositContainer{&newFinalizedDeposit}
dc.InsertFinalizedDeposits(context.Background(), 2)
dc.deposits = append(dc.deposits, []*ethpb.DepositContainer{newFinalizedDeposit}...)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(1), cachedDeposits.MerkleTrieIndex)
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex)
var deps [][]byte
for _, d := range oldFinalizedDeposits {
for _, d := range append(oldFinalizedDeposits, &newFinalizedDeposit) {
hash, err := d.Deposit.Data.HashTreeRoot()
require.NoError(t, err, "Could not hash deposit data")
deps = append(deps, hash[:])
@@ -491,140 +491,6 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
}
func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) {
dc, err := New()
require.NoError(t, err)
dc.InsertFinalizedDeposits(context.Background(), 2)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(-1), cachedDeposits.MerkleTrieIndex)
}
func TestFinalizedDeposits_HandleSmallerThanExpectedDeposits(t *testing.T) {
dc, err := New()
require.NoError(t, err)
finalizedDeposits := []*ethpb.DepositContainer{
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{0}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 0,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{1}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 1,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{2}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 2,
},
}
dc.deposits = finalizedDeposits
dc.InsertFinalizedDeposits(context.Background(), 5)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex)
}
func TestFinalizedDeposits_HandleLowerEth1DepositIndex(t *testing.T) {
dc, err := New()
require.NoError(t, err)
finalizedDeposits := []*ethpb.DepositContainer{
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{0}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 0,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{1}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 1,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{2}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 2,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{3}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 3,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{4}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 4,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{5}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 5,
},
}
dc.deposits = finalizedDeposits
dc.InsertFinalizedDeposits(context.Background(), 5)
// Reinsert finalized deposits with a lower index.
dc.InsertFinalizedDeposits(context.Background(), 2)
cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(5), cachedDeposits.MerkleTrieIndex)
}
func TestFinalizedDeposits_InitializedCorrectly(t *testing.T) {
dc, err := New()
require.NoError(t, err)
@@ -749,85 +615,6 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
assert.Equal(t, 1, len(deps))
}
func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
dc, err := New()
require.NoError(t, err)
generateCtr := func(height uint64, index int64) *ethpb.DepositContainer {
return &ethpb.DepositContainer{
Eth1BlockHeight: height,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{uint8(index)}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: index,
}
}
finalizedDeposits := []*ethpb.DepositContainer{
generateCtr(10, 0),
generateCtr(11, 1),
generateCtr(12, 2),
generateCtr(12, 3),
generateCtr(13, 4),
generateCtr(13, 5),
generateCtr(13, 6),
generateCtr(14, 7),
}
dc.deposits = append(finalizedDeposits,
generateCtr(15, 8),
generateCtr(15, 9),
generateCtr(30, 10))
trieItems := make([][]byte, 0, len(dc.deposits))
for _, dep := range dc.allDeposits(big.NewInt(30)) {
depHash, err := dep.Data.HashTreeRoot()
assert.NoError(t, err)
trieItems = append(trieItems, depHash[:])
}
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
assert.NoError(t, err)
// Perform this in a non-sensical ordering
dc.InsertFinalizedDeposits(context.Background(), 10)
dc.InsertFinalizedDeposits(context.Background(), 2)
dc.InsertFinalizedDeposits(context.Background(), 3)
dc.InsertFinalizedDeposits(context.Background(), 4)
// Mimick finalized deposit trie fetch.
fd := dc.FinalizedDeposits(context.Background())
deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(14))
insertIndex := fd.MerkleTrieIndex + 1
for _, dep := range deps {
depHash, err := dep.Data.HashTreeRoot()
assert.NoError(t, err)
if err = fd.Deposits.Insert(depHash[:], int(insertIndex)); err != nil {
assert.NoError(t, err)
}
insertIndex++
}
dc.InsertFinalizedDeposits(context.Background(), 15)
dc.InsertFinalizedDeposits(context.Background(), 15)
dc.InsertFinalizedDeposits(context.Background(), 14)
fd = dc.FinalizedDeposits(context.Background())
deps = dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(30))
insertIndex = fd.MerkleTrieIndex + 1
for _, dep := range deps {
depHash, err := dep.Data.HashTreeRoot()
assert.NoError(t, err)
if err = fd.Deposits.Insert(depHash[:], int(insertIndex)); err != nil {
assert.NoError(t, err)
}
insertIndex++
}
assert.Equal(t, fd.Deposits.NumOfItems(), depositTrie.NumOfItems())
}
func TestPruneProofs_Ok(t *testing.T) {
dc, err := New()
require.NoError(t, err)

View File

@@ -47,18 +47,14 @@ type Config struct {
// into the beacon chain database and running services at start up. This service should not be used in production
// as it does not have any value other than ease of use for testing purposes.
func NewService(ctx context.Context, cfg *Config) *Service {
log.Warn("Saving generated genesis state in database for interop testing")
ctx, cancel := context.WithCancel(ctx)
return &Service{
s := &Service{
cfg: cfg,
ctx: ctx,
cancel: cancel,
}
}
// Start initializes the genesis state from configured flags.
func (s *Service) Start() {
log.Warn("Saving generated genesis state in database for interop testing")
if s.cfg.GenesisPath != "" {
data, err := ioutil.ReadFile(s.cfg.GenesisPath)
@@ -73,14 +69,14 @@ func (s *Service) Start() {
if err != nil {
log.Fatalf("Could not get state trie: %v", err)
}
if err := s.saveGenesisState(s.ctx, genesisTrie); err != nil {
if err := s.saveGenesisState(ctx, genesisTrie); err != nil {
log.Fatalf("Could not save interop genesis state %v", err)
}
return
return s
}
// Save genesis state in db
genesisState, _, err := interop.GenerateGenesisState(s.ctx, s.cfg.GenesisTime, s.cfg.NumValidators)
genesisState, _, err := interop.GenerateGenesisState(ctx, s.cfg.GenesisTime, s.cfg.NumValidators)
if err != nil {
log.Fatalf("Could not generate interop genesis state: %v", err)
}
@@ -96,11 +92,17 @@ func (s *Service) Start() {
if err != nil {
log.Fatalf("Could not hash tree root genesis state: %v", err)
}
go slots.CountdownToGenesis(s.ctx, time.Unix(int64(s.cfg.GenesisTime), 0), s.cfg.NumValidators, gRoot)
go slots.CountdownToGenesis(ctx, time.Unix(int64(s.cfg.GenesisTime), 0), s.cfg.NumValidators, gRoot)
if err := s.saveGenesisState(s.ctx, genesisTrie); err != nil {
if err := s.saveGenesisState(ctx, genesisTrie); err != nil {
log.Fatalf("Could not save interop genesis state %v", err)
}
return s
}
// Start initializes the genesis state from configured flags.
func (_ *Service) Start() {
}
// Stop does nothing.

View File

@@ -95,7 +95,6 @@ func NewService(ctx context.Context, config *ValidatorMonitorConfig, tracked []t
latestPerformance: make(map[types.ValidatorIndex]ValidatorLatestPerformance),
aggregatedPerformance: make(map[types.ValidatorIndex]ValidatorAggregatedPerformance),
trackedSyncCommitteeIndices: make(map[types.ValidatorIndex][]types.CommitteeIndex),
isLogging: false,
}
for _, idx := range tracked {
r.TrackedValidators[idx] = true
@@ -118,6 +117,7 @@ func (s *Service) Start() {
"ValidatorIndices": tracked,
}).Info("Starting service")
s.isLogging = false
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)

View File

@@ -906,7 +906,6 @@ func (b *BeaconNode) registerDeterminsticGenesisService() error {
DepositCache: b.depositCache,
GenesisPath: genesisStatePath,
})
svc.Start()
// Register genesis state as start-up state when interop mode.
// The start-up state gets reused across services.

View File

@@ -120,16 +120,11 @@ func (s *Service) handleExchangeConfigurationError(err error) {
// Logs the terminal total difficulty status.
func (s *Service) logTtdStatus(ctx context.Context, ttd *uint256.Int) (bool, error) {
latest, err := s.LatestExecutionBlock(ctx)
switch {
case errors.Is(err, hexutil.ErrEmptyString):
return false, nil
case err != nil:
if err != nil {
return false, err
case latest == nil:
}
if latest == nil {
return false, errors.New("latest block is nil")
case latest.TotalDifficulty == "":
return false, nil
default:
}
latestTtd, err := hexutil.DecodeBig(latest.TotalDifficulty)
if err != nil {

View File

@@ -190,38 +190,6 @@ func TestService_logTtdStatus(t *testing.T) {
require.Equal(t, false, reached)
}
func TestService_logTtdStatus_NotSyncedClient(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
resp := (*pb.ExecutionBlock)(nil) // Nil response when a client is not synced
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": resp,
}
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
service := &Service{
cfg: &config{},
}
service.rpcClient = rpcClient
ttd := new(uint256.Int)
reached, err := service.logTtdStatus(context.Background(), ttd.SetUint64(24343))
require.NoError(t, err)
require.Equal(t, false, reached)
}
func emptyPayload() *pb.ExecutionPayload {
return &pb.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),

View File

@@ -10,12 +10,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rpc"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -52,8 +49,8 @@ type EngineCaller interface {
ExchangeTransitionConfiguration(
ctx context.Context, cfg *pb.TransitionConfiguration,
) error
LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error)
ExecutionBlockByHash(ctx context.Context, hash common.Hash) (*pb.ExecutionBlock, error)
GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error)
}
// NewPayload calls the engine_newPayloadV1 method via JSON-RPC.
@@ -177,78 +174,6 @@ func (s *Service) ExchangeTransitionConfiguration(
return nil
}
// GetTerminalBlockHash returns the valid terminal block hash based on total difficulty.
//
// Spec code:
// def get_pow_block_at_terminal_total_difficulty(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]:
// # `pow_chain` abstractly represents all blocks in the PoW chain
// for block in pow_chain:
// parent = pow_chain[block.parent_hash]
// block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// if block_reached_ttd and not parent_reached_ttd:
// return block
//
// return None
func (s *Service) GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error) {
ttd := new(big.Int)
ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
if overflows {
return nil, false, errors.New("could not convert terminal total difficulty to uint256")
}
blk, err := s.LatestExecutionBlock(ctx)
if err != nil {
return nil, false, errors.Wrap(err, "could not get latest execution block")
}
if blk == nil {
return nil, false, errors.New("latest execution block is nil")
}
for {
if ctx.Err() != nil {
return nil, false, ctx.Err()
}
currentTotalDifficulty, err := tDStringToUint256(blk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
parentHash := bytesutil.ToBytes32(blk.ParentHash)
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
return nil, false, nil
}
parentBlk, err := s.ExecutionBlockByHash(ctx, parentHash)
if err != nil {
return nil, false, errors.Wrap(err, "could not get parent execution block")
}
if parentBlk == nil {
return nil, false, errors.New("parent execution block is nil")
}
if blockReachedTTD {
parentTotalDifficulty, err := tDStringToUint256(parentBlk.TotalDifficulty)
if err != nil {
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
}
parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
if !parentReachedTTD {
log.WithFields(logrus.Fields{
"number": blk.Number,
"hash": fmt.Sprintf("%#x", bytesutil.Trunc(blk.Hash)),
"td": blk.TotalDifficulty,
"parentTd": parentBlk.TotalDifficulty,
"ttd": terminalTotalDifficulty,
}).Info("Retrieved terminal block hash")
return blk.Hash, true, nil
}
} else {
return nil, false, nil
}
blk = parentBlk
}
}
// LatestExecutionBlock fetches the latest execution engine block by calling
// eth_blockByNumber via JSON-RPC.
func (s *Service) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error) {
@@ -326,15 +251,3 @@ func isTimeout(e error) bool {
t, ok := e.(httpTimeoutError)
return ok && t.Timeout()
}
func tDStringToUint256(td string) (*uint256.Int, error) {
b, err := hexutil.DecodeBig(td)
if err != nil {
return nil, err
}
i, overflows := uint256.FromBig(b)
if overflows {
return nil, errors.New("total difficulty overflowed")
}
return i, nil
}

View File

@@ -418,155 +418,6 @@ func TestClient_HTTP(t *testing.T) {
})
}
func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
tests := []struct {
name string
paramsTd string
currentPowBlock *pb.ExecutionBlock
parentPowBlock *pb.ExecutionBlock
errLatestExecutionBlk error
wantTerminalBlockHash []byte
wantExists bool
errString string
}{
{
name: "config td overflows",
paramsTd: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
errString: "could not convert terminal total difficulty to uint256",
},
{
name: "could not get latest execution block",
paramsTd: "1",
errLatestExecutionBlk: errors.New("blah"),
errString: "could not get latest execution block",
},
{
name: "nil latest execution block",
paramsTd: "1",
errString: "latest execution block is nil",
},
{
name: "current execution block invalid TD",
paramsTd: "1",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
TotalDifficulty: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
},
errString: "could not convert total difficulty to uint256",
},
{
name: "current execution block has zero hash parent",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: params.BeaconConfig().ZeroHash[:],
TotalDifficulty: "0x3",
},
},
{
name: "could not get parent block",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
errString: "could not get parent execution block",
},
{
name: "parent execution block invalid TD",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "1",
},
errString: "could not convert total difficulty to uint256",
},
{
name: "happy case",
paramsTd: "2",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x3",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "0x1",
},
wantExists: true,
wantTerminalBlockHash: []byte{'a'},
},
{
name: "ttd not reached",
paramsTd: "3",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x2",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "0x1",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = tt.paramsTd
params.OverrideBeaconConfig(cfg)
var m map[[32]byte]*pb.ExecutionBlock
if tt.parentPowBlock != nil {
m = map[[32]byte]*pb.ExecutionBlock{
bytesutil.ToBytes32(tt.parentPowBlock.Hash): tt.parentPowBlock,
}
}
client := mocks.EngineClient{
ErrLatestExecBlock: tt.errLatestExecutionBlk,
ExecutionBlock: tt.currentPowBlock,
BlockByHashMap: m,
}
b, e, err := client.GetTerminalBlockHash(context.Background())
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
require.DeepEqual(t, tt.wantExists, e)
require.DeepEqual(t, tt.wantTerminalBlockHash, b)
}
})
}
}
func Test_tDStringToUint256(t *testing.T) {
i, err := tDStringToUint256("0x0")
require.NoError(t, err)
require.DeepEqual(t, uint256.NewInt(0), i)
i, err = tDStringToUint256("0x10000")
require.NoError(t, err)
require.DeepEqual(t, uint256.NewInt(65536), i)
_, err = tDStringToUint256("100")
require.ErrorContains(t, "hex string without 0x prefix", err)
_, err = tDStringToUint256("0xzzzzzz")
require.ErrorContains(t, "invalid hex string", err)
_, err = tDStringToUint256("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" +
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
require.ErrorContains(t, "hex number > 256 bits", err)
}
func TestExchangeTransitionConfiguration(t *testing.T) {
fix := fixtures()
ctx := context.Background()

View File

@@ -413,14 +413,11 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
// accumulates. we finalize them here before we are ready to receive a block.
// Otherwise, the first few blocks will be slower to compute as we will
// hold the lock and be busy finalizing the deposits.
// The deposit index in the state is always the index of the next deposit
// to be included(rather than the last one to be processed). This was most likely
// done as the state cannot represent signed integers.
actualIndex := int64(currIndex) - 1 // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
s.cfg.depositCache.InsertFinalizedDeposits(ctx, actualIndex)
s.cfg.depositCache.InsertFinalizedDeposits(ctx, int64(currIndex)) // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.depositCache.PruneProofs(ctx, actualIndex); err != nil {
// lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
if err = s.cfg.depositCache.PruneProofs(ctx, int64(currIndex)); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
}

View File

@@ -472,7 +472,7 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {
require.NoError(t, s.cfg.beaconDB.SaveState(context.Background(), emptyState, headRoot))
require.NoError(t, stateGen.SaveState(context.Background(), headRoot, emptyState))
s.cfg.stateGen = stateGen
require.NoError(t, emptyState.SetEth1DepositIndex(3))
require.NoError(t, emptyState.SetEth1DepositIndex(2))
ctx := context.Background()
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(0), Root: headRoot[:]}))

View File

@@ -26,7 +26,6 @@ go_library(
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -2,32 +2,25 @@ package testing
import (
"context"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
)
// EngineClient --
type EngineClient struct {
NewPayloadResp []byte
PayloadIDBytes *pb.PayloadIDBytes
ForkChoiceUpdatedResp []byte
ExecutionPayload *pb.ExecutionPayload
ExecutionBlock *pb.ExecutionBlock
Err error
ErrLatestExecBlock error
ErrExecBlockByHash error
ErrForkchoiceUpdated error
ErrNewPayload error
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
TerminalBlockHash []byte
TerminalBlockHashExists bool
NewPayloadResp []byte
PayloadIDBytes *pb.PayloadIDBytes
ForkChoiceUpdatedResp []byte
ExecutionPayload *pb.ExecutionPayload
ExecutionBlock *pb.ExecutionBlock
Err error
ErrLatestExecBlock error
ErrExecBlockByHash error
ErrForkchoiceUpdated error
ErrNewPayload error
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
}
// NewPayload --
@@ -65,52 +58,3 @@ func (e *EngineClient) ExecutionBlockByHash(_ context.Context, h common.Hash) (*
}
return b, e.ErrExecBlockByHash
}
// GetTerminalBlockHash --
//
// Mock implementation used by tests: walks the PoW chain backwards from the
// latest execution block and returns (hash, true, nil) for the first block
// whose total difficulty reached the configured terminal total difficulty
// while its parent's did not. Returns (nil, false, nil) when TTD has not
// been reached or the walk hits a zero/empty parent hash.
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error) {
	// Parse the configured TTD (a base-10 decimal string) into a uint256.
	ttd := new(big.Int)
	ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
	terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
	if overflows {
		return nil, false, errors.New("could not convert terminal total difficulty to uint256")
	}
	blk, err := e.LatestExecutionBlock(ctx)
	if err != nil {
		return nil, false, errors.Wrap(err, "could not get latest execution block")
	}
	if blk == nil {
		return nil, false, errors.New("latest execution block is nil")
	}
	for {
		b, err := hexutil.DecodeBig(blk.TotalDifficulty)
		if err != nil {
			return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
		}
		// NOTE(review): the overflow flag from FromBig is discarded here,
		// unlike the config parse above — presumably acceptable for a mock.
		currentTotalDifficulty, _ := uint256.FromBig(b)
		blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
		parentHash := bytesutil.ToBytes32(blk.ParentHash)
		// Stop at the genesis boundary: no parent to compare against.
		if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
			return nil, false, nil
		}
		parentBlk, err := e.ExecutionBlockByHash(ctx, parentHash)
		if err != nil {
			return nil, false, errors.Wrap(err, "could not get parent execution block")
		}
		if blockReachedTTD {
			b, err := hexutil.DecodeBig(parentBlk.TotalDifficulty)
			if err != nil {
				return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
			}
			parentTotalDifficulty, _ := uint256.FromBig(b)
			parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
			if !parentReachedTTD {
				// Terminal block: crossed TTD while its parent had not.
				return blk.Hash, true, nil
			}
		} else {
			// Head has not reached TTD yet; nothing to find.
			return nil, false, nil
		}
		blk = parentBlk
	}
}

View File

@@ -77,6 +77,7 @@ go_library(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ferranbt_fastssz//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -161,6 +162,7 @@ go_test(
"@com_github_d4l3k_messagediff//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -3,7 +3,11 @@ package validator
import (
"bytes"
"context"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -177,7 +181,79 @@ func (vs *Server) getTerminalBlockHashIfExists(ctx context.Context) ([]byte, boo
return terminalBlockHash.Bytes(), true, nil
}
return vs.ExecutionEngineCaller.GetTerminalBlockHash(ctx)
return vs.getPowBlockHashAtTerminalTotalDifficulty(ctx)
}
// This returns the valid terminal block hash based on total difficulty.
//
// Spec code:
// def get_pow_block_at_terminal_total_difficulty(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]:
// # `pow_chain` abstractly represents all blocks in the PoW chain
// for block in pow_chain:
// parent = pow_chain[block.parent_hash]
// block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// if block_reached_ttd and not parent_reached_ttd:
// return block
//
// return None
// getPowBlockHashAtTerminalTotalDifficulty walks the PoW chain backwards from
// the latest execution block, returning (hash, true, nil) for the first block
// whose total difficulty meets TERMINAL_TOTAL_DIFFICULTY while its parent's
// does not (see the spec snippet above). Returns (nil, false, nil) when TTD
// has not been reached or the walk terminates at a zero/empty parent hash.
func (vs *Server) getPowBlockHashAtTerminalTotalDifficulty(ctx context.Context) ([]byte, bool, error) {
	// The configured TTD is a base-10 decimal string; parse it into a uint256.
	ttd := new(big.Int)
	ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
	terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
	if overflows {
		return nil, false, errors.New("could not convert terminal total difficulty to uint256")
	}
	blk, err := vs.ExecutionEngineCaller.LatestExecutionBlock(ctx)
	if err != nil {
		return nil, false, errors.Wrap(err, "could not get latest execution block")
	}
	if blk == nil {
		return nil, false, errors.New("latest execution block is nil")
	}
	for {
		// Honor cancellation: the walk length is bounded only by chain depth.
		if ctx.Err() != nil {
			return nil, false, ctx.Err()
		}
		currentTotalDifficulty, err := tDStringToUint256(blk.TotalDifficulty)
		if err != nil {
			return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
		}
		blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
		parentHash := bytesutil.ToBytes32(blk.ParentHash)
		// Stop at the genesis boundary: no parent to compare against.
		if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
			return nil, false, nil
		}
		parentBlk, err := vs.ExecutionEngineCaller.ExecutionBlockByHash(ctx, parentHash)
		if err != nil {
			return nil, false, errors.Wrap(err, "could not get parent execution block")
		}
		if parentBlk == nil {
			return nil, false, errors.New("parent execution block is nil")
		}
		if blockReachedTTD {
			parentTotalDifficulty, err := tDStringToUint256(parentBlk.TotalDifficulty)
			if err != nil {
				return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
			}
			parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
			if !parentReachedTTD {
				// Terminal block found: it crossed TTD but its parent had not.
				log.WithFields(logrus.Fields{
					"number":   blk.Number,
					"hash":     fmt.Sprintf("%#x", bytesutil.Trunc(blk.Hash)),
					"td":       blk.TotalDifficulty,
					"parentTd": parentBlk.TotalDifficulty,
					"ttd":      terminalTotalDifficulty,
				}).Info("Retrieved terminal block hash")
				return blk.Hash, true, nil
			}
		} else {
			// Head has not reached TTD yet; nothing to return.
			return nil, false, nil
		}
		blk = parentBlk
	}
}
// activationEpochNotReached returns true if activation epoch has not been reach.
@@ -194,6 +270,18 @@ func activationEpochNotReached(slot types.Slot) bool {
return false
}
// tDStringToUint256 parses a 0x-prefixed hex total-difficulty string into a
// uint256 value, rejecting inputs wider than 256 bits.
func tDStringToUint256(td string) (*uint256.Int, error) {
	parsed, err := hexutil.DecodeBig(td)
	if err != nil {
		return nil, err
	}
	val, overflows := uint256.FromBig(parsed)
	if overflows {
		return nil, errors.New("total difficulty overflowed")
	}
	return val, nil
}
func emptyPayload() *enginev1.ExecutionPayload {
return &enginev1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),

View File

@@ -6,6 +6,7 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/holiman/uint256"
types "github.com/prysmaticlabs/eth2-types"
chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
@@ -21,6 +22,26 @@ import (
"github.com/prysmaticlabs/prysm/testing/util"
)
// Test_tDStringToUint256 covers the hex total-difficulty parser: valid
// 0x-prefixed inputs, a missing prefix, malformed hex, and a value wider
// than 256 bits.
func Test_tDStringToUint256(t *testing.T) {
	i, err := tDStringToUint256("0x0")
	require.NoError(t, err)
	require.DeepEqual(t, uint256.NewInt(0), i)

	i, err = tDStringToUint256("0x10000")
	require.NoError(t, err)
	require.DeepEqual(t, uint256.NewInt(65536), i)

	// Inputs without the 0x prefix are rejected by hexutil.
	_, err = tDStringToUint256("100")
	require.ErrorContains(t, "hex string without 0x prefix", err)

	_, err = tDStringToUint256("0xzzzzzz")
	require.ErrorContains(t, "invalid hex string", err)

	// A value wider than 256 bits must fail rather than silently truncate.
	_, err = tDStringToUint256("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" +
		"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
	require.ErrorContains(t, "hex number > 256 bits", err)
}
func TestServer_activationEpochNotReached(t *testing.T) {
require.Equal(t, false, activationEpochNotReached(0))
@@ -133,6 +154,137 @@ func TestServer_getExecutionPayload(t *testing.T) {
}
}
// TestServer_getPowBlockHashAtTerminalTotalDifficulty is a table-driven test
// of the terminal-block walk: config parse failures, engine errors, nil and
// malformed blocks, the zero-hash parent boundary, a found terminal block,
// and a chain that has not reached TTD.
func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
	tests := []struct {
		name                  string
		paramsTd              string // TTD to install in the beacon config
		currentPowBlock       *pb.ExecutionBlock
		parentPowBlock        *pb.ExecutionBlock
		errLatestExecutionBlk error // injected LatestExecutionBlock error
		wantTerminalBlockHash []byte
		wantExists            bool
		errString             string // expected error substring; empty means success
	}{
		{
			name:      "config td overflows",
			paramsTd:  "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
			errString: "could not convert terminal total difficulty to uint256",
		},
		{
			name:                  "could not get latest execution block",
			paramsTd:              "1",
			errLatestExecutionBlk: errors.New("blah"),
			errString:             "could not get latest execution block",
		},
		{
			name:      "nil latest execution block",
			paramsTd:  "1",
			errString: "latest execution block is nil",
		},
		{
			name:     "current execution block invalid TD",
			paramsTd: "1",
			currentPowBlock: &pb.ExecutionBlock{
				Hash:            []byte{'a'},
				TotalDifficulty: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
			},
			errString: "could not convert total difficulty to uint256",
		},
		{
			// Zero-hash parent terminates the walk without a terminal block.
			name:     "current execution block has zero hash parent",
			paramsTd: "2",
			currentPowBlock: &pb.ExecutionBlock{
				Hash:            []byte{'a'},
				ParentHash:      params.BeaconConfig().ZeroHash[:],
				TotalDifficulty: "0x3",
			},
		},
		{
			// Parent hash 'b' is absent from the mock's block map.
			name:     "could not get parent block",
			paramsTd: "2",
			currentPowBlock: &pb.ExecutionBlock{
				Hash:            []byte{'a'},
				ParentHash:      []byte{'b'},
				TotalDifficulty: "0x3",
			},
			errString: "could not get parent execution block",
		},
		{
			// Parent TD "1" lacks the 0x prefix and fails to parse.
			name:     "parent execution block invalid TD",
			paramsTd: "2",
			currentPowBlock: &pb.ExecutionBlock{
				Hash:            []byte{'a'},
				ParentHash:      []byte{'b'},
				TotalDifficulty: "0x3",
			},
			parentPowBlock: &pb.ExecutionBlock{
				Hash:            []byte{'b'},
				ParentHash:      []byte{'c'},
				TotalDifficulty: "1",
			},
			errString: "could not convert total difficulty to uint256",
		},
		{
			// Block 'a' crossed TTD (3 >= 2) while its parent (1) did not.
			name:     "happy case",
			paramsTd: "2",
			currentPowBlock: &pb.ExecutionBlock{
				Hash:            []byte{'a'},
				ParentHash:      []byte{'b'},
				TotalDifficulty: "0x3",
			},
			parentPowBlock: &pb.ExecutionBlock{
				Hash:            []byte{'b'},
				ParentHash:      []byte{'c'},
				TotalDifficulty: "0x1",
			},
			wantExists:            true,
			wantTerminalBlockHash: []byte{'a'},
		},
		{
			name:     "ttd not reached",
			paramsTd: "3",
			currentPowBlock: &pb.ExecutionBlock{
				Hash:            []byte{'a'},
				ParentHash:      []byte{'b'},
				TotalDifficulty: "0x2",
			},
			parentPowBlock: &pb.ExecutionBlock{
				Hash:            []byte{'b'},
				ParentHash:      []byte{'c'},
				TotalDifficulty: "0x1",
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Install the per-case TTD into the global beacon config.
			cfg := params.BeaconConfig()
			cfg.TerminalTotalDifficulty = tt.paramsTd
			params.OverrideBeaconConfig(cfg)
			// Seed the mock engine's hash->block map with the parent, if any.
			var m map[[32]byte]*pb.ExecutionBlock
			if tt.parentPowBlock != nil {
				m = map[[32]byte]*pb.ExecutionBlock{
					bytesutil.ToBytes32(tt.parentPowBlock.Hash): tt.parentPowBlock,
				}
			}
			vs := &Server{
				ExecutionEngineCaller: &powtesting.EngineClient{
					ErrLatestExecBlock: tt.errLatestExecutionBlk,
					ExecutionBlock:     tt.currentPowBlock,
					BlockByHashMap:     m,
				},
			}
			b, e, err := vs.getPowBlockHashAtTerminalTotalDifficulty(context.Background())
			if tt.errString != "" {
				require.ErrorContains(t, tt.errString, err)
			} else {
				require.NoError(t, err)
				require.DeepEqual(t, tt.wantExists, e)
				require.DeepEqual(t, tt.wantTerminalBlockHash, b)
			}
		})
	}
}
func TestServer_getTerminalBlockHashIfExists(t *testing.T) {
tests := []struct {
name string

View File

@@ -116,14 +116,21 @@ type Config struct {
// be registered into a running beacon node.
func NewService(ctx context.Context, cfg *Config) *Service {
ctx, cancel := context.WithCancel(ctx)
s := &Service{
return &Service{
cfg: cfg,
ctx: ctx,
cancel: cancel,
incomingAttestation: make(chan *ethpbv1alpha1.Attestation, params.BeaconConfig().DefaultBufferSize),
connectedRPCClients: make(map[net.Addr]bool),
}
}
// paranoid build time check to ensure ChainInfoFetcher implements required interfaces
var _ stategen.CanonicalChecker = blockchain.ChainInfoFetcher(nil)
var _ stategen.CurrentSlotter = blockchain.ChainInfoFetcher(nil)
// Start the gRPC server.
func (s *Service) Start() {
address := fmt.Sprintf("%s:%s", s.cfg.Host, s.cfg.Port)
lis, err := net.Listen("tcp", address)
if err != nil {
@@ -152,6 +159,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
)),
grpc.MaxRecvMsgSize(s.cfg.MaxMsgSize),
}
grpc_prometheus.EnableHandlingTimeHistogram()
if s.cfg.CertFlag != "" && s.cfg.KeyFlag != "" {
creds, err := credentials.NewServerTLSFromFile(s.cfg.CertFlag, s.cfg.KeyFlag)
if err != nil {
@@ -165,17 +173,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
}
s.grpcServer = grpc.NewServer(opts...)
return s
}
// paranoid build time check to ensure ChainInfoFetcher implements required interfaces
var _ stategen.CanonicalChecker = blockchain.ChainInfoFetcher(nil)
var _ stategen.CurrentSlotter = blockchain.ChainInfoFetcher(nil)
// Start the gRPC server.
func (s *Service) Start() {
grpc_prometheus.EnableHandlingTimeHistogram()
var stateCache stategen.CachedGetter
if s.cfg.StateGen != nil {
stateCache = s.cfg.StateGen.CombinedCache()

View File

@@ -56,7 +56,7 @@ func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements inte
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: length,
numOfElems: retrieveLength(elements),
numOfElems: reflect.Indirect(reflect.ValueOf(elements)).Len(),
}, nil
case types.CompositeArray, types.CompressedArray:
return &FieldTrie{
@@ -66,7 +66,7 @@ func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements inte
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: length,
numOfElems: retrieveLength(elements),
numOfElems: reflect.Indirect(reflect.ValueOf(elements)).Len(),
}, nil
default:
return nil, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(dataType).Name())
@@ -97,14 +97,14 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b
if err != nil {
return [32]byte{}, err
}
f.numOfElems = retrieveLength(elements)
f.numOfElems = reflect.Indirect(reflect.ValueOf(elements)).Len()
return fieldRoot, nil
case types.CompositeArray:
fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayerVariable(fieldRoots, indices, f.fieldLayers)
if err != nil {
return [32]byte{}, err
}
f.numOfElems = retrieveLength(elements)
f.numOfElems = reflect.Indirect(reflect.ValueOf(elements)).Len()
return stateutil.AddInMixin(fieldRoot, uint64(len(f.fieldLayers[0])))
case types.CompressedArray:
numOfElems, err := f.field.ElemsInChunk()
@@ -133,7 +133,7 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b
if err != nil {
return [32]byte{}, err
}
f.numOfElems = retrieveLength(elements)
f.numOfElems = reflect.Indirect(reflect.ValueOf(elements)).Len()
return stateutil.AddInMixin(fieldRoot, uint64(f.numOfElems))
default:
return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(f.dataType).Name())

View File

@@ -57,11 +57,9 @@ func validateElements(field types.FieldIndex, dataType types.DataType, elements
}
length *= comLength
}
elemLen := retrieveLength(elements)
castedLen := int(length) // lint:ignore uintcast- ajhdjhd
if elemLen > castedLen {
return errors.Errorf("elements length is larger than expected for field %s: %d > %d", field.String(version.Phase0), elemLen, length)
val := reflect.Indirect(reflect.ValueOf(elements))
if uint64(val.Len()) > length {
return errors.Errorf("elements length is larger than expected for field %s: %d > %d", field.String(version.Phase0), val.Len(), length)
}
return nil
}
@@ -74,7 +72,7 @@ func fieldConverters(field types.FieldIndex, indices []uint64, elements interfac
case [][]byte:
return handleByteArrays(val, indices, convertAll)
case *customtypes.BlockRoots:
return handleIndexer(val, indices, convertAll)
return handle32ByteArrays(val[:], indices, convertAll)
default:
return nil, errors.Errorf("Incorrect type used for block roots")
}
@@ -92,7 +90,7 @@ func fieldConverters(field types.FieldIndex, indices []uint64, elements interfac
case [][]byte:
return handleByteArrays(val, indices, convertAll)
case *customtypes.RandaoMixes:
return handleIndexer(val, indices, convertAll)
return handle32ByteArrays(val[:], indices, convertAll)
default:
return nil, errors.Errorf("Incorrect type used for randao mixes")
}
@@ -184,34 +182,6 @@ func handle32ByteArrays(val [][32]byte, indices []uint64, convertAll bool) ([][3
return roots, nil
}
// handleIndexer computes and returns 32 byte arrays in a slice of root format,
// reading roots through the Indexer abstraction. When convertAll is true every
// root in the backing array is returned; otherwise only the requested indices
// are returned, with bounds checking against the indexer's total length.
func handleIndexer(indexer customtypes.Indexer, indices []uint64, convertAll bool) ([][32]byte, error) {
	length := len(indices)
	totalLength := indexer.TotalLength()
	if convertAll {
		length = int(totalLength) // lint:ignore uintcast -- field lengths are well below int64 range.
	}
	roots := make([][32]byte, 0, length)
	rootCreator := func(input [32]byte) {
		roots = append(roots, input)
	}
	if convertAll {
		for i := uint64(0); i < uint64(length); i++ {
			rootCreator(indexer.RootAtIndex(i))
		}
		return roots, nil
	}
	if totalLength > 0 {
		for _, idx := range indices {
			// Reject out-of-range indices before touching the backing array.
			if idx > totalLength-1 {
				return nil, fmt.Errorf("index %d greater than number of byte arrays %d", idx, totalLength)
			}
			rootCreator(indexer.RootAtIndex(idx))
		}
	}
	return roots, nil
}
// handleValidatorSlice returns the validator indices in a slice of root format.
func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) {
length := len(indices)
@@ -378,17 +348,3 @@ func handleBalanceSlice(val, indices []uint64, convertAll bool) ([][32]byte, err
}
return [][32]byte{}, nil
}
// retrieveLength returns the element count of the given container. Struct
// values (e.g. the custom Indexer-backed types) are asked via their
// TotalLength method through reflection; plain slices/arrays use reflect Len.
func retrieveLength(elements interface{}) int {
	elemLen := int(0)
	elemVal := reflect.ValueOf(elements)
	if reflect.Indirect(elemVal).Kind() == reflect.Struct {
		// NOTE(review): assumes every struct passed here has a TotalLength()
		// uint64 method — MethodByName panics otherwise; confirm at callers.
		meth := elemVal.MethodByName("TotalLength")
		ret := meth.Call([]reflect.Value{})
		elemLen = int(ret[0].Uint()) // lint:ignore uintcast -- field lengths are well below int64 range.
	} else {
		val := reflect.Indirect(elemVal)
		elemLen = val.Len()
	}
	return elemLen
}

View File

@@ -12,7 +12,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/custom-types",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/state/stateutil:go_default_library",
"//config/fieldparams:go_default_library",
"@com_github_ferranbt_fastssz//:go_default_library",
],

View File

@@ -2,162 +2,28 @@ package customtypes
import (
"fmt"
"reflect"
"runtime"
"sort"
"sync"
"unsafe"
fssz "github.com/ferranbt/fastssz"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
)
var _ fssz.HashRoot = (*BlockRoots)(nil)
var _ fssz.HashRoot = (BlockRoots)([fieldparams.BlockRootsLength][32]byte{})
var _ fssz.Marshaler = (*BlockRoots)(nil)
var _ fssz.Unmarshaler = (*BlockRoots)(nil)
// Indexer abstracts read access to a fixed-length array of 32-byte roots.
type Indexer interface {
	RootAtIndex(idx uint64) [32]byte
	TotalLength() uint64
}

// BlockRoots represents block roots of the beacon state. Writes by a copy
// that still shares the base array are journaled in fieldJournal rather than
// applied in place; generation tracks which snapshot of the base array this
// instance descends from.
type BlockRoots struct {
	baseArray    *baseArrayBlockRoots
	fieldJournal map[uint64][32]byte
	generation   uint64
	*stateutil.Reference
}

// baseArrayBlockRoots is the shared backing array for BlockRoots copies.
// descendantMap records, per generation, the (uintptr) addresses of the
// BlockRoots instances that reference this base array.
type baseArrayBlockRoots struct {
	baseArray     *[fieldparams.BlockRootsLength][32]byte
	descendantMap map[uint64][]uintptr
	*sync.RWMutex
	*stateutil.Reference
}
// sorter orders descendant pointer lists by their generation number so that
// cleanUp can find the oldest still-live generation. It implements
// sort.Interface; objs[i] holds the descendants of generations[i].
type sorter struct {
	objs        [][]uintptr
	generations []uint64
}

// Len reports the number of generations being sorted.
func (s sorter) Len() int {
	return len(s.generations)
}

// Swap exchanges both parallel slices to keep them aligned.
func (s sorter) Swap(i, j int) {
	s.objs[i], s.objs[j] = s.objs[j], s.objs[i]
	s.generations[i], s.generations[j] = s.generations[j], s.generations[i]
}

// Less sorts ascending by generation number.
func (s sorter) Less(i, j int) bool {
	return s.generations[i] < s.generations[j]
}
// RootAtIndex returns the root stored at idx in the shared base array,
// under the read lock.
func (b *baseArrayBlockRoots) RootAtIndex(idx uint64) [32]byte {
	b.RWMutex.RLock()
	defer b.RWMutex.RUnlock()
	return b.baseArray[idx]
}

// TotalLength returns the fixed capacity of the base array.
func (b *baseArrayBlockRoots) TotalLength() uint64 {
	return fieldparams.BlockRootsLength
}

// addGeneration records descendant (a *BlockRoots address) as referencing
// this base array at the given generation.
func (b *baseArrayBlockRoots) addGeneration(generation uint64, descendant uintptr) {
	b.RWMutex.Lock()
	defer b.RWMutex.Unlock()
	b.descendantMap[generation] = append(b.descendantMap[generation], descendant)
}
// removeGeneration drops every occurrence of descendant from the given
// generation's descendant list, under the write lock.
func (b *baseArrayBlockRoots) removeGeneration(generation uint64, descendant uintptr) {
	b.RWMutex.Lock()
	defer b.RWMutex.Unlock()
	filtered := []uintptr{}
	for _, ptr := range b.descendantMap[generation] {
		if ptr == descendant {
			continue
		}
		filtered = append(filtered, ptr)
	}
	b.descendantMap[generation] = filtered
}
// numOfDescendants returns the number of generations currently tracked in
// the descendant map (not the total count of descendant pointers).
func (b *baseArrayBlockRoots) numOfDescendants() uint64 {
	b.RWMutex.RLock()
	defer b.RWMutex.RUnlock()
	return uint64(len(b.descendantMap))
}
// cleanUp compacts the descendant map: it finds the oldest generation that
// still has a live *BlockRoots descendant, folds that descendant's journaled
// writes into the shared base array, and discards all older generations.
//
// NOTE(review): descendants are stored as uintptr and resurrected here via
// unsafe.Pointer — such round-trips are not GC-safe in Go and the nil check
// below cannot detect a collected object. This appears to be prototype code
// ("hack it in"); verify before relying on it.
func (b *baseArrayBlockRoots) cleanUp() {
	b.RWMutex.Lock()
	defer b.RWMutex.Unlock()
	// NOTE(review): debug print left in; consider removing or demoting to a logger.
	fmt.Printf("\n cleaning up block roots %d \n ", len(b.descendantMap))
	// Flatten the map into parallel slices so they can be sorted by generation.
	listOfObjs := [][]uintptr{}
	generations := []uint64{}
	for g, objs := range b.descendantMap {
		generations = append(generations, g)
		listOfObjs = append(listOfObjs, objs)
	}
	sortedObj := sorter{
		objs:        listOfObjs,
		generations: generations,
	}
	sort.Sort(sortedObj)
	// Locate the first (oldest) generation with a non-nil descendant pointer.
	lastReferencedGen := 0
	lastRefrencedIdx := 0
	lastRefPointer := 0
	for i, g := range sortedObj.generations {
		for j, o := range sortedObj.objs[i] {
			x := (*BlockRoots)(unsafe.Pointer(o))
			if x == nil {
				continue
			}
			lastReferencedGen = int(g) // lint:ignore uintcast -- generation counts stay well below int64 range.
			lastRefrencedIdx = i
			lastRefPointer = j
			break
		}
		// NOTE(review): a live descendant at generation 0 does not stop the
		// scan here, since lastReferencedGen stays 0 — confirm intended.
		if lastReferencedGen != 0 {
			break
		}
	}
	fmt.Printf("\n block root map %d, %d, %d \n ", lastReferencedGen, lastRefrencedIdx, lastRefPointer)
	// Fold the chosen descendant's journaled writes into the base array.
	br := (*BlockRoots)(unsafe.Pointer(sortedObj.objs[lastRefrencedIdx][lastRefPointer]))
	for k, v := range br.fieldJournal {
		b.baseArray[k] = v
	}
	// Drop all generations older than the chosen one and rebuild the map.
	sortedObj.generations = sortedObj.generations[lastRefrencedIdx:]
	sortedObj.objs = sortedObj.objs[lastRefrencedIdx:]
	newMap := make(map[uint64][]uintptr)
	for i, g := range sortedObj.generations {
		newMap[g] = sortedObj.objs[i]
	}
	b.descendantMap = newMap
}
type BlockRoots [fieldparams.BlockRootsLength][32]byte
// HashTreeRoot returns calculated hash root.
func (r *BlockRoots) HashTreeRoot() ([32]byte, error) {
func (r BlockRoots) HashTreeRoot() ([32]byte, error) {
return fssz.HashWithDefaultHasher(r)
}
// HashTreeRootWith hashes a BlockRoots object with a Hasher from the default HasherPool.
func (r *BlockRoots) HashTreeRootWith(hh *fssz.Hasher) error {
func (r BlockRoots) HashTreeRootWith(hh *fssz.Hasher) error {
index := hh.Index()
for i := uint64(0); i < r.baseArray.TotalLength(); i++ {
if val, ok := r.fieldJournal[i]; ok {
hh.Append(val[:])
continue
}
rt := r.baseArray.RootAtIndex(i)
hh.Append(rt[:])
for _, sRoot := range r {
hh.Append(sRoot[:])
}
hh.Merkleize(index)
return nil
@@ -168,13 +34,12 @@ func (r *BlockRoots) UnmarshalSSZ(buf []byte) error {
if len(buf) != r.SizeSSZ() {
return fmt.Errorf("expected buffer of length %d received %d", r.SizeSSZ(), len(buf))
}
r.baseArray.Lock()
defer r.baseArray.Unlock()
for i := range r.baseArray.baseArray {
copy(r.baseArray.baseArray[i][:], buf[i*32:(i+1)*32])
var roots BlockRoots
for i := range roots {
copy(roots[i][:], buf[i*32:(i+1)*32])
}
*r = roots
return nil
}
@@ -190,13 +55,10 @@ func (r *BlockRoots) MarshalSSZTo(dst []byte) ([]byte, error) {
// MarshalSSZ marshals BlockRoots into a serialized object.
func (r *BlockRoots) MarshalSSZ() ([]byte, error) {
marshalled := make([]byte, fieldparams.BlockRootsLength*32)
for i := uint64(0); i < r.baseArray.TotalLength(); i++ {
if val, ok := r.fieldJournal[i]; ok {
copy(marshalled[i*32:], val[:])
continue
for i, r32 := range r {
for j, rr := range r32 {
marshalled[i*32+j] = rr
}
rt := r.baseArray.RootAtIndex(i)
copy(marshalled[i*32:], rt[:])
}
return marshalled, nil
}
@@ -211,152 +73,10 @@ func (r *BlockRoots) Slice() [][]byte {
if r == nil {
return nil
}
bRoots := make([][]byte, r.baseArray.TotalLength())
for i := uint64(0); i < r.baseArray.TotalLength(); i++ {
if val, ok := r.fieldJournal[i]; ok {
bRoots[i] = val[:]
continue
}
rt := r.baseArray.RootAtIndex(i)
bRoots[i] = rt[:]
bRoots := make([][]byte, len(r))
for i, root := range r {
tmp := root
bRoots[i] = tmp[:]
}
return bRoots
}
// Array converts a customtypes.BlockRoots object into a flat
// [BlockRootsLength][32]byte array, overlaying journaled writes on top of
// the shared base array. Returns the zero array for a nil receiver.
func (r *BlockRoots) Array() [fieldparams.BlockRootsLength][32]byte {
	if r == nil {
		return [fieldparams.BlockRootsLength][32]byte{}
	}
	bRoots := [fieldparams.BlockRootsLength][32]byte{}
	for i := uint64(0); i < r.baseArray.TotalLength(); i++ {
		// Journaled entries take precedence over the shared base array.
		if val, ok := r.fieldJournal[i]; ok {
			bRoots[i] = val
			continue
		}
		rt := r.baseArray.RootAtIndex(i)
		bRoots[i] = rt
	}
	return bRoots
}
// SetFromSlice builds a fresh BlockRoots from a 2D byte slice, copying each
// entry into a newly allocated base array and registering a finalizer to
// unhook the instance from the base array's descendant map on GC.
func SetFromSlice(slice [][]byte) *BlockRoots {
	br := &BlockRoots{
		baseArray: &baseArrayBlockRoots{
			baseArray:     new([fieldparams.BlockRootsLength][32]byte),
			descendantMap: map[uint64][]uintptr{},
			RWMutex:       new(sync.RWMutex),
			Reference:     stateutil.NewRef(1),
		},
		fieldJournal: map[uint64][32]byte{},
		Reference:    stateutil.NewRef(1),
	}
	for i, rt := range slice {
		copy(br.baseArray.baseArray[i][:], rt)
	}
	runtime.SetFinalizer(br, blockRootsFinalizer)
	return br
}
// SetFromBaseField re-initializes the receiver around the given fixed-size
// array: fresh base array wrapper, empty journal, new refcount, and the
// receiver registered as a generation-0 descendant with a GC finalizer.
func (r *BlockRoots) SetFromBaseField(field [fieldparams.BlockRootsLength][32]byte) {
	r.baseArray = &baseArrayBlockRoots{
		baseArray:     &field,
		descendantMap: map[uint64][]uintptr{},
		RWMutex:       new(sync.RWMutex),
		Reference:     stateutil.NewRef(1),
	}
	r.fieldJournal = map[uint64][32]byte{}
	r.Reference = stateutil.NewRef(1)
	r.baseArray.addGeneration(0, reflect.ValueOf(r).Pointer())
	runtime.SetFinalizer(r, blockRootsFinalizer)
}
// RootAtIndex returns the root at idx, preferring a journaled write over the
// shared base array.
func (r *BlockRoots) RootAtIndex(idx uint64) [32]byte {
	if val, ok := r.fieldJournal[idx]; ok {
		return val
	}
	return r.baseArray.RootAtIndex(idx)
}
// SetRootAtIndex writes val at idx using copy-on-write semantics:
//   - sole owner of both wrapper and base array: write the base array in place;
//   - sole owner of the wrapper but shared base array: journal the write and
//     bump this instance's generation in the descendant map;
//   - shared wrapper: clone the journal, detach onto a fresh refcount, then
//     journal the write and bump the generation.
func (r *BlockRoots) SetRootAtIndex(idx uint64, val [32]byte) {
	if r.Refs() <= 1 && r.baseArray.Refs() <= 1 {
		// No other referents anywhere: mutate the base array directly.
		r.baseArray.Lock()
		r.baseArray.baseArray[idx] = val
		r.baseArray.Unlock()
		return
	}
	if r.Refs() <= 1 {
		// Base array is shared: record the write in this instance's journal.
		r.fieldJournal[idx] = val
		r.baseArray.removeGeneration(r.generation, reflect.ValueOf(r).Pointer())
		r.generation++
		r.baseArray.addGeneration(r.generation, reflect.ValueOf(r).Pointer())
		return
	}
	// Wrapper itself is shared: copy the journal before mutating it.
	newJournal := make(map[uint64][32]byte)
	for k, val := range r.fieldJournal {
		newJournal[k] = val
	}
	r.fieldJournal = newJournal
	r.MinusRef()
	r.Reference = stateutil.NewRef(1)
	r.fieldJournal[idx] = val
	r.baseArray.removeGeneration(r.generation, reflect.ValueOf(r).Pointer())
	r.generation++
	r.baseArray.addGeneration(r.generation, reflect.ValueOf(r).Pointer())
}
// Copy returns a shallow copy that shares the base array, journal, and
// refcount with the receiver (both refcounts are incremented). The copy is
// registered as a descendant at the current generation, and a cleanup pass
// runs once the base array tracks more than 20 generations.
func (r *BlockRoots) Copy() *BlockRoots {
	r.baseArray.AddRef()
	r.Reference.AddRef()
	br := &BlockRoots{
		baseArray:    r.baseArray,
		fieldJournal: r.fieldJournal,
		Reference:    r.Reference,
		generation:   r.generation,
	}
	r.baseArray.addGeneration(r.generation, reflect.ValueOf(br).Pointer())
	if r.baseArray.numOfDescendants() > 20 {
		r.baseArray.cleanUp()
	}
	runtime.SetFinalizer(br, blockRootsFinalizer)
	return br
}
// TotalLength returns the fixed capacity of the block-roots array.
func (r *BlockRoots) TotalLength() uint64 {
	return fieldparams.BlockRootsLength
}

// IncreaseRef increments the refcount of both the wrapper and the shared
// base array.
func (r *BlockRoots) IncreaseRef() {
	r.Reference.AddRef()
	r.baseArray.Reference.AddRef()
}

// DecreaseRef decrements the refcount of both the wrapper and the shared
// base array.
func (r *BlockRoots) DecreaseRef() {
	r.Reference.MinusRef()
	r.baseArray.Reference.MinusRef()
}
// blockRootsFinalizer is the GC finalizer for BlockRoots instances: it
// removes the dying instance's pointer from its generation's descendant list
// so cleanUp no longer considers it live. No-op if the generation or the
// pointer is not present.
func blockRootsFinalizer(br *BlockRoots) {
	br.baseArray.Lock()
	defer br.baseArray.Unlock()
	ptrVal := reflect.ValueOf(br).Pointer()
	vals, ok := br.baseArray.descendantMap[br.generation]
	if !ok {
		return
	}
	// Rebuild the list without this instance's pointer.
	exists := false
	wantedVals := []uintptr{}
	for _, v := range vals {
		if v == ptrVal {
			exists = true
			continue
		}
		newV := v
		wantedVals = append(wantedVals, newV)
	}
	if !exists {
		return
	}
	br.baseArray.descendantMap[br.generation] = wantedVals
}

View File

@@ -1,25 +1,24 @@
package customtypes
import (
"bytes"
"reflect"
"testing"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/testing/assert"
)
func TestBlockRoots_Casting(t *testing.T) {
var b [fieldparams.BlockRootsLength][32]byte
f := SetFromSlice([][]byte{})
f.SetFromBaseField(b)
if !reflect.DeepEqual(f.Array(), b) {
t.Errorf("Unequal: %v = %v", f.Array(), b)
d := BlockRoots(b)
if !reflect.DeepEqual([fieldparams.BlockRootsLength][32]byte(d), b) {
t.Errorf("Unequal: %v = %v", d, b)
}
}
func TestBlockRoots_UnmarshalSSZ(t *testing.T) {
t.Run("Ok", func(t *testing.T) {
d := SetFromSlice([][]byte{})
d := BlockRoots{}
var b [fieldparams.BlockRootsLength][32]byte
b[0] = [32]byte{'f', 'o', 'o'}
b[1] = [32]byte{'b', 'a', 'r'}
@@ -33,8 +32,8 @@ func TestBlockRoots_UnmarshalSSZ(t *testing.T) {
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !reflect.DeepEqual(b, d.Array()) {
t.Errorf("Unequal: %v = %v", b, d.Array())
if !reflect.DeepEqual(b, [fieldparams.BlockRootsLength][32]byte(d)) {
t.Errorf("Unequal: %v = %v", b, [fieldparams.BlockRootsLength][32]byte(d))
}
})
@@ -71,47 +70,28 @@ func TestBlockRoots_MarshalSSZTo(t *testing.T) {
}
func TestBlockRoots_MarshalSSZ(t *testing.T) {
d := SetFromSlice([][]byte{})
d.IncreaseRef()
d.SetRootAtIndex(0, [32]byte{'f', 'o', 'o'})
d.IncreaseRef()
d.IncreaseRef()
d.SetRootAtIndex(1, [32]byte{'b', 'a', 'r'})
d := BlockRoots{}
d[0] = [32]byte{'f', 'o', 'o'}
d[1] = [32]byte{'b', 'a', 'r'}
b, err := d.MarshalSSZ()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
rt := d.RootAtIndex(0)
if !reflect.DeepEqual(rt[:], b[0:32]) {
t.Errorf("Unequal: %v = %v", rt, b[0:32])
if !reflect.DeepEqual(d[0][:], b[0:32]) {
t.Errorf("Unequal: %v = %v", d[0], b[0:32])
}
rt = d.RootAtIndex(1)
if !reflect.DeepEqual(rt[:], b[32:64]) {
t.Errorf("Unequal: %v = %v", rt, b[32:64])
}
d2 := SetFromSlice([][]byte{})
err = d2.UnmarshalSSZ(b)
if err != nil {
t.Error(err)
}
res, err := d2.MarshalSSZ()
if err != nil {
t.Error(err)
}
if !bytes.Equal(res, b) {
t.Error("unequal")
if !reflect.DeepEqual(d[1][:], b[32:64]) {
t.Errorf("Unequal: %v = %v", d[0], b[32:64])
}
}
func TestBlockRoots_SizeSSZ(t *testing.T) {
d := SetFromSlice([][]byte{})
d := BlockRoots{}
if d.SizeSSZ() != fieldparams.BlockRootsLength*32 {
t.Errorf("Wrong SSZ size. Expected %v vs actual %v", fieldparams.BlockRootsLength*32, d.SizeSSZ())
}
}
/*
func TestBlockRoots_Slice(t *testing.T) {
a, b, c := [32]byte{'a'}, [32]byte{'b'}, [32]byte{'c'}
roots := BlockRoots{}
@@ -123,4 +103,3 @@ func TestBlockRoots_Slice(t *testing.T) {
assert.DeepEqual(t, b[:], slice[10])
assert.DeepEqual(t, c[:], slice[100])
}
*/

View File

@@ -2,77 +2,48 @@ package customtypes
import (
"fmt"
"sync"
fssz "github.com/ferranbt/fastssz"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
)
var _ fssz.HashRoot = (*RandaoMixes)(nil)
var _ fssz.HashRoot = (RandaoMixes)([fieldparams.RandaoMixesLength][32]byte{})
var _ fssz.Marshaler = (*RandaoMixes)(nil)
var _ fssz.Unmarshaler = (*RandaoMixes)(nil)
// RandaoMixes represents RANDAO mixes of the beacon state. Writes by a copy
// that still shares the base array are journaled in fieldJournal rather than
// applied in place.
type RandaoMixes struct {
	baseArray    *baseArrayRandaoMixes
	fieldJournal map[uint64][32]byte
	*stateutil.Reference
}

// baseArrayRandaoMixes is the shared backing array for RandaoMixes copies.
type baseArrayRandaoMixes struct {
	baseArray *[fieldparams.RandaoMixesLength][32]byte
	*sync.RWMutex
	*stateutil.Reference
}

// RootAtIndex returns the mix stored at idx in the shared base array, under
// the read lock.
func (b *baseArrayRandaoMixes) RootAtIndex(idx uint64) [32]byte {
	b.RWMutex.RLock()
	defer b.RWMutex.RUnlock()
	return b.baseArray[idx]
}

// TotalLength returns the fixed capacity of the base array.
func (b *baseArrayRandaoMixes) TotalLength() uint64 {
	return fieldparams.RandaoMixesLength
}
// RandaoMixes represents RANDAO mixes of the beacon state.
type RandaoMixes [fieldparams.RandaoMixesLength][32]byte
// HashTreeRoot returns calculated hash root.
func (r *RandaoMixes) HashTreeRoot() ([32]byte, error) {
func (r RandaoMixes) HashTreeRoot() ([32]byte, error) {
return fssz.HashWithDefaultHasher(r)
}
// HashTreeRootWith hashes a BlockRoots object with a Hasher from the default HasherPool.
func (r *RandaoMixes) HashTreeRootWith(hh *fssz.Hasher) error {
// HashTreeRootWith hashes a RandaoMixes object with a Hasher from the default HasherPool.
func (r RandaoMixes) HashTreeRootWith(hh *fssz.Hasher) error {
index := hh.Index()
for i := uint64(0); i < r.baseArray.TotalLength(); i++ {
if val, ok := r.fieldJournal[i]; ok {
hh.Append(val[:])
continue
}
rt := r.baseArray.RootAtIndex(i)
hh.Append(rt[:])
for _, sRoot := range r {
hh.Append(sRoot[:])
}
hh.Merkleize(index)
return nil
}
// UnmarshalSSZ deserializes the provided bytes buffer into the BlockRoots object.
// UnmarshalSSZ deserializes the provided bytes buffer into the RandaoMixes object.
func (r *RandaoMixes) UnmarshalSSZ(buf []byte) error {
if len(buf) != r.SizeSSZ() {
return fmt.Errorf("expected buffer of length %d received %d", r.SizeSSZ(), len(buf))
}
r.baseArray.Lock()
defer r.baseArray.Unlock()
for i := range r.baseArray.baseArray {
copy(r.baseArray.baseArray[i][:], buf[i*32:(i+1)*32])
var roots RandaoMixes
for i := range roots {
copy(roots[i][:], buf[i*32:(i+1)*32])
}
*r = roots
return nil
}
// MarshalSSZTo marshals BlockRoots with the provided byte slice.
// MarshalSSZTo marshals RandaoMixes with the provided byte slice.
func (r *RandaoMixes) MarshalSSZTo(dst []byte) ([]byte, error) {
marshalled, err := r.MarshalSSZ()
if err != nil {
@@ -81,16 +52,13 @@ func (r *RandaoMixes) MarshalSSZTo(dst []byte) ([]byte, error) {
return append(dst, marshalled...), nil
}
// MarshalSSZ marshals BlockRoots into a serialized object.
// MarshalSSZ marshals RandaoMixes into a serialized object.
func (r *RandaoMixes) MarshalSSZ() ([]byte, error) {
marshalled := make([]byte, fieldparams.RandaoMixesLength*32)
for i := uint64(0); i < r.baseArray.TotalLength(); i++ {
if val, ok := r.fieldJournal[i]; ok {
copy(marshalled[i*32:], val[:])
continue
for i, r32 := range r {
for j, rr := range r32 {
marshalled[i*32+j] = rr
}
rt := r.baseArray.RootAtIndex(i)
copy(marshalled[i*32:], rt[:])
}
return marshalled, nil
}
@@ -100,90 +68,15 @@ func (_ *RandaoMixes) SizeSSZ() int {
return fieldparams.RandaoMixesLength * 32
}
// Slice converts a customtypes.BlockRoots object into a 2D byte slice.
// Slice converts a customtypes.RandaoMixes object into a 2D byte slice.
func (r *RandaoMixes) Slice() [][]byte {
if r == nil {
return nil
}
bRoots := make([][]byte, r.baseArray.TotalLength())
for i := uint64(0); i < r.baseArray.TotalLength(); i++ {
if val, ok := r.fieldJournal[i]; ok {
bRoots[i] = val[:]
continue
}
rt := r.baseArray.RootAtIndex(i)
bRoots[i] = rt[:]
mixes := make([][]byte, len(r))
for i, root := range r {
tmp := root
mixes[i] = tmp[:]
}
return bRoots
}
func SetFromSliceRandao(slice [][]byte) *RandaoMixes {
br := &RandaoMixes{
baseArray: &baseArrayRandaoMixes{
baseArray: new([fieldparams.RandaoMixesLength][32]byte),
RWMutex: new(sync.RWMutex),
Reference: stateutil.NewRef(1),
},
fieldJournal: map[uint64][32]byte{},
Reference: stateutil.NewRef(1),
}
for i, rt := range slice {
copy(br.baseArray.baseArray[i][:], rt)
}
return br
}
func (r *RandaoMixes) SetFromBaseField(field [fieldparams.RandaoMixesLength][32]byte) {
r.baseArray.baseArray = &field
}
func (r *RandaoMixes) RootAtIndex(idx uint64) [32]byte {
if val, ok := r.fieldJournal[idx]; ok {
return val
}
return r.baseArray.RootAtIndex(idx)
}
func (r *RandaoMixes) SetRootAtIndex(idx uint64, val [32]byte) {
if r.Refs() <= 1 && r.baseArray.Refs() <= 1 {
r.baseArray.baseArray[idx] = val
return
}
if r.Refs() <= 1 {
r.fieldJournal[idx] = val
return
}
newJournal := make(map[uint64][32]byte)
for k, val := range r.fieldJournal {
newJournal[k] = val
}
r.fieldJournal = newJournal
r.MinusRef()
r.Reference = stateutil.NewRef(1)
r.fieldJournal[idx] = val
}
func (r *RandaoMixes) Copy() *RandaoMixes {
r.baseArray.AddRef()
r.Reference.AddRef()
rm := &RandaoMixes{
baseArray: r.baseArray,
fieldJournal: r.fieldJournal,
Reference: r.Reference,
}
return rm
}
func (r *RandaoMixes) TotalLength() uint64 {
return fieldparams.RandaoMixesLength
}
func (r *RandaoMixes) IncreaseRef() {
r.Reference.AddRef()
r.baseArray.Reference.AddRef()
}
func (r *RandaoMixes) DecreaseRef() {
r.Reference.MinusRef()
r.baseArray.Reference.MinusRef()
return mixes
}

View File

@@ -76,8 +76,8 @@ func (b *BeaconState) BlockRootAtIndex(idx uint64) ([]byte, error) {
// input index value.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) blockRootAtIndex(idx uint64) ([32]byte, error) {
if b.blockRoots.TotalLength() <= idx {
if uint64(len(b.blockRoots)) <= idx {
return [32]byte{}, fmt.Errorf("index %d out of range", idx)
}
return b.blockRoots.RootAtIndex(idx), nil
return b.blockRoots[idx], nil
}

View File

@@ -37,10 +37,10 @@ func (b *BeaconState) RandaoMixAtIndex(idx uint64) ([]byte, error) {
// input index value.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) randaoMixAtIndex(idx uint64) ([32]byte, error) {
if b.randaoMixes.TotalLength() <= idx {
if uint64(len(b.randaoMixes)) <= idx {
return [32]byte{}, fmt.Errorf("index %d out of range", idx)
}
return b.randaoMixes.RootAtIndex(idx), nil
return b.randaoMixes[idx], nil
}
// RandaoMixesLength returns the length of the randao mixes slice.
@@ -62,5 +62,5 @@ func (b *BeaconState) randaoMixesLength() int {
return 0
}
return int(b.randaoMixes.TotalLength()) // lint:ignore uintcast- ajhdjhd
return len(b.randaoMixes)
}

View File

@@ -4,6 +4,7 @@ import (
"fmt"
customtypes "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/custom-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -24,12 +25,14 @@ func (b *BeaconState) SetBlockRoots(val [][]byte) error {
b.lock.Lock()
defer b.lock.Unlock()
b.sharedFieldReferences[blockRoots].MinusRef()
b.sharedFieldReferences[blockRoots] = stateutil.NewRef(1)
var rootsArr [fieldparams.BlockRootsLength][32]byte
for i := 0; i < len(rootsArr); i++ {
copy(rootsArr[i][:], val[i])
}
roots := customtypes.BlockRoots{}
roots.SetFromBaseField(rootsArr)
roots := customtypes.BlockRoots(rootsArr)
b.blockRoots = &roots
b.markFieldAsDirty(blockRoots)
b.rebuildTrie[blockRoots] = true
@@ -39,13 +42,24 @@ func (b *BeaconState) SetBlockRoots(val [][]byte) error {
// UpdateBlockRootAtIndex for the beacon state. Updates the block root
// at a specific index to a new value.
func (b *BeaconState) UpdateBlockRootAtIndex(idx uint64, blockRoot [32]byte) error {
if b.blockRoots.TotalLength() <= idx {
if uint64(len(b.blockRoots)) <= idx {
return fmt.Errorf("invalid index provided %d", idx)
}
b.lock.Lock()
defer b.lock.Unlock()
b.blockRoots.SetRootAtIndex(idx, blockRoot)
r := b.blockRoots
if ref := b.sharedFieldReferences[blockRoots]; ref.Refs() > 1 {
// Copy elements in underlying array by reference.
roots := *b.blockRoots
rootsCopy := roots
r = &rootsCopy
ref.MinusRef()
b.sharedFieldReferences[blockRoots] = stateutil.NewRef(1)
}
r[idx] = blockRoot
b.blockRoots = r
b.markFieldAsDirty(blockRoots)
b.addDirtyIndices(blockRoots, []uint64{idx})

View File

@@ -3,6 +3,7 @@ package v1
import (
"github.com/pkg/errors"
customtypes "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/custom-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
)
@@ -13,12 +14,14 @@ func (b *BeaconState) SetRandaoMixes(val [][]byte) error {
b.lock.Lock()
defer b.lock.Unlock()
b.sharedFieldReferences[randaoMixes].MinusRef()
b.sharedFieldReferences[randaoMixes] = stateutil.NewRef(1)
var mixesArr [fieldparams.RandaoMixesLength][32]byte
for i := 0; i < len(mixesArr); i++ {
copy(mixesArr[i][:], val[i])
}
mixes := customtypes.RandaoMixes{}
mixes.SetFromBaseField(mixesArr)
mixes := customtypes.RandaoMixes(mixesArr)
b.randaoMixes = &mixes
b.markFieldAsDirty(randaoMixes)
b.rebuildTrie[randaoMixes] = true
@@ -28,13 +31,24 @@ func (b *BeaconState) SetRandaoMixes(val [][]byte) error {
// UpdateRandaoMixesAtIndex for the beacon state. Updates the randao mixes
// at a specific index to a new value.
func (b *BeaconState) UpdateRandaoMixesAtIndex(idx uint64, val []byte) error {
if b.randaoMixes.TotalLength() <= idx {
if uint64(len(b.randaoMixes)) <= idx {
return errors.Errorf("invalid index provided %d", idx)
}
b.lock.Lock()
defer b.lock.Unlock()
b.randaoMixes.SetRootAtIndex(idx, bytesutil.ToBytes32(val))
mixes := b.randaoMixes
if refs := b.sharedFieldReferences[randaoMixes].Refs(); refs > 1 {
// Copy elements in underlying array by reference.
m := *b.randaoMixes
mCopy := m
mixes = &mCopy
b.sharedFieldReferences[randaoMixes].MinusRef()
b.sharedFieldReferences[randaoMixes] = stateutil.NewRef(1)
}
mixes[idx] = bytesutil.ToBytes32(val)
b.randaoMixes = mixes
b.markFieldAsDirty(randaoMixes)
b.addDirtyIndices(randaoMixes, []uint64{idx})

View File

@@ -36,8 +36,10 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconState) (state.BeaconState, error)
return nil, errors.New("received nil state")
}
bRoots := customtypes.SetFromSlice(st.BlockRoots)
var bRoots customtypes.BlockRoots
for i, r := range st.BlockRoots {
copy(bRoots[i][:], r)
}
var sRoots customtypes.StateRoots
for i, r := range st.StateRoots {
copy(sRoots[i][:], r)
@@ -46,7 +48,10 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconState) (state.BeaconState, error)
for i, r := range st.HistoricalRoots {
copy(hRoots[i][:], r)
}
mixes := customtypes.SetFromSliceRandao(st.RandaoMixes)
var mixes customtypes.RandaoMixes
for i, m := range st.RandaoMixes {
copy(mixes[i][:], m)
}
fieldCount := params.BeaconConfig().BeaconStateFieldCount
b := &BeaconState{
@@ -55,7 +60,7 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconState) (state.BeaconState, error)
slot: st.Slot,
fork: st.Fork,
latestBlockHeader: st.LatestBlockHeader,
blockRoots: bRoots,
blockRoots: &bRoots,
stateRoots: &sRoots,
historicalRoots: hRoots,
eth1Data: st.Eth1Data,
@@ -63,7 +68,7 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconState) (state.BeaconState, error)
eth1DepositIndex: st.Eth1DepositIndex,
validators: st.Validators,
balances: st.Balances,
randaoMixes: mixes,
randaoMixes: &mixes,
slashings: st.Slashings,
previousEpochAttestations: st.PreviousEpochAttestations,
currentEpochAttestations: st.CurrentEpochAttestations,
@@ -94,6 +99,7 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconState) (state.BeaconState, error)
// Initialize field reference tracking for shared data.
b.sharedFieldReferences[randaoMixes] = stateutil.NewRef(1)
b.sharedFieldReferences[stateRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[blockRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[previousEpochAttestations] = stateutil.NewRef(1)
b.sharedFieldReferences[currentEpochAttestations] = stateutil.NewRef(1)
b.sharedFieldReferences[slashings] = stateutil.NewRef(1)
@@ -121,9 +127,9 @@ func (b *BeaconState) Copy() state.BeaconState {
slashings: b.slashings,
// Large arrays, infrequently changed, constant size.
blockRoots: b.blockRoots.Copy(),
blockRoots: b.blockRoots,
stateRoots: b.stateRoots,
randaoMixes: b.randaoMixes.Copy(),
randaoMixes: b.randaoMixes,
previousEpochAttestations: b.previousEpochAttestations,
currentEpochAttestations: b.currentEpochAttestations,
eth1DataVotes: b.eth1DataVotes,
@@ -205,9 +211,6 @@ func (b *BeaconState) Copy() state.BeaconState {
}
}
b.blockRoots.MinusRef()
b.randaoMixes.MinusRef()
for i := 0; i < fieldCount; i++ {
field := types.FieldIndex(i)
delete(b.stateFieldLeaves, field)

View File

@@ -76,8 +76,9 @@ func (b *BeaconState) BlockRootAtIndex(idx uint64) ([]byte, error) {
// input index value.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) blockRootAtIndex(idx uint64) ([32]byte, error) {
if b.blockRoots.TotalLength() <= idx {
if uint64(len(b.blockRoots)) <= idx {
return [32]byte{}, fmt.Errorf("index %d out of range", idx)
}
return b.blockRoots.RootAtIndex(idx), nil
return b.blockRoots[idx], nil
}

View File

@@ -37,10 +37,11 @@ func (b *BeaconState) RandaoMixAtIndex(idx uint64) ([]byte, error) {
// input index value.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) randaoMixAtIndex(idx uint64) ([32]byte, error) {
if b.randaoMixes.TotalLength() <= idx {
if uint64(len(b.randaoMixes)) <= idx {
return [32]byte{}, fmt.Errorf("index %d out of range", idx)
}
return b.randaoMixes.RootAtIndex(idx), nil
return b.randaoMixes[idx], nil
}
// RandaoMixesLength returns the length of the randao mixes slice.
@@ -62,5 +63,5 @@ func (b *BeaconState) randaoMixesLength() int {
return 0
}
return int(b.randaoMixes.TotalLength()) // lint:ignore uintcast- ajhdjhd
return len(b.randaoMixes)
}

View File

@@ -4,6 +4,7 @@ import (
"fmt"
customtypes "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/custom-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -24,12 +25,14 @@ func (b *BeaconState) SetBlockRoots(val [][]byte) error {
b.lock.Lock()
defer b.lock.Unlock()
b.sharedFieldReferences[blockRoots].MinusRef()
b.sharedFieldReferences[blockRoots] = stateutil.NewRef(1)
var rootsArr [fieldparams.BlockRootsLength][32]byte
for i := 0; i < len(rootsArr); i++ {
copy(rootsArr[i][:], val[i])
}
roots := customtypes.BlockRoots{}
roots.SetFromBaseField(rootsArr)
roots := customtypes.BlockRoots(rootsArr)
b.blockRoots = &roots
b.markFieldAsDirty(blockRoots)
b.rebuildTrie[blockRoots] = true
@@ -39,13 +42,24 @@ func (b *BeaconState) SetBlockRoots(val [][]byte) error {
// UpdateBlockRootAtIndex for the beacon state. Updates the block root
// at a specific index to a new value.
func (b *BeaconState) UpdateBlockRootAtIndex(idx uint64, blockRoot [32]byte) error {
if b.blockRoots.TotalLength() <= idx {
if uint64(len(b.blockRoots)) <= idx {
return fmt.Errorf("invalid index provided %d", idx)
}
b.lock.Lock()
defer b.lock.Unlock()
b.blockRoots.SetRootAtIndex(idx, blockRoot)
r := b.blockRoots
if ref := b.sharedFieldReferences[blockRoots]; ref.Refs() > 1 {
// Copy elements in underlying array by reference.
roots := *b.blockRoots
rootsCopy := roots
r = &rootsCopy
ref.MinusRef()
b.sharedFieldReferences[blockRoots] = stateutil.NewRef(1)
}
r[idx] = blockRoot
b.blockRoots = r
b.markFieldAsDirty(blockRoots)
b.addDirtyIndices(blockRoots, []uint64{idx})

View File

@@ -3,6 +3,7 @@ package v2
import (
"github.com/pkg/errors"
customtypes "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/custom-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
)
@@ -13,12 +14,14 @@ func (b *BeaconState) SetRandaoMixes(val [][]byte) error {
b.lock.Lock()
defer b.lock.Unlock()
b.sharedFieldReferences[randaoMixes].MinusRef()
b.sharedFieldReferences[randaoMixes] = stateutil.NewRef(1)
var mixesArr [fieldparams.RandaoMixesLength][32]byte
for i := 0; i < len(mixesArr); i++ {
copy(mixesArr[i][:], val[i])
}
mixes := customtypes.RandaoMixes{}
mixes.SetFromBaseField(mixesArr)
mixes := customtypes.RandaoMixes(mixesArr)
b.randaoMixes = &mixes
b.markFieldAsDirty(randaoMixes)
b.rebuildTrie[randaoMixes] = true
@@ -28,13 +31,24 @@ func (b *BeaconState) SetRandaoMixes(val [][]byte) error {
// UpdateRandaoMixesAtIndex for the beacon state. Updates the randao mixes
// at a specific index to a new value.
func (b *BeaconState) UpdateRandaoMixesAtIndex(idx uint64, val []byte) error {
if b.randaoMixes.TotalLength() <= idx {
if uint64(len(b.randaoMixes)) <= idx {
return errors.Errorf("invalid index provided %d", idx)
}
b.lock.Lock()
defer b.lock.Unlock()
b.randaoMixes.SetRootAtIndex(idx, bytesutil.ToBytes32(val))
mixes := b.randaoMixes
if refs := b.sharedFieldReferences[randaoMixes].Refs(); refs > 1 {
// Copy elements in underlying array by reference.
m := *b.randaoMixes
mCopy := m
mixes = &mCopy
b.sharedFieldReferences[randaoMixes].MinusRef()
b.sharedFieldReferences[randaoMixes] = stateutil.NewRef(1)
}
mixes[idx] = bytesutil.ToBytes32(val)
b.randaoMixes = mixes
b.markFieldAsDirty(randaoMixes)
b.addDirtyIndices(randaoMixes, []uint64{idx})

View File

@@ -35,8 +35,10 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconStateAltair) (*BeaconState, error
return nil, errors.New("received nil state")
}
bRoots := customtypes.SetFromSlice(st.BlockRoots)
var bRoots customtypes.BlockRoots
for i, r := range st.BlockRoots {
bRoots[i] = bytesutil.ToBytes32(r)
}
var sRoots customtypes.StateRoots
for i, r := range st.StateRoots {
sRoots[i] = bytesutil.ToBytes32(r)
@@ -45,7 +47,10 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconStateAltair) (*BeaconState, error
for i, r := range st.HistoricalRoots {
hRoots[i] = bytesutil.ToBytes32(r)
}
mixes := customtypes.SetFromSliceRandao(st.RandaoMixes)
var mixes customtypes.RandaoMixes
for i, m := range st.RandaoMixes {
mixes[i] = bytesutil.ToBytes32(m)
}
fieldCount := params.BeaconConfig().BeaconStateAltairFieldCount
b := &BeaconState{
@@ -54,7 +59,7 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconStateAltair) (*BeaconState, error
slot: st.Slot,
fork: st.Fork,
latestBlockHeader: st.LatestBlockHeader,
blockRoots: bRoots,
blockRoots: &bRoots,
stateRoots: &sRoots,
historicalRoots: hRoots,
eth1Data: st.Eth1Data,
@@ -62,7 +67,7 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconStateAltair) (*BeaconState, error
eth1DepositIndex: st.Eth1DepositIndex,
validators: st.Validators,
balances: st.Balances,
randaoMixes: mixes,
randaoMixes: &mixes,
slashings: st.Slashings,
previousEpochParticipation: st.PreviousEpochParticipation,
currentEpochParticipation: st.CurrentEpochParticipation,
@@ -96,6 +101,7 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconStateAltair) (*BeaconState, error
// Initialize field reference tracking for shared data.
b.sharedFieldReferences[randaoMixes] = stateutil.NewRef(1)
b.sharedFieldReferences[stateRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[blockRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[previousEpochParticipationBits] = stateutil.NewRef(1) // New in Altair.
b.sharedFieldReferences[currentEpochParticipationBits] = stateutil.NewRef(1) // New in Altair.
b.sharedFieldReferences[slashings] = stateutil.NewRef(1)
@@ -122,9 +128,9 @@ func (b *BeaconState) Copy() state.BeaconState {
eth1DepositIndex: b.eth1DepositIndex,
// Large arrays, infrequently changed, constant size.
blockRoots: b.blockRoots.Copy(),
blockRoots: b.blockRoots,
stateRoots: b.stateRoots,
randaoMixes: b.randaoMixes.Copy(),
randaoMixes: b.randaoMixes,
slashings: b.slashings,
eth1DataVotes: b.eth1DataVotes,
@@ -209,8 +215,6 @@ func (b *BeaconState) Copy() state.BeaconState {
b.stateFieldLeaves[field].FieldReference().MinusRef()
}
}
b.blockRoots.DecreaseRef()
b.randaoMixes.DecreaseRef()
for i := 0; i < fieldCount; i++ {
field := types.FieldIndex(i)
delete(b.stateFieldLeaves, field)

View File

@@ -76,8 +76,9 @@ func (b *BeaconState) BlockRootAtIndex(idx uint64) ([]byte, error) {
// input index value.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) blockRootAtIndex(idx uint64) ([32]byte, error) {
if b.blockRoots.TotalLength() <= idx {
if uint64(len(b.blockRoots)) <= idx {
return [32]byte{}, fmt.Errorf("index %d out of range", idx)
}
return b.blockRoots.RootAtIndex(idx), nil
return b.blockRoots[idx], nil
}

View File

@@ -37,10 +37,11 @@ func (b *BeaconState) RandaoMixAtIndex(idx uint64) ([]byte, error) {
// input index value.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) randaoMixAtIndex(idx uint64) ([32]byte, error) {
if b.randaoMixes.TotalLength() <= idx {
if uint64(len(b.randaoMixes)) <= idx {
return [32]byte{}, fmt.Errorf("index %d out of range", idx)
}
return b.randaoMixes.RootAtIndex(idx), nil
return b.randaoMixes[idx], nil
}
// RandaoMixesLength returns the length of the randao mixes slice.
@@ -62,5 +63,5 @@ func (b *BeaconState) randaoMixesLength() int {
return 0
}
return int(b.randaoMixes.TotalLength()) // lint:ignore uintcast- ajhdjhd
return len(b.randaoMixes)
}

View File

@@ -4,6 +4,7 @@ import (
"fmt"
customtypes "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/custom-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -24,12 +25,14 @@ func (b *BeaconState) SetBlockRoots(val [][]byte) error {
b.lock.Lock()
defer b.lock.Unlock()
b.sharedFieldReferences[blockRoots].MinusRef()
b.sharedFieldReferences[blockRoots] = stateutil.NewRef(1)
var rootsArr [fieldparams.BlockRootsLength][fieldparams.RootLength]byte
for i := 0; i < len(rootsArr); i++ {
copy(rootsArr[i][:], val[i])
}
roots := customtypes.BlockRoots{}
roots.SetFromBaseField(rootsArr)
roots := customtypes.BlockRoots(rootsArr)
b.blockRoots = &roots
b.markFieldAsDirty(blockRoots)
b.rebuildTrie[blockRoots] = true
@@ -39,13 +42,24 @@ func (b *BeaconState) SetBlockRoots(val [][]byte) error {
// UpdateBlockRootAtIndex for the beacon state. Updates the block root
// at a specific index to a new value.
func (b *BeaconState) UpdateBlockRootAtIndex(idx uint64, blockRoot [32]byte) error {
if b.blockRoots.TotalLength() <= idx {
if uint64(len(b.blockRoots)) <= idx {
return fmt.Errorf("invalid index provided %d", idx)
}
b.lock.Lock()
defer b.lock.Unlock()
b.blockRoots.SetRootAtIndex(idx, blockRoot)
r := b.blockRoots
if ref := b.sharedFieldReferences[blockRoots]; ref.Refs() > 1 {
// Copy elements in underlying array by reference.
roots := *b.blockRoots
rootsCopy := roots
r = &rootsCopy
ref.MinusRef()
b.sharedFieldReferences[blockRoots] = stateutil.NewRef(1)
}
r[idx] = blockRoot
b.blockRoots = r
b.markFieldAsDirty(blockRoots)
b.addDirtyIndices(blockRoots, []uint64{idx})

View File

@@ -3,6 +3,7 @@ package v3
import (
"github.com/pkg/errors"
customtypes "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/custom-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
)
@@ -13,12 +14,14 @@ func (b *BeaconState) SetRandaoMixes(val [][]byte) error {
b.lock.Lock()
defer b.lock.Unlock()
var mixesArr [fieldparams.RandaoMixesLength][32]byte
b.sharedFieldReferences[randaoMixes].MinusRef()
b.sharedFieldReferences[randaoMixes] = stateutil.NewRef(1)
var mixesArr [fieldparams.RandaoMixesLength][fieldparams.RootLength]byte
for i := 0; i < len(mixesArr); i++ {
copy(mixesArr[i][:], val[i])
}
mixes := customtypes.RandaoMixes{}
mixes.SetFromBaseField(mixesArr)
mixes := customtypes.RandaoMixes(mixesArr)
b.randaoMixes = &mixes
b.markFieldAsDirty(randaoMixes)
b.rebuildTrie[randaoMixes] = true
@@ -28,13 +31,24 @@ func (b *BeaconState) SetRandaoMixes(val [][]byte) error {
// UpdateRandaoMixesAtIndex for the beacon state. Updates the randao mixes
// at a specific index to a new value.
func (b *BeaconState) UpdateRandaoMixesAtIndex(idx uint64, val []byte) error {
if b.randaoMixes.TotalLength() <= idx {
if uint64(len(b.randaoMixes)) <= idx {
return errors.Errorf("invalid index provided %d", idx)
}
b.lock.Lock()
defer b.lock.Unlock()
b.randaoMixes.SetRootAtIndex(idx, bytesutil.ToBytes32(val))
mixes := b.randaoMixes
if refs := b.sharedFieldReferences[randaoMixes].Refs(); refs > 1 {
// Copy elements in underlying array by reference.
m := *b.randaoMixes
mCopy := m
mixes = &mCopy
b.sharedFieldReferences[randaoMixes].MinusRef()
b.sharedFieldReferences[randaoMixes] = stateutil.NewRef(1)
}
mixes[idx] = bytesutil.ToBytes32(val)
b.randaoMixes = mixes
b.markFieldAsDirty(randaoMixes)
b.addDirtyIndices(randaoMixes, []uint64{idx})

View File

@@ -36,8 +36,10 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconStateBellatrix) (state.BeaconStat
return nil, errors.New("received nil state")
}
bRoots := customtypes.SetFromSlice(st.BlockRoots)
var bRoots customtypes.BlockRoots
for i, r := range st.BlockRoots {
bRoots[i] = bytesutil.ToBytes32(r)
}
var sRoots customtypes.StateRoots
for i, r := range st.StateRoots {
sRoots[i] = bytesutil.ToBytes32(r)
@@ -46,7 +48,10 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconStateBellatrix) (state.BeaconStat
for i, r := range st.HistoricalRoots {
hRoots[i] = bytesutil.ToBytes32(r)
}
mixes := customtypes.SetFromSliceRandao(st.RandaoMixes)
var mixes customtypes.RandaoMixes
for i, m := range st.RandaoMixes {
mixes[i] = bytesutil.ToBytes32(m)
}
fieldCount := params.BeaconConfig().BeaconStateBellatrixFieldCount
b := &BeaconState{
@@ -55,7 +60,7 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconStateBellatrix) (state.BeaconStat
slot: st.Slot,
fork: st.Fork,
latestBlockHeader: st.LatestBlockHeader,
blockRoots: bRoots,
blockRoots: &bRoots,
stateRoots: &sRoots,
historicalRoots: hRoots,
eth1Data: st.Eth1Data,
@@ -63,7 +68,7 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconStateBellatrix) (state.BeaconStat
eth1DepositIndex: st.Eth1DepositIndex,
validators: st.Validators,
balances: st.Balances,
randaoMixes: mixes,
randaoMixes: &mixes,
slashings: st.Slashings,
previousEpochParticipation: st.PreviousEpochParticipation,
currentEpochParticipation: st.CurrentEpochParticipation,
@@ -96,7 +101,9 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconStateBellatrix) (state.BeaconStat
}
// Initialize field reference tracking for shared data.
b.sharedFieldReferences[randaoMixes] = stateutil.NewRef(1)
b.sharedFieldReferences[stateRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[blockRoots] = stateutil.NewRef(1)
b.sharedFieldReferences[previousEpochParticipationBits] = stateutil.NewRef(1) // New in Altair.
b.sharedFieldReferences[currentEpochParticipationBits] = stateutil.NewRef(1) // New in Altair.
b.sharedFieldReferences[slashings] = stateutil.NewRef(1)
@@ -123,9 +130,9 @@ func (b *BeaconState) Copy() state.BeaconState {
eth1DepositIndex: b.eth1DepositIndex,
// Large arrays, infrequently changed, constant size.
randaoMixes: b.randaoMixes.Copy(),
randaoMixes: b.randaoMixes,
stateRoots: b.stateRoots,
blockRoots: b.blockRoots.Copy(),
blockRoots: b.blockRoots,
slashings: b.slashings,
eth1DataVotes: b.eth1DataVotes,
@@ -201,7 +208,6 @@ func (b *BeaconState) Copy() state.BeaconState {
}
}
}
state.StateCount.Inc()
// Finalizer runs when dst is being destroyed in garbage collection.
runtime.SetFinalizer(dst, func(b *BeaconState) {
@@ -211,9 +217,6 @@ func (b *BeaconState) Copy() state.BeaconState {
b.stateFieldLeaves[field].FieldReference().MinusRef()
}
}
b.blockRoots.DecreaseRef()
b.randaoMixes.DecreaseRef()
for i := 0; i < fieldCount; i++ {
field := types.FieldIndex(i)
delete(b.stateFieldLeaves, field)

View File

@@ -49,29 +49,30 @@ type Service struct {
synced *abool.AtomicBool
chainStarted *abool.AtomicBool
counter *ratecounter.RateCounter
genesisChan chan time.Time
}
// NewService configures the initial sync service responsible for bringing the node up to the
// latest head of the blockchain.
func NewService(ctx context.Context, cfg *Config) *Service {
ctx, cancel := context.WithCancel(ctx)
return &Service{
s := &Service{
cfg: cfg,
ctx: ctx,
cancel: cancel,
synced: abool.New(),
chainStarted: abool.New(),
counter: ratecounter.NewRateCounter(counterSeconds * time.Second),
genesisChan: make(chan time.Time),
}
go s.waitForStateInitialization()
return s
}
// Start the initial sync service.
func (s *Service) Start() {
genesis, err := s.waitForStateInitialization()
if err != nil {
log.WithError(err).Fatal("Failed to wait for state initialization.")
return
}
// Wait for state initialized event.
genesis := <-s.genesisChan
if genesis.IsZero() {
log.Debug("Exiting Initial Sync Service")
return
@@ -179,10 +180,9 @@ func (s *Service) waitForMinimumPeers() {
}
}
// TODO: Return error
// waitForStateInitialization makes sure that beacon node is ready to be accessed: it is either
// already properly configured or system waits up until state initialized event is triggered.
func (s *Service) waitForStateInitialization() (time.Time, error) {
func (s *Service) waitForStateInitialization() {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
@@ -198,14 +198,19 @@ func (s *Service) waitForStateInitialization() (time.Time, error) {
continue
}
log.WithField("starttime", data.StartTime).Debug("Received state initialized event")
return data.StartTime, nil
s.genesisChan <- data.StartTime
return
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
// Send a zero time in the event we are exiting.
return time.Time{}, errors.New("context closed, exiting goroutine")
s.genesisChan <- time.Time{}
return
case err := <-stateSub.Err():
log.WithError(err).Error("Subscription to state notifier failed")
// Send a zero time in the event we are exiting.
return time.Time{}, errors.Wrap(err, "subscription to state notifier failed")
s.genesisChan <- time.Time{}
return
}
}
}

View File

@@ -168,7 +168,11 @@ func TestService_InitStartStop(t *testing.T) {
Chain: mc,
StateNotifier: notifier,
})
time.Sleep(500 * time.Millisecond)
assert.NotNil(t, s)
if tt.methodRuns != nil {
tt.methodRuns(notifier.StateFeed())
}
wg := &sync.WaitGroup{}
wg.Add(1)
@@ -177,11 +181,6 @@ func TestService_InitStartStop(t *testing.T) {
wg.Done()
}()
time.Sleep(500 * time.Millisecond)
if tt.methodRuns != nil {
tt.methodRuns(notifier.StateFeed())
}
go func() {
// Allow to exit from test (on no head loop waiting for head is started).
// In most tests, this is redundant, as Start() already exited.
@@ -208,6 +207,7 @@ func TestService_waitForStateInitialization(t *testing.T) {
synced: abool.New(),
chainStarted: abool.New(),
counter: ratecounter.NewRateCounter(counterSeconds * time.Second),
genesisChan: make(chan time.Time),
}
return s
}
@@ -221,8 +221,9 @@ func TestService_waitForStateInitialization(t *testing.T) {
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
_, err := s.waitForStateInitialization()
assert.ErrorContains(t, "context closed", err)
go s.waitForStateInitialization()
currTime := <-s.genesisChan
assert.Equal(t, true, currTime.IsZero())
wg.Done()
}()
go func() {
@@ -235,6 +236,8 @@ func TestService_waitForStateInitialization(t *testing.T) {
t.Fatalf("Test should have exited by now, timed out")
}
assert.LogsContain(t, hook, "Waiting for state to be initialized")
assert.LogsContain(t, hook, "Context closed, exiting goroutine")
assert.LogsDoNotContain(t, hook, "Subscription to state notifier failed")
})
t.Run("no state and state init event received", func(t *testing.T) {
@@ -248,9 +251,8 @@ func TestService_waitForStateInitialization(t *testing.T) {
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
var err error
receivedGenesisTime, err = s.waitForStateInitialization()
require.NoError(t, err)
go s.waitForStateInitialization()
receivedGenesisTime = <-s.genesisChan
assert.Equal(t, false, receivedGenesisTime.IsZero())
wg.Done()
}()
@@ -279,6 +281,7 @@ func TestService_waitForStateInitialization(t *testing.T) {
assert.LogsContain(t, hook, "Event feed data is not type *statefeed.InitializedData")
assert.LogsContain(t, hook, "Waiting for state to be initialized")
assert.LogsContain(t, hook, "Received state initialized event")
assert.LogsDoNotContain(t, hook, "Context closed, exiting goroutine")
})
t.Run("no state and state init event received and service start", func(t *testing.T) {
@@ -293,8 +296,7 @@ func TestService_waitForStateInitialization(t *testing.T) {
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
_, err := s.waitForStateInitialization()
require.NoError(t, err)
s.waitForStateInitialization()
wg.Done()
}()
@@ -319,6 +321,7 @@ func TestService_waitForStateInitialization(t *testing.T) {
}
assert.LogsContain(t, hook, "Waiting for state to be initialized")
assert.LogsContain(t, hook, "Received state initialized event")
assert.LogsDoNotContain(t, hook, "Context closed, exiting goroutine")
})
}

View File

@@ -154,7 +154,6 @@ func NewService(ctx context.Context, opts ...Option) *Service {
}
r.subHandler = newSubTopicHandler()
r.rateLimiter = newRateLimiter(r.cfg.p2p)
r.initCaches()
go r.registerHandlers()
go r.verifierRoutine()
@@ -164,6 +163,8 @@ func NewService(ctx context.Context, opts ...Option) *Service {
// Start the regular sync service.
func (s *Service) Start() {
s.initCaches()
s.cfg.p2p.AddConnectionHandler(s.reValidatePeer, s.sendGoodbye)
s.cfg.p2p.AddDisconnectionHandler(func(_ context.Context, _ peer.ID) error {
// no-op

View File

@@ -28,6 +28,7 @@ var Commands = &cli.Command{
flags.WalletPasswordFileFlag,
flags.DeletePublicKeysFlag,
features.Mainnet,
features.PyrmontTestnet,
features.PraterTestnet,
cmd.AcceptTosFlag,
}),
@@ -61,6 +62,7 @@ var Commands = &cli.Command{
flags.GrpcRetriesFlag,
flags.GrpcRetryDelayFlag,
features.Mainnet,
features.PyrmontTestnet,
features.PraterTestnet,
cmd.AcceptTosFlag,
}),
@@ -91,6 +93,7 @@ var Commands = &cli.Command{
flags.BackupPublicKeysFlag,
flags.BackupPasswordFile,
features.Mainnet,
features.PyrmontTestnet,
features.PraterTestnet,
cmd.AcceptTosFlag,
}),
@@ -118,6 +121,7 @@ var Commands = &cli.Command{
flags.AccountPasswordFileFlag,
flags.ImportPrivateKeyFileFlag,
features.Mainnet,
features.PyrmontTestnet,
features.PraterTestnet,
cmd.AcceptTosFlag,
}),
@@ -151,6 +155,7 @@ var Commands = &cli.Command{
flags.GrpcRetryDelayFlag,
flags.ExitAllFlag,
features.Mainnet,
features.PyrmontTestnet,
features.PraterTestnet,
cmd.AcceptTosFlag,
}),

View File

@@ -22,6 +22,7 @@ var Commands = &cli.Command{
cmd.DataDirFlag,
flags.SlashingProtectionExportDirFlag,
features.Mainnet,
features.PyrmontTestnet,
features.PraterTestnet,
cmd.AcceptTosFlag,
}),
@@ -46,6 +47,7 @@ var Commands = &cli.Command{
cmd.DataDirFlag,
flags.SlashingProtectionJSONFileFlag,
features.Mainnet,
features.PyrmontTestnet,
features.PraterTestnet,
cmd.AcceptTosFlag,
}),

View File

@@ -34,6 +34,7 @@ var Commands = &cli.Command{
flags.Mnemonic25thWordFileFlag,
flags.SkipMnemonic25thWordCheckFlag,
features.Mainnet,
features.PyrmontTestnet,
features.PraterTestnet,
cmd.AcceptTosFlag,
}),
@@ -63,6 +64,7 @@ var Commands = &cli.Command{
flags.RemoteSignerKeyPathFlag,
flags.RemoteSignerCACertPathFlag,
features.Mainnet,
features.PyrmontTestnet,
features.PraterTestnet,
cmd.AcceptTosFlag,
}),
@@ -91,6 +93,7 @@ var Commands = &cli.Command{
flags.Mnemonic25thWordFileFlag,
flags.SkipMnemonic25thWordCheckFlag,
features.Mainnet,
features.PyrmontTestnet,
features.PraterTestnet,
cmd.AcceptTosFlag,
}),

View File

@@ -35,6 +35,9 @@ const disabledFeatureFlag = "Disabled feature flag"
// Flags is a struct to represent which features the client will perform on runtime.
type Flags struct {
// Testnet Flags.
PyrmontTestnet bool // PyrmontTestnet defines the flag through which we can enable the node to run on the Pyrmont testnet.
// Feature related flags.
RemoteSlasherProtection bool // RemoteSlasherProtection utilizes a beacon node with --slasher mode for validator slashing protection.
WriteSSZStateTransitions bool // WriteSSZStateTransitions to tmp directory.
@@ -118,8 +121,13 @@ func InitWithReset(c *Flags) func() {
}
// configureTestnet sets the config according to specified testnet flag
func configureTestnet(ctx *cli.Context) {
if ctx.Bool(PraterTestnet.Name) {
func configureTestnet(ctx *cli.Context, cfg *Flags) {
if ctx.Bool(PyrmontTestnet.Name) {
log.Warn("Running on Pyrmont Testnet")
params.UsePyrmontConfig()
params.UsePyrmontNetworkConfig()
cfg.PyrmontTestnet = true
} else if ctx.Bool(PraterTestnet.Name) {
log.Warn("Running on the Prater Testnet")
params.UsePraterConfig()
params.UsePraterNetworkConfig()
@@ -137,7 +145,7 @@ func ConfigureBeaconChain(ctx *cli.Context) {
if ctx.Bool(devModeFlag.Name) {
enableDevModeFlags(ctx)
}
configureTestnet(ctx)
configureTestnet(ctx, cfg)
if ctx.Bool(writeSSZStateTransitionsFlag.Name) {
logEnabled(writeSSZStateTransitionsFlag)
@@ -232,7 +240,7 @@ func ConfigureBeaconChain(ctx *cli.Context) {
func ConfigureValidator(ctx *cli.Context) {
complainOnDeprecatedFlags(ctx)
cfg := &Flags{}
configureTestnet(ctx)
configureTestnet(ctx, cfg)
if ctx.Bool(enableExternalSlasherProtectionFlag.Name) {
log.Fatal(
"Remote slashing protection has currently been disabled in Prysm due to safety concerns. " +

View File

@@ -11,41 +11,41 @@ import (
func TestInitFeatureConfig(t *testing.T) {
defer Init(&Flags{})
cfg := &Flags{
EnablePeerScorer: true,
PyrmontTestnet: true,
}
Init(cfg)
c := Get()
assert.Equal(t, true, c.EnablePeerScorer)
assert.Equal(t, true, c.PyrmontTestnet)
// Reset back to false for the follow up tests.
cfg = &Flags{RemoteSlasherProtection: false}
cfg = &Flags{PyrmontTestnet: false}
Init(cfg)
}
func TestInitWithReset(t *testing.T) {
defer Init(&Flags{})
Init(&Flags{
EnablePeerScorer: true,
PyrmontTestnet: true,
})
assert.Equal(t, true, Get().EnablePeerScorer)
assert.Equal(t, true, Get().PyrmontTestnet)
// Overwrite previously set value (value that didn't come by default).
resetCfg := InitWithReset(&Flags{
EnablePeerScorer: false,
PyrmontTestnet: false,
})
assert.Equal(t, false, Get().EnablePeerScorer)
assert.Equal(t, false, Get().PyrmontTestnet)
// Reset must get to previously set configuration (not to default config values).
resetCfg()
assert.Equal(t, true, Get().EnablePeerScorer)
assert.Equal(t, true, Get().PyrmontTestnet)
}
func TestConfigureBeaconConfig(t *testing.T) {
app := cli.App{}
set := flag.NewFlagSet("test", 0)
set.Bool(enablePeerScorer.Name, true, "test")
set.Bool(PyrmontTestnet.Name, true, "test")
context := cli.NewContext(&app, set, nil)
ConfigureBeaconChain(context)
c := Get()
assert.Equal(t, true, c.EnablePeerScorer)
assert.Equal(t, true, c.PyrmontTestnet)
}

View File

@@ -70,11 +70,6 @@ var (
Usage: deprecatedUsage,
Hidden: true,
}
deprecatedPyrmontTestnet = &cli.BoolFlag{
Name: "pyrmont",
Usage: deprecatedUsage,
Hidden: true,
}
)
var deprecatedFlags = []cli.Flag{
@@ -89,5 +84,4 @@ var deprecatedFlags = []cli.Flag{
deprecatedDisableNextSlotStateCache,
deprecatedAttestationAggregationStrategy,
deprecatedForceOptMaxCoverAggregationStategy,
deprecatedPyrmontTestnet,
}

View File

@@ -7,6 +7,11 @@ import (
)
var (
// PyrmontTestnet flag for the multiclient Ethereum consensus testnet.
PyrmontTestnet = &cli.BoolFlag{
Name: "pyrmont",
Usage: "This defines the flag through which we can run on the Pyrmont Multiclient Testnet",
}
// PraterTestnet flag for the multiclient Ethereum consensus testnet.
PraterTestnet = &cli.BoolFlag{
Name: "prater",
@@ -144,7 +149,6 @@ var devModeFlags = []cli.Flag{
enablePeerScorer,
enableVecHTR,
enableForkChoiceDoublyLinkedTree,
enableNativeState,
}
// ValidatorFlags contains a list of all the feature flags that apply to the validator client.
@@ -152,6 +156,7 @@ var ValidatorFlags = append(deprecatedFlags, []cli.Flag{
writeWalletPasswordOnWebOnboarding,
enableExternalSlasherProtectionFlag,
disableAttestingHistoryDBCache,
PyrmontTestnet,
PraterTestnet,
Mainnet,
dynamicKeyReloadDebounceInterval,
@@ -170,6 +175,7 @@ var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{
devModeFlag,
writeSSZStateTransitionsFlag,
disableGRPCConnectionLogging,
PyrmontTestnet,
PraterTestnet,
Mainnet,
enablePeerScorer,

View File

@@ -13,6 +13,7 @@ go_library(
"network_config.go",
"testnet_e2e_config.go",
"testnet_prater_config.go",
"testnet_pyrmont_config.go",
"testutils.go",
"values.go",
],
@@ -47,7 +48,6 @@ go_test(
"@consensus_spec_tests_mainnet//:test_data",
"@consensus_spec_tests_minimal//:test_data",
"@eth2_networks//:configs",
"testdata/e2e_config.yaml",
],
gotags = ["develop"],
race = "on",
@@ -61,9 +61,3 @@ go_test(
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
],
)
filegroup(
name = "custom_configs",
srcs = glob(["testdata/*.yaml"]),
visibility = ["//testing:__subpackages__"],
)

View File

@@ -18,94 +18,94 @@ import (
var placeholderFields = []string{"UPDATE_TIMEOUT", "INTERVALS_PER_SLOT"}
func TestLoadConfigFile(t *testing.T) {
func TestLoadConfigFileMainnet(t *testing.T) {
// See https://media.githubusercontent.com/media/ethereum/consensus-spec-tests/master/tests/minimal/config/phase0.yaml
assertVals := func(name string, fields []string, expected, actual *params.BeaconChainConfig) {
assertVals := func(name string, fields []string, c1, c2 *params.BeaconChainConfig) {
// Misc params.
assert.Equal(t, expected.MaxCommitteesPerSlot, actual.MaxCommitteesPerSlot, "%s: MaxCommitteesPerSlot", name)
assert.Equal(t, expected.TargetCommitteeSize, actual.TargetCommitteeSize, "%s: TargetCommitteeSize", name)
assert.Equal(t, expected.MaxValidatorsPerCommittee, actual.MaxValidatorsPerCommittee, "%s: MaxValidatorsPerCommittee", name)
assert.Equal(t, expected.MinPerEpochChurnLimit, actual.MinPerEpochChurnLimit, "%s: MinPerEpochChurnLimit", name)
assert.Equal(t, expected.ChurnLimitQuotient, actual.ChurnLimitQuotient, "%s: ChurnLimitQuotient", name)
assert.Equal(t, expected.ShuffleRoundCount, actual.ShuffleRoundCount, "%s: ShuffleRoundCount", name)
assert.Equal(t, expected.MinGenesisActiveValidatorCount, actual.MinGenesisActiveValidatorCount, "%s: MinGenesisActiveValidatorCount", name)
assert.Equal(t, expected.MinGenesisTime, actual.MinGenesisTime, "%s: MinGenesisTime", name)
assert.Equal(t, expected.HysteresisQuotient, actual.HysteresisQuotient, "%s: HysteresisQuotient", name)
assert.Equal(t, expected.HysteresisDownwardMultiplier, actual.HysteresisDownwardMultiplier, "%s: HysteresisDownwardMultiplier", name)
assert.Equal(t, expected.HysteresisUpwardMultiplier, actual.HysteresisUpwardMultiplier, "%s: HysteresisUpwardMultiplier", name)
assert.Equal(t, c1.MaxCommitteesPerSlot, c2.MaxCommitteesPerSlot, "%s: MaxCommitteesPerSlot", name)
assert.Equal(t, c1.TargetCommitteeSize, c2.TargetCommitteeSize, "%s: TargetCommitteeSize", name)
assert.Equal(t, c1.MaxValidatorsPerCommittee, c2.MaxValidatorsPerCommittee, "%s: MaxValidatorsPerCommittee", name)
assert.Equal(t, c1.MinPerEpochChurnLimit, c2.MinPerEpochChurnLimit, "%s: MinPerEpochChurnLimit", name)
assert.Equal(t, c1.ChurnLimitQuotient, c2.ChurnLimitQuotient, "%s: ChurnLimitQuotient", name)
assert.Equal(t, c1.ShuffleRoundCount, c2.ShuffleRoundCount, "%s: ShuffleRoundCount", name)
assert.Equal(t, c1.MinGenesisActiveValidatorCount, c2.MinGenesisActiveValidatorCount, "%s: MinGenesisActiveValidatorCount", name)
assert.Equal(t, c1.MinGenesisTime, c2.MinGenesisTime, "%s: MinGenesisTime", name)
assert.Equal(t, c1.HysteresisQuotient, c2.HysteresisQuotient, "%s: HysteresisQuotient", name)
assert.Equal(t, c1.HysteresisDownwardMultiplier, c2.HysteresisDownwardMultiplier, "%s: HysteresisDownwardMultiplier", name)
assert.Equal(t, c1.HysteresisUpwardMultiplier, c2.HysteresisUpwardMultiplier, "%s: HysteresisUpwardMultiplier", name)
// Fork Choice params.
assert.Equal(t, expected.SafeSlotsToUpdateJustified, actual.SafeSlotsToUpdateJustified, "%s: SafeSlotsToUpdateJustified", name)
assert.Equal(t, c1.SafeSlotsToUpdateJustified, c2.SafeSlotsToUpdateJustified, "%s: SafeSlotsToUpdateJustified", name)
// Validator params.
assert.Equal(t, expected.Eth1FollowDistance, actual.Eth1FollowDistance, "%s: Eth1FollowDistance", name)
assert.Equal(t, expected.TargetAggregatorsPerCommittee, actual.TargetAggregatorsPerCommittee, "%s: TargetAggregatorsPerCommittee", name)
assert.Equal(t, expected.RandomSubnetsPerValidator, actual.RandomSubnetsPerValidator, "%s: RandomSubnetsPerValidator", name)
assert.Equal(t, expected.EpochsPerRandomSubnetSubscription, actual.EpochsPerRandomSubnetSubscription, "%s: EpochsPerRandomSubnetSubscription", name)
assert.Equal(t, expected.SecondsPerETH1Block, actual.SecondsPerETH1Block, "%s: SecondsPerETH1Block", name)
assert.Equal(t, c1.Eth1FollowDistance, c2.Eth1FollowDistance, "%s: Eth1FollowDistance", name)
assert.Equal(t, c1.TargetAggregatorsPerCommittee, c2.TargetAggregatorsPerCommittee, "%s: TargetAggregatorsPerCommittee", name)
assert.Equal(t, c1.RandomSubnetsPerValidator, c2.RandomSubnetsPerValidator, "%s: RandomSubnetsPerValidator", name)
assert.Equal(t, c1.EpochsPerRandomSubnetSubscription, c2.EpochsPerRandomSubnetSubscription, "%s: EpochsPerRandomSubnetSubscription", name)
assert.Equal(t, c1.SecondsPerETH1Block, c2.SecondsPerETH1Block, "%s: SecondsPerETH1Block", name)
// Deposit contract.
assert.Equal(t, expected.DepositChainID, actual.DepositChainID, "%s: DepositChainID", name)
assert.Equal(t, expected.DepositNetworkID, actual.DepositNetworkID, "%s: DepositNetworkID", name)
assert.Equal(t, expected.DepositContractAddress, actual.DepositContractAddress, "%s: DepositContractAddress", name)
assert.Equal(t, c1.DepositChainID, c2.DepositChainID, "%s: DepositChainID", name)
assert.Equal(t, c1.DepositNetworkID, c2.DepositNetworkID, "%s: DepositNetworkID", name)
assert.Equal(t, c1.DepositContractAddress, c2.DepositContractAddress, "%s: DepositContractAddress", name)
// Gwei values.
assert.Equal(t, expected.MinDepositAmount, actual.MinDepositAmount, "%s: MinDepositAmount", name)
assert.Equal(t, expected.MaxEffectiveBalance, actual.MaxEffectiveBalance, "%s: MaxEffectiveBalance", name)
assert.Equal(t, expected.EjectionBalance, actual.EjectionBalance, "%s: EjectionBalance", name)
assert.Equal(t, expected.EffectiveBalanceIncrement, actual.EffectiveBalanceIncrement, "%s: EffectiveBalanceIncrement", name)
assert.Equal(t, c1.MinDepositAmount, c2.MinDepositAmount, "%s: MinDepositAmount", name)
assert.Equal(t, c1.MaxEffectiveBalance, c2.MaxEffectiveBalance, "%s: MaxEffectiveBalance", name)
assert.Equal(t, c1.EjectionBalance, c2.EjectionBalance, "%s: EjectionBalance", name)
assert.Equal(t, c1.EffectiveBalanceIncrement, c2.EffectiveBalanceIncrement, "%s: EffectiveBalanceIncrement", name)
// Initial values.
assert.DeepEqual(t, expected.GenesisForkVersion, actual.GenesisForkVersion, "%s: GenesisForkVersion", name)
assert.DeepEqual(t, expected.BLSWithdrawalPrefixByte, actual.BLSWithdrawalPrefixByte, "%s: BLSWithdrawalPrefixByte", name)
assert.DeepEqual(t, c1.GenesisForkVersion, c2.GenesisForkVersion, "%s: GenesisForkVersion", name)
assert.DeepEqual(t, c1.BLSWithdrawalPrefixByte, c2.BLSWithdrawalPrefixByte, "%s: BLSWithdrawalPrefixByte", name)
// Time parameters.
assert.Equal(t, expected.GenesisDelay, actual.GenesisDelay, "%s: GenesisDelay", name)
assert.Equal(t, expected.SecondsPerSlot, actual.SecondsPerSlot, "%s: SecondsPerSlot", name)
assert.Equal(t, expected.MinAttestationInclusionDelay, actual.MinAttestationInclusionDelay, "%s: MinAttestationInclusionDelay", name)
assert.Equal(t, expected.SlotsPerEpoch, actual.SlotsPerEpoch, "%s: SlotsPerEpoch", name)
assert.Equal(t, expected.MinSeedLookahead, actual.MinSeedLookahead, "%s: MinSeedLookahead", name)
assert.Equal(t, expected.MaxSeedLookahead, actual.MaxSeedLookahead, "%s: MaxSeedLookahead", name)
assert.Equal(t, expected.EpochsPerEth1VotingPeriod, actual.EpochsPerEth1VotingPeriod, "%s: EpochsPerEth1VotingPeriod", name)
assert.Equal(t, expected.SlotsPerHistoricalRoot, actual.SlotsPerHistoricalRoot, "%s: SlotsPerHistoricalRoot", name)
assert.Equal(t, expected.MinValidatorWithdrawabilityDelay, actual.MinValidatorWithdrawabilityDelay, "%s: MinValidatorWithdrawabilityDelay", name)
assert.Equal(t, expected.ShardCommitteePeriod, actual.ShardCommitteePeriod, "%s: ShardCommitteePeriod", name)
assert.Equal(t, expected.MinEpochsToInactivityPenalty, actual.MinEpochsToInactivityPenalty, "%s: MinEpochsToInactivityPenalty", name)
assert.Equal(t, c1.GenesisDelay, c2.GenesisDelay, "%s: GenesisDelay", name)
assert.Equal(t, c1.SecondsPerSlot, c2.SecondsPerSlot, "%s: SecondsPerSlot", name)
assert.Equal(t, c1.MinAttestationInclusionDelay, c2.MinAttestationInclusionDelay, "%s: MinAttestationInclusionDelay", name)
assert.Equal(t, c1.SlotsPerEpoch, c2.SlotsPerEpoch, "%s: SlotsPerEpoch", name)
assert.Equal(t, c1.MinSeedLookahead, c2.MinSeedLookahead, "%s: MinSeedLookahead", name)
assert.Equal(t, c1.MaxSeedLookahead, c2.MaxSeedLookahead, "%s: MaxSeedLookahead", name)
assert.Equal(t, c1.EpochsPerEth1VotingPeriod, c2.EpochsPerEth1VotingPeriod, "%s: EpochsPerEth1VotingPeriod", name)
assert.Equal(t, c1.SlotsPerHistoricalRoot, c2.SlotsPerHistoricalRoot, "%s: SlotsPerHistoricalRoot", name)
assert.Equal(t, c1.MinValidatorWithdrawabilityDelay, c2.MinValidatorWithdrawabilityDelay, "%s: MinValidatorWithdrawabilityDelay", name)
assert.Equal(t, c1.ShardCommitteePeriod, c2.ShardCommitteePeriod, "%s: ShardCommitteePeriod", name)
assert.Equal(t, c1.MinEpochsToInactivityPenalty, c2.MinEpochsToInactivityPenalty, "%s: MinEpochsToInactivityPenalty", name)
// State vector lengths.
assert.Equal(t, expected.EpochsPerHistoricalVector, actual.EpochsPerHistoricalVector, "%s: EpochsPerHistoricalVector", name)
assert.Equal(t, expected.EpochsPerSlashingsVector, actual.EpochsPerSlashingsVector, "%s: EpochsPerSlashingsVector", name)
assert.Equal(t, expected.HistoricalRootsLimit, actual.HistoricalRootsLimit, "%s: HistoricalRootsLimit", name)
assert.Equal(t, expected.ValidatorRegistryLimit, actual.ValidatorRegistryLimit, "%s: ValidatorRegistryLimit", name)
assert.Equal(t, c1.EpochsPerHistoricalVector, c2.EpochsPerHistoricalVector, "%s: EpochsPerHistoricalVector", name)
assert.Equal(t, c1.EpochsPerSlashingsVector, c2.EpochsPerSlashingsVector, "%s: EpochsPerSlashingsVector", name)
assert.Equal(t, c1.HistoricalRootsLimit, c2.HistoricalRootsLimit, "%s: HistoricalRootsLimit", name)
assert.Equal(t, c1.ValidatorRegistryLimit, c2.ValidatorRegistryLimit, "%s: ValidatorRegistryLimit", name)
// Reward and penalty quotients.
assert.Equal(t, expected.BaseRewardFactor, actual.BaseRewardFactor, "%s: BaseRewardFactor", name)
assert.Equal(t, expected.WhistleBlowerRewardQuotient, actual.WhistleBlowerRewardQuotient, "%s: WhistleBlowerRewardQuotient", name)
assert.Equal(t, expected.ProposerRewardQuotient, actual.ProposerRewardQuotient, "%s: ProposerRewardQuotient", name)
assert.Equal(t, expected.InactivityPenaltyQuotient, actual.InactivityPenaltyQuotient, "%s: InactivityPenaltyQuotient", name)
assert.Equal(t, expected.InactivityPenaltyQuotientAltair, actual.InactivityPenaltyQuotientAltair, "%s: InactivityPenaltyQuotientAltair", name)
assert.Equal(t, expected.MinSlashingPenaltyQuotient, actual.MinSlashingPenaltyQuotient, "%s: MinSlashingPenaltyQuotient", name)
assert.Equal(t, expected.MinSlashingPenaltyQuotientAltair, actual.MinSlashingPenaltyQuotientAltair, "%s: MinSlashingPenaltyQuotientAltair", name)
assert.Equal(t, expected.ProportionalSlashingMultiplier, actual.ProportionalSlashingMultiplier, "%s: ProportionalSlashingMultiplier", name)
assert.Equal(t, expected.ProportionalSlashingMultiplierAltair, actual.ProportionalSlashingMultiplierAltair, "%s: ProportionalSlashingMultiplierAltair", name)
assert.Equal(t, c1.BaseRewardFactor, c2.BaseRewardFactor, "%s: BaseRewardFactor", name)
assert.Equal(t, c1.WhistleBlowerRewardQuotient, c2.WhistleBlowerRewardQuotient, "%s: WhistleBlowerRewardQuotient", name)
assert.Equal(t, c1.ProposerRewardQuotient, c2.ProposerRewardQuotient, "%s: ProposerRewardQuotient", name)
assert.Equal(t, c1.InactivityPenaltyQuotient, c2.InactivityPenaltyQuotient, "%s: InactivityPenaltyQuotient", name)
assert.Equal(t, c1.InactivityPenaltyQuotientAltair, c2.InactivityPenaltyQuotientAltair, "%s: InactivityPenaltyQuotientAltair", name)
assert.Equal(t, c1.MinSlashingPenaltyQuotient, c2.MinSlashingPenaltyQuotient, "%s: MinSlashingPenaltyQuotient", name)
assert.Equal(t, c1.MinSlashingPenaltyQuotientAltair, c2.MinSlashingPenaltyQuotientAltair, "%s: MinSlashingPenaltyQuotientAltair", name)
assert.Equal(t, c1.ProportionalSlashingMultiplier, c2.ProportionalSlashingMultiplier, "%s: ProportionalSlashingMultiplier", name)
assert.Equal(t, c1.ProportionalSlashingMultiplierAltair, c2.ProportionalSlashingMultiplierAltair, "%s: ProportionalSlashingMultiplierAltair", name)
// Max operations per block.
assert.Equal(t, expected.MaxProposerSlashings, actual.MaxProposerSlashings, "%s: MaxProposerSlashings", name)
assert.Equal(t, expected.MaxAttesterSlashings, actual.MaxAttesterSlashings, "%s: MaxAttesterSlashings", name)
assert.Equal(t, expected.MaxAttestations, actual.MaxAttestations, "%s: MaxAttestations", name)
assert.Equal(t, expected.MaxDeposits, actual.MaxDeposits, "%s: MaxDeposits", name)
assert.Equal(t, expected.MaxVoluntaryExits, actual.MaxVoluntaryExits, "%s: MaxVoluntaryExits", name)
assert.Equal(t, c1.MaxProposerSlashings, c2.MaxProposerSlashings, "%s: MaxProposerSlashings", name)
assert.Equal(t, c1.MaxAttesterSlashings, c2.MaxAttesterSlashings, "%s: MaxAttesterSlashings", name)
assert.Equal(t, c1.MaxAttestations, c2.MaxAttestations, "%s: MaxAttestations", name)
assert.Equal(t, c1.MaxDeposits, c2.MaxDeposits, "%s: MaxDeposits", name)
assert.Equal(t, c1.MaxVoluntaryExits, c2.MaxVoluntaryExits, "%s: MaxVoluntaryExits", name)
// Signature domains.
assert.Equal(t, expected.DomainBeaconProposer, actual.DomainBeaconProposer, "%s: DomainBeaconProposer", name)
assert.Equal(t, expected.DomainBeaconAttester, actual.DomainBeaconAttester, "%s: DomainBeaconAttester", name)
assert.Equal(t, expected.DomainRandao, actual.DomainRandao, "%s: DomainRandao", name)
assert.Equal(t, expected.DomainDeposit, actual.DomainDeposit, "%s: DomainDeposit", name)
assert.Equal(t, expected.DomainVoluntaryExit, actual.DomainVoluntaryExit, "%s: DomainVoluntaryExit", name)
assert.Equal(t, expected.DomainSelectionProof, actual.DomainSelectionProof, "%s: DomainSelectionProof", name)
assert.Equal(t, expected.DomainAggregateAndProof, actual.DomainAggregateAndProof, "%s: DomainAggregateAndProof", name)
assert.Equal(t, c1.DomainBeaconProposer, c2.DomainBeaconProposer, "%s: DomainBeaconProposer", name)
assert.Equal(t, c1.DomainBeaconAttester, c2.DomainBeaconAttester, "%s: DomainBeaconAttester", name)
assert.Equal(t, c1.DomainRandao, c2.DomainRandao, "%s: DomainRandao", name)
assert.Equal(t, c1.DomainDeposit, c2.DomainDeposit, "%s: DomainDeposit", name)
assert.Equal(t, c1.DomainVoluntaryExit, c2.DomainVoluntaryExit, "%s: DomainVoluntaryExit", name)
assert.Equal(t, c1.DomainSelectionProof, c2.DomainSelectionProof, "%s: DomainSelectionProof", name)
assert.Equal(t, c1.DomainAggregateAndProof, c2.DomainAggregateAndProof, "%s: DomainAggregateAndProof", name)
assertYamlFieldsMatch(t, name, fields, expected, actual)
assertYamlFieldsMatch(t, name, fields, c1, c2)
}
t.Run("mainnet", func(t *testing.T) {
@@ -129,17 +129,6 @@ func TestLoadConfigFile(t *testing.T) {
fields := fieldsFromYamls(t, append(minimalPresetsFiles, minimalConfigFile))
assertVals("minimal", fields, params.MinimalSpecConfig(), params.BeaconConfig())
})
t.Run("e2e", func(t *testing.T) {
minimalPresetsFiles := presetsFilePath(t, "minimal")
for _, fp := range minimalPresetsFiles {
params.LoadChainConfigFile(fp, nil)
}
configFile := "testdata/e2e_config.yaml"
params.LoadChainConfigFile(configFile, nil)
fields := fieldsFromYamls(t, append(minimalPresetsFiles, configFile))
assertVals("e2e", fields, params.E2ETestConfig(), params.BeaconConfig())
})
}
func TestLoadConfigFile_OverwriteCorrectly(t *testing.T) {

View File

@@ -1,118 +0,0 @@
# e2e config
# Extends the minimal preset
PRESET_BASE: 'minimal'
# Transition
# ---------------------------------------------------------------
# TBD: the spec placeholder is 2**256 - 2**10; the e2e tests use 600
TERMINAL_TOTAL_DIFFICULTY: 600
# By default, don't use these params
#TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000
#TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
# Genesis
# ---------------------------------------------------------------
# [customized]
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 256 # Override for e2e tests
# Jan 3, 2020
MIN_GENESIS_TIME: 1578009600
# Highest byte set to 0x01 to avoid collisions with mainnet versioning
GENESIS_FORK_VERSION: 0x000000fd
# [customized] Faster to spin up testnets, but does not give validator reasonable warning time for genesis
GENESIS_DELAY: 10 # Override for e2e tests
# Forking
# ---------------------------------------------------------------
# Values provided for illustrative purposes.
# Individual tests/testnets may set different values.
# Altair
ALTAIR_FORK_VERSION: 0x010000fd
ALTAIR_FORK_EPOCH: 6 # Override for e2e
# Bellatrix
BELLATRIX_FORK_VERSION: 0x020000fd
BELLATRIX_FORK_EPOCH: 8
# Sharding
SHARDING_FORK_VERSION: 0x030000fd
SHARDING_FORK_EPOCH: 18446744073709551615
# Time parameters
# ---------------------------------------------------------------
# [customized] Faster for testing purposes
SECONDS_PER_SLOT: 10 # Override for e2e tests
# 14 (estimate from Eth1 mainnet)
SECONDS_PER_ETH1_BLOCK: 2 # Override for e2e tests
# 2**8 (= 256) epochs
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# [customized] higher frequency of committee turnover and faster time to acceptable voluntary exit
SHARD_COMMITTEE_PERIOD: 4 # Override for e2e tests
# [customized] process deposits more quickly, but insecure
ETH1_FOLLOW_DISTANCE: 4 # Override for e2e tests
# Validator cycle
# ---------------------------------------------------------------
# 2**2 (= 4)
INACTIVITY_SCORE_BIAS: 4
# 2**4 (= 16)
INACTIVITY_SCORE_RECOVERY_RATE: 16
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# [customized] scale queue churn at much lower validator counts for testing
CHURN_LIMIT_QUOTIENT: 65536
# Fork choice
# ---------------------------------------------------------------
# 70%
PROPOSER_SCORE_BOOST: 70
# Deposit contract
# ---------------------------------------------------------------
# Ethereum Goerli testnet
DEPOSIT_CHAIN_ID: 1337 # Override for e2e tests
DEPOSIT_NETWORK_ID: 1337 # Override for e2e tests
# Configured on a per testnet basis
DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
# Updated penalty values
# ---------------------------------------------------------------
# 3 * 2**24 (= 50,331,648)
INACTIVITY_PENALTY_QUOTIENT_ALTAIR: 50331648
# 2**6 (= 64)
MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: 64
# 2
PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2
# Sync committee
# ---------------------------------------------------------------
# [customized]
SYNC_COMMITTEE_SIZE: 32
# [customized]
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8
# Sync protocol
# ---------------------------------------------------------------
# 1
MIN_SYNC_COMMITTEE_PARTICIPANTS: 1
# Other e2e overrides
# ---------------------------------------------------------------
CONFIG_NAME: "end-to-end"
SLOTS_PER_EPOCH: 6
EPOCHS_PER_ETH1_VOTING_PERIOD: 2
MAX_SEED_LOOKAHEAD: 1

View File

@@ -0,0 +1,43 @@
package params
import "math"
// UsePyrmontNetworkConfig overrides the process-wide beacon network
// configuration with Pyrmont-specific values: the deposit contract
// deployment block and the testnet's bootstrap ENRs.
func UsePyrmontNetworkConfig() {
	c := BeaconNetworkConfig().Copy()
	c.ContractDeploymentBlock = 3743587
	c.BootstrapNodes = []string{
		"enr:-Ku4QOA5OGWObY8ep_x35NlGBEj7IuQULTjkgxC_0G1AszqGEA0Wn2RNlyLFx9zGTNB1gdFBA6ZDYxCgIza1uJUUOj4Dh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDVTPWXAAAgCf__________gmlkgnY0gmlwhDQPSjiJc2VjcDI1NmsxoQM6yTQB6XGWYJbI7NZFBjp4Yb9AYKQPBhVrfUclQUobb4N1ZHCCIyg",
		"enr:-Ku4QOksdA2tabOGrfOOr6NynThMoio6Ggka2oDPqUuFeWCqcRM2alNb8778O_5bK95p3EFt0cngTUXm2H7o1jkSJ_8Dh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDVTPWXAAAgCf__________gmlkgnY0gmlwhDaa13aJc2VjcDI1NmsxoQKdNQJvnohpf0VO0ZYCAJxGjT0uwJoAHbAiBMujGjK0SoN1ZHCCIyg",
	}
	OverrideBeaconNetworkConfig(c)
}
// UsePyrmontConfig sets the package-level beacon chain configuration
// (beaconConfig) to the Pyrmont testnet settings returned by PyrmontConfig.
// Note: this mutates shared package state; callers coordinate ordering.
func UsePyrmontConfig() {
beaconConfig = PyrmontConfig()
}
// PyrmontConfig returns the beacon chain configuration for the Pyrmont
// testnet: a copy of the mainnet config with Pyrmont-specific overrides
// applied, and its fork schedule initialized.
func PyrmontConfig() *BeaconChainConfig {
	c := MainnetConfig().Copy()
	// Genesis parameters.
	c.MinGenesisTime = 1605700800
	c.GenesisDelay = 432000
	c.ConfigName = ConfigNames[Pyrmont]
	// Fork versions and activation epochs. Bellatrix and Sharding are set to
	// math.MaxUint64, i.e. not scheduled.
	c.GenesisForkVersion = []byte{0x00, 0x00, 0x20, 0x09}
	c.AltairForkVersion = []byte{0x01, 0x00, 0x20, 0x09}
	c.AltairForkEpoch = 61650
	c.BellatrixForkVersion = []byte{0x02, 0x00, 0x20, 0x09}
	c.BellatrixForkEpoch = math.MaxUint64
	c.ShardingForkVersion = []byte{0x03, 0x00, 0x20, 0x09}
	c.ShardingForkEpoch = math.MaxUint64
	// Eth1 and deposit contract parameters (chain/network ID 5).
	c.SecondsPerETH1Block = 14
	c.DepositChainID = 5
	c.DepositNetworkID = 5
	c.DepositContractAddress = "0x8c5fecdC472E27Bc447696F431E425D02dd46a8c"
	c.InitializeForkSchedule()
	return c
}

View File

@@ -12,6 +12,7 @@ const (
Mainnet ConfigName = iota
Minimal
EndToEnd
Pyrmont
Prater
EndToEndMainnet
)
@@ -32,6 +33,7 @@ var ConfigNames = map[ConfigName]string{
Mainnet: "mainnet",
Minimal: "minimal",
EndToEnd: "end-to-end",
Pyrmont: "pyrmont",
Prater: "prater",
EndToEndMainnet: "end-to-end-mainnet",
}
@@ -40,6 +42,7 @@ var ConfigNames = map[ConfigName]string{
var KnownConfigs = map[ConfigName]func() *BeaconChainConfig{
Mainnet: MainnetConfig,
Prater: PraterConfig,
Pyrmont: PyrmontConfig,
Minimal: MinimalSpecConfig,
EndToEnd: E2ETestConfig,
EndToEndMainnet: E2EMainnetTestConfig,

View File

@@ -3756,13 +3756,6 @@ def prysm_deps():
sum = "h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=",
version = "v2.3.0",
)
go_repository(
name = "com_github_uudashr_gocognit",
importpath = "github.com/uudashr/gocognit",
sum = "h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4=",
version = "v1.0.5",
)
go_repository(
name = "com_github_valyala_bytebufferpool",
importpath = "github.com/valyala/bytebufferpool",

1
go.mod
View File

@@ -77,7 +77,6 @@ require (
github.com/trailofbits/go-mutexasserts v0.0.0-20200708152505-19999e7d3cef
github.com/tyler-smith/go-bip39 v1.1.0
github.com/urfave/cli/v2 v2.3.0
github.com/uudashr/gocognit v1.0.5
github.com/wealdtech/go-bytesutil v1.1.1
github.com/wealdtech/go-eth2-util v1.6.3
github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.1.3

3
go.sum
View File

@@ -1358,8 +1358,6 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4=
github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
@@ -1806,7 +1804,6 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.8 h1:P1HhGGuLW4aAclzjtmJdf0mJOjVUZUzOTqkAkWL+l6w=
golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -1,7 +1,6 @@
{
"unsafeptr": {
"exclude_files": {
"beacon-chain/state/state-native/custom-types/block_roots.go": "Needed for field management operations",
"external/.*": "Unsafe third party code",
"rules_go_work-.*": "Third party code"
}
@@ -171,13 +170,5 @@
"rules_go_work-.*": "Third party code",
".*_test\\.go": "Tests are ok"
}
},
"gocognit": {
"exclude_files": {
"external/.*": "Third party code",
"rules_go_work-.*": "Third party code",
".*\\.pb.*.go": "Generated code is ok",
".*generated\\.ssz\\.go": "Generated code is ok"
}
}
}

View File

@@ -5,7 +5,6 @@ import (
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
@@ -174,13 +173,6 @@ func TestJsonMarshalUnmarshal(t *testing.T) {
require.DeepEqual(t, [][]byte{[]byte("hi")}, payloadPb.Transactions)
require.DeepEqual(t, [][]byte{[]byte("bye")}, payloadPb.Uncles)
})
t.Run("nil execution block", func(t *testing.T) {
jsonPayload := (*enginev1.ExecutionBlock)(nil)
enc, err := json.Marshal(jsonPayload)
require.NoError(t, err)
payloadPb := &enginev1.ExecutionBlock{}
require.ErrorIs(t, hexutil.ErrEmptyString, json.Unmarshal(enc, payloadPb))
})
}
func TestPayloadIDBytes_MarshalUnmarshalJSON(t *testing.T) {

View File

@@ -17,7 +17,6 @@ go_test(
"//:prysm_sh",
"//cmd/beacon-chain",
"//cmd/validator",
"//config/params:custom_configs",
"//tools/bootnode",
"@com_github_ethereum_go_ethereum//cmd/geth",
"@web3signer",
@@ -36,7 +35,6 @@ go_test(
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/operations/slashings/mock:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//build/bazel:go_default_library",
"//config/params:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -50,6 +48,7 @@ go_test(
"//testing/require:go_default_library",
"//testing/slasher/simulator:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
@@ -73,7 +72,6 @@ go_test(
"//:prysm_sh",
"//cmd/beacon-chain",
"//cmd/validator",
"//config/params:custom_configs",
"//tools/bootnode",
"@com_github_ethereum_go_ethereum//cmd/geth",
"@web3signer",
@@ -92,7 +90,6 @@ go_test(
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//build/bazel:go_default_library",
"//config/params:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -9,6 +9,7 @@ go_library(
"lighthouse_beacon.go",
"lighthouse_validator.go",
"log.go",
"proxy.go",
"tracing_sink.go",
"validator.go",
"web3remotesigner.go",
@@ -29,6 +30,7 @@ go_library(
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/file:go_default_library",
"//proto/engine/v1:go_default_library",
"//runtime/interop:go_default_library",
"//testing/endtoend/components/eth1:go_default_library",
"//testing/endtoend/helpers:go_default_library",
@@ -54,15 +56,11 @@ go_test(
name = "go_default_test",
size = "small",
srcs = ["web3remotesigner_test.go"],
data = [
"//config/params:custom_configs",
"@web3signer",
],
data = ["@web3signer"],
deps = [
":go_default_library",
"//config/params:go_default_library",
"//testing/endtoend/params:go_default_library",
"//testing/require:go_default_library",
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
],
)

View File

@@ -117,8 +117,10 @@ func (node *BeaconNode) Start(ctx context.Context) error {
expectedNumOfPeers += 1
}
jwtPath := path.Join(e2e.TestParams.TestPath, "eth1data/"+strconv.Itoa(node.index)+"/")
eth1Port := e2e.TestParams.Ports.Eth1AuthRPCPort
if index == 0 {
jwtPath = path.Join(e2e.TestParams.TestPath, "eth1data/miner/")
eth1Port = e2e.TestParams.Ports.Eth1ProxyPort
}
jwtPath = path.Join(jwtPath, "geth/jwtsecret")
args := []string{
@@ -126,7 +128,7 @@ func (node *BeaconNode) Start(ctx context.Context) error {
fmt.Sprintf("--%s=%s", cmdshared.LogFileName.Name, stdOutFile.Name()),
fmt.Sprintf("--%s=%s", flags.DepositContractFlag.Name, e2e.TestParams.ContractAddress.Hex()),
fmt.Sprintf("--%s=%d", flags.RPCPort.Name, e2e.TestParams.Ports.PrysmBeaconNodeRPCPort+index),
fmt.Sprintf("--%s=http://127.0.0.1:%d", flags.HTTPWeb3ProviderFlag.Name, e2e.TestParams.Ports.Eth1AuthRPCPort+index),
fmt.Sprintf("--%s=http://127.0.0.1:%d", flags.HTTPWeb3ProviderFlag.Name, eth1Port+index),
fmt.Sprintf("--%s=%s", flags.ExecutionJWTSecretFlag.Name, jwtPath),
fmt.Sprintf("--%s=%d", flags.MinSyncPeers.Name, 1),
fmt.Sprintf("--%s=%d", cmdshared.P2PUDPPort.Name, e2e.TestParams.Ports.PrysmBeaconNodeUDPPort+index),

View File

@@ -0,0 +1,511 @@
package components
// The proxy below provides middleware for engine API requests between Ethereum
// consensus clients and execution clients. It allows for configuration of various
// test cases using yaml files as detailed in the accompanying README.md.
import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"errors"
	"flag"
	"io"
	"io/ioutil"
	"math/big"
	"net"
	"net/http"
	"strconv"
	"strings"

	"github.com/ethereum/go-ethereum/common/hexutil"
	pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
	"github.com/prysmaticlabs/prysm/testing/endtoend/helpers"
	e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
	"github.com/sirupsen/logrus"
)
var (
	// Path of the YAML file describing which requests/responses to spoof.
	spoofingConfigFile = flag.String("spoofing-config", "tools/engine-proxy/spoofing_config.yaml", "")
	// Address of the real execution client the proxy forwards traffic to.
	executionEndpoint = flag.String("execution-endpoint", "http://localhost:8545", "")
	fuzz              = flag.Bool(
		"fuzz",
		false,
		"fuzzes requests and responses, overrides -spoofing-config if -fuzz is set",
	)
)

// spoofingConfig groups the request and response spoofing rules loaded from YAML.
type spoofingConfig struct {
	Requests  []*spoof `yaml:"requests"`
	Responses []*spoof `yaml:"responses"`
}

// spoof names one JSON-RPC method whose listed fields should be overwritten.
type spoof struct {
	Method string
	Fields map[string]interface{}
}

// jsonRPCObject mirrors the generic JSON-RPC 2.0 envelope used by the engine API.
type jsonRPCObject struct {
	Jsonrpc string        `json:"jsonrpc"`
	Method  string        `json:"method"`
	Params  []interface{} `json:"params"`
	ID      uint64        `json:"id"`
	Result  interface{}   `json:"result"`
}

// forkchoiceUpdatedResponse is the result body of engine_forkchoiceUpdatedV1.
type forkchoiceUpdatedResponse struct {
	Status    *pb.PayloadStatus  `json:"payloadStatus"`
	PayloadId *pb.PayloadIDBytes `json:"payloadId"`
}

// InterceptorFunc inspects a request before it is proxied; returning true
// means the request was fully handled and must not be forwarded.
type InterceptorFunc func(reqBytes []byte, w http.ResponseWriter, r *http.Request) bool

// ProxyNode is an HTTP middleman between a consensus client and an execution
// client, able to intercept, queue, and replay engine API requests.
type ProxyNode struct {
	address          string          // listen address ("127.0.0.1:<port>")
	srv              *http.Server    // the proxy's HTTP server
	destAddress      string          // host:port of the execution client
	logger           *logrus.Logger  // file-backed logger, set in Start
	logEntry         *logrus.Entry   // logger with the "engine-proxy" prefix
	interceptor      InterceptorFunc // optional pre-forwarding hook
	backedUpRequests []*http.Request // intercepted requests queued for replay
}
// NewProxyNode builds a proxy listening on 127.0.0.1:proxyPort that forwards
// engine API traffic to destAddress.
func NewProxyNode(proxyPort int, destAddress string) *ProxyNode {
	node := &ProxyNode{
		address:     "127.0.0.1:" + strconv.Itoa(proxyPort),
		destAddress: destAddress,
	}
	router := http.NewServeMux()
	router.HandleFunc("/", node.proxyHandler())
	node.srv = &http.Server{
		Handler: router,
		Addr:    node.address,
	}
	return node
}
// Start opens the proxy's log file, begins serving, and blocks until ctx is
// canceled, at which point the underlying HTTP server is shut down gracefully.
// Returns any error from log-file creation or from the final Shutdown.
func (pn *ProxyNode) Start(ctx context.Context) error {
	stdOutFile, err := helpers.DeleteAndCreateFile(e2e.TestParams.LogPath, "eth1-proxy.log")
	if err != nil {
		return err
	}
	pn.logger = logrus.New()
	pn.logger.SetOutput(stdOutFile)
	pn.logEntry = pn.logger.WithField("prefix", "engine-proxy")
	pn.logEntry.Infof("Engine proxy now listening on address %s", pn.address)
	pn.srv.BaseContext = func(listener net.Listener) context.Context {
		return ctx
	}
	go func() {
		// http.ErrServerClosed is the expected result of the Shutdown call
		// below; do not report it as a failure.
		if err := pn.srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			pn.logEntry.Error(err)
		}
	}()
	// Block until the surrounding test context is canceled, then shut down.
	// (The original for/select had a single case and was equivalent to this.)
	<-ctx.Done()
	return pn.srv.Shutdown(context.Background())
}
// AddInterceptor registers a hook that runs before any proxying; if the hook
// returns true the request is considered handled and is not forwarded.
// NOTE(review): only one interceptor is kept — a second call replaces the
// first. There is no synchronization; confirm all calls happen before Start.
func (pn *ProxyNode) AddInterceptor(icptr InterceptorFunc) {
	pn.interceptor = icptr
}
// SyncingInterceptor returns an interceptor that answers engine
// forkchoiceUpdatedV1 and newPayloadV1 calls with a SYNCING status instead of
// forwarding them to the execution client. Any other request is left alone.
func (pn *ProxyNode) SyncingInterceptor() InterceptorFunc {
	return func(body []byte, w http.ResponseWriter, r *http.Request) bool {
		if pn.checkIfValid(body) {
			pn.returnSyncingResponse(body, w, r)
			return true
		}
		return false
	}
}
// Proxies requests from a consensus client to an execution client, spoofing requests
// and/or responses as desired. Acts as a middleware useful for testing different merge scenarios.
// Flow: parse body -> optional interceptor (which may fully handle the request)
// -> replay any previously backed-up requests -> forward the current request
// and pipe the response back to the original caller.
func (pn *ProxyNode) proxyHandler() func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		requestBytes, err := pn.parseRequestBytes(r)
		if err != nil {
			pn.logEntry.WithError(err).Error("Could not parse request")
			return
		}
		// An interceptor returning true means the request was fully handled
		// (e.g. answered with a spoofed SYNCING status) — do not forward it.
		if pn.interceptor != nil && pn.interceptor(requestBytes, w, r) {
			return
		}
		// Replay requests that the interceptor queued up earlier. Their
		// responses are only logged, not returned to the consensus client.
		for _, rq := range pn.backedUpRequests {
			requestB, err := pn.parseRequestBytes(rq)
			if err != nil {
				pn.logEntry.WithError(err).Error("Could not parse request")
				return
			}
			destAddr := "http://" + pn.destAddress
			// Create a new proxy request to the execution client.
			// NOTE(review): url is mutated but never used afterwards — the
			// request below targets destAddr directly.
			url := rq.URL
			url.Host = destAddr
			proxyReq, err := http.NewRequest(rq.Method, destAddr, rq.Body)
			if err != nil {
				pn.logEntry.WithError(err).Error("Could create new request")
				return
			}
			// Set the modified request as the proxy request body.
			proxyReq.Body = ioutil.NopCloser(bytes.NewBuffer(requestB))
			// Required proxy headers for forwarding JSON-RPC requests to the execution client.
			proxyReq.Header.Set("Host", rq.Host)
			proxyReq.Header.Set("X-Forwarded-For", rq.RemoteAddr)
			proxyReq.Header.Set("Content-Type", "application/json")
			client := &http.Client{}
			proxyRes, err := client.Do(proxyReq)
			if err != nil {
				pn.logEntry.WithError(err).Error("Could not do client proxy")
				return
			}
			buf := bytes.NewBuffer([]byte{})
			// Pipe the proxy response to the original caller.
			// NOTE(review): proxyRes.Body is not closed on this error path.
			if _, err = io.Copy(buf, proxyRes.Body); err != nil {
				pn.logEntry.WithError(err).Error("Could not copy proxy request body")
				return
			}
			if err = proxyRes.Body.Close(); err != nil {
				pn.logEntry.WithError(err).Error("Could not do client proxy")
				return
			}
			pn.logEntry.Infof("Queued Request Response: %s", buf.String())
		}
		// All queued requests have been replayed; clear the backlog.
		pn.backedUpRequests = []*http.Request{}
		/*
			var modifiedReq []byte
			if *fuzz {
				modifiedReq, err = fuzzRequest(requestBytes)
			} else {
				// We optionally spoof the request as desired.
				modifiedReq, err = spoofRequest(config, requestBytes)
			}
			if err != nil {
				log.WithError(err).Error("Failed to spoof request")
				return
			}
		*/
		destAddr := "http://" + pn.destAddress
		// Create a new proxy request to the execution client.
		// NOTE(review): as above, the url mutation is dead code.
		url := r.URL
		url.Host = destAddr
		proxyReq, err := http.NewRequest(r.Method, destAddr, r.Body)
		if err != nil {
			pn.logEntry.WithError(err).Error("Could create new request")
			return
		}
		// Set the modified request as the proxy request body.
		proxyReq.Body = ioutil.NopCloser(bytes.NewBuffer(requestBytes))
		// Required proxy headers for forwarding JSON-RPC requests to the execution client.
		proxyReq.Header.Set("Host", r.Host)
		proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
		proxyReq.Header.Set("Content-Type", "application/json")
		client := &http.Client{}
		proxyRes, err := client.Do(proxyReq)
		if err != nil {
			pn.logEntry.WithError(err).Error("Could not do client proxy")
			return
		}
		/*
			// We optionally spoof the response as desired.
			var modifiedResp []byte
			if *fuzz {
				modifiedResp, err = fuzzResponse(proxyRes.Body)
			} else {
				modifiedResp, err = spoofResponse(config, requestBytes, proxyRes.Body)
			}
			if err != nil {
				log.WithError(err).Error("Failed to spoof response")
				return
			}
		*/
		// Pipe the proxy response to the original caller.
		if _, err = io.Copy(w, proxyRes.Body); err != nil {
			pn.logEntry.WithError(err).Error("Could not copy proxy request body")
			return
		}
		if err = proxyRes.Body.Close(); err != nil {
			pn.logEntry.WithError(err).Error("Could not do client proxy")
			return
		}
	}
}
// parseRequestBytes drains req.Body, logs the payload, and replaces the body
// with a fresh reader so the request can still be forwarded downstream.
func (pn *ProxyNode) parseRequestBytes(req *http.Request) ([]byte, error) {
	body, err := ioutil.ReadAll(req.Body)
	if err != nil {
		return nil, err
	}
	if closeErr := req.Body.Close(); closeErr != nil {
		return nil, closeErr
	}
	pn.logEntry.Infof("%s", string(body))
	// Re-arm the body: it was consumed by ReadAll above.
	req.Body = ioutil.NopCloser(bytes.NewBuffer(body))
	return body, nil
}
// fuzzRequest randomly rewrites 0x-prefixed hex string fields in the first
// JSON-RPC param of the request and returns the (possibly modified) request
// bytes. Payloads that are not single-object JSON-RPC requests pass through
// unchanged.
func fuzzRequest(requestBytes []byte) ([]byte, error) {
	// If the JSON request is not a JSON-RPC object, return the request as-is.
	jsonRequest, err := unmarshalRPCObject(requestBytes)
	if err != nil {
		switch {
		case strings.Contains(err.Error(), "cannot unmarshal array"):
			// Batched (array) requests are passed through untouched.
			return requestBytes, nil
		default:
			return nil, err
		}
	}
	if len(jsonRequest.Params) == 0 {
		return requestBytes, nil
	}
	params := make(map[string]interface{})
	if err := extractObjectFromJSONRPC(jsonRequest.Params[0], &params); err != nil {
		// First param is not an object (e.g. a plain string); leave untouched.
		return requestBytes, nil
	}
	// A field is fuzzed when a uniform draw from [0, 100) is <= this value,
	// i.e. ~6% of the time. TODO: Do not hardcode.
	fuzzProbability := uint64(5)
	for k, v := range params {
		// Each field gets an independent probability of being fuzzed.
		r, err := rand.Int(rand.Reader, big.NewInt(100))
		if err != nil {
			return nil, err
		}
		switch obj := v.(type) {
		case string:
			if strings.Contains(obj, "0x") && r.Uint64() <= fuzzProbability {
				// Ignore hex strings of odd length.
				if len(obj)%2 != 0 {
					continue
				}
				// Generate some random hex string with same length and set it.
				// TODO: Experiment with length < current field, or junk in general.
				hexBytes, err := hexutil.Decode(obj)
				if err != nil {
					return nil, err
				}
				fuzzedStr, err := randomHex(len(hexBytes))
				if err != nil {
					return nil, err
				}
				log.WithField("method", jsonRequest.Method).Warnf(
					"Fuzzing field %s, modifying value from %s to %s",
					k,
					obj,
					fuzzedStr,
				)
				params[k] = fuzzedStr
			}
		}
	}
	jsonRequest.Params[0] = params
	return json.Marshal(jsonRequest)
}
// Parses the request from the consensus client and checks if user desires
// to spoof it based on the JSON-RPC method. If so, it returns the modified
// request bytes which will be proxied to the execution client.
func spoofRequest(config *spoofingConfig, requestBytes []byte) ([]byte, error) {
	// If the JSON request is not a JSON-RPC object, return the request as-is.
	jsonRequest, err := unmarshalRPCObject(requestBytes)
	if err != nil {
		switch {
		case strings.Contains(err.Error(), "cannot unmarshal array"):
			// Batched (array) requests are passed through untouched.
			return requestBytes, nil
		default:
			return nil, err
		}
	}
	if len(jsonRequest.Params) == 0 {
		// Nothing to modify without params.
		return requestBytes, nil
	}
	// Index the spoof rules by JSON-RPC method for direct lookup.
	desiredMethodsToSpoof := make(map[string]*spoof)
	for _, spoofReq := range config.Requests {
		desiredMethodsToSpoof[spoofReq.Method] = spoofReq
	}
	// If we don't want to spoof the request, just return the request as-is.
	spoofDetails, ok := desiredMethodsToSpoof[jsonRequest.Method]
	if !ok {
		return requestBytes, nil
	}
	// TODO: Support methods with multiple params.
	params := make(map[string]interface{})
	if err := extractObjectFromJSONRPC(jsonRequest.Params[0], &params); err != nil {
		return nil, err
	}
	// Overwrite only fields that already exist in the request params.
	for fieldToModify, fieldValue := range spoofDetails.Fields {
		if _, ok := params[fieldToModify]; !ok {
			continue
		}
		params[fieldToModify] = fieldValue
	}
	log.WithField("method", jsonRequest.Method).Infof("Modified request %v", params)
	jsonRequest.Params[0] = params
	return json.Marshal(jsonRequest)
}
// checkIfValid reports whether reqBytes is a JSON-RPC request for one of the
// engine API methods this proxy knows how to spoof (forkchoiceUpdatedV1 or
// newPayloadV1). Requests that fail to parse are treated as not valid.
func (pn *ProxyNode) checkIfValid(reqBytes []byte) bool {
	jsonRequest, err := unmarshalRPCObject(reqBytes)
	if err != nil {
		// The original switch distinguished "cannot unmarshal array" from
		// other errors but returned false in both branches; collapse it.
		return false
	}
	return strings.Contains(jsonRequest.Method, "engine_forkchoiceUpdatedV1") ||
		strings.Contains(jsonRequest.Method, "engine_newPayloadV1")
}
// returnSyncingResponse writes a canned SYNCING engine API response for
// forkchoiceUpdatedV1 / newPayloadV1 requests instead of forwarding them.
// newPayloadV1 requests are additionally queued in backedUpRequests so that
// proxyHandler can replay them later.
func (pn *ProxyNode) returnSyncingResponse(reqBytes []byte, w http.ResponseWriter, r *http.Request) {
	jsonRequest, err := unmarshalRPCObject(reqBytes)
	if err != nil {
		// Unparseable request: silently drop. NOTE(review): the caller gets
		// no response body at all in this case — confirm this is intended.
		return
	}
	switch {
	case strings.Contains(jsonRequest.Method, "engine_forkchoiceUpdatedV1"):
		// SYNCING status with a nil payload ID, per the engine API spec shape.
		resp := &forkchoiceUpdatedResponse{
			Status: &pb.PayloadStatus{
				Status: pb.PayloadStatus_SYNCING,
			},
			PayloadId: nil,
		}
		jResp := &jsonRPCObject{
			Method: jsonRequest.Method,
			ID:     jsonRequest.ID,
			Result: resp,
		}
		rawResp, err := json.Marshal(jResp)
		if err != nil {
			return
		}
		// Write errors are deliberately ignored (best-effort response).
		_, err = w.Write(rawResp)
		_ = err
		return
	case strings.Contains(jsonRequest.Method, "engine_newPayloadV1"):
		resp := &pb.PayloadStatus{
			Status: pb.PayloadStatus_SYNCING,
		}
		jResp := &jsonRPCObject{
			Method: jsonRequest.Method,
			ID:     jsonRequest.ID,
			Result: resp,
		}
		rawResp, err := json.Marshal(jResp)
		if err != nil {
			return
		}
		// Write errors are deliberately ignored (best-effort response).
		_, err = w.Write(rawResp)
		_ = err
		// Queue the payload request so it can be replayed to the execution
		// client once the proxy stops spoofing SYNCING.
		pn.backedUpRequests = append(pn.backedUpRequests, r)
		return
	}
	return
}
func fuzzResponse(responseBody io.Reader) ([]byte, error) {
responseBytes, err := ioutil.ReadAll(responseBody)
if err != nil {
return nil, err
}
return responseBytes, nil
}
// Parses the response body from the execution client and checks if user desires
// to spoof it based on the JSON-RPC method. If so, it returns the modified
// response bytes which will be proxied to the consensus client.
func spoofResponse(config *spoofingConfig, requestBytes []byte, responseBody io.Reader) ([]byte, error) {
	responseBytes, err := ioutil.ReadAll(responseBody)
	if err != nil {
		return nil, err
	}
	// If the JSON request is not a JSON-RPC object, return the request as-is.
	jsonRequest, err := unmarshalRPCObject(requestBytes)
	if err != nil {
		switch {
		case strings.Contains(err.Error(), "cannot unmarshal array"):
			// Batched (array) requests are not spoofed.
			return responseBytes, nil
		default:
			return nil, err
		}
	}
	jsonResponse, err := unmarshalRPCObject(responseBytes)
	if err != nil {
		switch {
		case strings.Contains(err.Error(), "cannot unmarshal array"):
			return responseBytes, nil
		default:
			return nil, err
		}
	}
	// Index the response spoof rules by the *request* method, since JSON-RPC
	// responses do not echo the method back.
	desiredMethodsToSpoof := make(map[string]*spoof)
	for _, spoofReq := range config.Responses {
		desiredMethodsToSpoof[spoofReq.Method] = spoofReq
	}
	// If we don't want to spoof the request, just return the request as-is.
	spoofDetails, ok := desiredMethodsToSpoof[jsonRequest.Method]
	if !ok {
		return responseBytes, nil
	}
	// TODO: Support nested objects.
	params := make(map[string]interface{})
	if err := extractObjectFromJSONRPC(jsonResponse.Result, &params); err != nil {
		return nil, err
	}
	// Overwrite only fields that already exist in the result object.
	for fieldToModify, fieldValue := range spoofDetails.Fields {
		if _, ok := params[fieldToModify]; !ok {
			continue
		}
		params[fieldToModify] = fieldValue
	}
	log.WithField("method", jsonRequest.Method).Infof("Modified response %v", params)
	jsonResponse.Result = params
	return json.Marshal(jsonResponse)
}
// unmarshalRPCObject decodes raw JSON bytes into a jsonRPCObject envelope.
func unmarshalRPCObject(rawBytes []byte) (*jsonRPCObject, error) {
	var obj jsonRPCObject
	if err := json.Unmarshal(rawBytes, &obj); err != nil {
		return nil, err
	}
	return &obj, nil
}
// extractObjectFromJSONRPC re-encodes src as JSON and decodes the result into
// dst, converting loosely-typed JSON-RPC params into concrete maps or structs.
func extractObjectFromJSONRPC(src interface{}, dst interface{}) error {
	encoded, err := json.Marshal(src)
	if err != nil {
		return err
	}
	return json.Unmarshal(encoded, dst)
}
// randomHex returns a 0x-prefixed hex string encoding n cryptographically
// random bytes.
func randomHex(n int) (string, error) {
	buf := make([]byte, n)
	_, err := rand.Read(buf)
	if err != nil {
		return "", err
	}
	return "0x" + hex.EncodeToString(buf), nil
}

View File

@@ -151,7 +151,7 @@ func (v *ValidatorNode) Start(ctx context.Context) error {
args = append(args, features.E2EValidatorFlags...)
}
if v.config.UseWeb3RemoteSigner {
args = append(args, fmt.Sprintf("--%s=http://localhost:%d", flags.Web3SignerURLFlag.Name, Web3RemoteSignerPort))
args = append(args, fmt.Sprintf("--%s=localhost:%d", flags.Web3SignerURLFlag.Name, Web3RemoteSignerPort))
// Write the pubkeys as comma separated hex strings with 0x prefix.
// See: https://docs.teku.consensys.net/en/latest/HowTo/External-Signer/Use-External-Signer/
_, pubs, err := interop.DeterministicallyGenerateKeys(uint64(offset), uint64(validatorNum))

View File

@@ -37,16 +37,13 @@ type rawKeyFile struct {
}
type Web3RemoteSigner struct {
ctx context.Context
started chan struct{}
configFilePath string
cmd *exec.Cmd
ctx context.Context
started chan struct{}
}
func NewWeb3RemoteSigner(configFilePath string) *Web3RemoteSigner {
func NewWeb3RemoteSigner() *Web3RemoteSigner {
return &Web3RemoteSigner{
started: make(chan struct{}, 1),
configFilePath: configFilePath,
started: make(chan struct{}, 1),
}
}
@@ -69,12 +66,6 @@ func (w *Web3RemoteSigner) Start(ctx context.Context) error {
return err
}
network := "minimal"
if len(w.configFilePath) > 0 {
// A file path to yaml config file is acceptable network argument.
network = w.configFilePath
}
args := []string{
// Global flags
fmt.Sprintf("--key-store-path=%s", keystorePath),
@@ -84,13 +75,13 @@ func (w *Web3RemoteSigner) Start(ctx context.Context) error {
// Command
"eth2",
// Command flags
"--network=" + network,
"--network=minimal",
"--slashing-protection-enabled=false", // Otherwise, a postgres DB is required.
"--key-manager-api-enabled=true",
"--enable-key-manager-api=true",
}
cmd := exec.CommandContext(ctx, binaryPath, args...) // #nosec G204 -- Test code is safe to do this.
w.cmd = cmd
// Write stdout and stderr to log files.
stdout, err := os.Create(path.Join(e2e.TestParams.LogPath, "web3signer.stdout.log"))
if err != nil {

View File

@@ -5,7 +5,6 @@ import (
"testing"
"time"
"github.com/bazelbuild/rules_go/go/tools/bazel"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/endtoend/components"
e2eparams "github.com/prysmaticlabs/prysm/testing/endtoend/params"
@@ -14,11 +13,7 @@ import (
func TestWeb3RemoteSigner_StartsAndReturnsPublicKeys(t *testing.T) {
require.NoError(t, e2eparams.Init(0))
fp, err := bazel.Runfile("config/params/testdata/e2e_config.yaml")
if err != nil {
t.Fatal(err)
}
wsc := components.NewWeb3RemoteSigner(fp)
wsc := components.NewWeb3RemoteSigner()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

View File

@@ -6,11 +6,10 @@ lighthouse_archive_name = "lighthouse-%s-x86_64-unknown-linux-gnu-portable.tar.g
def e2e_deps():
http_archive(
name = "web3signer",
# Built from commit 17d253b which has important unreleased changes.
urls = ["https://prysmaticlabs.com/uploads/web3signer-17d253b.tar.gz"],
sha256 = "bf450a59a0845c1ce8100b3192c7fec021b565efe8b1ab46bed9f71cb994a6d7",
urls = ["https://artifacts.consensys.net/public/web3signer/raw/names/web3signer.tar.gz/versions/21.10.5/web3signer-21.10.5.tar.gz"],
sha256 = "d122429f6a310bc555d1281e0b3f4e3ac43a7beec5e5dcf0a0d2416a5984f461",
build_file = "@prysm//testing/endtoend:web3signer.BUILD",
strip_prefix = "web3signer-develop",
strip_prefix = "web3signer-21.10.5",
)
http_archive(

View File

@@ -7,8 +7,10 @@ package endtoend
import (
"context"
"fmt"
"net/http"
"os"
"path"
"strconv"
"strings"
"sync"
"testing"
@@ -17,7 +19,6 @@ import (
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/build/bazel"
"github.com/prysmaticlabs/prysm/config/params"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -28,6 +29,7 @@ import (
e2e "github.com/prysmaticlabs/prysm/testing/endtoend/params"
e2etypes "github.com/prysmaticlabs/prysm/testing/endtoend/types"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/time/slots"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
@@ -80,7 +82,10 @@ func (r *testRunner) run() {
g.Go(func() error {
return tracingSink.Start(ctx)
})
proxyNode := components.NewProxyNode(e2e.TestParams.Ports.Eth1ProxyPort, "127.0.0.1:"+strconv.Itoa(e2e.TestParams.Ports.Eth1RPCPort))
g.Go(func() error {
return proxyNode.Start(ctx)
})
if multiClientActive {
keyGen = components.NewKeystoreGenerator()
@@ -90,21 +95,6 @@ func (r *testRunner) run() {
})
}
var web3RemoteSigner *components.Web3RemoteSigner
if config.UseWeb3RemoteSigner {
cfg, err := bazel.Runfile("config/params/testdata/e2e_config.yaml")
if err != nil {
t.Fatal(err)
}
web3RemoteSigner = components.NewWeb3RemoteSigner(cfg)
g.Go(func() error {
if err := web3RemoteSigner.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start web3 remote signer")
}
return nil
})
}
// Boot node.
bootNode := components.NewBootNode()
g.Go(func() error {
@@ -162,6 +152,18 @@ func (r *testRunner) run() {
return nil
})
// Web3 remote signer.
var web3RemoteSigner *components.Web3RemoteSigner
if config.UseWeb3RemoteSigner {
web3RemoteSigner = components.NewWeb3RemoteSigner()
g.Go(func() error {
if err := web3RemoteSigner.Start(ctx); err != nil {
return errors.Wrap(err, "failed to start web3 remote signer")
}
return nil
})
}
if multiClientActive {
lighthouseNodes = components.NewLighthouseBeaconNodes(config)
g.Go(func() error {
@@ -258,6 +260,17 @@ func (r *testRunner) run() {
require.NoError(t, err)
tickingStartTime := helpers.EpochTickerStartTime(genesis)
proxyNode.AddInterceptor(func(reqBytes []byte, w http.ResponseWriter, r *http.Request) bool {
currSlot := slots.CurrentSlot(uint64(genesis.GenesisTime.AsTime().Unix()))
if currSlot < params.BeaconConfig().SlotsPerEpoch.Mul(uint64(9)) {
return false
}
if currSlot >= params.BeaconConfig().SlotsPerEpoch.Mul(uint64(10)) {
return false
}
return proxyNode.SyncingInterceptor()(reqBytes, w, r)
})
// Run assigned evaluators.
if err := r.runEvaluators(conns, tickingStartTime); err != nil {
return errors.Wrap(err, "one or more evaluators failed")
@@ -303,6 +316,9 @@ func (r *testRunner) runEvaluators(conns []*grpc.ClientConn, tickingStartTime ti
secondsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
ticker := helpers.NewEpochTicker(tickingStartTime, secondsPerEpoch)
for currentEpoch := range ticker.C() {
if currentEpoch == 9 || currentEpoch == 10 {
continue
}
wg := new(sync.WaitGroup)
for _, eval := range config.Evaluators {
// Fix reference to evaluator as it will be running
@@ -336,6 +352,7 @@ func (r *testRunner) runEvaluators(conns []*grpc.ClientConn, tickingStartTime ti
func (r *testRunner) testDepositsAndTx(ctx context.Context, g *errgroup.Group,
keystorePath string, requiredNodes []e2etypes.ComponentRunner) {
minGenesisActiveCount := int(params.BeaconConfig().MinGenesisActiveValidatorCount)
depositCheckValidator := components.NewValidatorNode(r.config, int(e2e.DepositCount), e2e.TestParams.BeaconNodeCount, minGenesisActiveCount)
g.Go(func() error {
if err := helpers.ComponentsStarted(ctx, requiredNodes); err != nil {

View File

@@ -19,7 +19,7 @@ type testArgs struct {
useWeb3RemoteSigner bool
}
func TestEndToEnd_MinimalConfig(t *testing.T) {
func TestEndToEnd_MinimalConfigV(t *testing.T) {
e2eMinimal(t, &testArgs{
usePrysmSh: false,
useWeb3RemoteSigner: false,
@@ -27,6 +27,7 @@ func TestEndToEnd_MinimalConfig(t *testing.T) {
}
func TestEndToEnd_MinimalConfig_Web3Signer(t *testing.T) {
t.Skip("TODO(9994): Complete web3signer client implementation, currently blocked by https://github.com/ConsenSys/web3signer/issues/494")
e2eMinimal(t, &testArgs{
usePrysmSh: false,
useWeb3RemoteSigner: true,
@@ -53,10 +54,6 @@ func e2eMinimal(t *testing.T, args *testArgs) {
epochsToRun, err = strconv.Atoi(epochStr)
require.NoError(t, err)
}
// TODO(#10053): Web3signer does not support bellatrix yet.
if args.useWeb3RemoteSigner {
epochsToRun = helpers.BellatrixE2EForkEpoch - 1
}
if args.usePrysmSh {
// If using prysm.sh, run for only 6 epochs.
// TODO(#9166): remove this block once v2 changes are live.

View File

@@ -31,6 +31,7 @@ type ports struct {
Eth1RPCPort int
Eth1AuthRPCPort int
Eth1WSPort int
Eth1ProxyPort int
PrysmBeaconNodeRPCPort int
PrysmBeaconNodeUDPPort int
PrysmBeaconNodeTCPPort int
@@ -86,6 +87,7 @@ const (
Eth1RPCPort = Eth1Port + portSpan
Eth1WSPort = Eth1Port + 2*portSpan
Eth1AuthRPCPort = Eth1Port + 3*portSpan
Eth1ProxyPort = Eth1Port + 4*portSpan
PrysmBeaconNodeRPCPort = 4150
PrysmBeaconNodeUDPPort = PrysmBeaconNodeRPCPort + portSpan
@@ -233,6 +235,10 @@ func initializeStandardPorts(shardCount, shardIndex int, ports *ports, existingR
if err != nil {
return err
}
eth1ProxyPort, err := port(Eth1ProxyPort, shardCount, shardIndex, existingRegistrations)
if err != nil {
return err
}
beaconNodeRPCPort, err := port(PrysmBeaconNodeRPCPort, shardCount, shardIndex, existingRegistrations)
if err != nil {
return err
@@ -275,6 +281,7 @@ func initializeStandardPorts(shardCount, shardIndex int, ports *ports, existingR
ports.Eth1RPCPort = eth1RPCPort
ports.Eth1AuthRPCPort = eth1AuthPort
ports.Eth1WSPort = eth1WSPort
ports.Eth1ProxyPort = eth1ProxyPort
ports.PrysmBeaconNodeRPCPort = beaconNodeRPCPort
ports.PrysmBeaconNodeUDPPort = beaconNodeUDPPort
ports.PrysmBeaconNodeTCPPort = beaconNodeTCPPort

View File

@@ -102,7 +102,3 @@ func (m *engineMock) ExecutionBlockByHash(_ context.Context, hash common.Hash) (
Hash: b.BlockHash,
}, nil
}
func (m *engineMock) GetTerminalBlockHash(context.Context) ([]byte, bool, error) {
return nil, false, nil
}

View File

@@ -1,14 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["analyzer.go"],
importpath = "github.com/prysmaticlabs/prysm/tools/analyzers/gocognit",
visibility = ["//visibility:public"],
deps = [
"@com_github_uudashr_gocognit//:go_default_library",
"@org_golang_x_tools//go/analysis:go_default_library",
"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library",
"@org_golang_x_tools//go/ast/inspector:go_default_library",
],
)

View File

@@ -1,195 +0,0 @@
> Copied from https://github.com/uudashr/gocognit/blob/5bf67146515e79acd2a8d5728deafa9d91ad48db/README.md
> License: MIT
[![GoDoc](https://godoc.org/github.com/uudashr/gocognit?status.svg)](https://godoc.org/github.com/uudashr/gocognit)
# Gocognit
Gocognit calculates cognitive complexities of functions in Go source code. Cognitive complexity is a measure of how hard the code is to understand intuitively.
## Understanding the complexity
Given code using `if` statement,
```go
func GetWords(number int) string {
if number == 1 { // +1
return "one"
} else if number == 2 { // +1
return "a couple"
} else if number == 3 { // +1
return "a few"
} else { // +1
return "lots"
}
} // Cognitive complexity = 4
```
Above code can be refactored using `switch` statement,
```go
func GetWords(number int) string {
switch number { // +1
case 1:
return "one"
case 2:
return "a couple"
case 3:
return "a few"
default:
return "lots"
}
} // Cognitive complexity = 1
```
As you can see, the two snippets above are equivalent, but the second is easier to understand; that is why its cognitive complexity score is lower than the first one's.
## Comparison with cyclomatic complexity
### Example 1
#### Cyclomatic complexity
```go
func GetWords(number int) string { // +1
switch number {
case 1: // +1
return "one"
case 2: // +1
return "a couple"
case 3: // +1
return "a few"
default:
return "lots"
}
} // Cyclomatic complexity = 4
```
#### Cognitive complexity
```go
func GetWords(number int) string {
switch number { // +1
case 1:
return "one"
case 2:
return "a couple"
case 3:
return "a few"
default:
return "lots"
}
} // Cognitive complexity = 1
```
Cognitive complexity gives a lower score compared to cyclomatic complexity.
### Example 2
#### Cyclomatic complexity
```go
func SumOfPrimes(max int) int { // +1
var total int
OUT:
for i := 1; i < max; i++ { // +1
for j := 2; j < i; j++ { // +1
if i%j == 0 { // +1
continue OUT
}
}
total += i
}
return total
} // Cyclomatic complexity = 4
```
#### Cognitive complexity
```go
func SumOfPrimes(max int) int {
var total int
OUT:
for i := 1; i < max; i++ { // +1
for j := 2; j < i; j++ { // +2 (nesting = 1)
if i%j == 0 { // +3 (nesting = 2)
continue OUT // +1
}
}
total += i
}
return total
} // Cognitive complexity = 7
```
Cognitive complexity gives a higher score compared to cyclomatic complexity.
## Rules
The cognitive complexity of a function is calculated according to the
following rules:
> Note: these rules are specific for Go, please see the [original whitepaper](./CognitiveComplexity.pdf) for more complete reference.
### Increments
There is an increment for each of the following:
1. `if`, `else if`, `else`
2. `switch`, `select`
3. `for`
4. `goto` LABEL, `break` LABEL, `continue` LABEL
5. sequence of binary logical operators
6. each method in a recursion cycle
### Nesting level
The following structures increment the nesting level:
1. `if`, `else if`, `else`
2. `switch`, `select`
3. `for`
4. function literal or lambda
### Nesting increments
The following structures receive a nesting increment commensurate with their nested depth inside nesting structures:
1. `if`
2. `switch`, `select`
3. `for`
## Installation
```
$ go install github.com/uudashr/gocognit/cmd/gocognit@latest
```
or
```
$ go get github.com/uudashr/gocognit/cmd/gocognit
```
## Usage
```
$ gocognit
Calculate cognitive complexities of Go functions.
Usage:
gocognit [flags] <Go file or directory> ...
Flags:
-over N show functions with complexity > N only and
return exit code 1 if the set is non-empty
-top N show the top N most complex functions only
-avg show the average complexity over all functions,
not depending on whether -over or -top are set
The output fields for each line are:
<complexity> <package> <function> <file:row:column>
```
Examples:
```
$ gocognit .
$ gocognit main.go
$ gocognit -top 10 src/
$ gocognit -over 25 docker
$ gocognit -avg .
```
The output fields for each line are:
```
<complexity> <package> <function> <file:row:column>
```
## Related project
- [Gocyclo](https://github.com/fzipp/gocyclo) where the code are based on.
- [Cognitive Complexity: A new way of measuring understandability](./CognitiveComplexity.pdf) white paper by G. Ann Campbell.

View File

@@ -1,90 +0,0 @@
package gocognit
import (
"errors"
"fmt"
"go/ast"
"github.com/uudashr/gocognit"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
)
// Doc explaining the tool.
const Doc = "Tool to ensure go code does not have high cognitive complexity."
// Analyzer runs static analysis.
var Analyzer = &analysis.Analyzer{
Name: "gocognit",
Doc: Doc,
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
}
// Recommended thresholds according to the 2008 presentation titled
// "Software Quality Metrics to Identify Risk" by Thomas McCabe Jr.
//
// 1 - 10 Simple procedure, little risk
// 11 - 20 More complex, moderate risk
// 21 - 50 Complex, high risk
// > 50 Untestable code, very high risk
//
// This threshold should be lowered to 50 over time.
const over = 130
// run reports every function declaration whose cognitive complexity,
// as measured by the gocognit library, exceeds the package-level
// threshold `over`.
func run(pass *analysis.Pass) (interface{}, error) {
	// Fetch the shared AST inspector built by the required inspect
	// analyzer. Using a distinct local name avoids shadowing the
	// imported inspect package.
	ins, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
	if !ok {
		return nil, errors.New("analyzer is not type *inspector.Inspector")
	}

	// Visit function declarations only; every other node kind is skipped.
	ins.Preorder([]ast.Node{(*ast.FuncDecl)(nil)}, func(node ast.Node) {
		decl, ok := node.(*ast.FuncDecl)
		if !ok {
			return
		}
		if complexity := gocognit.Complexity(decl); complexity > over {
			pass.Reportf(decl.Pos(), "cognitive complexity %d of func %s is high (> %d)", complexity, funcName(decl), over)
		}
	})
	return nil, nil
}
// funcName returns the name representation of a function or method:
// "(Type).Name" for methods or simply "Name" for functions.
//
// Copied from https://github.com/uudashr/gocognit/blob/5bf67146515e79acd2a8d5728deafa9d91ad48db/gocognit.go
// License: MIT
func funcName(fn *ast.FuncDecl) string {
	// Methods carry a non-empty receiver list; render those as "(Recv).Name".
	if recv := fn.Recv; recv != nil && recv.NumFields() > 0 {
		return fmt.Sprintf("(%s).%s", recvString(recv.List[0].Type), fn.Name)
	}
	return fn.Name.Name
}
// recvString returns a string representation of recv of the
// form "T", "*T", or "BADRECV" (if not a proper receiver type).
//
// Copied from https://github.com/uudashr/gocognit/blob/5bf67146515e79acd2a8d5728deafa9d91ad48db/gocognit.go
// License: MIT
func recvString(recv ast.Expr) string {
	// Plain identifier receiver: just the type name.
	if ident, ok := recv.(*ast.Ident); ok {
		return ident.Name
	}
	// Pointer receiver: prefix the element's representation with "*".
	if star, ok := recv.(*ast.StarExpr); ok {
		return "*" + recvString(star.X)
	}
	return "BADRECV"
}

View File

@@ -7,7 +7,7 @@ Flags:
-beacon string
gRPC address of the Prysm beacon node (default "127.0.0.1:4000")
-genesis uint
Genesis time. mainnet=1606824023, prater=1616508000 (default 1606824023)
Genesis time. mainnet=1606824023, prater=1616508000, pyrmont=1605722407 (default 1606824023)
```
Usage:

View File

@@ -18,7 +18,7 @@ import (
var (
beacon = flag.String("beacon", "127.0.0.1:4000", "gRPC address of the Prysm beacon node")
genesis = flag.Uint64("genesis", 1606824023, "Genesis time. mainnet=1606824023, prater=1616508000")
genesis = flag.Uint64("genesis", 1606824023, "Genesis time. mainnet=1606824023, prater=1616508000, pyrmont=1605722407")
)
func main() {

View File

@@ -276,7 +276,9 @@ func displayExitInfo(rawExitedKeys [][]byte, trimmedExitedKeys []string) {
urlFormattedPubKeys := make([]string, len(rawExitedKeys))
for i, key := range rawExitedKeys {
var baseUrl string
if params.BeaconConfig().ConfigName == params.ConfigNames[params.Prater] {
if params.BeaconConfig().ConfigName == params.ConfigNames[params.Pyrmont] {
baseUrl = "https://pyrmont.beaconcha.in/validator/"
} else if params.BeaconConfig().ConfigName == params.ConfigNames[params.Prater] {
baseUrl = "https://prater.beaconcha.in/validator/"
} else {
baseUrl = "https://beaconcha.in/validator/"

View File

@@ -101,7 +101,7 @@ type Config struct {
// registry.
func NewValidatorService(ctx context.Context, cfg *Config) (*ValidatorService, error) {
ctx, cancel := context.WithCancel(ctx)
s := &ValidatorService{
return &ValidatorService{
ctx: ctx,
cancel: cancel,
endpoint: cfg.Endpoint,
@@ -124,35 +124,34 @@ func NewValidatorService(ctx context.Context, cfg *Config) (*ValidatorService, e
logDutyCountDown: cfg.LogDutyCountDown,
Web3SignerConfig: cfg.Web3SignerConfig,
feeRecipientConfig: cfg.FeeRecipientConfig,
}
dialOpts := ConstructDialOptions(
s.maxCallRecvMsgSize,
s.withCert,
s.grpcRetries,
s.grpcRetryDelay,
)
if dialOpts == nil {
return s, nil
}
s.ctx = grpcutil.AppendHeaders(ctx, s.grpcHeaders)
conn, err := grpc.DialContext(ctx, s.endpoint, dialOpts...)
if err != nil {
return s, err
}
if s.withCert != "" {
log.Info("Established secure gRPC connection")
}
s.conn = conn
return s, nil
}, nil
}
// Start the validator service. Launches the main go routine for the validator
// client.
func (v *ValidatorService) Start() {
dialOpts := ConstructDialOptions(
v.maxCallRecvMsgSize,
v.withCert,
v.grpcRetries,
v.grpcRetryDelay,
)
if dialOpts == nil {
return
}
v.ctx = grpcutil.AppendHeaders(v.ctx, v.grpcHeaders)
conn, err := grpc.DialContext(v.ctx, v.endpoint, dialOpts...)
if err != nil {
log.Errorf("Could not dial endpoint: %s, %v", v.endpoint, err)
return
}
if v.withCert != "" {
log.Info("Established secure gRPC connection")
}
v.conn = conn
cache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: 1920, // number of keys to track.
MaxCost: 192, // maximum cost of cache, 1 item = 1 cost.

View File

@@ -2,6 +2,7 @@ package client
import (
"context"
"strings"
"testing"
"time"
@@ -32,11 +33,36 @@ func TestStop_CancelsContext(t *testing.T) {
}
}
func TestNew_Insecure(t *testing.T) {
func TestLifecycle(t *testing.T) {
hook := logTest.NewGlobal()
_, err := NewValidatorService(context.Background(), &Config{})
require.NoError(t, err)
// Use canceled context so that the run function exits immediately..
ctx, cancel := context.WithCancel(context.Background())
cancel()
validatorService := &ValidatorService{
ctx: ctx,
cancel: cancel,
endpoint: "merkle tries",
withCert: "alice.crt",
}
validatorService.Start()
require.NoError(t, validatorService.Stop(), "Could not stop service")
require.LogsContain(t, hook, "Stopping service")
}
func TestLifecycle_Insecure(t *testing.T) {
hook := logTest.NewGlobal()
// Use canceled context so that the run function exits immediately.
ctx, cancel := context.WithCancel(context.Background())
cancel()
validatorService := &ValidatorService{
ctx: ctx,
cancel: cancel,
endpoint: "merkle tries",
}
validatorService.Start()
require.LogsContain(t, hook, "You are using an insecure gRPC connection")
require.NoError(t, validatorService.Stop(), "Could not stop service")
require.LogsContain(t, hook, "Stopping service")
}
func TestStatus_NoConnectionError(t *testing.T) {
@@ -46,7 +72,9 @@ func TestStatus_NoConnectionError(t *testing.T) {
func TestStart_GrpcHeaders(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
// Use canceled context so that the run function exits immediately.
ctx, cancel := context.WithCancel(context.Background())
cancel()
for input, output := range map[string][]string{
"should-break": {},
"key=value": {"key", "value"},
@@ -59,9 +87,13 @@ func TestStart_GrpcHeaders(t *testing.T) {
"Authorization", "this is a valid value",
},
} {
cfg := &Config{GrpcHeadersFlag: input}
validatorService, err := NewValidatorService(ctx, cfg)
require.NoError(t, err)
validatorService := &ValidatorService{
ctx: ctx,
cancel: cancel,
endpoint: "merkle tries",
grpcHeaders: strings.Split(input, ","),
}
validatorService.Start()
md, _ := metadata.FromOutgoingContext(validatorService.ctx)
if input == "should-break" {
require.LogsContain(t, hook, "Incorrect gRPC header flag format. Skipping should-break")

View File

@@ -46,7 +46,7 @@ func TestServer_GetBeaconNodeConnection(t *testing.T) {
require.NoError(t, err)
want := &pb.NodeConnectionResponse{
BeaconNodeEndpoint: endpoint,
Connected: true,
Connected: false,
Syncing: false,
GenesisTime: uint64(time.Unix(0, 0).Unix()),
}