Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 13:28:01 -05:00)

Compare commits: d929e1dcaa...kintsugi-n (114 commits)
Commits in this comparison (SHA1 only; author, date, and message were not captured by this mirror):

e4b1b31f43 d2c197d040 87bc6aa5e5 5b5065b01d ee1c567561 ff1416c98d
471c94031f 9863fb3d6a f3c2d1a00b 5d8879a4df abea0a11bc 80ce1603bd
ca478244e0 8a864b66a1 72f3b9e84b 493e95060f e7e1ecd72f c286ac8b87
bde315224c 00520705bc c7fcd804d7 985ac2e848 f4a0e98926 5f93ff10ea
544248f60f 3b41968510 7fc418042a 9a03946706 33dd6dd5f2 56542e1958
e82d7b4c0b 6cb69d8ff0 70b55a0191 50f4951194 1a14f2368d bb8cad58f1
05412c1f0e b03441fed8 fa7d7cef69 1caa6c969f eeb7d5bbfb d7c7d150b1
63c4d2eb2b 9de1f694a0 8a79d06cbd 5290ad93b8 2128208ef7 296323719c
5e9583ea85 17196e0f80 c50d54000d 85b3061d1b 0146c5317a fcbc48ffd9
76ee51af9d 370b0b97ed 990ebd3fe3 54449c72e8 1dbd0b98eb 09c3896c6b
d494845e19 4d0c0f7234 bfe570b1aa 56db696823 d312e15db8 907d4cf7e6
891353d6ad 0adc08660c de31425dcd 2094e0f21f 2c6f554500 18a1e07711
5e432f5aaa 284e2696cb 7547aaa6ce 953315c2cc 9662d06b08 ecaea26ace
63819e2690 a6d0cd06b3 2dbe4f5e67 2689d6814d 69a681ddc0 7f9f1fd36c
57c97eb561 f0f94a8193 87b0bf2c2a d8ad317dec ab5f488cf4 296d7464ad
221c542e4f 7ad32aaa96 3dc0969c0c 0e18e835c3 8adfbfc382 68b0b5e0ce
eede309e0f b11628dc53 ea3ae22d3b 02bb39ddeb 1618c1f55d 73c8493fd7
a4f59a4f15 3c497efdb8 9f5daafbb7 11d7ffdfa8 c26b3305e6 38d8b63fbf
aea67405c8 57d830f8b3 ac4b1ef4ea 1d32119f5a 3540cc7b05 191e7767a6
WORKSPACE (10 changed lines)
@@ -225,7 +225,7 @@ filegroup(
url = "https://github.com/eth2-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)

- consensus_spec_version = "v1.1.3"
+ consensus_spec_version = "v1.1.5"

bls_test_version = "v0.1.1"

@@ -241,7 +241,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
- sha256 = "e572f8c57e2dbbaeee056a600dc9d08396010dd5134a3a95e43c540470acf6f5",
+ sha256 = "a7d7173d953494c0dfde432c9fc064c25d46d666b024749b3474ae0cdfc50050",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -257,7 +257,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
- sha256 = "7e2f62eaae9fd541690cc61d252556d0c5deb585ca1873aacbeb5b02d06f1362",
+ sha256 = "f86872061588c0197516b23025d39e9365b4716c112218a618739dc0d6f4666a",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -273,7 +273,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
- sha256 = "05cbb89810c8acd6c57c4773ddfd167305cd4539960e9b4d7b69e1a988b35ad2",
+ sha256 = "7a06975360fd37fbb4694d0e06abb78d2a0835146c1d9b26d33569edff8b98f0",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -288,7 +288,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
- sha256 = "0cef67b08448f7eb43bf66c464451c9e7a4852df8ef90555cca6d440e3436882",
+ sha256 = "87d8089200163340484d61212fbdffbb5d9d03e1244622761dcb91e641a65761",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -34,6 +34,7 @@ go_library(
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/execution:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
@@ -65,6 +66,9 @@ go_library(
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_emicklei_dot//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//eth/catalyst:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -40,6 +40,7 @@ func logStateTransitionData(b block.BeaconBlock) {
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
}
+ // TODO_MERGE: Add payload logging here
log.Info("Finished applying state transition")
}

@@ -51,11 +52,11 @@ func logBlockSyncStatus(block block.BeaconBlock, blockRoot [32]byte, finalized *
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
- "block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
+ "blockRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
- "parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
+ "parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(block.ParentRoot())[:8]),
"version": version.String(block.Version()),
}).Info("Synced new block")
log.WithFields(logrus.Fields{
@@ -254,6 +254,15 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
if err != nil {
return err
}
+ case version.Merge:
+ v, b, err = altair.InitializePrecomputeValidators(ctx, headState)
+ if err != nil {
+ return err
+ }
+ _, b, err = altair.ProcessEpochParticipation(ctx, headState, b, v)
+ if err != nil {
+ return err
+ }
default:
return errors.Errorf("invalid state type provided: %T", headState.InnerStateUnsafe())
}
@@ -49,6 +49,14 @@ func WithChainStartFetcher(f powchain.ChainStartFetcher) Option {
}
}

+ // WithExecutionEngineCaller to call execution engine.
+ func WithExecutionEngineCaller(c powchain.ExecutionEngineCaller) Option {
+ return func(s *Service) error {
+ s.cfg.ExecutionEngineCaller = c
+ return nil
+ }
+ }

// WithDepositCache for deposit lifecycle after chain inclusion.
func WithDepositCache(c *depositcache.DepositCache) Option {
return func(s *Service) error {
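For context, a minimal sketch of how the new option gets wired up. This is a hypothetical helper, not Prysm code; the real wiring lives in the beacon node's registerBlockchainService further down in this compare, and it assumes blockchain.NewService accepts the functional options defined here. The powchain service satisfies both interfaces, so the same instance is passed twice:

```go
package main

import (
	"context"

	"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
)

// newChainService shows the option storing the execution-engine caller on the
// service config, so onBlock can later call ExecutePayload and
// NotifyForkChoiceValidated. Hypothetical helper for illustration only.
func newChainService(ctx context.Context, web3Service *powchain.Service) (*blockchain.Service, error) {
	return blockchain.NewService(ctx,
		blockchain.WithChainStartFetcher(web3Service),
		blockchain.WithExecutionEngineCaller(web3Service),
	)
}
```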
@@ -1,16 +1,23 @@
package blockchain

import (
"bytes"
"context"
"fmt"
"math/big"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/execution"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
@@ -21,6 +28,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
)
@@ -98,11 +106,42 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
return err
}

body := signed.Block().Body()
// TODO_MERGE: Break `ExecuteStateTransition` into per_slot and block processing so we can call `ExecutePayload` in the middle.
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
// TODO_MERGE: Notify execution client in the event of invalid conensus block
return err
}

if postState.Version() == version.Merge {
executionEnabled, err := execution.Enabled(postState, body)
if err != nil {
return errors.Wrap(err, "could not check if execution is enabled")
}
if executionEnabled {
payload, err := body.ExecutionPayload()
if err != nil {
return errors.Wrap(err, "could not get body execution payload")
}
// This is not the earliest we can call `ExecutePayload`, see above to do as the soonest we can call is after per_slot processing.
_, err = s.cfg.ExecutionEngineCaller.ExecutePayload(ctx, executionPayloadToExecutableData(payload))
if err != nil {
return errors.Wrap(err, "could not execute payload")
}

mergeBlock, err := execution.IsMergeBlock(postState, body)
if err != nil {
return errors.Wrap(err, "could not check if merge block is terminal")
}
if mergeBlock {
if err := s.validateTerminalBlock(signed); err != nil {
return err
}
}
}
}

if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
return err
}
@@ -155,6 +194,49 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
log.WithError(err).Warn("Could not update head")
}

// Notify execution layer with fork choice head update if this is post merge block.
if postState.Version() == version.Merge {
executionEnabled, err := execution.Enabled(postState, body)
if err != nil {
return errors.Wrap(err, "could not check if execution is enabled")
}
if executionEnabled {
// Spawn the update task, without waiting for it to complete.
go func() {
headPayload, err := s.headBlock().Block().Body().ExecutionPayload()
if err != nil {
log.WithError(err)
return
}
// TODO_MERGE: Loading the finalized block from DB on per block is not ideal. Finalized block should be cached here
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root))
if err != nil {
log.WithError(err)
return
}
finalizedBlockHash := params.BeaconConfig().ZeroHash[:]
if finalizedBlock != nil && finalizedBlock.Version() == version.Merge {
finalizedPayload, err := finalizedBlock.Block().Body().ExecutionPayload()
if err != nil {
log.WithError(err)
return
}
finalizedBlockHash = finalizedPayload.BlockHash
}

f := catalyst.ForkchoiceStateV1{
HeadBlockHash: common.BytesToHash(headPayload.BlockHash),
SafeBlockHash: common.BytesToHash(headPayload.BlockHash),
FinalizedBlockHash: common.BytesToHash(finalizedBlockHash),
}
if err := s.cfg.ExecutionEngineCaller.NotifyForkChoiceValidated(ctx, f); err != nil {
log.WithError(err)
return
}
}()
}
}

if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, signed); err != nil {
return err
}
@@ -475,3 +557,99 @@ func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b
}
return nil
}

// validates terminal block hash in the event of manual overrides before checking for total difficulty.
//
// def validate_merge_block(block: BeaconBlock) -> None:
// """
// Check the parent PoW block of execution payload is a valid terminal PoW block.
//
// Note: Unavailable PoW block(s) may later become available,
// and a client software MAY delay a call to ``validate_merge_block``
// until the PoW block(s) become available.
// """
// if TERMINAL_BLOCK_HASH != Hash32():
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// return block.block_hash == TERMINAL_BLOCK_HASH
//
// pow_block = get_pow_block(block.body.execution_payload.parent_hash)
// # Check if `pow_block` is available
// assert pow_block is not None
// pow_parent = get_pow_block(pow_block.parent_hash)
// # Check if `pow_parent` is available
// assert pow_parent is not None
// # Check if `pow_block` is a valid terminal PoW block
// assert is_valid_terminal_pow_block(pow_block, pow_parent)
func (s *Service) validateTerminalBlock(b block.SignedBeaconBlock) error {
payload, err := b.Block().Body().ExecutionPayload()
if err != nil {
return err
}
if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) != [32]byte{} {
// `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
if params.BeaconConfig().TerminalBlockHashActivationEpoch > slots.ToEpoch(b.Block().Slot()) {
return errors.New("terminal block hash activation epoch not reached")
}
if !bytes.Equal(payload.ParentHash, params.BeaconConfig().TerminalBlockHash.Bytes()) {
return errors.New("parent hash does not match terminal block hash")
}
return nil
}
transitionBlk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(common.BytesToHash(payload.ParentHash))
if err != nil {
return errors.Wrap(err, "could not get transition block")
}
parentTransitionBlk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(common.HexToHash(transitionBlk.ParentHash))
if err != nil {
return errors.Wrap(err, "could not get transition parent block")
}
if !validTerminalPowBlock(transitionBlk, parentTransitionBlk) {
return errors.New("invalid difficulty for terminal block")
}
return nil
}

// validates terminal pow block by comparing own total difficulty with parent's total difficulty.
//
// def is_valid_terminal_pow_block(block: PowBlock, parent: PowBlock) -> bool:
// is_total_difficulty_reached = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// is_parent_total_difficulty_valid = parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY
// return is_total_difficulty_reached and is_parent_total_difficulty_valid
func validTerminalPowBlock(transitionBlock *powchain.ExecutionBlock, transitionParentBlock *powchain.ExecutionBlock) bool {
transitionBlkTTD, err := uint256.FromHex(transitionBlock.TotalDifficulty)
if err != nil {
return false
}
transitionParentBlkTTD, err := uint256.FromHex(transitionParentBlock.TotalDifficulty)
if err != nil {
return false
}
terminalTotalDifficulty := uint256.NewInt(params.BeaconConfig().TerminalTotalDifficulty)
totalDifficultyReached := transitionBlkTTD.Cmp(terminalTotalDifficulty) >= 0
parentTotalDifficultyValid := terminalTotalDifficulty.Cmp(transitionParentBlkTTD) >= 0
return totalDifficultyReached && parentTotalDifficultyValid
}
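For reference, the `is_valid_terminal_pow_block` predicate quoted in the spec comment above can be written directly against arbitrary-precision integers. This is a sketch of the spec logic only (note the spec compares the parent's total difficulty strictly below TTD), not a restatement of the Prysm implementation:

```go
package main

import "math/big"

// specValidTerminalPowBlock sketches the consensus-spec predicate quoted above:
// the terminal block must reach TERMINAL_TOTAL_DIFFICULTY while its parent stays below it.
func specValidTerminalPowBlock(blockTD, parentTD, terminalTD *big.Int) bool {
	reached := blockTD.Cmp(terminalTD) >= 0     // block.total_difficulty >= TTD
	parentBelow := parentTD.Cmp(terminalTD) < 0 // parent.total_difficulty < TTD
	return reached && parentBelow
}
```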
func executionPayloadToExecutableData(payload *ethpb.ExecutionPayload) *catalyst.ExecutableDataV1 {
baseFeePerGas := new(big.Int)
// TODO_MERGE: The conversion from 32bytes to big int is broken. This assumes base fee per gas in single digit
baseFeePerGas.SetBytes([]byte{payload.BaseFeePerGas[0]})

return &catalyst.ExecutableDataV1{
BlockHash: common.BytesToHash(payload.BlockHash),
ParentHash: common.BytesToHash(payload.ParentHash),
Coinbase: common.BytesToAddress(payload.Coinbase),
StateRoot: common.BytesToHash(payload.StateRoot),
ReceiptRoot: common.BytesToHash(payload.ReceiptRoot),
LogsBloom: payload.LogsBloom,
Random: common.BytesToHash(payload.Random),
Number: payload.BlockNumber,
GasLimit: payload.GasLimit,
GasUsed: payload.GasUsed,
Timestamp: payload.Timestamp,
ExtraData: payload.ExtraData,
BaseFeePerGas: baseFeePerGas,
Transactions: payload.Transactions,
}
}
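The TODO above notes that the base-fee conversion only copies a single byte. Assuming BaseFeePerGas is a 32-byte little-endian uint256 (the consensus-spec SSZ convention; an assumption here, not confirmed by this diff), a full conversion would look roughly like this sketch:

```go
package main

import "math/big"

// littleEndianBytesToBig sketches a full conversion for a 32-byte little-endian
// uint256 such as BaseFeePerGas. big.Int.SetBytes expects big-endian input, so
// the slice is reversed into a scratch buffer first.
func littleEndianBytesToBig(b []byte) *big.Int {
	rev := make([]byte, len(b))
	for i, v := range b {
		rev[len(b)-1-i] = v
	}
	return new(big.Int).SetBytes(rev)
}
```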
@@ -84,6 +84,8 @@ type config struct {
StateGen *stategen.State
SlasherAttestationsFeed *event.Feed
WeakSubjectivityCheckpt *ethpb.Checkpoint
+ BlockFetcher powchain.POWBlockFetcher
+ ExecutionEngineCaller powchain.ExecutionEngineCaller
}

// NewService instantiates a new block service instance that will
@@ -149,6 +151,7 @@ func (s *Service) Start() {
// If the chain has already been initialized, simply start the block processing routine.
if beaconState != nil && !beaconState.IsNil() {
log.Info("Blockchain data already exists in DB, initializing...")

s.genesisTime = time.Unix(int64(beaconState.GenesisTime()), 0)
s.cfg.AttService.SetGenesisTime(beaconState.GenesisTime())
if err := s.initializeChainInfo(s.ctx); err != nil {
beacon-chain/cache/BUILD.bazel (1 changed line, vendored)
@@ -39,6 +39,7 @@ go_library(
deps = [
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//beacon-chain/state/v3:go_default_library",
"//cache/lru:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
beacon-chain/cache/sync_committee_head_state.go (11 changed lines, vendored)
@@ -7,6 +7,7 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
)

@@ -31,10 +32,6 @@ func (c *SyncCommitteeHeadStateCache) Put(slot types.Slot, st state.BeaconState)
if st == nil || st.IsNil() {
return ErrNilValueProvided
}
- _, ok := st.(*stateAltair.BeaconState)
- if !ok {
- return ErrIncorrectType
- }
c.cache.Add(slot, st)
return nil
}
@@ -47,9 +44,13 @@ func (c *SyncCommitteeHeadStateCache) Get(slot types.Slot) (state.BeaconState, e
if !exists {
return nil, ErrNotFound
}
+ var st state.BeaconState
st, ok := val.(*stateAltair.BeaconState)
if !ok {
- return nil, ErrIncorrectType
+ st, ok = val.(*v3.BeaconState)
+ if !ok {
+ return nil, ErrIncorrectType
+ }
}
return st, nil
}
@@ -38,6 +38,7 @@ go_library(
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
@@ -10,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/math"
"github.com/prysmaticlabs/prysm/runtime/version"
"go.opencensus.io/trace"
)

@@ -264,6 +265,9 @@ func AttestationsDelta(beaconState state.BeaconStateAltair, bal *precompute.Bala
baseRewardMultiplier := increment * factor / math.IntegerSquareRoot(bal.ActiveCurrentEpoch)
leak := helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch)
inactivityDenominator := cfg.InactivityScoreBias * cfg.InactivityPenaltyQuotientAltair
+ if beaconState.Version() == version.Merge {
+ inactivityDenominator = cfg.InactivityScoreBias * cfg.InactivityPenaltyQuotientMerge
+ }

for i, v := range vals {
rewards[i], penalties[i], err = attestationDelta(bal, v, baseRewardMultiplier, inactivityDenominator, leak)
@@ -213,7 +213,7 @@ func ValidateSyncMessageTime(slot types.Slot, genesisTime time.Time, clockDispar
// Verify sync message slot is within the time range.
if messageTime.Before(lowerBound) || messageTime.After(upperBound) {
return fmt.Errorf(
- "sync message slot %d not within allowable range of %d to %d (current slot)",
+ "sync message slot %d not within allowable range of slots %d to %d",
slot,
uint64(lowerBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
uint64(upperBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/runtime/version"
"go.opencensus.io/trace"
)

@@ -69,9 +70,16 @@ func ProcessEpoch(ctx context.Context, state state.BeaconStateAltair) (state.Bea
}

// Modified in Altair.
- state, err = e.ProcessSlashings(state, params.BeaconConfig().ProportionalSlashingMultiplierAltair)
- if err != nil {
- return nil, err
+ if state.Version() == version.Altair {
+ state, err = e.ProcessSlashings(state, params.BeaconConfig().ProportionalSlashingMultiplierAltair)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ state, err = e.ProcessSlashings(state, params.BeaconConfig().ProportionalSlashingMultiplierMerge)
+ if err != nil {
+ return nil, err
+ }
+ }

state, err = e.ProcessEth1DataReset(state)
@@ -83,6 +83,8 @@ func ProcessAttesterSlashing(
slashingQuotient = cfg.MinSlashingPenaltyQuotient
case beaconState.Version() == version.Altair:
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
+ case beaconState.Version() == version.Merge:
+ slashingQuotient = cfg.MinSlashingPenaltyQuotientMerge
default:
return nil, errors.New("unknown state version")
}

@@ -81,6 +81,8 @@ func ProcessProposerSlashing(
slashingQuotient = cfg.MinSlashingPenaltyQuotient
case beaconState.Version() == version.Altair:
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
+ case beaconState.Version() == version.Merge:
+ slashingQuotient = cfg.MinSlashingPenaltyQuotientMerge
default:
return nil, errors.New("unknown state version")
}
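A quick worked example of what the new case changes, assuming the Merge quotient follows the Bellatrix spec value of 32 (versus 64 for Altair and 128 for phase 0; values assumed here, not shown in this diff). The initial slashing penalty is effective_balance / quotient, so a 32 ETH validator would lose 1 ETH at the Merge quotient instead of 0.5 ETH under Altair:

```go
package main

import "fmt"

func main() {
	const effectiveBalanceGwei = 32_000_000_000 // 32 ETH in gwei
	// Assumed quotients per fork (illustration only): phase0=128, altair=64, merge=32.
	for _, fork := range []struct {
		name     string
		quotient uint64
	}{{"phase0", 128}, {"altair", 64}, {"merge", 32}} {
		fmt.Printf("%s: initial slashing penalty = %d gwei\n", fork.name, effectiveBalanceGwei/fork.quotient)
	}
}
```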
beacon-chain/core/execution/BUILD.bazel (new file, 24 lines)
@@ -0,0 +1,24 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = [
"process.go",
"upgrade.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/execution",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v3:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
beacon-chain/core/execution/process.go (new file, 226 lines)
@@ -0,0 +1,226 @@
package execution

import (
"bytes"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
)

// IsMergeComplete returns true if the transition merge has happened.
//
// Spec code:
// def is_merge_complete(state: BeaconState) -> bool:
// return state.latest_execution_payload_header != ExecutionPayloadHeader()
func IsMergeComplete(st state.BeaconState) (bool, error) {
h, err := st.LatestExecutionPayloadHeader()
if err != nil {
return false, err
}
// TODO_MERGE: Benchmark this for faster compare.
return !ssz.DeepEqual(h, EmptypayloadHeader()), nil
}

// IsMergeBlock returns true if input block can become the merge block.
//
// Spec code:
// def is_merge_block(state: BeaconState, body: BeaconBlockBody) -> bool:
// return not is_merge_complete(state) and body.execution_payload != ExecutionPayload()
func IsMergeBlock(st state.BeaconState, blk block.BeaconBlockBody) (bool, error) {
mergeComplete, err := IsMergeComplete(st)
if err != nil {
return false, err
}
if mergeComplete {
return false, err
}

payload, err := blk.ExecutionPayload()
if err != nil {
return false, err
}
// TODO_MERGE: Benchmark this for faster compare.
return !ssz.DeepEqual(payload, EmptyPayload()), nil
}

// Enabled returns true if the beacon chain can begin executing.
//
// Spec code:
// def is_execution_enabled(state: BeaconState, body: BeaconBlockBody) -> bool:
// return is_merge_block(state, body) or is_merge_complete(state)
func Enabled(st state.BeaconState, blk block.BeaconBlockBody) (bool, error) {
mergeBlock, err := IsMergeBlock(st, blk)
if err != nil {
return false, err
}
if mergeBlock {
return true, nil
}
return IsMergeComplete(st)
}
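Putting the three predicates above together, here is a sketch of the spec-level relationship only (plain booleans stand in for beacon state and block body): execution is enabled either once the merge has completed, or on the single block that carries the first non-empty payload.

```go
package main

import "fmt"

// isExecutionEnabled sketches how the predicates defined above compose.
func isExecutionEnabled(mergeComplete, payloadNonEmpty bool) bool {
	isMergeBlock := !mergeComplete && payloadNonEmpty // is_merge_block(state, body)
	return isMergeBlock || mergeComplete              // is_execution_enabled(state, body)
}

func main() {
	fmt.Println(isExecutionEnabled(false, false)) // pre-merge, empty payload   -> false
	fmt.Println(isExecutionEnabled(false, true))  // the merge (terminal) block -> true
	fmt.Println(isExecutionEnabled(true, false))  // any post-merge block       -> true
}
```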
// ProcessPayload processes input execution payload using beacon state.
//
// Spec code:
// def process_execution_payload(state: BeaconState, payload: ExecutionPayload, execution_engine: ExecutionEngine) -> None:
// # Verify consistency of the parent hash with respect to the previous execution payload header
// if is_merge_complete(state):
// assert payload.parent_hash == state.latest_execution_payload_header.block_hash
// # Verify random
// assert payload.random == get_randao_mix(state, get_current_epoch(state))
// # Verify timestamp
// assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
// # Verify the execution payload is valid
// assert execution_engine.execute_payload(payload)
// # Cache execution payload header
// state.latest_execution_payload_header = ExecutionPayloadHeader(
// parent_hash=payload.parent_hash,
// coinbase=payload.coinbase,
// state_root=payload.state_root,
// receipt_root=payload.receipt_root,
// logs_bloom=payload.logs_bloom,
// random=payload.random,
// block_number=payload.block_number,
// gas_limit=payload.gas_limit,
// gas_used=payload.gas_used,
// timestamp=payload.timestamp,
// extra_data=payload.extra_data,
// base_fee_per_gas=payload.base_fee_per_gas,
// block_hash=payload.block_hash,
// transactions_root=hash_tree_root(payload.transactions),
// )
func ProcessPayload(st state.BeaconState, payload *ethpb.ExecutionPayload) (state.BeaconState, error) {
if err := validatePayloadWhenMergeCompletes(st, payload); err != nil {
return nil, err
}

if err := validatePayload(st, payload); err != nil {
return nil, err
}

// This deviate with spec definition. It supposed to perform `execution_engine.on_payload(payload)` here.
// Core pkg contains all pure functions. They don't have access to execution engine i.e. rpc service.
// The soonest we can do this is after state transition.

header, err := payloadToHeader(payload)
if err != nil {
return nil, err
}
if err := st.SetLatestExecutionPayloadHeader(header); err != nil {
return nil, err
}
return st, nil
}
// This validates if payload is valid according to beacon state.
// These validation steps ONLY apply to post merge.
func validatePayloadWhenMergeCompletes(st state.BeaconState, payload *ethpb.ExecutionPayload) error {
complete, err := IsMergeComplete(st)
if err != nil {
return err
}
if !complete {
return nil
}

header, err := st.LatestExecutionPayloadHeader()
if err != nil {
return err
}
if !bytes.Equal(payload.ParentHash, header.BlockHash) {
return errors.New("incorrect block hash")
}
return nil
}

// This validates if payload is valid according to beacon state.
// These validation steps apply to both pre merge and post merge.
func validatePayload(st state.BeaconState, payload *ethpb.ExecutionPayload) error {
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
return err
}
if !bytes.Equal(payload.Random, random) {
return errors.New("incorrect random")
}
t, err := slots.ToTime(st.GenesisTime(), st.Slot())
if err != nil {
return err
}
if payload.Timestamp != uint64(t.Unix()) {
return errors.New("incorrect timestamp")
}
return nil
}
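The timestamp check in validatePayload mirrors the spec's compute_timestamp_at_slot. As a worked sketch (assuming mainnet's 12-second slots and genesis time; figures are illustrative, not taken from this diff), the expected payload timestamp is simply genesis_time + slot * SECONDS_PER_SLOT:

```go
package main

import "fmt"

// computeTimestampAtSlot sketches the spec helper the check above relies on:
// slots_since_genesis = slot - GENESIS_SLOT (0), timestamp = genesis_time + slots * SECONDS_PER_SLOT.
func computeTimestampAtSlot(genesisTime, slot, secondsPerSlot uint64) uint64 {
	return genesisTime + slot*secondsPerSlot
}

func main() {
	// Example: mainnet genesis 1606824023, slot 4,000,000, 12-second slots.
	fmt.Println(computeTimestampAtSlot(1606824023, 4_000_000, 12)) // 1654824023
}
```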
// This converts `payload` into execution payload header format.
func payloadToHeader(payload *ethpb.ExecutionPayload) (*ethpb.ExecutionPayloadHeader, error) {
txRoot, err := ssz.TransactionsRoot(payload.Transactions)
if err != nil {
return nil, err
}

return &ethpb.ExecutionPayloadHeader{
ParentHash: bytesutil.SafeCopyBytes(payload.ParentHash),
Coinbase: bytesutil.SafeCopyBytes(payload.Coinbase),
StateRoot: bytesutil.SafeCopyBytes(payload.StateRoot),
ReceiptRoot: bytesutil.SafeCopyBytes(payload.ReceiptRoot),
LogsBloom: bytesutil.SafeCopyBytes(payload.LogsBloom),
Random: bytesutil.SafeCopyBytes(payload.Random),
BlockNumber: payload.BlockNumber,
GasLimit: payload.GasLimit,
GasUsed: payload.GasUsed,
Timestamp: payload.Timestamp,
ExtraData: bytesutil.SafeCopyBytes(payload.ExtraData),
BaseFeePerGas: bytesutil.SafeCopyBytes(payload.BaseFeePerGas),
BlockHash: bytesutil.SafeCopyBytes(payload.BlockHash),
TransactionsRoot: txRoot[:],
}, nil
}

// EmptyPayload represents `ExecutionPayload()` in spec.
func EmptyPayload() *ethpb.ExecutionPayload {
return &ethpb.ExecutionPayload{
ParentHash: make([]byte, 32),
Coinbase: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
Random: make([]byte, 32),
BlockNumber: 0,
GasLimit: 0,
GasUsed: 0,
Timestamp: 0,
ExtraData: nil,
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: nil,
}
}

// This represents `ExecutionPayloadHeader()` in spec.
func EmptypayloadHeader() *ethpb.ExecutionPayloadHeader {
return &ethpb.ExecutionPayloadHeader{
ParentHash: make([]byte, 32),
Coinbase: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
Random: make([]byte, 32),
BlockNumber: 0,
GasLimit: 0,
GasUsed: 0,
Timestamp: 0,
ExtraData: nil,
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
TransactionsRoot: make([]byte, 32),
}
}
beacon-chain/core/execution/upgrade.go (new file, 85 lines)
@@ -0,0 +1,85 @@
package execution

import (
"context"

"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

// UpgradeToMerge updates input state to return the version Merge state.
func UpgradeToMerge(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)

currentSyncCommittee, err := state.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSyncCommittee, err := state.NextSyncCommittee()
if err != nil {
return nil, err
}
prevEpochParticipation, err := state.PreviousEpochParticipation()
if err != nil {
return nil, err
}
currentEpochParticipation, err := state.CurrentEpochParticipation()
if err != nil {
return nil, err
}
inactivityScores, err := state.InactivityScores()
if err != nil {
return nil, err
}

s := &ethpb.BeaconStateMerge{
GenesisTime: state.GenesisTime(),
GenesisValidatorsRoot: state.GenesisValidatorRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().MergeForkVersion,
Epoch: epoch,
},
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: state.HistoricalRoots(),
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),
Validators: state.Validators(),
Balances: state.Balances(),
RandaoMixes: state.RandaoMixes(),
Slashings: state.Slashings(),
PreviousEpochParticipation: prevEpochParticipation,
CurrentEpochParticipation: currentEpochParticipation,
JustificationBits: state.JustificationBits(),
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: state.FinalizedCheckpoint(),
InactivityScores: inactivityScores,
CurrentSyncCommittee: currentSyncCommittee,
NextSyncCommittee: nextSyncCommittee,
LatestExecutionPayloadHeader: &ethpb.ExecutionPayloadHeader{
ParentHash: make([]byte, 32),
Coinbase: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
Random: make([]byte, 32),
BlockNumber: 0,
GasLimit: 0,
GasUsed: 0,
Timestamp: 0,
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
TransactionsRoot: make([]byte, 32),
},
}

return v2.InitializeFromProto(s)
}
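UpgradeToMerge is invoked at the fork boundary during slot processing; the call site added to transition.ProcessSlots further down in this compare looks like the following (excerpted here for context, see that hunk for the surrounding code):

```go
// Excerpt from the ProcessSlots change below: upgrade the state object once the
// chain reaches the first slot of MergeForkEpoch.
if time.CanUpgradeToMerge(state.Slot()) {
	state, err = execution.UpgradeToMerge(ctx, state)
	if err != nil {
		tracing.AnnotateError(span, err)
		return nil, err
	}
}
```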
@@ -2,9 +2,9 @@ package signing
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/eth2-types"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// Domain returns the domain version for BLS private key to sign and verify.
|
||||
|
||||
@@ -3,9 +3,9 @@ package signing
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/eth2-types"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
@@ -60,6 +60,16 @@ func CanUpgradeToAltair(slot types.Slot) bool {
|
||||
return epochStart && altairEpoch
|
||||
}
|
||||
|
||||
// CanUpgradeToMerge returns true if the input `slot` can upgrade to Merge fork.
|
||||
//
|
||||
// Spec code:
|
||||
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == MERGE_FORK_EPOCH
|
||||
func CanUpgradeToMerge(slot types.Slot) bool {
|
||||
epochStart := slots.IsEpochStart(slot)
|
||||
mergeEpoch := slots.ToEpoch(slot) == params.BeaconConfig().MergeForkEpoch
|
||||
return epochStart && mergeEpoch
|
||||
}
|
||||
|
||||
// CanProcessEpoch checks the eligibility to process epoch.
|
||||
// The epoch can be processed at the end of the last slot of every epoch.
|
||||
//
|
||||
|
||||
@@ -30,6 +30,7 @@ go_library(
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/execution:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition/interop:go_default_library",
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
e "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/execution"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
@@ -250,6 +251,12 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, errors.Wrap(err, "could not process epoch")
|
||||
}
|
||||
case version.Merge:
|
||||
state, err = altair.ProcessEpoch(ctx, state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, errors.Wrap(err, "could not process epoch")
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("beacon state should have a version")
|
||||
}
|
||||
@@ -266,6 +273,14 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if time.CanUpgradeToMerge(state.Slot()) {
|
||||
state, err = execution.UpgradeToMerge(ctx, state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if highestSlot < state.Slot() {
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/execution"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition/interop"
|
||||
v "github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
|
||||
@@ -152,7 +153,7 @@ func CalculateStateRoot(
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not process block")
|
||||
}
|
||||
if signed.Version() == version.Altair {
|
||||
if signed.Version() == version.Altair || signed.Version() == version.Merge {
|
||||
sa, err := signed.Block().Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
@@ -198,7 +199,7 @@ func ProcessBlockNoVerifyAnySig(
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if signed.Version() == version.Altair {
|
||||
if signed.Version() == version.Altair || signed.Version() == version.Merge {
|
||||
sa, err := signed.Block().Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
@@ -278,6 +279,11 @@ func ProcessOperationsNoVerifyAttsSigs(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case version.Merge:
|
||||
state, err = altairOperations(ctx, state, signedBeaconBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("block does not have correct version")
|
||||
}
|
||||
@@ -302,7 +308,7 @@ func ProcessBlockForStateRoot(
|
||||
body := blk.Body()
|
||||
bodyRoot, err := body.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "could not hash tree root beacon block body")
|
||||
}
|
||||
state, err = b.ProcessBlockHeaderNoVerify(ctx, state, blk.Slot(), blk.ProposerIndex(), blk.ParentRoot(), bodyRoot[:])
|
||||
if err != nil {
|
||||
@@ -310,6 +316,24 @@ func ProcessBlockForStateRoot(
|
||||
return nil, errors.Wrap(err, "could not process block header")
|
||||
}
|
||||
|
||||
if state.Version() == version.Merge {
|
||||
enabled, err := execution.Enabled(state, blk.Body())
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, errors.Wrap(err, "could not check if execution is enabled")
|
||||
}
|
||||
if enabled {
|
||||
payload, err := blk.Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state, err = execution.ProcessPayload(state, payload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution payload")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
state, err = b.ProcessRandaoNoVerify(state, signed.Block().Body().RandaoReveal())
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
|
||||
@@ -42,6 +42,7 @@ go_library(
|
||||
"//beacon-chain/state/genesis:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//beacon-chain/state/v2:go_default_library",
|
||||
"//beacon-chain/state/v3:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
|
||||
@@ -9,3 +9,10 @@ func hasAltairKey(enc []byte) bool {
|
||||
}
|
||||
return bytes.Equal(enc[:len(altairKey)], altairKey)
|
||||
}
|
||||
|
||||
func hasMergeKey(enc []byte) bool {
|
||||
if len(mergeKey) >= len(enc) {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(enc[:len(mergeKey)], mergeKey)
|
||||
}
|
||||
|
||||
@@ -609,6 +609,13 @@ func unmarshalBlock(_ context.Context, enc []byte) (block.SignedBeaconBlock, err
|
||||
return nil, err
|
||||
}
|
||||
return wrapper.WrappedAltairSignedBeaconBlock(rawBlock)
|
||||
case hasMergeKey(enc):
|
||||
rawBlock := &ethpb.SignedBeaconBlockMerge{}
|
||||
err := rawBlock.UnmarshalSSZ(enc[len(mergeKey):])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return wrapper.WrappedMergeSignedBeaconBlock(rawBlock)
|
||||
default:
|
||||
// Marshal block bytes to phase 0 beacon block.
|
||||
rawBlock := &ethpb.SignedBeaconBlock{}
|
||||
@@ -627,6 +634,8 @@ func marshalBlock(_ context.Context, blk block.SignedBeaconBlock) ([]byte, error
|
||||
return nil, err
|
||||
}
|
||||
switch blk.Version() {
|
||||
case version.Merge:
|
||||
return snappy.Encode(nil, append(mergeKey, obj...)), nil
|
||||
case version.Altair:
|
||||
return snappy.Encode(nil, append(altairKey, obj...)), nil
|
||||
case version.Phase0:
|
||||
|
||||
@@ -46,6 +46,7 @@ var (
|
||||
// Altair key used to identify object is altair compatible.
|
||||
// Objects that are only compatible with altair should be prefixed with such key.
|
||||
altairKey = []byte("altair")
|
||||
mergeKey = []byte("merge")
|
||||
|
||||
// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
|
||||
lastArchivedIndexKey = []byte("last-archived")
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/genesis"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
@@ -385,6 +386,20 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
|
||||
}
|
||||
|
||||
switch {
|
||||
case hasMergeKey(enc):
|
||||
// Marshal state bytes to altair beacon state.
|
||||
protoState := &ethpb.BeaconStateMerge{}
|
||||
if err := protoState.UnmarshalSSZ(enc[len(mergeKey):]); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to unmarshal encoding for altair")
|
||||
}
|
||||
ok, err := s.isStateValidatorMigrationOver()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ok {
|
||||
protoState.Validators = validatorEntries
|
||||
}
|
||||
return v3.InitializeFromProtoUnsafe(protoState)
|
||||
case hasAltairKey(enc):
|
||||
// Marshal state bytes to altair beacon state.
|
||||
protoState := &ethpb.BeaconStateAltair{}
|
||||
@@ -438,6 +453,19 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
|
||||
return nil, err
|
||||
}
|
||||
return snappy.Encode(nil, append(altairKey, rawObj...)), nil
|
||||
case *ethpb.BeaconStateMerge:
|
||||
rState, ok := st.InnerStateUnsafe().(*ethpb.BeaconStateMerge)
|
||||
if !ok {
|
||||
return nil, errors.New("non valid inner state")
|
||||
}
|
||||
if rState == nil {
|
||||
return nil, errors.New("nil state")
|
||||
}
|
||||
rawObj, err := rState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return snappy.Encode(nil, append(mergeKey, rawObj...)), nil
|
||||
default:
|
||||
return nil, errors.New("invalid inner state")
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/cmd"
|
||||
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
||||
@@ -95,3 +96,31 @@ func configureInteropConfig(cliCtx *cli.Context) {
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
}
|
||||
}
|
||||
|
||||
func configureExecutionMode(cliCtx *cli.Context) {
|
||||
if cliCtx.IsSet(flags.EnableMerge.Name) {
|
||||
c := params.BeaconConfig()
|
||||
c.EnabledMerge = cliCtx.Bool(flags.EnableMerge.Name)
|
||||
params.OverrideBeaconConfig(c)
|
||||
}
|
||||
if cliCtx.IsSet(flags.TerminalTotalDifficultyOverride.Name) {
|
||||
c := params.BeaconConfig()
|
||||
c.TerminalTotalDifficulty = cliCtx.Uint64(flags.TerminalTotalDifficultyOverride.Name)
|
||||
params.OverrideBeaconConfig(c)
|
||||
}
|
||||
if cliCtx.IsSet(flags.TerminalBlockHashOverride.Name) {
|
||||
c := params.BeaconConfig()
|
||||
c.TerminalBlockHash = common.HexToHash(cliCtx.String(flags.TerminalBlockHashOverride.Name))
|
||||
params.OverrideBeaconConfig(c)
|
||||
}
|
||||
if cliCtx.IsSet(flags.TerminalBlockHashActivationEpochOverride.Name) {
|
||||
c := params.BeaconConfig()
|
||||
c.TerminalBlockHashActivationEpoch = types.Epoch(cliCtx.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name))
|
||||
params.OverrideBeaconConfig(c)
|
||||
}
|
||||
if cliCtx.IsSet(flags.FeeRecipient.Name) {
|
||||
c := params.BeaconConfig()
|
||||
c.FeeRecipient = common.HexToAddress(cliCtx.String(flags.FeeRecipient.Name))
|
||||
params.OverrideBeaconConfig(c)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -105,6 +105,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
configureEth1Config(cliCtx)
|
||||
configureNetwork(cliCtx)
|
||||
configureInteropConfig(cliCtx)
|
||||
configureExecutionMode(cliCtx)
|
||||
|
||||
// Initializes any forks here.
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
@@ -492,6 +493,7 @@ func (b *BeaconNode) registerBlockchainService() error {
|
||||
blockchain.WithDatabase(b.db),
|
||||
blockchain.WithDepositCache(b.depositCache),
|
||||
blockchain.WithChainStartFetcher(web3Service),
|
||||
blockchain.WithExecutionEngineCaller(web3Service),
|
||||
blockchain.WithAttestationPool(b.attestationPool),
|
||||
blockchain.WithExitPool(b.exitPool),
|
||||
blockchain.WithSlashingPool(b.slashingsPool),
|
||||
@@ -718,6 +720,7 @@ func (b *BeaconNode) registerRPCService() error {
|
||||
StateGen: b.stateGen,
|
||||
EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
|
||||
MaxMsgSize: maxMsgSize,
|
||||
ExecutionEngineCaller: web3Service,
|
||||
})
|
||||
|
||||
return b.services.RegisterService(rpcService)
|
||||
|
||||
@@ -14,7 +14,8 @@ func (s *Service) forkWatcher() {
|
||||
select {
|
||||
case currSlot := <-slotTicker.C():
|
||||
currEpoch := slots.ToEpoch(currSlot)
|
||||
if currEpoch == params.BeaconConfig().AltairForkEpoch {
|
||||
if currEpoch == params.BeaconConfig().AltairForkEpoch ||
|
||||
currEpoch == params.BeaconConfig().MergeForkEpoch {
|
||||
// If we are in the fork epoch, we update our enr with
|
||||
// the updated fork digest. These repeatedly does
|
||||
// this over the epoch, which might be slightly wasteful
|
||||
|
||||
@@ -26,8 +26,13 @@ var gossipTopicMappings = map[string]proto.Message{
|
||||
// GossipTopicMappings is a function to return the assigned data type
|
||||
// versioned by epoch.
|
||||
func GossipTopicMappings(topic string, epoch types.Epoch) proto.Message {
|
||||
if topic == BlockSubnetTopicFormat && epoch >= params.BeaconConfig().AltairForkEpoch {
|
||||
return &ethpb.SignedBeaconBlockAltair{}
|
||||
if topic == BlockSubnetTopicFormat {
|
||||
if epoch >= params.BeaconConfig().MergeForkEpoch {
|
||||
return &ethpb.SignedBeaconBlockMerge{}
|
||||
}
|
||||
if epoch >= params.BeaconConfig().AltairForkEpoch {
|
||||
return &ethpb.SignedBeaconBlockAltair{}
|
||||
}
|
||||
}
|
||||
return gossipTopicMappings[topic]
|
||||
}
|
||||
@@ -52,4 +57,5 @@ func init() {
|
||||
}
|
||||
// Specially handle Altair Objects.
|
||||
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockAltair{})] = BlockSubnetTopicFormat
|
||||
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockMerge{})] = BlockSubnetTopicFormat
|
||||
}
|
||||
|
||||
@@ -24,18 +24,28 @@ func TestMappingHasNoDuplicates(t *testing.T) {
|
||||
func TestGossipTopicMappings_CorrectBlockType(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bCfg := params.BeaconConfig()
|
||||
forkEpoch := eth2types.Epoch(100)
|
||||
bCfg.AltairForkEpoch = forkEpoch
|
||||
altairForkEpoch := eth2types.Epoch(100)
|
||||
mergeForkEpoch := eth2types.Epoch(200)
|
||||
|
||||
bCfg.AltairForkEpoch = altairForkEpoch
|
||||
bCfg.MergeForkEpoch = mergeForkEpoch
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = eth2types.Epoch(100)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.MergeForkVersion)] = eth2types.Epoch(200)
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
|
||||
// Before Fork
|
||||
// Phase 0
|
||||
pMessage := GossipTopicMappings(BlockSubnetTopicFormat, 0)
|
||||
_, ok := pMessage.(*ethpb.SignedBeaconBlock)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// After Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, forkEpoch)
|
||||
// Altair Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockAltair)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Merge Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, mergeForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockMerge)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
}
|
||||
|
||||
beacon-chain/p2p/metaData (new binary file; contents not shown)
@@ -37,17 +37,24 @@ func (s *Service) CanSubscribe(topic string) bool {
|
||||
if parts[1] != "eth2" {
|
||||
return false
|
||||
}
|
||||
fd, err := s.currentForkDigest()
|
||||
phase0ForkDigest, err := s.currentForkDigest()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not determine fork digest")
|
||||
return false
|
||||
}
|
||||
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, s.genesisValidatorsRoot)
|
||||
altairForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not determine next fork digest")
|
||||
log.WithError(err).Error("Could not determine altair fork digest")
|
||||
return false
|
||||
}
|
||||
if parts[2] != fmt.Sprintf("%x", fd) && parts[2] != fmt.Sprintf("%x", digest) {
|
||||
mergeForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().MergeForkEpoch, s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not determine merge fork digest")
|
||||
return false
|
||||
}
|
||||
if parts[2] != fmt.Sprintf("%x", phase0ForkDigest) &&
|
||||
parts[2] != fmt.Sprintf("%x", altairForkDigest) &&
|
||||
parts[2] != fmt.Sprintf("%x", mergeForkDigest) {
|
||||
return false
|
||||
}
|
||||
if parts[4] != encoder.ProtocolSuffixSSZSnappy {
|
||||
|
||||
@@ -102,7 +102,7 @@ func TestTopicFromMessage_CorrectType(t *testing.T) {
|
||||
assert.Equal(t, SchemaVersionV1, version)
|
||||
}
|
||||
|
||||
// After Fork
|
||||
// Altair Fork
|
||||
for m := range messageMapping {
|
||||
topic, err := TopicFromMessage(m, forkEpoch)
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -39,6 +39,9 @@ func InitializeDataMaps() {
|
||||
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() (block.SignedBeaconBlock, error) {
|
||||
return wrapper.WrappedAltairSignedBeaconBlock(&ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{}})
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().MergeForkVersion): func() (block.SignedBeaconBlock, error) {
|
||||
return wrapper.WrappedMergeSignedBeaconBlock(&ethpb.SignedBeaconBlockMerge{Block: &ethpb.BeaconBlockMerge{}})
|
||||
},
|
||||
}
|
||||
|
||||
// Reset our metadata map.
|
||||
@@ -49,5 +52,8 @@ func InitializeDataMaps() {
|
||||
bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().MergeForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ go_library(
|
||||
"block_cache.go",
|
||||
"block_reader.go",
|
||||
"deposit.go",
|
||||
"execution_engine.go",
|
||||
"log.go",
|
||||
"log_processing.go",
|
||||
"prometheus.go",
|
||||
@@ -29,6 +30,7 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//contracts/deposit:go_default_library",
|
||||
@@ -46,7 +48,9 @@ go_library(
|
||||
"@com_github_ethereum_go_ethereum//accounts/abi/bind:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/math:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//eth/catalyst:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//ethclient:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
gethTypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
@@ -173,6 +174,28 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
|
||||
return s.findMoreTargetEth1Block(ctx, big.NewInt(int64(estimatedBlk)), time)
|
||||
}
|
||||
|
||||
// BlockByHash returns the pow block by hash.
|
||||
func (s *Service) BlockByHash(ctx context.Context, hash common.Hash) (*gethTypes.Block, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockByHash")
|
||||
defer span.End()
|
||||
|
||||
if s.eth1DataFetcher == nil {
|
||||
return nil, errors.New("nil eth1DataFetcher")
|
||||
}
|
||||
return s.eth1DataFetcher.BlockByHash(ctx, hash)
|
||||
}
|
||||
|
||||
// BlockByNumber returns the pow block by number.
|
||||
func (s *Service) BlockByNumber(ctx context.Context, number *big.Int) (*gethTypes.Block, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockByNumber")
|
||||
defer span.End()
|
||||
|
||||
if s.eth1DataFetcher == nil {
|
||||
return nil, errors.New("nil eth1DataFetcher")
|
||||
}
|
||||
return s.eth1DataFetcher.BlockByNumber(ctx, number)
|
||||
}
|
||||
|
||||
// Performs a search to find a target eth1 block which is earlier than or equal to the
|
||||
// target time. This method is used when head.time > targetTime
|
||||
func (s *Service) findLessTargetEth1Block(ctx context.Context, startBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
|
||||
|
||||
386
beacon-chain/powchain/execution_engine.go
Normal file
@@ -0,0 +1,386 @@
|
||||
package powchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/eth/catalyst"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var errNoExecutionEngineConnection = errors.New("can't connect to execution engine")
|
||||
var errInvalidPayload = errors.New("invalid payload")
|
||||
var errSyncing = errors.New("syncing")
|
||||
|
||||
// ExecutionEngineCaller defines methods that wrap execution engine API calls so that other Prysm services can interact with the execution engine.
|
||||
type ExecutionEngineCaller interface {
|
||||
// PreparePayload is a wrapper on top of `CatalystClient` to abstract out `types.AssembleBlockParams`.
|
||||
PreparePayload(ctx context.Context, forkchoiceState catalyst.ForkchoiceStateV1, payloadAttributes catalyst.PayloadAttributesV1) (uint64, error)
|
||||
// GetPayload is a wrapper on top of `CatalystClient`.
|
||||
GetPayload(ctx context.Context, payloadID uint64) (*catalyst.ExecutableDataV1, error)
|
||||
// NotifyForkChoiceValidated is the wrapper on top of `CatalystClient` to abstract out `types.ConsensusValidatedParams`.
|
||||
NotifyForkChoiceValidated(ctx context.Context, forkchoiceState catalyst.ForkchoiceStateV1) error
|
||||
// ExecutePayload is the wrapper on top of `CatalystClient` to abstract out `types.ForkChoiceParams`.
|
||||
ExecutePayload(ctx context.Context, data *catalyst.ExecutableDataV1) ([]byte, error)
|
||||
// LatestExecutionBlock returns the latest execution block of the pow chain.
|
||||
LatestExecutionBlock() (*ExecutionBlock, error)
|
||||
// ExecutionBlockByHash returns the execution block of a given block hash.
|
||||
ExecutionBlockByHash(blockHash common.Hash) (*ExecutionBlock, error)
|
||||
}
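A rough usage sketch of this interface, showing only the intended call order for block production: ask the engine to start building on a fork choice state, then fetch the built payload by the returned ID. The `engine` value and the fork choice and payload attribute arguments are placeholders, and the imports are assumed to be the same ones declared at the top of this file; this is not how the proposer actually wires things up.

// buildPayload is an illustrative helper, not part of this change.
func buildPayload(ctx context.Context, engine ExecutionEngineCaller,
	fcs catalyst.ForkchoiceStateV1, attrs catalyst.PayloadAttributesV1) (*catalyst.ExecutableDataV1, error) {
	payloadID, err := engine.PreparePayload(ctx, fcs, attrs)
	if err != nil {
		return nil, err
	}
	return engine.GetPayload(ctx, payloadID)
}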
|
||||
|
||||
type EngineRequest struct {
|
||||
JsonRPC string `json:"jsonrpc"`
|
||||
Method string `json:"method"`
|
||||
Params []interface{} `json:"params"`
|
||||
Id int `json:"id"`
|
||||
}
|
||||
|
||||
type ErrorRespond struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type GetPayloadRespond struct {
|
||||
JsonRPC string `json:"jsonrpc"`
|
||||
ExecutableData *catalyst.ExecutableDataV1 `json:"result"`
|
||||
Id int `json:"id"`
|
||||
Error ErrorRespond `json:"error"`
|
||||
}
|
||||
|
||||
type PreparePayloadRespond struct {
|
||||
JsonRPC string `json:"jsonrpc"`
|
||||
Result PayloadIDRespond `json:"result"`
|
||||
Id int `json:"id"`
|
||||
}
|
||||
|
||||
type PayloadIDRespond struct {
|
||||
PayloadID string `json:"payloadId"`
|
||||
}
|
||||
|
||||
type ForkchoiceUpdatedRespond struct {
|
||||
JsonRPC string `json:"jsonrpc"`
|
||||
Result ForkchoiceUpdatedResult `json:"result"`
|
||||
Id int `json:"id"`
|
||||
Error ErrorRespond `json:"error"`
|
||||
}
|
||||
|
||||
type ForkchoiceUpdatedResult struct {
|
||||
Status string `json:"status"`
|
||||
PayloadID string `json:"payloadId"`
|
||||
}
|
||||
|
||||
type ExecutePayloadRespond struct {
|
||||
JsonRPC string `json:"jsonrpc"`
|
||||
Result ExecutePayloadResult `json:"result"`
|
||||
Id int `json:"id"`
|
||||
Error ErrorRespond `json:"error"`
|
||||
}
|
||||
|
||||
type ExecutePayloadResult struct {
|
||||
Status string `json:"status"`
|
||||
LatestValidHash string `json:"latestValidHash"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type ExecutionBlockRespond struct {
|
||||
JsonRPC string `json:"jsonrpc"`
|
||||
Result *ExecutionBlock `json:"result"`
|
||||
Id int `json:"id"`
|
||||
}
|
||||
|
||||
type ExecutionBlock struct {
|
||||
ParentHash string `json:"parentHash"`
|
||||
Sha3Uncles string `json:"sha3Uncles"`
|
||||
Miner string `json:"miner"`
|
||||
StateRoot string `json:"stateRoot"`
|
||||
TransactionsRoot string `json:"transactionsRoot"`
|
||||
ReceiptsRoot string `json:"receiptsRoot"`
|
||||
LogsBloom string `json:"logsBloom"`
|
||||
Difficulty string `json:"difficulty"`
|
||||
Number string `json:"number"`
|
||||
GasLimit string `json:"gasLimit"`
|
||||
GasUsed string `json:"gasUsed"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extraData"`
|
||||
MixHash string `json:"mixHash"`
|
||||
Nonce string `json:"nonce"`
|
||||
TotalDifficulty string `json:"totalDifficulty"`
|
||||
BaseFeePerGas string `json:"baseFeePerGas"`
|
||||
Size string `json:"size"`
|
||||
Hash string `json:"hash"`
|
||||
Transactions []string `json:"transactions"`
|
||||
Uncles []string `json:"uncles"`
|
||||
}
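Every quantity in ExecutionBlock (difficulty, block number, gas values, total difficulty) arrives from the JSON-RPC response as a 0x-prefixed hex string. A small, assumed helper for turning such a string into a big.Int is sketched below; the code further down instead goes through common.HexToHash(...).Big() when it consumes TotalDifficulty.

// hexQuantityToBig decodes a 0x-prefixed hex quantity into a big.Int.
// Illustrative helper only; it assumes the math/big, strings and fmt imports.
func hexQuantityToBig(s string) (*big.Int, error) {
	v, ok := new(big.Int).SetString(strings.TrimPrefix(s, "0x"), 16)
	if !ok {
		return nil, fmt.Errorf("invalid hex quantity: %q", s)
	}
	return v, nil
}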
|
||||
|
||||
// GetPayload returns the most recent version of the execution payload that has been built since the corresponding
// call to the `PreparePayload` method. It returns the `ExecutionPayload` object.
// Engine API definition:
|
||||
// https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_getpayloadv1
|
||||
func (s *Service) GetPayload(ctx context.Context, payloadID uint64) (*catalyst.ExecutableDataV1, error) {
|
||||
reqBody := &EngineRequest{
|
||||
JsonRPC: "2.0",
|
||||
Method: "engine_getPayloadV1",
|
||||
Params: []interface{}{hexutil.EncodeUint64(payloadID)},
|
||||
}
|
||||
enc, err := json.Marshal(reqBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := http.NewRequest("POST", s.currHttpEndpoint.Url, bytes.NewBuffer(enc))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
client := &http.Client{}
|
||||
res, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := res.Body.Close(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
body, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var respond GetPayloadRespond
|
||||
if err := json.Unmarshal(body, &respond); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if respond.Error.Code != 0 {
|
||||
return nil, fmt.Errorf("could not call engine_getPayloadV1, code: %d, message: %s", respond.Error.Code, respond.Error.Message)
|
||||
}
|
||||
|
||||
return respond.ExecutableData, nil
|
||||
}
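For reference, the body that GetPayload marshals is plain JSON-RPC 2.0. A self-contained sketch that reproduces the wire format for an assumed payload ID of 1 (the struct mirrors EngineRequest above) is shown here:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// engineRequest mirrors the EngineRequest shape above to show the wire format.
type engineRequest struct {
	JsonRPC string        `json:"jsonrpc"`
	Method  string        `json:"method"`
	Params  []interface{} `json:"params"`
	Id      int           `json:"id"`
}

func main() {
	req := engineRequest{
		JsonRPC: "2.0",
		Method:  "engine_getPayloadV1",
		Params:  []interface{}{hexutil.EncodeUint64(1)},
	}
	b, _ := json.Marshal(req)
	fmt.Println(string(b))
	// {"jsonrpc":"2.0","method":"engine_getPayloadV1","params":["0x1"],"id":0}
}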
|
||||
|
||||
// ExecutePayload executes the execution payload by calling the execution engine.
|
||||
// Engine API definition:
|
||||
// https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_executepayloadv1
|
||||
func (s *Service) ExecutePayload(ctx context.Context, data *catalyst.ExecutableDataV1) ([]byte, error) {
|
||||
reqBody := &EngineRequest{
|
||||
JsonRPC: "2.0",
|
||||
Method: "engine_executePayloadV1",
|
||||
Params: []interface{}{data},
|
||||
}
|
||||
enc, err := json.Marshal(reqBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := http.NewRequest("POST", s.currHttpEndpoint.Url, bytes.NewBuffer(enc))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
client := &http.Client{}
|
||||
res, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := res.Body.Close(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
body, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var respond ExecutePayloadRespond
|
||||
if err := json.Unmarshal(body, &respond); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if respond.Error.Code != 0 {
|
||||
return nil, fmt.Errorf("could not call engine_executePayloadV1, code: %d, message: %s", respond.Error.Code, respond.Error.Message)
|
||||
}
|
||||
|
||||
if respond.Result.Status == catalyst.INVALID.Status {
|
||||
return common.FromHex(respond.Result.LatestValidHash), errInvalidPayload
|
||||
}
|
||||
if respond.Result.Status == catalyst.SYNCING.Status {
|
||||
return common.FromHex(respond.Result.LatestValidHash), errSyncing
|
||||
}
|
||||
|
||||
return common.FromHex(respond.Result.LatestValidHash), nil
|
||||
}
|
||||
|
||||
// NotifyForkChoiceValidated notifies the execution engine of fork choice updates.
|
||||
// Engine API definition:
|
||||
// https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_forkchoiceupdatedv1
|
||||
func (s *Service) NotifyForkChoiceValidated(ctx context.Context, forkchoiceState catalyst.ForkchoiceStateV1) error {
|
||||
reqBody := &EngineRequest{
|
||||
JsonRPC: "2.0",
|
||||
Method: "engine_forkchoiceUpdatedV1",
|
||||
Params: []interface{}{forkchoiceState, nil},
|
||||
}
|
||||
enc, err := json.Marshal(reqBody)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req, err := http.NewRequest("POST", s.currHttpEndpoint.Url, bytes.NewBuffer(enc))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
client := &http.Client{}
|
||||
res, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := res.Body.Close(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
body, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var respond ForkchoiceUpdatedRespond
|
||||
if err := json.Unmarshal(body, &respond); err != nil {
|
||||
return err
|
||||
}
|
||||
if respond.Error.Code != 0 {
|
||||
return fmt.Errorf("could not call engine_forkchoiceUpdatedV1, code: %d, message: %s", respond.Error.Code, respond.Error.Message)
|
||||
}
|
||||
if respond.Result.Status == catalyst.SYNCING.Status {
|
||||
return errSyncing
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PreparePayload signals the execution engine to prepare an execution payload along with the latest fork choice state.
// It reuses the `engine_forkchoiceUpdatedV1` endpoint to prepare the payload.
|
||||
// Engine API definition:
|
||||
// https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_forkchoiceupdatedv1
|
||||
func (s *Service) PreparePayload(ctx context.Context, forkchoiceState catalyst.ForkchoiceStateV1, payloadAttributes catalyst.PayloadAttributesV1) (uint64, error) {
|
||||
reqBody := &EngineRequest{
|
||||
JsonRPC: "2.0",
|
||||
Method: "engine_forkchoiceUpdatedV1",
|
||||
Params: []interface{}{forkchoiceState, payloadAttributes},
|
||||
}
|
||||
enc, err := json.Marshal(reqBody)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
req, err := http.NewRequest("POST", s.currHttpEndpoint.Url, bytes.NewBuffer(enc))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
client := &http.Client{}
|
||||
res, err := client.Do(req)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := res.Body.Close(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
body, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
var respond ForkchoiceUpdatedRespond
|
||||
if err := json.Unmarshal(body, &respond); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if respond.Error.Code != 0 {
|
||||
return 0, fmt.Errorf("could not call engine_forkchoiceUpdatedV1, code: %d, message: %s", respond.Error.Code, respond.Error.Message)
|
||||
}
|
||||
if respond.Result.Status == catalyst.SYNCING.Status {
|
||||
return 0, errSyncing
|
||||
}
|
||||
id, ok := math.ParseUint64(respond.Result.PayloadID)
|
||||
if !ok {
|
||||
return 0, errors.New("could not parse hex to uint64")
|
||||
}
|
||||
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func (s *Service) LatestExecutionBlock() (*ExecutionBlock, error) {
|
||||
reqBody := &EngineRequest{
|
||||
JsonRPC: "2.0",
|
||||
Method: "eth_getBlockByNumber",
|
||||
Params: []interface{}{"latest", false},
|
||||
}
|
||||
enc, err := json.Marshal(reqBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := http.NewRequest("GET", s.currHttpEndpoint.Url, bytes.NewBuffer(enc))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
client := &http.Client{}
|
||||
res, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := res.Body.Close(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
body, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var data ExecutionBlockRespond
|
||||
if err := json.Unmarshal(body, &data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data.Result, nil
|
||||
}
|
||||
|
||||
func (s *Service) ExecutionBlockByHash(blockHash common.Hash) (*ExecutionBlock, error) {
|
||||
reqBody := &EngineRequest{
|
||||
JsonRPC: "2.0",
|
||||
Method: "eth_getBlockByHash",
|
||||
Params: []interface{}{blockHash.String(), false},
|
||||
}
|
||||
enc, err := json.Marshal(reqBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := http.NewRequest("GET", s.currHttpEndpoint.Url, bytes.NewBuffer(enc))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
client := &http.Client{}
|
||||
res, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := res.Body.Close(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
body, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var data ExecutionBlockRespond
|
||||
if err := json.Unmarshal(body, &data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data.Result, nil
|
||||
}
|
||||
@@ -31,6 +31,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/container/trie"
|
||||
contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
|
||||
@@ -98,6 +99,8 @@ type POWBlockFetcher interface {
|
||||
BlockHashByHeight(ctx context.Context, height *big.Int) (common.Hash, error)
|
||||
BlockExists(ctx context.Context, hash common.Hash) (bool, *big.Int, error)
|
||||
BlockExistsWithCache(ctx context.Context, hash common.Hash) (bool, *big.Int, error)
|
||||
BlockByHash(ctx context.Context, hash common.Hash) (*gethTypes.Block, error)
|
||||
BlockByNumber(ctx context.Context, number *big.Int) (*gethTypes.Block, error)
|
||||
}
|
||||
|
||||
// Chain defines a standard interface for the powchain service in Prysm.
|
||||
@@ -113,6 +116,8 @@ type RPCDataFetcher interface {
|
||||
HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error)
|
||||
HeaderByHash(ctx context.Context, hash common.Hash) (*gethTypes.Header, error)
|
||||
SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error)
|
||||
BlockByHash(ctx context.Context, hash common.Hash) (*gethTypes.Block, error)
|
||||
BlockByNumber(ctx context.Context, number *big.Int) (*gethTypes.Block, error)
|
||||
}
|
||||
|
||||
// RPCClient defines the rpc methods required to interact with the eth1 node.
|
||||
@@ -765,12 +770,14 @@ func (s *Service) initPOWService() {
|
||||
s.latestEth1Data.BlockHeight = header.Number.Uint64()
|
||||
s.latestEth1Data.BlockHash = header.Hash().Bytes()
|
||||
s.latestEth1Data.BlockTime = header.Time
|
||||
|
||||
if err := s.processPastLogs(ctx); err != nil {
|
||||
log.Errorf("Unable to process past logs %v", err)
|
||||
s.retryETH1Node(err)
|
||||
continue
|
||||
if !features.Get().MergeTestnet {
|
||||
if err := s.processPastLogs(ctx); err != nil {
|
||||
log.Errorf("Unable to process past logs %v", err)
|
||||
s.retryETH1Node(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Cache eth1 headers from our voting period.
|
||||
if err := s.cacheHeadersForEth1DataVote(ctx); err != nil {
|
||||
log.Errorf("Unable to process past headers %v", err)
|
||||
@@ -779,7 +786,7 @@ func (s *Service) initPOWService() {
|
||||
}
|
||||
// Handle edge case with embedded genesis state by fetching genesis header to determine
|
||||
// its height.
|
||||
if s.chainStartData.Chainstarted && s.chainStartData.GenesisBlock == 0 {
|
||||
if s.chainStartData.Chainstarted && s.chainStartData.GenesisBlock == 0 && !features.Get().MergeTestnet {
|
||||
genHeader, err := s.eth1DataFetcher.HeaderByHash(ctx, common.BytesToHash(s.chainStartData.Eth1Data.BlockHash))
|
||||
if err != nil {
|
||||
log.Errorf("Unable to retrieve genesis ETH1.0 chain header: %v", err)
|
||||
|
||||
@@ -82,6 +82,16 @@ type goodFetcher struct {
|
||||
backend *backends.SimulatedBackend
|
||||
}
|
||||
|
||||
// BlockByHash is a stub for `goodFetcher`.
|
||||
func (g *goodFetcher) BlockByHash(ctx context.Context, hash common.Hash) (*gethTypes.Block, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// BlockByNumber is a stub for `goodFetcher`.
|
||||
func (g *goodFetcher) BlockByNumber(ctx context.Context, number *big.Int) (*gethTypes.Block, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (g *goodFetcher) HeaderByHash(_ context.Context, hash common.Hash) (*gethTypes.Header, error) {
|
||||
if bytes.Equal(hash.Bytes(), common.BytesToHash([]byte{0}).Bytes()) {
|
||||
return nil, fmt.Errorf("expected block hash to be nonzero %v", hash)
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
gethTypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/prysmaticlabs/prysm/async/event"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
@@ -59,6 +60,16 @@ func (f *FaultyMockPOWChain) DepositRoot() [32]byte {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// BlockByHash is a stub for `FaultyMockPOWChain`.
|
||||
func (m *FaultyMockPOWChain) BlockByHash(ctx context.Context, hash common.Hash) (*gethTypes.Block, error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// BlockByNumber is a stub for `FaultyMockPOWChain`.
|
||||
func (m *FaultyMockPOWChain) BlockByNumber(ctx context.Context, number *big.Int) (*gethTypes.Block, error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// DepositTrie --
|
||||
func (f *FaultyMockPOWChain) DepositTrie() *trie.SparseMerkleTrie {
|
||||
return &trie.SparseMerkleTrie{}
|
||||
|
||||
@@ -33,6 +33,16 @@ type POWChain struct {
|
||||
GenesisState state.BeaconState
|
||||
}
|
||||
|
||||
// BlockByHash is a stub for `POWChain`.
|
||||
func (m *POWChain) BlockByHash(ctx context.Context, hash common.Hash) (*gethTypes.Block, error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// BlockByNumber is a stub for `POWChain`.
|
||||
func (m *POWChain) BlockByNumber(ctx context.Context, number *big.Int) (*gethTypes.Block, error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// GenesisTime represents a static past date - JAN 01 2000.
|
||||
var GenesisTime = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
|
||||
|
||||
|
||||
@@ -50,7 +50,6 @@ func TestGetSpec(t *testing.T) {
|
||||
config.MergeForkEpoch = 101
|
||||
config.ShardingForkVersion = []byte("ShardingForkVersion")
|
||||
config.ShardingForkEpoch = 102
|
||||
config.MinAnchorPowBlockDifficulty = 1000
|
||||
config.BLSWithdrawalPrefixByte = byte('b')
|
||||
config.GenesisDelay = 24
|
||||
config.SecondsPerSlot = 25
|
||||
|
||||
@@ -14,6 +14,7 @@ go_library(
|
||||
"proposer_attestations.go",
|
||||
"proposer_deposits.go",
|
||||
"proposer_eth1data.go",
|
||||
"proposer_execution_payload.go",
|
||||
"proposer_phase0.go",
|
||||
"proposer_sync_aggregate.go",
|
||||
"server.go",
|
||||
@@ -29,6 +30,7 @@ go_library(
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/execution:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
@@ -39,6 +41,7 @@ go_library(
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/core/transition/interop:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
@@ -68,6 +71,8 @@ go_library(
|
||||
"//runtime/version:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//eth/catalyst:go_default_library",
|
||||
"@com_github_ferranbt_fastssz//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition/interop"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -34,18 +35,29 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.GetBeaconBlock")
|
||||
defer span.End()
|
||||
span.AddAttributes(trace.Int64Attribute("slot", int64(req.Slot)))
|
||||
|
||||
if slots.ToEpoch(req.Slot) < params.BeaconConfig().AltairForkEpoch {
|
||||
blk, err := vs.getPhase0BeaconBlock(ctx, req)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not fetch phase0 beacon block: %v", err)
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Phase0{Phase0: blk}}, nil
|
||||
} else if slots.ToEpoch(req.Slot) < params.BeaconConfig().MergeForkEpoch {
|
||||
blk, err := vs.getAltairBeaconBlock(ctx, req)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not fetch Altair beacon block: %v", err)
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Altair{Altair: blk}}, nil
|
||||
}
|
||||
blk, err := vs.getAltairBeaconBlock(ctx, req)
|
||||
|
||||
blk, err := vs.getMergeBeaconBlock(ctx, req)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not fetch Altair beacon block: %v", err)
|
||||
return nil, status.Errorf(codes.Internal, "Could not fetch Merge beacon block: %v", err)
|
||||
}
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Altair{Altair: blk}}, nil
|
||||
|
||||
log.Info("Sending block to client, transaction field is nil: ", blk.Body.ExecutionPayload.Transactions == nil)
|
||||
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Merge{Merge: blk}}, nil
|
||||
}
|
||||
|
||||
// GetBlock is called by a proposer during its assigned slot to request a block to sign
|
||||
@@ -60,6 +72,99 @@ func (vs *Server) GetBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb
|
||||
return vs.getPhase0BeaconBlock(ctx, req)
|
||||
}
|
||||
|
||||
func (vs *Server) getMergeBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockMerge, error) {
|
||||
altairBlk, err := vs.buildAltairBeaconBlock(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payload, err := vs.getExecutionPayload(ctx, req.Slot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution payload")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockNumber": payload.BlockNumber,
|
||||
"blockHash": fmt.Sprintf("%#x", payload.BlockHash),
|
||||
"parentHash": fmt.Sprintf("%#x", payload.ParentHash),
|
||||
"coinBase": fmt.Sprintf("%#x", payload.Coinbase),
|
||||
"gasLimit": payload.GasLimit,
|
||||
"gasUsed": payload.GasUsed,
|
||||
"baseFeePerGas": payload.BaseFeePerGas,
|
||||
"random": fmt.Sprintf("%#x", payload.Random),
|
||||
"extraData": fmt.Sprintf("%#x", payload.ExtraData),
|
||||
"txs": payload.Transactions,
|
||||
}).Info("Retrieved payload")
|
||||
|
||||
blk := ðpb.BeaconBlockMerge{
|
||||
Slot: altairBlk.Slot,
|
||||
ProposerIndex: altairBlk.ProposerIndex,
|
||||
ParentRoot: altairBlk.ParentRoot,
|
||||
StateRoot: params.BeaconConfig().ZeroHash[:],
|
||||
Body: ðpb.BeaconBlockBodyMerge{
|
||||
RandaoReveal: altairBlk.Body.RandaoReveal,
|
||||
Eth1Data: altairBlk.Body.Eth1Data,
|
||||
Graffiti: altairBlk.Body.Graffiti,
|
||||
ProposerSlashings: altairBlk.Body.ProposerSlashings,
|
||||
AttesterSlashings: altairBlk.Body.AttesterSlashings,
|
||||
Attestations: altairBlk.Body.Attestations,
|
||||
Deposits: altairBlk.Body.Deposits,
|
||||
VoluntaryExits: altairBlk.Body.VoluntaryExits,
|
||||
SyncAggregate: altairBlk.Body.SyncAggregate,
|
||||
ExecutionPayload: payload,
|
||||
},
|
||||
}
|
||||
// Compute state root with the newly constructed block.
|
||||
wsb, err := wrapper.WrappedMergeSignedBeaconBlock(
|
||||
ðpb.SignedBeaconBlockMerge{Block: blk, Signature: make([]byte, 96)},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stateRoot, err := vs.computeStateRoot(ctx, wsb)
|
||||
if err != nil {
|
||||
interop.WriteBlockToDisk(wsb, true /*failed*/)
|
||||
return nil, fmt.Errorf("could not compute state root: %v", err)
|
||||
}
|
||||
blk.StateRoot = stateRoot
|
||||
return blk, nil
|
||||
}
|
||||
|
||||
func (vs *Server) buildAltairBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockAltair, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.buildAltairBeaconBlock")
|
||||
defer span.End()
|
||||
blkData, err := vs.buildPhase0BlockData(ctx, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not build block data: %v", err)
|
||||
}
|
||||
// Use zero hash as stub for state root to compute later.
|
||||
stateRoot := params.BeaconConfig().ZeroHash[:]
|
||||
|
||||
// No need for safe sub as req.Slot cannot be 0 if requesting Altair blocks. If 0, we will be throwing
|
||||
// an error in the first validity check of this endpoint.
|
||||
syncAggregate, err := vs.getSyncAggregate(ctx, req.Slot-1, bytesutil.ToBytes32(blkData.ParentRoot))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ðpb.BeaconBlockAltair{
|
||||
Slot: req.Slot,
|
||||
ParentRoot: blkData.ParentRoot,
|
||||
StateRoot: stateRoot,
|
||||
ProposerIndex: blkData.ProposerIdx,
|
||||
Body: ðpb.BeaconBlockBodyAltair{
|
||||
Eth1Data: blkData.Eth1Data,
|
||||
Deposits: blkData.Deposits,
|
||||
Attestations: blkData.Attestations,
|
||||
RandaoReveal: req.RandaoReveal,
|
||||
ProposerSlashings: blkData.ProposerSlashings,
|
||||
AttesterSlashings: blkData.AttesterSlashings,
|
||||
VoluntaryExits: blkData.VoluntaryExits,
|
||||
Graffiti: blkData.Graffiti[:],
|
||||
SyncAggregate: syncAggregate,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ProposeBeaconBlock is called by a proposer during its assigned slot to create a block in an attempt
|
||||
// to get it processed by the beacon node as the canonical head.
|
||||
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
@@ -75,6 +180,11 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, "could not wrap altair beacon block")
|
||||
}
|
||||
case *ethpb.GenericSignedBeaconBlock_Merge:
|
||||
blk, err = wrapper.WrappedMergeSignedBeaconBlock(b.Merge)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, "could not wrap merge beacon block")
|
||||
}
|
||||
default:
|
||||
return nil, status.Error(codes.Internal, "block version not supported")
|
||||
}
|
||||
|
||||
@@ -98,6 +98,15 @@ func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (propose
|
||||
}
|
||||
return altair.ProcessAttestationNoVerifySignature(ctx, st, attestation, totalBalance)
|
||||
}
|
||||
case version.Merge:
|
||||
// Use a wrapper here, as go needs strong typing for the function signature.
|
||||
attestationProcessor = func(ctx context.Context, st state.BeaconState, attestation *ethpb.Attestation) (state.BeaconState, error) {
|
||||
totalBalance, err := helpers.TotalActiveBalance(st)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return altair.ProcessAttestationNoVerifySignature(ctx, st, attestation, totalBalance)
|
||||
}
|
||||
default:
|
||||
// Exit early if there is an unknown state type.
|
||||
return validAtts, invalidAtts
|
||||
|
||||
@@ -0,0 +1,269 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/eth/catalyst"
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/execution"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// This returns the execution payload for a given slot. The function is aware of both pre-merge and post-merge conditions,
// and the payload is computed with respect to the time of the merge.
|
||||
//
|
||||
// Spec code:
|
||||
// def prepare_execution_payload(state: BeaconState,
|
||||
// pow_chain: Dict[Hash32, PowBlock],
|
||||
// finalized_block_hash: Hash32,
|
||||
// fee_recipient: ExecutionAddress,
|
||||
// execution_engine: ExecutionEngine) -> Optional[PayloadId]:
|
||||
// if not is_merge_complete(state):
|
||||
// is_terminal_block_hash_set = TERMINAL_BLOCK_HASH != Hash32()
|
||||
// is_activation_epoch_reached = get_current_epoch(state.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
|
||||
// if is_terminal_block_hash_set and not is_activation_epoch_reached:
|
||||
// # Terminal block hash is set but activation epoch is not yet reached, no prepare payload call is needed
|
||||
// return None
|
||||
//
|
||||
// terminal_pow_block = get_terminal_pow_block(pow_chain)
|
||||
// if terminal_pow_block is None:
|
||||
// # Pre-merge, no prepare payload call is needed
|
||||
// return None
|
||||
// # Signify merge via producing on top of the terminal PoW block
|
||||
// parent_hash = terminal_pow_block.block_hash
|
||||
// else:
|
||||
// # Post-merge, normal payload
|
||||
// parent_hash = state.latest_execution_payload_header.block_hash
|
||||
//
|
||||
// # Set the forkchoice head and initiate the payload build process
|
||||
// payload_attributes = PayloadAttributes(
|
||||
// timestamp=compute_timestamp_at_slot(state, state.slot),
|
||||
// random=get_randao_mix(state, get_current_epoch(state)),
|
||||
// fee_recipient=fee_recipient,
|
||||
// )
|
||||
// return execution_engine.notify_forkchoice_updated(parent_hash, finalized_block_hash, payload_attributes)
|
||||
func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot) (*ethpb.ExecutionPayload, error) {
|
||||
// TODO_MERGE: Reuse the same head state as in building phase0 block attestation.
|
||||
st, err := vs.HeadFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
st, err = transition.ProcessSlots(ctx, st, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var parentHash []byte
|
||||
var hasTerminalBlock bool
|
||||
complete, err := execution.IsMergeComplete(st)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !complete {
|
||||
if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) != [32]byte{} {
|
||||
// `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
|
||||
isActivationEpochReached := params.BeaconConfig().TerminalBlockHashActivationEpoch <= slots.ToEpoch(slot)
|
||||
if !isActivationEpochReached {
|
||||
return execution.EmptyPayload(), nil
|
||||
}
|
||||
}
|
||||
|
||||
parentHash, hasTerminalBlock, err = vs.getTerminalBlockHash(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !hasTerminalBlock {
|
||||
// No terminal block signals this is pre merge, empty payload is used.
|
||||
return execution.EmptyPayload(), nil
|
||||
}
|
||||
// Terminal block found signals production on top of terminal PoW block.
|
||||
} else {
|
||||
// Post merge, normal payload is used.
|
||||
header, err := st.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parentHash = header.BlockHash
|
||||
}
|
||||
|
||||
t, err := slots.ToTime(st.GenesisTime(), slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
finalizedBlock, err := vs.BeaconDB.Block(ctx, bytesutil.ToBytes32(st.FinalizedCheckpoint().Root))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
finalizedBlockHash := params.BeaconConfig().ZeroHash[:]
|
||||
if finalizedBlock != nil && finalizedBlock.Version() == version.Merge {
|
||||
finalizedPayload, err := finalizedBlock.Block().Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
finalizedBlockHash = finalizedPayload.BlockHash
|
||||
}
|
||||
|
||||
f := catalyst.ForkchoiceStateV1{
|
||||
HeadBlockHash: common.BytesToHash(parentHash),
|
||||
SafeBlockHash: common.BytesToHash(parentHash),
|
||||
FinalizedBlockHash: common.BytesToHash(finalizedBlockHash),
|
||||
}
|
||||
p := catalyst.PayloadAttributesV1{
|
||||
Timestamp: uint64(t.Unix()),
|
||||
Random: common.BytesToHash(random),
|
||||
FeeRecipient: params.BeaconConfig().FeeRecipient,
|
||||
}
|
||||
id, err := vs.ExecutionEngineCaller.PreparePayload(ctx, f, p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not prepare payload")
|
||||
}
|
||||
data, err := vs.ExecutionEngineCaller.GetPayload(ctx, id)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get payload")
|
||||
}
|
||||
|
||||
return executableDataToExecutionPayload(data), nil
|
||||
}
|
||||
|
||||
func executableDataToExecutionPayload(ed *catalyst.ExecutableDataV1) *ethpb.ExecutionPayload {
|
||||
return ðpb.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo(ed.ParentHash.Bytes(), 32),
|
||||
Coinbase: bytesutil.PadTo(ed.Coinbase.Bytes(), 20),
|
||||
StateRoot: bytesutil.PadTo(ed.StateRoot.Bytes(), 32),
|
||||
ReceiptRoot: bytesutil.PadTo(ed.ReceiptRoot.Bytes(), 32),
|
||||
LogsBloom: bytesutil.PadTo(ed.LogsBloom, 256),
|
||||
Random: bytesutil.PadTo(ed.Random.Bytes(), 32),
|
||||
BlockNumber: ed.Number,
|
||||
GasLimit: ed.GasLimit,
|
||||
GasUsed: ed.GasUsed,
|
||||
Timestamp: ed.Timestamp,
|
||||
ExtraData: ed.ExtraData,
|
||||
BaseFeePerGas: bytesutil.PadTo(ed.BaseFeePerGas.Bytes(), 32),
|
||||
BlockHash: bytesutil.PadTo(ed.BlockHash.Bytes(), 32),
|
||||
Transactions: ed.Transactions,
|
||||
}
|
||||
}
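The conversion above depends on bytesutil.PadTo to force fixed-length byte fields (32-byte hashes and roots, a 20-byte coinbase address, a 256-byte logs bloom). A minimal stand-in with the behavior this code assumes, right-padding with zero bytes and returning the input unchanged if it is already long enough, is sketched here; it is not the library implementation.

// padTo right-pads b with zero bytes up to size; assumed stand-in for
// bytesutil.PadTo, shown only to document the expected behavior.
func padTo(b []byte, size int) []byte {
	if len(b) >= size {
		return b
	}
	out := make([]byte, size)
	copy(out, b)
	return out
}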
|
||||
|
||||
// This returns the valid terminal block hash with an existence bool value.
|
||||
//
|
||||
// Spec code:
|
||||
// def get_terminal_pow_block(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]:
|
||||
// if TERMINAL_BLOCK_HASH != Hash32():
|
||||
// # Terminal block hash override takes precedence over terminal total difficulty
|
||||
// if TERMINAL_BLOCK_HASH in pow_chain:
|
||||
// return pow_chain[TERMINAL_BLOCK_HASH]
|
||||
// else:
|
||||
// return None
|
||||
//
|
||||
// return get_pow_block_at_terminal_total_difficulty(pow_chain)
|
||||
func (vs *Server) getTerminalBlockHash(ctx context.Context) ([]byte, bool, error) {
|
||||
terminalBlockHash := params.BeaconConfig().TerminalBlockHash
|
||||
// Terminal block hash override takes precedence over terminal total difficulty.
|
||||
if params.BeaconConfig().TerminalBlockHash != params.BeaconConfig().ZeroHash {
|
||||
e, _, err := vs.Eth1BlockFetcher.BlockExists(ctx, terminalBlockHash)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !e {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
return terminalBlockHash.Bytes(), true, nil
|
||||
}
|
||||
|
||||
return vs.getPowBlockHashAtTerminalTotalDifficulty(ctx)
|
||||
}
|
||||
|
||||
// This returns the valid terminal block hash based on total difficulty.
|
||||
//
|
||||
// Spec code:
|
||||
// def get_pow_block_at_terminal_total_difficulty(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]:
|
||||
// # `pow_chain` abstractly represents all blocks in the PoW chain
|
||||
// for block in pow_chain:
|
||||
// parent = pow_chain[block.parent_hash]
|
||||
// block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
|
||||
// parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
|
||||
// if block_reached_ttd and not parent_reached_ttd:
|
||||
// return block
|
||||
//
|
||||
// return None
|
||||
func (vs *Server) getPowBlockHashAtTerminalTotalDifficulty(ctx context.Context) ([]byte, bool, error) {
|
||||
blk, err := vs.ExecutionEngineCaller.LatestExecutionBlock()
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get latest execution block")
|
||||
}
|
||||
parentBlk, err := vs.ExecutionEngineCaller.ExecutionBlockByHash(common.HexToHash(blk.ParentHash))
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get parent execution block")
|
||||
}
|
||||
if parentBlk == nil {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
terminalTotalDifficulty := new(big.Int)
|
||||
terminalTotalDifficulty.SetUint64(params.BeaconConfig().TerminalTotalDifficulty)
|
||||
|
||||
currentTotalDifficulty := common.HexToHash(blk.TotalDifficulty).Big()
|
||||
parentTotalDifficulty := common.HexToHash(parentBlk.TotalDifficulty).Big()
|
||||
blkNumber := blk.Number
|
||||
// TODO_MERGE: This can theoretically loop indefinitely. More discussion: https://github.com/ethereum/consensus-specs/issues/2636
|
||||
logged := false
|
||||
for {
|
||||
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
|
||||
parentReachedTTD := terminalTotalDifficulty.Cmp(parentTotalDifficulty) >= 0
|
||||
|
||||
if blockReachedTTD && parentReachedTTD {
|
||||
log.WithFields(logrus.Fields{
|
||||
"currentTotalDifficulty": currentTotalDifficulty,
|
||||
"parentTotalDifficulty": parentTotalDifficulty,
|
||||
"terminalTotalDifficulty": terminalTotalDifficulty,
|
||||
"terminalBlockHash": fmt.Sprintf("%#x", common.HexToHash(blk.Hash)),
|
||||
"terminalBlockNumber": blkNumber,
|
||||
}).Info("'Terminal difficulty reached")
|
||||
return common.HexToHash(blk.Hash).Bytes(), true, err
|
||||
} else {
|
||||
if !logged {
|
||||
log.WithFields(logrus.Fields{
|
||||
"currentTotalDifficulty": currentTotalDifficulty,
|
||||
"parentTotalDifficulty": parentTotalDifficulty,
|
||||
"terminalTotalDifficulty": terminalTotalDifficulty,
|
||||
"terminalBlockHash": fmt.Sprintf("%#x", common.HexToHash(blk.Hash)),
|
||||
"terminalBlockNumber": blkNumber,
|
||||
}).Info("Terminal difficulty NOT reached")
|
||||
logged = true
|
||||
}
|
||||
|
||||
blk := parentBlk
|
||||
blkNumber = blk.Number
|
||||
// TODO_MERGE: Add pow block cache to avoid requesting seen block.
|
||||
|
||||
parentBlk, err = vs.ExecutionEngineCaller.ExecutionBlockByHash(common.HexToHash(blk.ParentHash))
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if parentBlk == nil {
|
||||
return nil, false, nil
|
||||
}
|
||||
currentTotalDifficulty = common.HexToHash(blk.TotalDifficulty).Big()
|
||||
parentTotalDifficulty = common.HexToHash(parentBlk.TotalDifficulty).Big()
|
||||
}
|
||||
}
|
||||
}
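The loop above leans on big.Int.Cmp semantics: Cmp returns -1, 0, or +1, so `x.Cmp(y) >= 0` reads as x >= y. A tiny self-contained example of the two comparisons, with made-up difficulty values, may help keep the direction of each check straight:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	ttd := big.NewInt(1000)     // assumed terminal total difficulty
	blockTD := big.NewInt(1200) // block total difficulty
	parentTD := big.NewInt(900) // parent total difficulty

	blockReachedTTD := blockTD.Cmp(ttd) >= 0     // block TD >= TTD
	parentAtOrBelowTTD := ttd.Cmp(parentTD) >= 0 // TTD >= parent TD
	fmt.Println(blockReachedTTD, parentAtOrBelowTTD) // true true -> terminal block candidate
}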
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
opfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
|
||||
@@ -63,6 +64,8 @@ type Server struct {
|
||||
PendingDepositsFetcher depositcache.PendingDepositsFetcher
|
||||
OperationNotifier opfeed.Notifier
|
||||
StateGen stategen.StateManager
|
||||
ExecutionEngineCaller powchain.ExecutionEngineCaller
|
||||
BeaconDB db.HeadAccessDatabase
|
||||
}
|
||||
|
||||
// WaitForActivation checks if a validator public key exists in the active validator registry of the current
|
||||
|
||||
@@ -108,6 +108,7 @@ type Config struct {
|
||||
OperationNotifier opfeed.Notifier
|
||||
StateGen *stategen.State
|
||||
MaxMsgSize int
|
||||
ExecutionEngineCaller powchain.ExecutionEngineCaller
|
||||
}
|
||||
|
||||
// NewService instantiates a new RPC service instance that will
|
||||
@@ -194,6 +195,8 @@ func (s *Service) Start() {
|
||||
SlashingsPool: s.cfg.SlashingsPool,
|
||||
StateGen: s.cfg.StateGen,
|
||||
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
|
||||
ExecutionEngineCaller: s.cfg.ExecutionEngineCaller,
|
||||
BeaconDB: s.cfg.BeaconDB,
|
||||
}
|
||||
validatorServerV1 := &validator.Server{
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
|
||||
@@ -43,6 +43,7 @@ type ReadOnlyBeaconState interface {
|
||||
FieldReferencesCount() map[string]uint64
|
||||
MarshalSSZ() ([]byte, error)
|
||||
IsNil() bool
|
||||
LatestExecutionPayloadHeader() (*ethpb.ExecutionPayloadHeader, error)
|
||||
}
|
||||
|
||||
// WriteOnlyBeaconState defines a struct which only has write access to beacon state methods.
|
||||
@@ -64,6 +65,7 @@ type WriteOnlyBeaconState interface {
|
||||
SetSlashings(val []uint64) error
|
||||
UpdateSlashingsAtIndex(idx, val uint64) error
|
||||
AppendHistoricalRoots(root [32]byte) error
|
||||
SetLatestExecutionPayloadHeader(val *ethpb.ExecutionPayloadHeader) error
|
||||
}
|
||||
|
||||
// ReadOnlyValidator defines a struct which only has read access to validator methods.
|
||||
|
||||
@@ -149,7 +149,7 @@ func executeStateTransitionStateGen(
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process block")
|
||||
}
|
||||
if signed.Version() == version.Altair {
|
||||
if signed.Version() == version.Altair || signed.Version() == version.Merge {
|
||||
sa, err := signed.Block().Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -200,6 +200,11 @@ func processSlotsStateGen(ctx context.Context, state state.BeaconState, slot typ
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process epoch with optimization")
|
||||
}
|
||||
case version.Merge:
|
||||
state, err = altair.ProcessEpoch(ctx, state)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process epoch with optimization")
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("beacon state should have a version")
|
||||
}
|
||||
|
||||
@@ -54,12 +54,12 @@ func (f FieldIndex) String(stateVersion int) string {
|
||||
case Slashings:
|
||||
return "slashings"
|
||||
case PreviousEpochAttestations:
|
||||
if version.Altair == stateVersion {
|
||||
if version.Altair == stateVersion || version.Merge == stateVersion {
|
||||
return "previousEpochParticipationBits"
|
||||
}
|
||||
return "previousEpochAttestations"
|
||||
case CurrentEpochAttestations:
|
||||
if version.Altair == stateVersion {
|
||||
if version.Altair == stateVersion || version.Merge == stateVersion {
|
||||
return "currentEpochParticipationBits"
|
||||
}
|
||||
return "currentEpochAttestations"
|
||||
@@ -77,6 +77,8 @@ func (f FieldIndex) String(stateVersion int) string {
|
||||
return "currentSyncCommittee"
|
||||
case NextSyncCommittee:
|
||||
return "nextSyncCommittee"
|
||||
case LatestExecutionPayloadHeader:
|
||||
return "latestExecutionPayloadHeader"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
@@ -114,12 +116,13 @@ const (
|
||||
InactivityScores
|
||||
CurrentSyncCommittee
|
||||
NextSyncCommittee
|
||||
// State fields added in Merge.
|
||||
LatestExecutionPayloadHeader
|
||||
)
|
||||
|
||||
// Altair fields which replaced previous phase 0 fields.
|
||||
const (
|
||||
// Epoch Attestations is switched with participation bits in
|
||||
// Altair.
|
||||
// Epoch Attestations is switched with participation bits in Altair.
|
||||
PreviousEpochParticipationBits = PreviousEpochAttestations
|
||||
CurrentEpochParticipationBits = CurrentEpochAttestations
|
||||
)
|
||||
|
||||
@@ -29,3 +29,8 @@ func (b *BeaconState) CurrentSyncCommittee() (*ethpb.SyncCommittee, error) {
|
||||
func (b *BeaconState) NextSyncCommittee() (*ethpb.SyncCommittee, error) {
|
||||
return nil, errors.New("NextSyncCommittee is not supported for phase 0 beacon state")
|
||||
}
|
||||
|
||||
// LatestExecutionPayloadHeader is not supported for phase 0 beacon state.
|
||||
func (b *BeaconState) LatestExecutionPayloadHeader() (*ethpb.ExecutionPayloadHeader, error) {
|
||||
return nil, errors.New("LatestExecutionPayloadHeader is not supported for phase 0 beacon state")
|
||||
}
|
||||
|
||||
@@ -44,3 +44,8 @@ func (b *BeaconState) SetCurrentParticipationBits(val []byte) error {
|
||||
func (b *BeaconState) SetInactivityScores(val []uint64) error {
|
||||
return errors.New("SetInactivityScores is not supported for phase 0 beacon state")
|
||||
}
|
||||
|
||||
// SetLatestExecutionPayloadHeader is not supported for phase 0 beacon state.
|
||||
func (b *BeaconState) SetLatestExecutionPayloadHeader(val *ethpb.ExecutionPayloadHeader) error {
|
||||
return errors.New("SetLatestExecutionPayloadHeader is not supported for phase 0 beacon state")
|
||||
}
|
||||
|
||||
@@ -14,3 +14,8 @@ func (b *BeaconState) PreviousEpochAttestations() ([]*ethpb.PendingAttestation,
|
||||
func (b *BeaconState) CurrentEpochAttestations() ([]*ethpb.PendingAttestation, error) {
|
||||
return nil, errors.New("CurrentEpochAttestations is not supported for hard fork 1 beacon state")
|
||||
}
|
||||
|
||||
// LatestExecutionPayloadHeader is not supported for hard fork 1 beacon state.
|
||||
func (b *BeaconState) LatestExecutionPayloadHeader() (*ethpb.ExecutionPayloadHeader, error) {
|
||||
return nil, errors.New("LatestExecutionPayloadHeader is not supported for hard fork 1 beacon state")
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package v2
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -29,3 +30,13 @@ func (b *BeaconState) AppendPreviousEpochAttestations(val *ethpb.PendingAttestat
|
||||
func (b *BeaconState) RotateAttestations() error {
|
||||
return errors.New("RotateAttestations is not supported for hard fork 1 beacon state")
|
||||
}
|
||||
|
||||
// ToProto is not supported for HF1 beacon state.
|
||||
func (b *BeaconState) ToProto() (*v1.BeaconState, error) {
|
||||
return nil, errors.New("ToProto is not yet supported for hard fork 1 beacon state")
|
||||
}
|
||||
|
||||
// SetLatestExecutionPayloadHeader is not supported for hard fork 1 beacon state.
|
||||
func (b *BeaconState) SetLatestExecutionPayloadHeader(val *ethpb.ExecutionPayloadHeader) error {
|
||||
return errors.New("SetLatestExecutionPayloadHeader is not supported for hard fork 1 beacon state")
|
||||
}
|
||||
|
||||
48
beacon-chain/state/v3/BUILD.bazel
Normal file
@@ -0,0 +1,48 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"deprecated_getters.go",
|
||||
"deprecated_setters.go",
|
||||
"field_root_eth1.go",
|
||||
"field_root_validator.go",
|
||||
"field_root_vector.go",
|
||||
"field_roots.go",
|
||||
"getters.go",
|
||||
"setters.go",
|
||||
"state_trie.go",
|
||||
"types.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/v3",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//proto/migration:__subpackages__",
|
||||
"//runtime/interop:__subpackages__",
|
||||
"//shared/testutil:__pkg__",
|
||||
"//testing/spectest:__subpackages__",
|
||||
"//testing/util:__pkg__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/fieldtrie:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//beacon-chain/state/types:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_dgraph_io_ristretto//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
16
beacon-chain/state/v3/deprecated_getters.go
Normal file
@@ -0,0 +1,16 @@
|
||||
package v3
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// PreviousEpochAttestations is not supported for Merge beacon state.
|
||||
func (b *BeaconState) PreviousEpochAttestations() ([]*ethpb.PendingAttestation, error) {
|
||||
return nil, errors.New("PreviousEpochAttestations is not supported for Merge beacon state")
|
||||
}
|
||||
|
||||
// CurrentEpochAttestations is not supported for Merge beacon state.
|
||||
func (b *BeaconState) CurrentEpochAttestations() ([]*ethpb.PendingAttestation, error) {
|
||||
return nil, errors.New("CurrentEpochAttestations is not supported for Merge beacon state")
|
||||
}
|
||||
37
beacon-chain/state/v3/deprecated_setters.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package v3
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
v1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// SetPreviousEpochAttestations is not supported for Merge beacon state.
|
||||
func (b *BeaconState) SetPreviousEpochAttestations(val []*ethpb.PendingAttestation) error {
|
||||
return errors.New("SetPreviousEpochAttestations is not supported for Merge beacon state")
|
||||
}
|
||||
|
||||
// SetCurrentEpochAttestations is not supported for Merge beacon state.
|
||||
func (b *BeaconState) SetCurrentEpochAttestations(val []*ethpb.PendingAttestation) error {
|
||||
return errors.New("SetCurrentEpochAttestations is not supported for Merge beacon state")
|
||||
}
|
||||
|
||||
// AppendCurrentEpochAttestations is not supported for Merge beacon state.
|
||||
func (b *BeaconState) AppendCurrentEpochAttestations(val *ethpb.PendingAttestation) error {
|
||||
return errors.New("AppendCurrentEpochAttestations is not supported for Merge beacon state")
|
||||
}
|
||||
|
||||
// AppendPreviousEpochAttestations is not supported for Merge beacon state.
|
||||
func (b *BeaconState) AppendPreviousEpochAttestations(val *ethpb.PendingAttestation) error {
|
||||
return errors.New("AppendPreviousEpochAttestations is not supported for Merge beacon state")
|
||||
}
|
||||
|
||||
// RotateAttestations is not supported for Merge beacon state.
|
||||
func (b *BeaconState) RotateAttestations() error {
|
||||
return errors.New("RotateAttestations is not supported for Merge beacon state")
|
||||
}
|
||||
|
||||
// ToProto is not yet supported for Merge beacon state.
|
||||
func (b *BeaconState) ToProto() (*v1.BeaconState, error) {
|
||||
return nil, errors.New("ToProto is not yet supported for Merge beacon state")
|
||||
}
|
||||
59
beacon-chain/state/v3/field_root_eth1.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package v3
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// eth1Root computes the HashTreeRoot Merkleization of
// an Eth1Data struct according to the eth2
|
||||
// Simple Serialize specification.
|
||||
func eth1Root(hasher ssz.HashFn, eth1Data *ethpb.Eth1Data) ([32]byte, error) {
|
||||
if eth1Data == nil {
|
||||
return [32]byte{}, errors.New("nil eth1 data")
|
||||
}
|
||||
|
||||
enc := stateutil.Eth1DataEncKey(eth1Data)
|
||||
if features.Get().EnableSSZCache {
|
||||
if found, ok := cachedHasher.rootsCache.Get(string(enc)); ok && found != nil {
|
||||
return found.([32]byte), nil
|
||||
}
|
||||
}
|
||||
|
||||
root, err := stateutil.Eth1DataRootWithHasher(hasher, eth1Data)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
if features.Get().EnableSSZCache {
|
||||
cachedHasher.rootsCache.Set(string(enc), root, 32)
|
||||
}
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// eth1DataVotesRoot computes the HashTreeRoot Merkleization of
|
||||
// a list of Eth1Data structs according to the eth2
|
||||
// Simple Serialize specification.
|
||||
func eth1DataVotesRoot(eth1DataVotes []*ethpb.Eth1Data) ([32]byte, error) {
|
||||
hashKey, err := stateutil.Eth1DatasEncKey(eth1DataVotes)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
if features.Get().EnableSSZCache {
|
||||
if found, ok := cachedHasher.rootsCache.Get(string(hashKey[:])); ok && found != nil {
|
||||
return found.([32]byte), nil
|
||||
}
|
||||
}
|
||||
root, err := stateutil.Eth1DatasRoot(eth1DataVotes)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
if features.Get().EnableSSZCache {
|
||||
cachedHasher.rootsCache.Set(string(hashKey[:]), root, 32)
|
||||
}
|
||||
return root, nil
|
||||
}
|
||||
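Both functions above follow the same pattern: derive a cache key from the encoded value, return the cached root on a hit, and only hash on a miss. The standalone sketch below illustrates that pattern together with the SSZ merkleization shape of a three-field container; the eth1Data struct, encKey helper, and rootCache map are simplified stand-ins for the Prysm/stateutil versions, not the real implementations.

package main

import (
    "crypto/sha256"
    "encoding/binary"
    "fmt"
)

// eth1Data is a toy stand-in with the same three fields as the real container.
type eth1Data struct {
    DepositRoot  [32]byte
    DepositCount uint64
    BlockHash    [32]byte
}

// rootCache keys computed roots by the serialized value, like the SSZ cache fast path.
var rootCache = map[string][32]byte{}

func encKey(d eth1Data) string {
    buf := make([]byte, 0, 72)
    buf = append(buf, d.DepositRoot[:]...)
    cnt := make([]byte, 8)
    binary.LittleEndian.PutUint64(cnt, d.DepositCount)
    buf = append(buf, cnt...)
    buf = append(buf, d.BlockHash[:]...)
    return string(buf)
}

// eth1DataRoot hashes the three 32-byte chunks pairwise, padding the odd
// chunk with a zero chunk (the SSZ merkleization shape of a 3-field container).
func eth1DataRoot(d eth1Data) [32]byte {
    var countChunk [32]byte
    binary.LittleEndian.PutUint64(countChunk[:8], d.DepositCount)
    left := sha256.Sum256(append(d.DepositRoot[:], countChunk[:]...))
    right := sha256.Sum256(append(d.BlockHash[:], make([]byte, 32)...))
    return sha256.Sum256(append(left[:], right[:]...))
}

func cachedEth1DataRoot(d eth1Data) [32]byte {
    key := encKey(d)
    if r, ok := rootCache[key]; ok {
        return r // cache hit: skip rehashing
    }
    r := eth1DataRoot(d)
    rootCache[key] = r
    return r
}

func main() {
    d := eth1Data{DepositCount: 16}
    fmt.Printf("%x\n", cachedEth1DataRoot(d)) // computed
    fmt.Printf("%x\n", cachedEth1DataRoot(d)) // served from the cache
}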
beacon-chain/state/v3/field_root_validator.go
Normal file
@@ -0,0 +1,89 @@
package v3

import (
    "bytes"
    "encoding/binary"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
    "github.com/prysmaticlabs/prysm/config/features"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/prysmaticlabs/prysm/crypto/hash"
    "github.com/prysmaticlabs/prysm/encoding/ssz"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

func (h *stateRootHasher) validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
    hashKeyElements := make([]byte, len(validators)*32)
    roots := make([][32]byte, len(validators))
    emptyKey := hash.FastSum256(hashKeyElements)
    hasher := hash.CustomSHA256Hasher()
    bytesProcessed := 0
    for i := 0; i < len(validators); i++ {
        val, err := h.validatorRoot(hasher, validators[i])
        if err != nil {
            return [32]byte{}, errors.Wrap(err, "could not compute validators merkleization")
        }
        copy(hashKeyElements[bytesProcessed:bytesProcessed+32], val[:])
        roots[i] = val
        bytesProcessed += 32
    }

    hashKey := hash.FastSum256(hashKeyElements)
    if hashKey != emptyKey && h.rootsCache != nil {
        if found, ok := h.rootsCache.Get(string(hashKey[:])); found != nil && ok {
            return found.([32]byte), nil
        }
    }

    validatorsRootsRoot, err := ssz.BitwiseMerkleizeArrays(hasher, roots, uint64(len(roots)), params.BeaconConfig().ValidatorRegistryLimit)
    if err != nil {
        return [32]byte{}, errors.Wrap(err, "could not compute validator registry merkleization")
    }
    validatorsRootsBuf := new(bytes.Buffer)
    if err := binary.Write(validatorsRootsBuf, binary.LittleEndian, uint64(len(validators))); err != nil {
        return [32]byte{}, errors.Wrap(err, "could not marshal validator registry length")
    }
    // We need to mix in the length of the slice.
    var validatorsRootsBufRoot [32]byte
    copy(validatorsRootsBufRoot[:], validatorsRootsBuf.Bytes())
    res := ssz.MixInLength(validatorsRootsRoot, validatorsRootsBufRoot[:])
    if hashKey != emptyKey && h.rootsCache != nil {
        h.rootsCache.Set(string(hashKey[:]), res, 32)
    }
    return res, nil
}

func (h *stateRootHasher) validatorRoot(hasher ssz.HashFn, validator *ethpb.Validator) ([32]byte, error) {
    if validator == nil {
        return [32]byte{}, errors.New("nil validator")
    }

    enc := stateutil.ValidatorEncKey(validator)
    // Check if it exists in cache:
    if h.rootsCache != nil {
        if found, ok := h.rootsCache.Get(string(enc)); found != nil && ok {
            return found.([32]byte), nil
        }
    }

    valRoot, err := stateutil.ValidatorRootWithHasher(hasher, validator)
    if err != nil {
        return [32]byte{}, err
    }

    if h.rootsCache != nil {
        h.rootsCache.Set(string(enc), valRoot, 32)
    }
    return valRoot, nil
}

// ValidatorRegistryRoot computes the HashTreeRoot Merkleization of
// a list of validator structs according to the eth2
// Simple Serialize specification.
func ValidatorRegistryRoot(vals []*ethpb.Validator) ([32]byte, error) {
    if features.Get().EnableSSZCache {
        return cachedHasher.validatorRegistryRoot(vals)
    }
    return nocachedHasher.validatorRegistryRoot(vals)
}
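validatorRegistryRoot merkleizes the per-validator roots and then mixes in the list length, as SSZ requires for variable-length lists. Below is a minimal sketch of that mix-in step, using made-up leaf roots and a four-leaf subtree instead of the full ValidatorRegistryLimit-depth tree.

package main

import (
    "crypto/sha256"
    "encoding/binary"
    "fmt"
)

// hashPair hashes two 32-byte nodes into their parent.
func hashPair(a, b [32]byte) [32]byte {
    return sha256.Sum256(append(a[:], b[:]...))
}

// mixInLength hashes the subtree root with the list length encoded as a
// little-endian uint64 in a 32-byte chunk, producing the SSZ list root.
func mixInLength(root [32]byte, length uint64) [32]byte {
    var lenChunk [32]byte
    binary.LittleEndian.PutUint64(lenChunk[:8], length)
    return hashPair(root, lenChunk)
}

func main() {
    // Pretend these are per-validator roots produced by validatorRoot.
    leaves := [][32]byte{{1}, {2}, {3}}

    // Merkleize over a 4-leaf subtree (capacity rounded up to a power of two);
    // the real registry pads up to ValidatorRegistryLimit instead.
    padded := append(leaves, [32]byte{})
    left := hashPair(padded[0], padded[1])
    right := hashPair(padded[2], padded[3])
    root := hashPair(left, right)

    fmt.Printf("list root: %x\n", mixInLength(root, uint64(len(leaves))))
}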
beacon-chain/state/v3/field_root_vector.go
Normal file
@@ -0,0 +1,142 @@
package v3

import (
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
    "github.com/prysmaticlabs/prysm/crypto/hash"
    "github.com/prysmaticlabs/prysm/encoding/ssz"
)

func (h *stateRootHasher) arraysRoot(input [][]byte, length uint64, fieldName string) ([32]byte, error) {
    lock.Lock()
    defer lock.Unlock()
    hashFunc := hash.CustomSHA256Hasher()
    if _, ok := layersCache[fieldName]; !ok && h.rootsCache != nil {
        depth := ssz.Depth(length)
        layersCache[fieldName] = make([][][32]byte, depth+1)
    }

    leaves := make([][32]byte, length)
    for i, chunk := range input {
        copy(leaves[i][:], chunk)
    }
    bytesProcessed := 0
    changedIndices := make([]int, 0)
    prevLeaves, ok := leavesCache[fieldName]
    if len(prevLeaves) == 0 || h.rootsCache == nil {
        prevLeaves = leaves
    }

    for i := 0; i < len(leaves); i++ {
        // We check if any items changed since the roots were last recomputed.
        notEqual := leaves[i] != prevLeaves[i]
        if ok && h.rootsCache != nil && notEqual {
            changedIndices = append(changedIndices, i)
        }
        bytesProcessed += 32
    }
    if len(changedIndices) > 0 && h.rootsCache != nil {
        var rt [32]byte
        var err error
        // If indices did change since last computation, we only recompute
        // the modified branches in the cached Merkle tree for this state field.
        chunks := leaves

        // We need to ensure we recompute indices of the Merkle tree which
        // changed in-between calls to this function. This check adds an offset
        // to the recomputed indices to ensure we do so evenly.
        maxChangedIndex := changedIndices[len(changedIndices)-1]
        if maxChangedIndex+2 == len(chunks) && maxChangedIndex%2 != 0 {
            changedIndices = append(changedIndices, maxChangedIndex+1)
        }
        for i := 0; i < len(changedIndices); i++ {
            rt, err = recomputeRoot(changedIndices[i], chunks, fieldName, hashFunc)
            if err != nil {
                return [32]byte{}, err
            }
        }
        leavesCache[fieldName] = chunks
        return rt, nil
    }

    res, err := h.merkleizeWithCache(leaves, length, fieldName, hashFunc)
    if err != nil {
        return [32]byte{}, err
    }
    if h.rootsCache != nil {
        leavesCache[fieldName] = leaves
    }
    return res, nil
}

func recomputeRoot(idx int, chunks [][32]byte, fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) {
    items, ok := layersCache[fieldName]
    if !ok {
        return [32]byte{}, errors.New("could not recompute root as there was no cache found")
    }
    if items == nil {
        return [32]byte{}, errors.New("could not recompute root as there were no items found in the layers cache")
    }
    layers := items
    root := chunks[idx]
    layers[0] = chunks
    // The merkle tree structure looks as follows:
    // [[r1, r2, r3, r4], [parent1, parent2], [root]]
    // Using information about the index which changed, idx, we recompute
    // only its branch up the tree.
    currentIndex := idx
    for i := 0; i < len(layers)-1; i++ {
        isLeft := currentIndex%2 == 0
        neighborIdx := currentIndex ^ 1

        neighbor := [32]byte{}
        if layers[i] != nil && len(layers[i]) != 0 && neighborIdx < len(layers[i]) {
            neighbor = layers[i][neighborIdx]
        }
        if isLeft {
            parentHash := hasher(append(root[:], neighbor[:]...))
            root = parentHash
        } else {
            parentHash := hasher(append(neighbor[:], root[:]...))
            root = parentHash
        }
        parentIdx := currentIndex / 2
        // Update the cached layers at the parent index.
        if len(layers[i+1]) == 0 {
            layers[i+1] = append(layers[i+1], root)
        } else {
            layers[i+1][parentIdx] = root
        }
        currentIndex = parentIdx
    }
    layersCache[fieldName] = layers
    // If there is only a single leaf, we return it (the identity element).
    if len(layers[0]) == 1 {
        return layers[0][0], nil
    }
    return root, nil
}

func (h *stateRootHasher) merkleizeWithCache(leaves [][32]byte, length uint64,
    fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) {
    if len(leaves) == 1 {
        return leaves[0], nil
    }
    hashLayer := leaves
    layers := make([][][32]byte, ssz.Depth(length)+1)
    if items, ok := layersCache[fieldName]; ok && h.rootsCache != nil {
        if len(items[0]) == len(leaves) {
            layers = items
        }
    }
    layers[0] = hashLayer
    layers, hashLayer, err := stateutil.MerkleizeTrieLeaves(layers, hashLayer, hasher)
    if err != nil {
        return [32]byte{}, err
    }
    root := hashLayer[0]
    if h.rootsCache != nil {
        layersCache[fieldName] = layers
    }
    return root, nil
}
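arraysRoot and recomputeRoot avoid rehashing a whole fixed-size vector when only a few leaves changed: with the layers cached, each changed leaf costs one branch of log2(n) hashes instead of a full rebuild. The toy example below builds a four-leaf tree, updates one leaf through the cached layers, and checks the result against a full recomputation; names and layout are illustrative, not the Prysm cache.

package main

import (
    "crypto/sha256"
    "fmt"
)

// h hashes two 32-byte nodes into their parent.
func h(a, b [32]byte) [32]byte { return sha256.Sum256(append(a[:], b[:]...)) }

// buildLayers fully merkleizes a power-of-two number of leaves and keeps
// every intermediate layer so later updates can reuse them.
func buildLayers(leaves [][32]byte) [][][32]byte {
    layers := [][][32]byte{leaves}
    cur := leaves
    for len(cur) > 1 {
        next := make([][32]byte, len(cur)/2)
        for i := 0; i < len(cur); i += 2 {
            next[i/2] = h(cur[i], cur[i+1])
        }
        layers = append(layers, next)
        cur = next
    }
    return layers
}

// recomputeBranch writes the new leaf at idx and rehashes only its path to the root.
func recomputeBranch(layers [][][32]byte, idx int, leaf [32]byte) [32]byte {
    layers[0][idx] = leaf
    root := leaf
    for i := 0; i < len(layers)-1; i++ {
        neighbor := layers[i][idx^1] // sibling node at this depth
        if idx%2 == 0 {
            root = h(root, neighbor)
        } else {
            root = h(neighbor, root)
        }
        idx /= 2
        layers[i+1][idx] = root
    }
    return root
}

func main() {
    leaves := [][32]byte{{1}, {2}, {3}, {4}}
    layers := buildLayers(leaves)

    incremental := recomputeBranch(layers, 2, [32]byte{9})
    full := buildLayers([][32]byte{{1}, {2}, {9}, {4}})

    fmt.Println(incremental == full[len(full)-1][0]) // true
}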
beacon-chain/state/v3/field_roots.go
Normal file
@@ -0,0 +1,228 @@
package v3

import (
    "encoding/binary"
    "sync"

    "github.com/dgraph-io/ristretto"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
    "github.com/prysmaticlabs/prysm/config/features"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/prysmaticlabs/prysm/crypto/hash"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/encoding/ssz"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

var (
    leavesCache = make(map[string][][32]byte, params.BeaconConfig().BeaconStateMergeFieldCount)
    layersCache = make(map[string][][][32]byte, params.BeaconConfig().BeaconStateMergeFieldCount)
    lock        sync.RWMutex
)

const cacheSize = 100000

var nocachedHasher *stateRootHasher
var cachedHasher *stateRootHasher

func init() {
    rootsCache, err := ristretto.NewCache(&ristretto.Config{
        NumCounters: cacheSize, // number of keys to track frequency of (1M).
        MaxCost:     1 << 22,   // maximum cost of cache (3MB).
        // 100,000 roots will take up approximately 3 MB in memory.
        BufferItems: 64, // number of keys per Get buffer.
    })
    if err != nil {
        panic(err)
    }
    // Temporarily disable roots cache until cache issues can be resolved.
    cachedHasher = &stateRootHasher{rootsCache: rootsCache}
    nocachedHasher = &stateRootHasher{}
}

type stateRootHasher struct {
    rootsCache *ristretto.Cache
}

// computeFieldRoots returns the hash tree root computations of every field in
// the beacon state as a list of 32 byte roots.
func computeFieldRoots(state *ethpb.BeaconStateMerge) ([][]byte, error) {
    if features.Get().EnableSSZCache {
        return cachedHasher.computeFieldRootsWithHasher(state)
    }
    return nocachedHasher.computeFieldRootsWithHasher(state)
}

func (h *stateRootHasher) computeFieldRootsWithHasher(state *ethpb.BeaconStateMerge) ([][]byte, error) {
    if state == nil {
        return nil, errors.New("nil state")
    }
    hasher := hash.CustomSHA256Hasher()
    fieldRoots := make([][]byte, params.BeaconConfig().BeaconStateMergeFieldCount)

    // Genesis time root.
    genesisRoot := ssz.Uint64Root(state.GenesisTime)
    fieldRoots[0] = genesisRoot[:]

    // Genesis validator root.
    r := [32]byte{}
    copy(r[:], state.GenesisValidatorsRoot)
    fieldRoots[1] = r[:]

    // Slot root.
    slotRoot := ssz.Uint64Root(uint64(state.Slot))
    fieldRoots[2] = slotRoot[:]

    // Fork data structure root.
    forkHashTreeRoot, err := ssz.ForkRoot(state.Fork)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute fork merkleization")
    }
    fieldRoots[3] = forkHashTreeRoot[:]

    // BeaconBlockHeader data structure root.
    headerHashTreeRoot, err := stateutil.BlockHeaderRoot(state.LatestBlockHeader)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute block header merkleization")
    }
    fieldRoots[4] = headerHashTreeRoot[:]

    // BlockRoots array root.
    blockRootsRoot, err := h.arraysRoot(state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots")
    if err != nil {
        return nil, errors.Wrap(err, "could not compute block roots merkleization")
    }
    fieldRoots[5] = blockRootsRoot[:]

    // StateRoots array root.
    stateRootsRoot, err := h.arraysRoot(state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots")
    if err != nil {
        return nil, errors.Wrap(err, "could not compute state roots merkleization")
    }
    fieldRoots[6] = stateRootsRoot[:]

    // HistoricalRoots slice root.
    historicalRootsRt, err := ssz.ByteArrayRootWithLimit(state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute historical roots merkleization")
    }
    fieldRoots[7] = historicalRootsRt[:]

    // Eth1Data data structure root.
    eth1HashTreeRoot, err := eth1Root(hasher, state.Eth1Data)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute eth1data merkleization")
    }
    fieldRoots[8] = eth1HashTreeRoot[:]

    // Eth1DataVotes slice root.
    eth1VotesRoot, err := eth1DataVotesRoot(state.Eth1DataVotes)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute eth1data votes merkleization")
    }
    fieldRoots[9] = eth1VotesRoot[:]

    // Eth1DepositIndex root.
    eth1DepositIndexBuf := make([]byte, 8)
    binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex)
    eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf)
    fieldRoots[10] = eth1DepositBuf[:]

    // Validators slice root.
    validatorsRoot, err := h.validatorRegistryRoot(state.Validators)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute validator registry merkleization")
    }
    fieldRoots[11] = validatorsRoot[:]

    // Balances slice root.
    balancesRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.Balances)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute validator balances merkleization")
    }
    fieldRoots[12] = balancesRoot[:]

    // RandaoMixes array root.
    randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector), "RandaoMixes")
    if err != nil {
        return nil, errors.Wrap(err, "could not compute randao roots merkleization")
    }
    fieldRoots[13] = randaoRootsRoot[:]

    // Slashings array root.
    slashingsRootsRoot, err := ssz.SlashingsRoot(state.Slashings)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute slashings merkleization")
    }
    fieldRoots[14] = slashingsRootsRoot[:]

    // PreviousEpochParticipation slice root.
    prevParticipationRoot, err := stateutil.ParticipationBitsRoot(state.PreviousEpochParticipation)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute previous epoch participation merkleization")
    }
    fieldRoots[15] = prevParticipationRoot[:]

    // CurrentEpochParticipation slice root.
    currParticipationRoot, err := stateutil.ParticipationBitsRoot(state.CurrentEpochParticipation)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute current epoch participation merkleization")
    }
    fieldRoots[16] = currParticipationRoot[:]

    // JustificationBits root.
    justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits)
    fieldRoots[17] = justifiedBitsRoot[:]

    // PreviousJustifiedCheckpoint data structure root.
    prevCheckRoot, err := ssz.CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization")
    }
    fieldRoots[18] = prevCheckRoot[:]

    // CurrentJustifiedCheckpoint data structure root.
    currJustRoot, err := ssz.CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization")
    }
    fieldRoots[19] = currJustRoot[:]

    // FinalizedCheckpoint data structure root.
    finalRoot, err := ssz.CheckpointRoot(hasher, state.FinalizedCheckpoint)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization")
    }
    fieldRoots[20] = finalRoot[:]

    // Inactivity scores root.
    inactivityScoresRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.InactivityScores)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute inactivityScoreRoot")
    }
    fieldRoots[21] = inactivityScoresRoot[:]

    // Current sync committee root.
    currentSyncCommitteeRoot, err := stateutil.SyncCommitteeRoot(state.CurrentSyncCommittee)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute sync committee merkleization")
    }
    fieldRoots[22] = currentSyncCommitteeRoot[:]

    // Next sync committee root.
    nextSyncCommitteeRoot, err := stateutil.SyncCommitteeRoot(state.NextSyncCommittee)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute sync committee merkleization")
    }
    fieldRoots[23] = nextSyncCommitteeRoot[:]

    // Execution payload root.
    executionPayloadRoot, err := state.LatestExecutionPayloadHeader.HashTreeRoot()
    if err != nil {
        return nil, err
    }
    fieldRoots[24] = executionPayloadRoot[:]

    return fieldRoots, nil
}
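computeFieldRoots returns one 32-byte root per field of the Merge state (25 in total); the state's own hash tree root is then the pairwise merkleization of those roots padded with zero chunks to the next power of two, which is what the state_trie.go HashTreeRoot below performs via stateutil.Merkleize. A self-contained sketch of that final step, with placeholder field roots:

package main

import (
    "crypto/sha256"
    "fmt"
)

// merkleize pads the chunks with zero chunks to the next power of two and
// hashes them pairwise until a single root remains.
func merkleize(chunks [][32]byte) [32]byte {
    size := 1
    for size < len(chunks) {
        size *= 2
    }
    layer := make([][32]byte, size)
    copy(layer, chunks)

    for len(layer) > 1 {
        next := make([][32]byte, len(layer)/2)
        for i := 0; i < len(layer); i += 2 {
            next[i/2] = sha256.Sum256(append(layer[i][:], layer[i+1][:]...))
        }
        layer = next
    }
    return layer[0]
}

func main() {
    fieldRoots := make([][32]byte, 25) // one root per BeaconStateMerge field
    for i := range fieldRoots {
        fieldRoots[i] = [32]byte{byte(i + 1)} // made-up placeholder roots
    }
    fmt.Printf("state root: %x\n", merkleize(fieldRoots))
}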
beacon-chain/state/v3/getters.go
Normal file
File diff suppressed because it is too large
beacon-chain/state/v3/setters.go
Normal file
@@ -0,0 +1,828 @@
package v3

import (
    "fmt"

    "github.com/pkg/errors"
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/go-bitfield"
    "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
    stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types"
    "github.com/prysmaticlabs/prysm/crypto/hash"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "google.golang.org/protobuf/proto"
)

// For our setters, we have a field reference counter through
// which we can track shared field references. This helps when
// performing state copies, as we simply copy the reference to the
// field. When we do need to modify these fields, we
// perform a full copy of the field. This is true of most of our
// fields except for the following below.
// 1) BlockRoots
// 2) StateRoots
// 3) Eth1DataVotes
// 4) RandaoMixes
// 5) HistoricalRoots
// 6) CurrentParticipationBits
// 7) PreviousParticipationBits
//
// The fields referred to above are instead copied by reference, where
// we simply copy the reference to the underlying object instead of the
// whole object. This is possible due to how we have structured our state
// as we copy the value on read, so as to ensure the underlying object is
// not mutated while it is being accessed during a state read.
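A minimal sketch of the copy-on-write scheme described in the comment above, assuming a simplified stand-in for stateutil.Reference: two state copies share one backing slice until a setter sees more than one reference, at which point it clones the slice before writing.

package main

import "fmt"

// ref is a toy reference counter standing in for stateutil.Reference.
type ref struct{ count int }

func (r *ref) Refs() int { return r.count }
func (r *ref) AddRef()   { r.count++ }
func (r *ref) MinusRef() { r.count-- }

// sharedRoots is a hypothetical shared field: a slice plus its reference counter.
type sharedRoots struct {
    data [][]byte
    refs *ref
}

// copyShared hands out a second view over the same backing slice.
func (s *sharedRoots) copyShared() *sharedRoots {
    s.refs.AddRef()
    return &sharedRoots{data: s.data, refs: s.refs}
}

// setRoot performs copy-on-write: it only clones when the slice is still shared.
func (s *sharedRoots) setRoot(idx int, root []byte) {
    if s.refs.Refs() > 1 {
        cloned := make([][]byte, len(s.data))
        copy(cloned, s.data)
        s.data = cloned
        s.refs.MinusRef()
        s.refs = &ref{count: 1}
    }
    s.data[idx] = root
}

func main() {
    a := &sharedRoots{data: [][]byte{{0x01}, {0x02}}, refs: &ref{count: 1}}
    b := a.copyShared()

    b.setRoot(0, []byte{0xff}) // triggers a clone; a is untouched

    fmt.Printf("a[0]=%x b[0]=%x refs(a)=%d refs(b)=%d\n",
        a.data[0], b.data[0], a.refs.Refs(), b.refs.Refs())
}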
// SetGenesisTime for the beacon state.
|
||||
func (b *BeaconState) SetGenesisTime(val uint64) error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.state.GenesisTime = val
|
||||
b.markFieldAsDirty(genesisTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetGenesisValidatorRoot for the beacon state.
|
||||
func (b *BeaconState) SetGenesisValidatorRoot(val []byte) error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.state.GenesisValidatorsRoot = val
|
||||
b.markFieldAsDirty(genesisValidatorRoot)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetSlot for the beacon state.
|
||||
func (b *BeaconState) SetSlot(val types.Slot) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.state.Slot = val
|
||||
b.markFieldAsDirty(slot)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetFork version for the beacon chain.
|
||||
func (b *BeaconState) SetFork(val *ethpb.Fork) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
fk, ok := proto.Clone(val).(*ethpb.Fork)
|
||||
if !ok {
|
||||
return errors.New("proto.Clone did not return a fork proto")
|
||||
}
|
||||
b.state.Fork = fk
|
||||
b.markFieldAsDirty(fork)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetLatestBlockHeader in the beacon state.
|
||||
func (b *BeaconState) SetLatestBlockHeader(val *ethpb.BeaconBlockHeader) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.state.LatestBlockHeader = ethpb.CopyBeaconBlockHeader(val)
|
||||
b.markFieldAsDirty(latestBlockHeader)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetBlockRoots for the beacon state. Updates the entire
|
||||
// list to a new value by overwriting the previous one.
|
||||
func (b *BeaconState) SetBlockRoots(val [][]byte) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.sharedFieldReferences[blockRoots].MinusRef()
|
||||
b.sharedFieldReferences[blockRoots] = stateutil.NewRef(1)
|
||||
|
||||
b.state.BlockRoots = val
|
||||
b.markFieldAsDirty(blockRoots)
|
||||
b.rebuildTrie[blockRoots] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateBlockRootAtIndex for the beacon state. Updates the block root
|
||||
// at a specific index to a new value.
|
||||
func (b *BeaconState) UpdateBlockRootAtIndex(idx uint64, blockRoot [32]byte) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
if uint64(len(b.state.BlockRoots)) <= idx {
|
||||
return fmt.Errorf("invalid index provided %d", idx)
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
r := b.state.BlockRoots
|
||||
if ref := b.sharedFieldReferences[blockRoots]; ref.Refs() > 1 {
|
||||
// Copy elements in underlying array by reference.
|
||||
r = make([][]byte, len(b.state.BlockRoots))
|
||||
copy(r, b.state.BlockRoots)
|
||||
ref.MinusRef()
|
||||
b.sharedFieldReferences[blockRoots] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
r[idx] = blockRoot[:]
|
||||
b.state.BlockRoots = r
|
||||
|
||||
b.markFieldAsDirty(blockRoots)
|
||||
b.addDirtyIndices(blockRoots, []uint64{idx})
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetStateRoots for the beacon state. Updates the state roots
|
||||
// to a new value by overwriting the previous value.
|
||||
func (b *BeaconState) SetStateRoots(val [][]byte) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.sharedFieldReferences[stateRoots].MinusRef()
|
||||
b.sharedFieldReferences[stateRoots] = stateutil.NewRef(1)
|
||||
|
||||
b.state.StateRoots = val
|
||||
b.markFieldAsDirty(stateRoots)
|
||||
b.rebuildTrie[stateRoots] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateStateRootAtIndex for the beacon state. Updates the state root
|
||||
// at a specific index to a new value.
|
||||
func (b *BeaconState) UpdateStateRootAtIndex(idx uint64, stateRoot [32]byte) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
if uint64(len(b.state.StateRoots)) <= idx {
|
||||
b.lock.RUnlock()
|
||||
return errors.Errorf("invalid index provided %d", idx)
|
||||
}
|
||||
b.lock.RUnlock()
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
// Check if we hold the only reference to the shared state roots slice.
|
||||
r := b.state.StateRoots
|
||||
if ref := b.sharedFieldReferences[stateRoots]; ref.Refs() > 1 {
|
||||
// Copy elements in underlying array by reference.
|
||||
r = make([][]byte, len(b.state.StateRoots))
|
||||
copy(r, b.state.StateRoots)
|
||||
ref.MinusRef()
|
||||
b.sharedFieldReferences[stateRoots] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
r[idx] = stateRoot[:]
|
||||
b.state.StateRoots = r
|
||||
|
||||
b.markFieldAsDirty(stateRoots)
|
||||
b.addDirtyIndices(stateRoots, []uint64{idx})
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetHistoricalRoots for the beacon state. Updates the entire
|
||||
// list to a new value by overwriting the previous one.
|
||||
func (b *BeaconState) SetHistoricalRoots(val [][]byte) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.sharedFieldReferences[historicalRoots].MinusRef()
|
||||
b.sharedFieldReferences[historicalRoots] = stateutil.NewRef(1)
|
||||
|
||||
b.state.HistoricalRoots = val
|
||||
b.markFieldAsDirty(historicalRoots)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetEth1Data for the beacon state.
|
||||
func (b *BeaconState) SetEth1Data(val *ethpb.Eth1Data) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.state.Eth1Data = val
|
||||
b.markFieldAsDirty(eth1Data)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetEth1DataVotes for the beacon state. Updates the entire
|
||||
// list to a new value by overwriting the previous one.
|
||||
func (b *BeaconState) SetEth1DataVotes(val []*ethpb.Eth1Data) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.sharedFieldReferences[eth1DataVotes].MinusRef()
|
||||
b.sharedFieldReferences[eth1DataVotes] = stateutil.NewRef(1)
|
||||
|
||||
b.state.Eth1DataVotes = val
|
||||
b.markFieldAsDirty(eth1DataVotes)
|
||||
b.rebuildTrie[eth1DataVotes] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// AppendEth1DataVotes for the beacon state. Appends the new value
|
||||
// to the end of list.
|
||||
func (b *BeaconState) AppendEth1DataVotes(val *ethpb.Eth1Data) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
votes := b.state.Eth1DataVotes
|
||||
if b.sharedFieldReferences[eth1DataVotes].Refs() > 1 {
|
||||
// Copy elements in underlying array by reference.
|
||||
votes = make([]*ethpb.Eth1Data, len(b.state.Eth1DataVotes))
|
||||
copy(votes, b.state.Eth1DataVotes)
|
||||
b.sharedFieldReferences[eth1DataVotes].MinusRef()
|
||||
b.sharedFieldReferences[eth1DataVotes] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
b.state.Eth1DataVotes = append(votes, val)
|
||||
b.markFieldAsDirty(eth1DataVotes)
|
||||
b.addDirtyIndices(eth1DataVotes, []uint64{uint64(len(b.state.Eth1DataVotes) - 1)})
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetEth1DepositIndex for the beacon state.
|
||||
func (b *BeaconState) SetEth1DepositIndex(val uint64) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.state.Eth1DepositIndex = val
|
||||
b.markFieldAsDirty(eth1DepositIndex)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetValidators for the beacon state. Updates the entire
|
||||
// to a new value by overwriting the previous one.
|
||||
func (b *BeaconState) SetValidators(val []*ethpb.Validator) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.state.Validators = val
|
||||
b.sharedFieldReferences[validators].MinusRef()
|
||||
b.sharedFieldReferences[validators] = stateutil.NewRef(1)
|
||||
b.markFieldAsDirty(validators)
|
||||
b.rebuildTrie[validators] = true
|
||||
b.valMapHandler = stateutil.NewValMapHandler(b.state.Validators)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ApplyToEveryValidator applies the provided callback function to each validator in the
|
||||
// validator registry.
|
||||
func (b *BeaconState) ApplyToEveryValidator(f func(idx int, val *ethpb.Validator) (bool, *ethpb.Validator, error)) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
v := b.state.Validators
|
||||
if ref := b.sharedFieldReferences[validators]; ref.Refs() > 1 {
|
||||
v = b.validatorsReferences()
|
||||
ref.MinusRef()
|
||||
b.sharedFieldReferences[validators] = stateutil.NewRef(1)
|
||||
}
|
||||
b.lock.Unlock()
|
||||
var changedVals []uint64
|
||||
for i, val := range v {
|
||||
changed, newVal, err := f(i, val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if changed {
|
||||
changedVals = append(changedVals, uint64(i))
|
||||
v[i] = newVal
|
||||
}
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.state.Validators = v
|
||||
b.markFieldAsDirty(validators)
|
||||
b.addDirtyIndices(validators, changedVals)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateValidatorAtIndex for the beacon state. Updates the validator
|
||||
// at a specific index to a new value.
|
||||
func (b *BeaconState) UpdateValidatorAtIndex(idx types.ValidatorIndex, val *ethpb.Validator) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
if uint64(len(b.state.Validators)) <= uint64(idx) {
|
||||
return errors.Errorf("invalid index provided %d", idx)
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
v := b.state.Validators
|
||||
if ref := b.sharedFieldReferences[validators]; ref.Refs() > 1 {
|
||||
v = b.validatorsReferences()
|
||||
ref.MinusRef()
|
||||
b.sharedFieldReferences[validators] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
v[idx] = val
|
||||
b.state.Validators = v
|
||||
b.markFieldAsDirty(validators)
|
||||
b.addDirtyIndices(validators, []uint64{uint64(idx)})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetBalances for the beacon state. Updates the entire
|
||||
// list to a new value by overwriting the previous one.
|
||||
func (b *BeaconState) SetBalances(val []uint64) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.sharedFieldReferences[balances].MinusRef()
|
||||
b.sharedFieldReferences[balances] = stateutil.NewRef(1)
|
||||
|
||||
b.state.Balances = val
|
||||
b.markFieldAsDirty(balances)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateBalancesAtIndex for the beacon state. This method updates the balance
|
||||
// at a specific index to a new value.
|
||||
func (b *BeaconState) UpdateBalancesAtIndex(idx types.ValidatorIndex, val uint64) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
if uint64(len(b.state.Balances)) <= uint64(idx) {
|
||||
return errors.Errorf("invalid index provided %d", idx)
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
bals := b.state.Balances
|
||||
if b.sharedFieldReferences[balances].Refs() > 1 {
|
||||
bals = b.balances()
|
||||
b.sharedFieldReferences[balances].MinusRef()
|
||||
b.sharedFieldReferences[balances] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
bals[idx] = val
|
||||
b.state.Balances = bals
|
||||
b.markFieldAsDirty(balances)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetRandaoMixes for the beacon state. Updates the entire
|
||||
// randao mixes to a new value by overwriting the previous one.
|
||||
func (b *BeaconState) SetRandaoMixes(val [][]byte) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.sharedFieldReferences[randaoMixes].MinusRef()
|
||||
b.sharedFieldReferences[randaoMixes] = stateutil.NewRef(1)
|
||||
|
||||
b.state.RandaoMixes = val
|
||||
b.markFieldAsDirty(randaoMixes)
|
||||
b.rebuildTrie[randaoMixes] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateRandaoMixesAtIndex for the beacon state. Updates the randao mixes
|
||||
// at a specific index to a new value.
|
||||
func (b *BeaconState) UpdateRandaoMixesAtIndex(idx uint64, val []byte) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
if uint64(len(b.state.RandaoMixes)) <= idx {
|
||||
return errors.Errorf("invalid index provided %d", idx)
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
mixes := b.state.RandaoMixes
|
||||
if refs := b.sharedFieldReferences[randaoMixes].Refs(); refs > 1 {
|
||||
// Copy elements in underlying array by reference.
|
||||
mixes = make([][]byte, len(b.state.RandaoMixes))
|
||||
copy(mixes, b.state.RandaoMixes)
|
||||
b.sharedFieldReferences[randaoMixes].MinusRef()
|
||||
b.sharedFieldReferences[randaoMixes] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
mixes[idx] = val
|
||||
b.state.RandaoMixes = mixes
|
||||
b.markFieldAsDirty(randaoMixes)
|
||||
b.addDirtyIndices(randaoMixes, []uint64{idx})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetSlashings for the beacon state. Updates the entire
|
||||
// list to a new value by overwriting the previous one.
|
||||
func (b *BeaconState) SetSlashings(val []uint64) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.sharedFieldReferences[slashings].MinusRef()
|
||||
b.sharedFieldReferences[slashings] = stateutil.NewRef(1)
|
||||
|
||||
b.state.Slashings = val
|
||||
b.markFieldAsDirty(slashings)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateSlashingsAtIndex for the beacon state. Updates the slashings
|
||||
// at a specific index to a new value.
|
||||
func (b *BeaconState) UpdateSlashingsAtIndex(idx, val uint64) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
if uint64(len(b.state.Slashings)) <= idx {
|
||||
return errors.Errorf("invalid index provided %d", idx)
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
s := b.state.Slashings
|
||||
if b.sharedFieldReferences[slashings].Refs() > 1 {
|
||||
s = b.slashings()
|
||||
b.sharedFieldReferences[slashings].MinusRef()
|
||||
b.sharedFieldReferences[slashings] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
s[idx] = val
|
||||
|
||||
b.state.Slashings = s
|
||||
|
||||
b.markFieldAsDirty(slashings)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetPreviousParticipationBits for the beacon state. Updates the entire
|
||||
// list to a new value by overwriting the previous one.
|
||||
func (b *BeaconState) SetPreviousParticipationBits(val []byte) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.sharedFieldReferences[previousEpochParticipationBits].MinusRef()
|
||||
b.sharedFieldReferences[previousEpochParticipationBits] = stateutil.NewRef(1)
|
||||
|
||||
b.state.PreviousEpochParticipation = val
|
||||
b.markFieldAsDirty(previousEpochParticipationBits)
|
||||
b.rebuildTrie[previousEpochParticipationBits] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetCurrentParticipationBits for the beacon state. Updates the entire
|
||||
// list to a new value by overwriting the previous one.
|
||||
func (b *BeaconState) SetCurrentParticipationBits(val []byte) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.sharedFieldReferences[currentEpochParticipationBits].MinusRef()
|
||||
b.sharedFieldReferences[currentEpochParticipationBits] = stateutil.NewRef(1)
|
||||
|
||||
b.state.CurrentEpochParticipation = val
|
||||
b.markFieldAsDirty(currentEpochParticipationBits)
|
||||
b.rebuildTrie[currentEpochParticipationBits] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// AppendHistoricalRoots for the beacon state. Appends the new value
|
||||
// to the the end of list.
|
||||
func (b *BeaconState) AppendHistoricalRoots(root [32]byte) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
roots := b.state.HistoricalRoots
|
||||
if b.sharedFieldReferences[historicalRoots].Refs() > 1 {
|
||||
roots = make([][]byte, len(b.state.HistoricalRoots))
|
||||
copy(roots, b.state.HistoricalRoots)
|
||||
b.sharedFieldReferences[historicalRoots].MinusRef()
|
||||
b.sharedFieldReferences[historicalRoots] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
b.state.HistoricalRoots = append(roots, root[:])
|
||||
b.markFieldAsDirty(historicalRoots)
|
||||
return nil
|
||||
}
|
||||
|
||||
// AppendCurrentParticipationBits for the beacon state. Appends the new value
|
||||
// to the the end of list.
|
||||
func (b *BeaconState) AppendCurrentParticipationBits(val byte) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
participation := b.state.CurrentEpochParticipation
|
||||
if b.sharedFieldReferences[currentEpochParticipationBits].Refs() > 1 {
|
||||
// Copy elements in underlying array by reference.
|
||||
participation = make([]byte, len(b.state.CurrentEpochParticipation))
|
||||
copy(participation, b.state.CurrentEpochParticipation)
|
||||
b.sharedFieldReferences[currentEpochParticipationBits].MinusRef()
|
||||
b.sharedFieldReferences[currentEpochParticipationBits] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
b.state.CurrentEpochParticipation = append(participation, val)
|
||||
b.markFieldAsDirty(currentEpochParticipationBits)
|
||||
b.addDirtyIndices(currentEpochParticipationBits, []uint64{uint64(len(b.state.CurrentEpochParticipation) - 1)})
|
||||
return nil
|
||||
}
|
||||
|
||||
// AppendPreviousParticipationBits for the beacon state. Appends the new value
|
||||
// to the the end of list.
|
||||
func (b *BeaconState) AppendPreviousParticipationBits(val byte) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
bits := b.state.PreviousEpochParticipation
|
||||
if b.sharedFieldReferences[previousEpochParticipationBits].Refs() > 1 {
|
||||
bits = make([]byte, len(b.state.PreviousEpochParticipation))
|
||||
copy(bits, b.state.PreviousEpochParticipation)
|
||||
b.sharedFieldReferences[previousEpochParticipationBits].MinusRef()
|
||||
b.sharedFieldReferences[previousEpochParticipationBits] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
b.state.PreviousEpochParticipation = append(bits, val)
|
||||
b.markFieldAsDirty(previousEpochParticipationBits)
|
||||
b.addDirtyIndices(previousEpochParticipationBits, []uint64{uint64(len(b.state.PreviousEpochParticipation) - 1)})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AppendValidator for the beacon state. Appends the new value
|
||||
// to the the end of list.
|
||||
func (b *BeaconState) AppendValidator(val *ethpb.Validator) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
vals := b.state.Validators
|
||||
if b.sharedFieldReferences[validators].Refs() > 1 {
|
||||
vals = b.validatorsReferences()
|
||||
b.sharedFieldReferences[validators].MinusRef()
|
||||
b.sharedFieldReferences[validators] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
// append validator to slice
|
||||
b.state.Validators = append(vals, val)
|
||||
valIdx := types.ValidatorIndex(len(b.state.Validators) - 1)
|
||||
|
||||
b.valMapHandler.Set(bytesutil.ToBytes48(val.PublicKey), valIdx)
|
||||
|
||||
b.markFieldAsDirty(validators)
|
||||
b.addDirtyIndices(validators, []uint64{uint64(valIdx)})
|
||||
return nil
|
||||
}
|
||||
|
||||
// AppendBalance for the beacon state. Appends the new value
|
||||
// to the the end of list.
|
||||
func (b *BeaconState) AppendBalance(bal uint64) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
bals := b.state.Balances
|
||||
if b.sharedFieldReferences[balances].Refs() > 1 {
|
||||
bals = b.balances()
|
||||
b.sharedFieldReferences[balances].MinusRef()
|
||||
b.sharedFieldReferences[balances] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
b.state.Balances = append(bals, bal)
|
||||
b.markFieldAsDirty(balances)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetJustificationBits for the beacon state.
|
||||
func (b *BeaconState) SetJustificationBits(val bitfield.Bitvector4) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.state.JustificationBits = val
|
||||
b.markFieldAsDirty(justificationBits)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetPreviousJustifiedCheckpoint for the beacon state.
|
||||
func (b *BeaconState) SetPreviousJustifiedCheckpoint(val *ethpb.Checkpoint) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.state.PreviousJustifiedCheckpoint = val
|
||||
b.markFieldAsDirty(previousJustifiedCheckpoint)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetCurrentJustifiedCheckpoint for the beacon state.
|
||||
func (b *BeaconState) SetCurrentJustifiedCheckpoint(val *ethpb.Checkpoint) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.state.CurrentJustifiedCheckpoint = val
|
||||
b.markFieldAsDirty(currentJustifiedCheckpoint)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetFinalizedCheckpoint for the beacon state.
|
||||
func (b *BeaconState) SetFinalizedCheckpoint(val *ethpb.Checkpoint) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.state.FinalizedCheckpoint = val
|
||||
b.markFieldAsDirty(finalizedCheckpoint)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetCurrentSyncCommittee for the beacon state.
|
||||
func (b *BeaconState) SetCurrentSyncCommittee(val *ethpb.SyncCommittee) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.state.CurrentSyncCommittee = val
|
||||
b.markFieldAsDirty(currentSyncCommittee)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetNextSyncCommittee for the beacon state.
|
||||
func (b *BeaconState) SetNextSyncCommittee(val *ethpb.SyncCommittee) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.state.NextSyncCommittee = val
|
||||
b.markFieldAsDirty(nextSyncCommittee)
|
||||
return nil
|
||||
}
|
||||
|
||||
// AppendInactivityScore for the beacon state.
|
||||
func (b *BeaconState) AppendInactivityScore(s uint64) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
scores := b.state.InactivityScores
|
||||
if b.sharedFieldReferences[inactivityScores].Refs() > 1 {
|
||||
scores = b.inactivityScores()
|
||||
b.sharedFieldReferences[inactivityScores].MinusRef()
|
||||
b.sharedFieldReferences[inactivityScores] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
b.state.InactivityScores = append(scores, s)
|
||||
b.markFieldAsDirty(inactivityScores)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetInactivityScores for the beacon state. Updates the entire
|
||||
// list to a new value by overwriting the previous one.
|
||||
func (b *BeaconState) SetInactivityScores(val []uint64) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.sharedFieldReferences[inactivityScores].MinusRef()
|
||||
b.sharedFieldReferences[inactivityScores] = stateutil.NewRef(1)
|
||||
|
||||
b.state.InactivityScores = val
|
||||
b.markFieldAsDirty(inactivityScores)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Recomputes the branch up the index in the Merkle trie representation
|
||||
// of the beacon state. This method performs map reads and the caller MUST
|
||||
// hold the lock before calling this method.
|
||||
func (b *BeaconState) recomputeRoot(idx int) {
|
||||
hashFunc := hash.CustomSHA256Hasher()
|
||||
layers := b.merkleLayers
|
||||
// The merkle tree structure looks as follows:
|
||||
// [[r1, r2, r3, r4], [parent1, parent2], [root]]
|
||||
// Using information about the index which changed, idx, we recompute
|
||||
// only its branch up the tree.
|
||||
currentIndex := idx
|
||||
root := b.merkleLayers[0][idx]
|
||||
for i := 0; i < len(layers)-1; i++ {
|
||||
isLeft := currentIndex%2 == 0
|
||||
neighborIdx := currentIndex ^ 1
|
||||
|
||||
neighbor := make([]byte, 32)
|
||||
if layers[i] != nil && len(layers[i]) != 0 && neighborIdx < len(layers[i]) {
|
||||
neighbor = layers[i][neighborIdx]
|
||||
}
|
||||
if isLeft {
|
||||
parentHash := hashFunc(append(root, neighbor...))
|
||||
root = parentHash[:]
|
||||
} else {
|
||||
parentHash := hashFunc(append(neighbor, root...))
|
||||
root = parentHash[:]
|
||||
}
|
||||
parentIdx := currentIndex / 2
|
||||
// Update the cached layers at the parent index.
|
||||
layers[i+1][parentIdx] = root
|
||||
currentIndex = parentIdx
|
||||
}
|
||||
b.merkleLayers = layers
|
||||
}
|
||||
|
||||
func (b *BeaconState) markFieldAsDirty(field stateTypes.FieldIndex) {
|
||||
_, ok := b.dirtyFields[field]
|
||||
if !ok {
|
||||
b.dirtyFields[field] = true
|
||||
}
|
||||
// do nothing if field already exists
|
||||
}
|
||||
|
||||
// addDirtyIndices adds the relevant dirty field indices, so that they
|
||||
// can be recomputed.
|
||||
func (b *BeaconState) addDirtyIndices(index stateTypes.FieldIndex, indices []uint64) {
|
||||
b.dirtyIndices[index] = append(b.dirtyIndices[index], indices...)
|
||||
}
|
||||
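markFieldAsDirty and addDirtyIndices only record what changed; the expensive rehashing is deferred until HashTreeRoot walks the dirty set and clears it. A rough, self-contained sketch of that bookkeeping follows; the field indices and the recompute callback are hypothetical, not the Prysm types.

package main

import "fmt"

type fieldIndex int

const (
    blockRootsField fieldIndex = iota
    balancesField
)

// tracker records which fields, and which element indices within them, changed.
type tracker struct {
    dirtyFields  map[fieldIndex]bool
    dirtyIndices map[fieldIndex][]uint64
}

func (t *tracker) markDirty(f fieldIndex, indices ...uint64) {
    t.dirtyFields[f] = true
    t.dirtyIndices[f] = append(t.dirtyIndices[f], indices...)
}

// flush stands in for the recompute-and-clear pass that HashTreeRoot performs.
func (t *tracker) flush(recompute func(fieldIndex, []uint64)) {
    for f := range t.dirtyFields {
        recompute(f, t.dirtyIndices[f])
        delete(t.dirtyFields, f)
        t.dirtyIndices[f] = nil
    }
}

func main() {
    t := &tracker{
        dirtyFields:  map[fieldIndex]bool{},
        dirtyIndices: map[fieldIndex][]uint64{},
    }
    t.markDirty(blockRootsField, 5)
    t.markDirty(blockRootsField, 7)
    t.markDirty(balancesField)

    t.flush(func(f fieldIndex, idxs []uint64) {
        fmt.Printf("recompute field %d at indices %v\n", f, idxs)
    })
}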
|
||||
// SetLatestExecutionPayloadHeader for the beacon state.
|
||||
func (b *BeaconState) SetLatestExecutionPayloadHeader(val *ethpb.ExecutionPayloadHeader) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.state.LatestExecutionPayloadHeader = val
|
||||
b.markFieldAsDirty(latestExecutionPayloadHeader)
|
||||
return nil
|
||||
}
|
||||
beacon-chain/state/v3/state_trie.go
Normal file
@@ -0,0 +1,374 @@
|
||||
package v3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"sort"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/container/slice"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// InitializeFromProto the beacon state from a protobuf representation.
|
||||
func InitializeFromProto(st *ethpb.BeaconStateMerge) (*BeaconState, error) {
|
||||
return InitializeFromProtoUnsafe(proto.Clone(st).(*ethpb.BeaconStateMerge))
|
||||
}
|
||||
|
||||
// InitializeFromProtoUnsafe directly uses the beacon state protobuf pointer
|
||||
// and sets it as the inner state of the BeaconState type.
|
||||
func InitializeFromProtoUnsafe(st *ethpb.BeaconStateMerge) (*BeaconState, error) {
|
||||
if st == nil {
|
||||
return nil, errors.New("received nil state")
|
||||
}
|
||||
|
||||
fieldCount := params.BeaconConfig().BeaconStateMergeFieldCount
|
||||
b := &BeaconState{
|
||||
state: st,
|
||||
dirtyFields: make(map[types.FieldIndex]interface{}, fieldCount),
|
||||
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*fieldtrie.FieldTrie, fieldCount),
|
||||
sharedFieldReferences: make(map[types.FieldIndex]*stateutil.Reference, 11),
|
||||
rebuildTrie: make(map[types.FieldIndex]bool, fieldCount),
|
||||
valMapHandler: stateutil.NewValMapHandler(st.Validators),
|
||||
}
|
||||
|
||||
var err error
|
||||
for i := 0; i < fieldCount; i++ {
|
||||
b.dirtyFields[types.FieldIndex(i)] = true
|
||||
b.rebuildTrie[types.FieldIndex(i)] = true
|
||||
b.dirtyIndices[types.FieldIndex(i)] = []uint64{}
|
||||
b.stateFieldLeaves[types.FieldIndex(i)], err = fieldtrie.NewFieldTrie(types.FieldIndex(i), types.BasicArray, nil, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize field reference tracking for shared data.
|
||||
b.sharedFieldReferences[randaoMixes] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[stateRoots] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[blockRoots] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[previousEpochParticipationBits] = stateutil.NewRef(1) // New in Altair.
|
||||
b.sharedFieldReferences[currentEpochParticipationBits] = stateutil.NewRef(1) // New in Altair.
|
||||
b.sharedFieldReferences[slashings] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[eth1DataVotes] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[validators] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[balances] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[inactivityScores] = stateutil.NewRef(1) // New in Altair.
|
||||
b.sharedFieldReferences[historicalRoots] = stateutil.NewRef(1)
|
||||
|
||||
return b, nil
|
||||
}
|
||||
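InitializeFromProto clones the incoming protobuf before wrapping it, while InitializeFromProtoUnsafe aliases the caller's pointer and skips the copy. The stand-alone sketch below shows why that distinction matters; innerState is a stand-in for *ethpb.BeaconStateMerge, and the clone is a plain struct copy rather than proto.Clone.

package main

import "fmt"

// innerState is a toy stand-in for the protobuf state object.
type innerState struct {
    Slot uint64
}

type wrappedState struct {
    state *innerState
}

// initializeSafe copies the incoming object, so later caller mutations
// cannot reach the wrapped state (proto.Clone plays this role for protobufs).
func initializeSafe(st *innerState) *wrappedState {
    cloned := *st
    return &wrappedState{state: &cloned}
}

// initializeUnsafe aliases the caller's pointer and avoids the copy.
func initializeUnsafe(st *innerState) *wrappedState {
    return &wrappedState{state: st}
}

func main() {
    src := &innerState{Slot: 10}
    safe := initializeSafe(src)
    aliased := initializeUnsafe(src)

    src.Slot = 99 // caller keeps mutating its own object

    fmt.Println(safe.state.Slot)    // 10: isolated from the caller
    fmt.Println(aliased.state.Slot) // 99: shares the caller's object
}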
|
||||
// Copy returns a deep copy of the beacon state.
|
||||
func (b *BeaconState) Copy() state.BeaconState {
|
||||
if !b.hasInnerState() {
|
||||
return nil
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
fieldCount := params.BeaconConfig().BeaconStateMergeFieldCount
|
||||
|
||||
dst := &BeaconState{
|
||||
state: ðpb.BeaconStateMerge{
|
||||
// Primitive types, safe to copy.
|
||||
GenesisTime: b.state.GenesisTime,
|
||||
Slot: b.state.Slot,
|
||||
Eth1DepositIndex: b.state.Eth1DepositIndex,
|
||||
|
||||
// Large arrays, infrequently changed, constant size.
|
||||
RandaoMixes: b.state.RandaoMixes,
|
||||
StateRoots: b.state.StateRoots,
|
||||
BlockRoots: b.state.BlockRoots,
|
||||
Slashings: b.state.Slashings,
|
||||
Eth1DataVotes: b.state.Eth1DataVotes,
|
||||
|
||||
// Large arrays, increases over time.
|
||||
Validators: b.state.Validators,
|
||||
Balances: b.state.Balances,
|
||||
HistoricalRoots: b.state.HistoricalRoots,
|
||||
PreviousEpochParticipation: b.state.PreviousEpochParticipation,
|
||||
CurrentEpochParticipation: b.state.CurrentEpochParticipation,
|
||||
InactivityScores: b.state.InactivityScores,
|
||||
|
||||
// Everything else, too small to be concerned about, constant size.
|
||||
Fork: b.fork(),
|
||||
LatestBlockHeader: b.latestBlockHeader(),
|
||||
Eth1Data: b.eth1Data(),
|
||||
JustificationBits: b.justificationBits(),
|
||||
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint(),
|
||||
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint(),
|
||||
FinalizedCheckpoint: b.finalizedCheckpoint(),
|
||||
GenesisValidatorsRoot: b.genesisValidatorRoot(),
|
||||
CurrentSyncCommittee: b.currentSyncCommittee(),
|
||||
NextSyncCommittee: b.nextSyncCommittee(),
|
||||
LatestExecutionPayloadHeader: b.latestExecutionPayloadHeader(),
|
||||
},
|
||||
dirtyFields: make(map[types.FieldIndex]interface{}, fieldCount),
|
||||
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
|
||||
rebuildTrie: make(map[types.FieldIndex]bool, fieldCount),
|
||||
sharedFieldReferences: make(map[types.FieldIndex]*stateutil.Reference, 11),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*fieldtrie.FieldTrie, fieldCount),
|
||||
|
||||
// Copy on write validator index map.
|
||||
valMapHandler: b.valMapHandler,
|
||||
}
|
||||
|
||||
for field, ref := range b.sharedFieldReferences {
|
||||
ref.AddRef()
|
||||
dst.sharedFieldReferences[field] = ref
|
||||
}
|
||||
|
||||
// Increment ref for validator map
|
||||
b.valMapHandler.AddRef()
|
||||
|
||||
for i := range b.dirtyFields {
|
||||
dst.dirtyFields[i] = true
|
||||
}
|
||||
|
||||
for i := range b.dirtyIndices {
|
||||
indices := make([]uint64, len(b.dirtyIndices[i]))
|
||||
copy(indices, b.dirtyIndices[i])
|
||||
dst.dirtyIndices[i] = indices
|
||||
}
|
||||
|
||||
for i := range b.rebuildTrie {
|
||||
dst.rebuildTrie[i] = true
|
||||
}
|
||||
|
||||
for fldIdx, fieldTrie := range b.stateFieldLeaves {
|
||||
dst.stateFieldLeaves[fldIdx] = fieldTrie
|
||||
if fieldTrie.FieldReference() != nil {
|
||||
fieldTrie.Lock()
|
||||
fieldTrie.FieldReference().AddRef()
|
||||
fieldTrie.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
if b.merkleLayers != nil {
|
||||
dst.merkleLayers = make([][][]byte, len(b.merkleLayers))
|
||||
for i, layer := range b.merkleLayers {
|
||||
dst.merkleLayers[i] = make([][]byte, len(layer))
|
||||
for j, content := range layer {
|
||||
dst.merkleLayers[i][j] = make([]byte, len(content))
|
||||
copy(dst.merkleLayers[i][j], content)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Finalizer runs when dst is being destroyed in garbage collection.
|
||||
runtime.SetFinalizer(dst, func(b *BeaconState) {
|
||||
for field, v := range b.sharedFieldReferences {
|
||||
v.MinusRef()
|
||||
if b.stateFieldLeaves[field].FieldReference() != nil {
|
||||
b.stateFieldLeaves[field].FieldReference().MinusRef()
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
// HashTreeRoot of the beacon state retrieves the Merkle root of the trie
|
||||
// representation of the beacon state based on the eth2 Simple Serialize specification.
|
||||
func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconStateMerge.HashTreeRoot")
|
||||
defer span.End()
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if b.merkleLayers == nil || len(b.merkleLayers) == 0 {
|
||||
fieldRoots, err := computeFieldRoots(b.state)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
layers := stateutil.Merkleize(fieldRoots)
|
||||
b.merkleLayers = layers
|
||||
b.dirtyFields = make(map[types.FieldIndex]interface{}, params.BeaconConfig().BeaconStateMergeFieldCount)
|
||||
}
|
||||
|
||||
for field := range b.dirtyFields {
|
||||
root, err := b.rootSelector(field)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
b.merkleLayers[0][field] = root[:]
|
||||
b.recomputeRoot(int(field))
|
||||
delete(b.dirtyFields, field)
|
||||
}
|
||||
return bytesutil.ToBytes32(b.merkleLayers[len(b.merkleLayers)-1][0]), nil
|
||||
}
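HashTreeRoot above only rehashes fields recorded in dirtyFields and then recomputes the path from each changed leaf to the root. A self-contained sketch of that incremental Merkle update, assuming a power-of-two leaf count and SHA-256 pair hashing (not the actual stateutil.Merkleize code):

// Illustrative dirty-leaf Merkle recomputation: changing leaf i only
// rehashes the nodes on the path from i to the root.
package main

import (
	"crypto/sha256"
	"fmt"
)

func hashPair(a, b [32]byte) [32]byte {
	return sha256.Sum256(append(a[:], b[:]...))
}

// buildLayers builds every tree layer from a power-of-two leaf set.
func buildLayers(leaves [][32]byte) [][][32]byte {
	layers := [][][32]byte{leaves}
	for len(leaves) > 1 {
		next := make([][32]byte, len(leaves)/2)
		for i := range next {
			next[i] = hashPair(leaves[2*i], leaves[2*i+1])
		}
		layers = append(layers, next)
		leaves = next
	}
	return layers
}

// recompute replaces one dirty leaf and rehashes only its ancestors.
func recompute(layers [][][32]byte, idx int, leaf [32]byte) {
	layers[0][idx] = leaf
	for depth := 0; depth < len(layers)-1; depth++ {
		idx /= 2
		layers[depth+1][idx] = hashPair(layers[depth][2*idx], layers[depth][2*idx+1])
	}
}

func main() {
	leaves := make([][32]byte, 4) // four zeroed field roots
	layers := buildLayers(leaves)
	recompute(layers, 2, sha256.Sum256([]byte("new field root")))
	fmt.Printf("root: %x\n", layers[len(layers)-1][0])
}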
|
||||
|
||||
// FieldReferencesCount returns the reference count held by each field. This
|
||||
// also includes the field trie held by each field.
|
||||
func (b *BeaconState) FieldReferencesCount() map[string]uint64 {
|
||||
refMap := make(map[string]uint64)
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
for i, f := range b.sharedFieldReferences {
|
||||
refMap[i.String(b.Version())] = uint64(f.Refs())
|
||||
}
|
||||
for i, f := range b.stateFieldLeaves {
|
||||
numOfRefs := uint64(f.FieldReference().Refs())
|
||||
f.RLock()
|
||||
if !f.Empty() {
|
||||
refMap[i.String(b.Version())+"_trie"] = numOfRefs
|
||||
}
|
||||
f.RUnlock()
|
||||
}
|
||||
return refMap
|
||||
}
|
||||
|
||||
// IsNil checks if the state and the underlying proto
|
||||
// object are nil.
|
||||
func (b *BeaconState) IsNil() bool {
|
||||
return b == nil || b.state == nil
|
||||
}
|
||||
|
||||
func (b *BeaconState) rootSelector(field types.FieldIndex) ([32]byte, error) {
|
||||
hasher := hash.CustomSHA256Hasher()
|
||||
switch field {
|
||||
case genesisTime:
|
||||
return ssz.Uint64Root(b.state.GenesisTime), nil
|
||||
case genesisValidatorRoot:
|
||||
return bytesutil.ToBytes32(b.state.GenesisValidatorsRoot), nil
|
||||
case slot:
|
||||
return ssz.Uint64Root(uint64(b.state.Slot)), nil
|
||||
case eth1DepositIndex:
|
||||
return ssz.Uint64Root(b.state.Eth1DepositIndex), nil
|
||||
case fork:
|
||||
return ssz.ForkRoot(b.state.Fork)
|
||||
case latestBlockHeader:
|
||||
return stateutil.BlockHeaderRoot(b.state.LatestBlockHeader)
|
||||
case blockRoots:
|
||||
if b.rebuildTrie[field] {
|
||||
err := b.resetFieldTrie(field, b.state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot))
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
b.dirtyIndices[field] = []uint64{}
|
||||
delete(b.rebuildTrie, field)
|
||||
return b.stateFieldLeaves[field].TrieRoot()
|
||||
}
|
||||
return b.recomputeFieldTrie(blockRoots, b.state.BlockRoots)
|
||||
case stateRoots:
|
||||
if b.rebuildTrie[field] {
|
||||
err := b.resetFieldTrie(field, b.state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot))
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
b.dirtyIndices[field] = []uint64{}
|
||||
delete(b.rebuildTrie, field)
|
||||
return b.stateFieldLeaves[field].TrieRoot()
|
||||
}
|
||||
return b.recomputeFieldTrie(stateRoots, b.state.StateRoots)
|
||||
case historicalRoots:
|
||||
return ssz.ByteArrayRootWithLimit(b.state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit)
|
||||
case eth1Data:
|
||||
return eth1Root(hasher, b.state.Eth1Data)
|
||||
case eth1DataVotes:
|
||||
if b.rebuildTrie[field] {
|
||||
err := b.resetFieldTrie(field, b.state.Eth1DataVotes, uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod))))
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
b.dirtyIndices[field] = []uint64{}
|
||||
delete(b.rebuildTrie, field)
|
||||
return b.stateFieldLeaves[field].TrieRoot()
|
||||
}
|
||||
return b.recomputeFieldTrie(field, b.state.Eth1DataVotes)
|
||||
case validators:
|
||||
if b.rebuildTrie[field] {
|
||||
err := b.resetFieldTrie(field, b.state.Validators, params.BeaconConfig().ValidatorRegistryLimit)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
b.dirtyIndices[validators] = []uint64{}
|
||||
delete(b.rebuildTrie, validators)
|
||||
return b.stateFieldLeaves[field].TrieRoot()
|
||||
}
|
||||
return b.recomputeFieldTrie(validators, b.state.Validators)
|
||||
case balances:
|
||||
return stateutil.Uint64ListRootWithRegistryLimit(b.state.Balances)
|
||||
case randaoMixes:
|
||||
if b.rebuildTrie[field] {
|
||||
err := b.resetFieldTrie(field, b.state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector))
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
b.dirtyIndices[field] = []uint64{}
|
||||
delete(b.rebuildTrie, field)
|
||||
return b.stateFieldLeaves[field].TrieRoot()
|
||||
}
|
||||
return b.recomputeFieldTrie(randaoMixes, b.state.RandaoMixes)
|
||||
case slashings:
|
||||
return ssz.SlashingsRoot(b.state.Slashings)
|
||||
case previousEpochParticipationBits:
|
||||
return stateutil.ParticipationBitsRoot(b.state.PreviousEpochParticipation)
|
||||
case currentEpochParticipationBits:
|
||||
return stateutil.ParticipationBitsRoot(b.state.CurrentEpochParticipation)
|
||||
case justificationBits:
|
||||
return bytesutil.ToBytes32(b.state.JustificationBits), nil
|
||||
case previousJustifiedCheckpoint:
|
||||
return ssz.CheckpointRoot(hasher, b.state.PreviousJustifiedCheckpoint)
|
||||
case currentJustifiedCheckpoint:
|
||||
return ssz.CheckpointRoot(hasher, b.state.CurrentJustifiedCheckpoint)
|
||||
case finalizedCheckpoint:
|
||||
return ssz.CheckpointRoot(hasher, b.state.FinalizedCheckpoint)
|
||||
case inactivityScores:
|
||||
return stateutil.Uint64ListRootWithRegistryLimit(b.state.InactivityScores)
|
||||
case currentSyncCommittee:
|
||||
return stateutil.SyncCommitteeRoot(b.state.CurrentSyncCommittee)
|
||||
case nextSyncCommittee:
|
||||
return stateutil.SyncCommitteeRoot(b.state.NextSyncCommittee)
|
||||
case latestExecutionPayloadHeader:
|
||||
return b.state.LatestExecutionPayloadHeader.HashTreeRoot()
|
||||
}
|
||||
return [32]byte{}, errors.New("invalid field index provided")
|
||||
}
|
||||
|
||||
func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements interface{}) ([32]byte, error) {
|
||||
fTrie := b.stateFieldLeaves[index]
|
||||
if fTrie.FieldReference().Refs() > 1 {
|
||||
fTrie.Lock()
|
||||
defer fTrie.Unlock()
|
||||
fTrie.FieldReference().MinusRef()
|
||||
newTrie := fTrie.CopyTrie()
|
||||
b.stateFieldLeaves[index] = newTrie
|
||||
fTrie = newTrie
|
||||
}
|
||||
// remove duplicate indexes
|
||||
b.dirtyIndices[index] = slice.SetUint64(b.dirtyIndices[index])
|
||||
// sort indexes again
|
||||
sort.Slice(b.dirtyIndices[index], func(i int, j int) bool {
|
||||
return b.dirtyIndices[index][i] < b.dirtyIndices[index][j]
|
||||
})
|
||||
root, err := fTrie.RecomputeTrie(b.dirtyIndices[index], elements)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
b.dirtyIndices[index] = []uint64{}
|
||||
return root, nil
|
||||
}
|
||||
|
||||
func (b *BeaconState) resetFieldTrie(index types.FieldIndex, elements interface{}, length uint64) error {
|
||||
fTrie, err := fieldtrie.NewFieldTrie(index, fieldMap[index], elements, length)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.stateFieldLeaves[index] = fTrie
|
||||
b.dirtyIndices[index] = []uint64{}
|
||||
return nil
|
||||
}
|
||||
beacon-chain/state/v3/types.go (new file, 76 lines)
@@ -0,0 +1,76 @@
|
||||
package v3
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func init() {
|
||||
fieldMap = make(map[types.FieldIndex]types.DataType, params.BeaconConfig().BeaconStateMergeFieldCount)
|
||||
|
||||
// Initialize the fixed sized arrays.
|
||||
fieldMap[types.BlockRoots] = types.BasicArray
|
||||
fieldMap[types.StateRoots] = types.BasicArray
|
||||
fieldMap[types.RandaoMixes] = types.BasicArray
|
||||
|
||||
// Initialize the composite arrays.
|
||||
fieldMap[types.Eth1DataVotes] = types.CompositeArray
|
||||
fieldMap[types.Validators] = types.CompositeArray
|
||||
}
|
||||
|
||||
// Field Aliases for values from the types package.
|
||||
const (
|
||||
genesisTime = types.GenesisTime
|
||||
genesisValidatorRoot = types.GenesisValidatorRoot
|
||||
slot = types.Slot
|
||||
fork = types.Fork
|
||||
latestBlockHeader = types.LatestBlockHeader
|
||||
blockRoots = types.BlockRoots
|
||||
stateRoots = types.StateRoots
|
||||
historicalRoots = types.HistoricalRoots
|
||||
eth1Data = types.Eth1Data
|
||||
eth1DataVotes = types.Eth1DataVotes
|
||||
eth1DepositIndex = types.Eth1DepositIndex
|
||||
validators = types.Validators
|
||||
balances = types.Balances
|
||||
randaoMixes = types.RandaoMixes
|
||||
slashings = types.Slashings
|
||||
previousEpochParticipationBits = types.PreviousEpochParticipationBits
|
||||
currentEpochParticipationBits = types.CurrentEpochParticipationBits
|
||||
justificationBits = types.JustificationBits
|
||||
previousJustifiedCheckpoint = types.PreviousJustifiedCheckpoint
|
||||
currentJustifiedCheckpoint = types.CurrentJustifiedCheckpoint
|
||||
finalizedCheckpoint = types.FinalizedCheckpoint
|
||||
inactivityScores = types.InactivityScores
|
||||
currentSyncCommittee = types.CurrentSyncCommittee
|
||||
nextSyncCommittee = types.NextSyncCommittee
|
||||
latestExecutionPayloadHeader = types.LatestExecutionPayloadHeader
|
||||
)
|
||||
|
||||
// fieldMap maps each field index
// to its corresponding data type.
|
||||
var fieldMap map[types.FieldIndex]types.DataType
|
||||
|
||||
// ErrNilInnerState is returned when the inner state is nil and no copy, set, or get
// operations can be performed on the state.
|
||||
var ErrNilInnerState = errors.New("nil inner state")
|
||||
|
||||
// BeaconState defines a struct containing utilities for the eth2 chain state, defining
|
||||
// getters and setters for its respective values and helpful functions such as HashTreeRoot().
|
||||
type BeaconState struct {
|
||||
state *ethpb.BeaconStateMerge
|
||||
lock sync.RWMutex
|
||||
dirtyFields map[types.FieldIndex]interface{}
|
||||
dirtyIndices map[types.FieldIndex][]uint64
|
||||
stateFieldLeaves map[types.FieldIndex]*fieldtrie.FieldTrie
|
||||
rebuildTrie map[types.FieldIndex]bool
|
||||
valMapHandler *stateutil.ValidatorMapHandler
|
||||
merkleLayers [][][]byte
|
||||
sharedFieldReferences map[types.FieldIndex]*stateutil.Reference
|
||||
}
|
||||
@@ -57,6 +57,7 @@ go_library(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/execution:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
|
||||
@@ -280,6 +280,19 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
|
||||
}
|
||||
blockRoots[i] = blkRoot
|
||||
}
|
||||
|
||||
bFunc = func(ctx context.Context, blks []block.SignedBeaconBlock, roots [][32]byte) error {
|
||||
if len(blks) != len(roots) {
|
||||
return errors.New("incorrect lengths")
|
||||
}
|
||||
for i, blk := range blks {
|
||||
if err := s.cfg.Chain.ReceiveBlock(ctx, blk, roots[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return bFunc(ctx, blks, blockRoots)
|
||||
}
|
||||
|
||||
|
||||
@@ -138,7 +138,8 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.validateBeaconBlock(ctx, b, blkRoot); err != nil {
|
||||
genesisTime := uint64(s.cfg.Chain.GenesisTime().Unix())
|
||||
if err := s.validateBeaconBlock(ctx, b, blkRoot, genesisTime); err != nil {
|
||||
log.Debugf("Could not validate block from slot %d: %v", b.Block().Slot(), err)
|
||||
s.setBadBlock(ctx, blkRoot)
|
||||
tracing.AnnotateError(span, err)
|
||||
|
||||
@@ -45,6 +45,13 @@ func WriteBlockChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher
|
||||
return err
|
||||
}
|
||||
obtainedCtx = digest[:]
|
||||
case version.Merge:
|
||||
valRoot := chain.GenesisValidatorRoot()
|
||||
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().MergeForkEpoch, valRoot[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
obtainedCtx = digest[:]
|
||||
}
|
||||
|
||||
if err := writeContextToStream(obtainedCtx, stream, chain); err != nil {
|
||||
|
||||
@@ -20,6 +20,8 @@ func TestExtractBlockDataType(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
mergeDigest, err := signing.ComputeForkDigest(params.BeaconConfig().MergeForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
type args struct {
|
||||
digest []byte
|
||||
@@ -80,6 +82,19 @@ func TestExtractBlockDataType(t *testing.T) {
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "merge fork version",
|
||||
args: args{
|
||||
digest: mergeDigest[:],
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
want: func() block.SignedBeaconBlock {
|
||||
wsb, err := wrapper.WrappedMergeSignedBeaconBlock(ðpb.SignedBeaconBlockMerge{Block: ðpb.BeaconBlockMerge{}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
@@ -72,6 +72,8 @@ func blockFromProto(msg proto.Message) (block.SignedBeaconBlock, error) {
|
||||
return wrapper.WrappedPhase0SignedBeaconBlock(t), nil
|
||||
case *ethpb.SignedBeaconBlockAltair:
|
||||
return wrapperv2.WrappedAltairSignedBeaconBlock(t)
|
||||
case *ethpb.SignedBeaconBlockMerge:
|
||||
return wrapperv2.WrappedMergeSignedBeaconBlock(t)
|
||||
default:
|
||||
return nil, errors.Errorf("message has invalid underlying type: %T", msg)
|
||||
}
|
||||
|
||||
@@ -147,6 +147,18 @@ func TestBlockFromProto(t *testing.T) {
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "merge type provided",
|
||||
msgCreator: func(t *testing.T) proto.Message {
|
||||
return ðpb.SignedBeaconBlockMerge{Block: ðpb.BeaconBlockMerge{Slot: 100}}
|
||||
},
|
||||
want: func() block.SignedBeaconBlock {
|
||||
wsb, err := wrapper.WrappedMergeSignedBeaconBlock(ðpb.SignedBeaconBlockMerge{Block: ðpb.BeaconBlockMerge{Slot: 100}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/execution"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
@@ -19,6 +20,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/monitoring/tracing"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/time"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -159,7 +161,7 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
|
||||
return pubsub.ValidationIgnore, errors.Errorf("unknown parent for block with slot %d and parent root %#x", blk.Block().Slot(), blk.Block().ParentRoot())
|
||||
}
|
||||
|
||||
if err := s.validateBeaconBlock(ctx, blk, blockRoot); err != nil {
|
||||
if err := s.validateBeaconBlock(ctx, blk, blockRoot, genesisTime); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
@@ -179,7 +181,7 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
func (s *Service) validateBeaconBlock(ctx context.Context, blk block.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
func (s *Service) validateBeaconBlock(ctx context.Context, blk block.SignedBeaconBlock, blockRoot [32]byte, genesisTime uint64) error {
|
||||
ctx, span := trace.StartSpan(ctx, "sync.validateBeaconBlock")
|
||||
defer span.End()
|
||||
|
||||
@@ -226,6 +228,30 @@ func (s *Service) validateBeaconBlock(ctx context.Context, blk block.SignedBeaco
|
||||
return errors.New("incorrect proposer index")
|
||||
}
|
||||
|
||||
// Check if the block has an execution payload.
// If yes, run a few more checks per the spec.
|
||||
if parentState.Version() == version.Merge {
|
||||
executionEnabled, err := execution.Enabled(parentState, blk.Block().Body())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if executionEnabled {
|
||||
payload, err := blk.Block().Body().ExecutionPayload()
|
||||
if err != nil || payload == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// [REJECT] The block's execution payload timestamp is correct with respect to the slot --
|
||||
// i.e. execution_payload.timestamp == compute_timestamp_at_slot(state, block.slot).
|
||||
t, err := slots.ToTime(genesisTime, blk.Block().Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if payload.Timestamp != uint64(t.Unix()) {
|
||||
return errors.New("incorrect timestamp")
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
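The timestamp check above enforces the spec rule execution_payload.timestamp == compute_timestamp_at_slot(state, block.slot), which reduces to genesis_time + slot * SECONDS_PER_SLOT; slots.ToTime performs that computation. A minimal sketch of the arithmetic, assuming the mainnet 12-second slot time and an illustrative genesis timestamp:

// Sketch only: the equivalence behind the [REJECT] timestamp rule.
package main

import "fmt"

const secondsPerSlot = 12 // mainnet SECONDS_PER_SLOT

func timestampAtSlot(genesisTime, slot uint64) uint64 {
	return genesisTime + slot*secondsPerSlot
}

func main() {
	genesis := uint64(1606824023) // hypothetical genesis time for illustration
	payloadTimestamp := timestampAtSlot(genesis, 1)
	fmt.Println(payloadTimestamp == genesis+12) // true: a slot-1 payload must carry this timestamp
}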
|
||||
|
||||
|
||||
@@ -1045,3 +1045,297 @@ func TestService_isBlockQueueable(t *testing.T) {
|
||||
result = isBlockQueueable(genesisTime, blockSlot, receivedTime)
|
||||
assert.Equal(t, true, result)
|
||||
}
|
||||
|
||||
func TestValidateBeaconBlockPubSub_ValidExecutionPayload(t *testing.T) {
|
||||
db := dbtest.SetupDB(t)
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
ctx := context.Background()
|
||||
beaconState, privKeys := util.DeterministicGenesisStateMerge(t, 100)
|
||||
parentBlock := util.NewBeaconBlockMerge()
|
||||
signedParentBlock, err := wrapper.WrappedMergeSignedBeaconBlock(parentBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, signedParentBlock))
|
||||
bRoot, err := parentBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
|
||||
require.NoError(t, db.SaveStateSummary(ctx, ðpb.StateSummary{Root: bRoot[:]}))
|
||||
copied := beaconState.Copy()
|
||||
require.NoError(t, copied.SetSlot(1))
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(ctx, copied)
|
||||
require.NoError(t, err)
|
||||
|
||||
presentTime := time.Now().Unix()
|
||||
msg := util.NewBeaconBlockMerge()
|
||||
msg.Block.ParentRoot = bRoot[:]
|
||||
msg.Block.Slot = 1
|
||||
msg.Block.ProposerIndex = proposerIdx
|
||||
msg.Block.Body.ExecutionPayload.Timestamp = uint64(presentTime)
|
||||
msg.Block.Body.ExecutionPayload.GasUsed = 10
|
||||
msg.Block.Body.ExecutionPayload.GasLimit = 11
|
||||
msg.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("blockHash"), 32)
|
||||
msg.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte("parentHash"), 32)
|
||||
msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 1"))
|
||||
msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 2"))
|
||||
msg.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, msg.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
|
||||
require.NoError(t, err)
|
||||
|
||||
stateGen := stategen.New(db)
|
||||
chainService := &mock.ChainService{Genesis: time.Unix(presentTime-int64(params.BeaconConfig().SecondsPerSlot), 0),
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: make([]byte, 32),
|
||||
}}
|
||||
r := &Service{
|
||||
cfg: &Config{
|
||||
DB: db,
|
||||
P2P: p,
|
||||
InitialSync: &mockSync.Sync{IsSyncing: false},
|
||||
Chain: chainService,
|
||||
BlockNotifier: chainService.BlockNotifier(),
|
||||
StateGen: stateGen,
|
||||
},
|
||||
seenBlockCache: lruwrpr.New(10),
|
||||
badBlockCache: lruwrpr.New(10),
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_, err = p.Encoding().EncodeGossip(buf, msg)
|
||||
require.NoError(t, err)
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
|
||||
genesisValidatorRoot := r.cfg.Chain.GenesisValidatorRoot()
|
||||
mergeDigest, err := signing.ComputeForkDigest(params.BeaconConfig().MergeForkVersion, genesisValidatorRoot[:])
|
||||
assert.NoError(t, err)
|
||||
topic = r.addDigestToTopic(topic, mergeDigest)
|
||||
m := &pubsub.Message{
|
||||
Message: &pubsubpb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
require.NoError(t, err)
|
||||
result := res == pubsub.ValidationAccept
|
||||
assert.Equal(t, true, result)
|
||||
}
|
||||
|
||||
func TestValidateBeaconBlockPubSub_InvalidPayloadTimestamp(t *testing.T) {
|
||||
db := dbtest.SetupDB(t)
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
ctx := context.Background()
|
||||
beaconState, privKeys := util.DeterministicGenesisStateMerge(t, 100)
|
||||
parentBlock := util.NewBeaconBlockMerge()
|
||||
signedParentBlock, err := wrapper.WrappedMergeSignedBeaconBlock(parentBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, signedParentBlock))
|
||||
bRoot, err := parentBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
|
||||
require.NoError(t, db.SaveStateSummary(ctx, ðpb.StateSummary{Root: bRoot[:]}))
|
||||
copied := beaconState.Copy()
|
||||
require.NoError(t, copied.SetSlot(1))
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(ctx, copied)
|
||||
require.NoError(t, err)
|
||||
|
||||
presentTime := time.Now().Unix()
|
||||
msg := util.NewBeaconBlockMerge()
|
||||
msg.Block.ParentRoot = bRoot[:]
|
||||
msg.Block.Slot = 1
|
||||
msg.Block.ProposerIndex = proposerIdx
|
||||
msg.Block.Body.ExecutionPayload.Timestamp = uint64(presentTime - 600) // add an invalid timestamp
|
||||
msg.Block.Body.ExecutionPayload.GasUsed = 10
|
||||
msg.Block.Body.ExecutionPayload.GasLimit = 11
|
||||
msg.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("blockHash"), 32)
|
||||
msg.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte("parentHash"), 32)
|
||||
msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 1"))
|
||||
msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 2"))
|
||||
msg.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, msg.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
|
||||
require.NoError(t, err)
|
||||
|
||||
stateGen := stategen.New(db)
|
||||
chainService := &mock.ChainService{Genesis: time.Unix(presentTime-int64(params.BeaconConfig().SecondsPerSlot), 0),
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: make([]byte, 32),
|
||||
}}
|
||||
r := &Service{
|
||||
cfg: &Config{
|
||||
DB: db,
|
||||
P2P: p,
|
||||
InitialSync: &mockSync.Sync{IsSyncing: false},
|
||||
Chain: chainService,
|
||||
BlockNotifier: chainService.BlockNotifier(),
|
||||
StateGen: stateGen,
|
||||
},
|
||||
seenBlockCache: lruwrpr.New(10),
|
||||
badBlockCache: lruwrpr.New(10),
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_, err = p.Encoding().EncodeGossip(buf, msg)
|
||||
require.NoError(t, err)
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
|
||||
genesisValidatorRoot := r.cfg.Chain.GenesisValidatorRoot()
|
||||
mergeDigest, err := signing.ComputeForkDigest(params.BeaconConfig().MergeForkVersion, genesisValidatorRoot[:])
|
||||
assert.NoError(t, err)
|
||||
topic = r.addDigestToTopic(topic, mergeDigest)
|
||||
m := &pubsub.Message{
|
||||
Message: &pubsubpb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
require.NotNil(t, err)
|
||||
result := res == pubsub.ValidationReject
|
||||
assert.Equal(t, true, result)
|
||||
}
|
||||
|
||||
func TestValidateBeaconBlockPubSub_InvalidPayloadGasUsed(t *testing.T) {
|
||||
db := dbtest.SetupDB(t)
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
ctx := context.Background()
|
||||
beaconState, privKeys := util.DeterministicGenesisStateMerge(t, 100)
|
||||
parentBlock := util.NewBeaconBlockMerge()
|
||||
signedParentBlock, err := wrapper.WrappedMergeSignedBeaconBlock(parentBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, signedParentBlock))
|
||||
bRoot, err := parentBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
|
||||
require.NoError(t, db.SaveStateSummary(ctx, ðpb.StateSummary{Root: bRoot[:]}))
|
||||
copied := beaconState.Copy()
|
||||
require.NoError(t, copied.SetSlot(1))
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(ctx, copied)
|
||||
require.NoError(t, err)
|
||||
|
||||
presentTime := time.Now().Unix()
|
||||
msg := util.NewBeaconBlockMerge()
|
||||
msg.Block.ParentRoot = bRoot[:]
|
||||
msg.Block.Slot = 1
|
||||
msg.Block.ProposerIndex = proposerIdx
|
||||
msg.Block.Body.ExecutionPayload.Timestamp = uint64(presentTime)
|
||||
msg.Block.Body.ExecutionPayload.GasUsed = 12
|
||||
msg.Block.Body.ExecutionPayload.GasLimit = 11
|
||||
msg.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("blockHash"), 32)
|
||||
msg.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte("parentHash"), 32)
|
||||
msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 1"))
|
||||
msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 2"))
|
||||
msg.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, msg.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
|
||||
require.NoError(t, err)
|
||||
|
||||
stateGen := stategen.New(db)
|
||||
chainService := &mock.ChainService{Genesis: time.Unix(presentTime-int64(params.BeaconConfig().SecondsPerSlot), 0),
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: make([]byte, 32),
|
||||
}}
|
||||
r := &Service{
|
||||
cfg: &Config{
|
||||
DB: db,
|
||||
P2P: p,
|
||||
InitialSync: &mockSync.Sync{IsSyncing: false},
|
||||
Chain: chainService,
|
||||
BlockNotifier: chainService.BlockNotifier(),
|
||||
StateGen: stateGen,
|
||||
},
|
||||
seenBlockCache: lruwrpr.New(10),
|
||||
badBlockCache: lruwrpr.New(10),
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_, err = p.Encoding().EncodeGossip(buf, msg)
|
||||
require.NoError(t, err)
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
|
||||
genesisValidatorRoot := r.cfg.Chain.GenesisValidatorRoot()
|
||||
mergeDigest, err := signing.ComputeForkDigest(params.BeaconConfig().MergeForkVersion, genesisValidatorRoot[:])
|
||||
assert.NoError(t, err)
|
||||
topic = r.addDigestToTopic(topic, mergeDigest)
|
||||
m := &pubsub.Message{
|
||||
Message: &pubsubpb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
require.NotNil(t, err)
|
||||
result := res == pubsub.ValidationReject
|
||||
assert.Equal(t, true, result)
|
||||
}
|
||||
|
||||
func TestValidateBeaconBlockPubSub_InvalidParentHashInPayload(t *testing.T) {
|
||||
db := dbtest.SetupDB(t)
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
ctx := context.Background()
|
||||
beaconState, privKeys := util.DeterministicGenesisStateMerge(t, 100)
|
||||
parentBlock := util.NewBeaconBlockMerge()
|
||||
signedParentBlock, err := wrapper.WrappedMergeSignedBeaconBlock(parentBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, signedParentBlock))
|
||||
bRoot, err := parentBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
|
||||
require.NoError(t, db.SaveStateSummary(ctx, ðpb.StateSummary{Root: bRoot[:]}))
|
||||
copied := beaconState.Copy()
|
||||
require.NoError(t, copied.SetSlot(1))
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(ctx, copied)
|
||||
require.NoError(t, err)
|
||||
|
||||
presentTime := time.Now().Unix()
|
||||
msg := util.NewBeaconBlockMerge()
|
||||
msg.Block.ParentRoot = bRoot[:]
|
||||
msg.Block.Slot = 1
|
||||
msg.Block.ProposerIndex = proposerIdx
|
||||
msg.Block.Body.ExecutionPayload.Timestamp = uint64(presentTime)
|
||||
msg.Block.Body.ExecutionPayload.GasUsed = 10
|
||||
msg.Block.Body.ExecutionPayload.GasLimit = 11
|
||||
msg.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("blockHash"), 32)
|
||||
msg.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte("InvalidHash"), 32)
|
||||
msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 1"))
|
||||
msg.Block.Body.ExecutionPayload.Transactions = append(msg.Block.Body.ExecutionPayload.Transactions, []byte("transaction 2"))
|
||||
msg.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, msg.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
|
||||
require.NoError(t, err)
|
||||
|
||||
stateGen := stategen.New(db)
|
||||
chainService := &mock.ChainService{Genesis: time.Unix(presentTime-int64(params.BeaconConfig().SecondsPerSlot), 0),
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: make([]byte, 32),
|
||||
}}
|
||||
r := &Service{
|
||||
cfg: &Config{
|
||||
DB: db,
|
||||
P2P: p,
|
||||
InitialSync: &mockSync.Sync{IsSyncing: false},
|
||||
Chain: chainService,
|
||||
BlockNotifier: chainService.BlockNotifier(),
|
||||
StateGen: stateGen,
|
||||
},
|
||||
seenBlockCache: lruwrpr.New(10),
|
||||
badBlockCache: lruwrpr.New(10),
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_, err = p.Encoding().EncodeGossip(buf, msg)
|
||||
require.NoError(t, err)
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
|
||||
genesisValidatorRoot := r.cfg.Chain.GenesisValidatorRoot()
|
||||
mergeDigest, err := signing.ComputeForkDigest(params.BeaconConfig().MergeForkVersion, genesisValidatorRoot[:])
|
||||
assert.NoError(t, err)
|
||||
topic = r.addDigestToTopic(topic, mergeDigest)
|
||||
m := &pubsub.Message{
|
||||
Message: &pubsubpb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
|
||||
// set the max payload size to small value to test
|
||||
//params.BeaconConfig().MaxExecutionTransactions = 1
|
||||
//params.BeaconConfig().MaxBytesPerOpaqueTransaction = 9
|
||||
|
||||
res, err := r.validateBeaconBlockPubSub(ctx, "", m)
|
||||
require.NotNil(t, err)
|
||||
result := res == pubsub.ValidationReject
|
||||
assert.Equal(t, true, result)
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"strings"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
@@ -189,4 +190,37 @@ var (
|
||||
Usage: "Sets the minimum number of peers that a node will attempt to peer with that are subscribed to a subnet.",
|
||||
Value: 6,
|
||||
}
|
||||
// EnableMerge enables the features necessary to run the merge testnet.
EnableMerge = &cli.BoolFlag{
Name: "merge",
Usage: "Enables the features necessary to run the merge testnet. This is unstable and is for developers only.",
Value: false,
}
// TerminalTotalDifficultyOverride specifies the total difficulty that manually overrides the `TERMINAL_TOTAL_DIFFICULTY` parameter.
TerminalTotalDifficultyOverride = &cli.Uint64Flag{
Name: "terminal-total-difficulty-override",
Usage: "Sets the total difficulty that manually overrides the default TERMINAL_TOTAL_DIFFICULTY value. " +
"Warning: This flag should be used only if you have a clear understanding that the community has decided to override the terminal difficulty. " +
"Incorrect usage will result in your node experiencing consensus failure.",
}
// TerminalBlockHashOverride specifies the terminal block hash that manually overrides the `TERMINAL_BLOCK_HASH` parameter.
TerminalBlockHashOverride = &cli.StringFlag{
Name: "terminal-block-hash-override",
Usage: "Sets the block hash that manually overrides the default TERMINAL_BLOCK_HASH value. " +
"Warning: This flag should be used only if you have a clear understanding that the community has decided to override the terminal block hash. " +
"Incorrect usage will result in your node experiencing consensus failure.",
}
// TerminalBlockHashActivationEpochOverride specifies the epoch that manually overrides the `TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH` parameter.
TerminalBlockHashActivationEpochOverride = &cli.StringFlag{
Name: "terminal-block-hash-epoch-override",
Usage: "Sets the epoch that manually overrides the default TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH value. " +
"Warning: This flag should be used only if you have a clear understanding that the community has decided to override the terminal block hash activation epoch. " +
"Incorrect usage will result in your node experiencing consensus failure.",
}
// FeeRecipient specifies the address that receives the transaction fees.
FeeRecipient = &cli.StringFlag{
Name: "fee-recipient",
Usage: "Post merge, this address will receive the transaction fees produced by any blocks from this node. Defaults to a junk value while the merge is in development.",
Value: hex.EncodeToString([]byte("0x0000000000000000000000000000000000000001")),
}
)
|
||||
|
||||
@@ -63,7 +63,11 @@ var appFlags = []cli.Flag{
|
||||
flags.WeakSubjectivityCheckpt,
|
||||
flags.Eth1HeaderReqLimit,
|
||||
flags.GenesisStatePath,
|
||||
flags.MinPeersPerSubnet,
|
||||
flags.EnableMerge,
|
||||
flags.TerminalTotalDifficultyOverride,
|
||||
flags.TerminalBlockHashOverride,
|
||||
flags.TerminalBlockHashActivationEpochOverride,
|
||||
flags.FeeRecipient,
|
||||
cmd.EnableBackupWebhookFlag,
|
||||
cmd.BackupWebhookOutputDir,
|
||||
cmd.MinimalConfigFlag,
|
||||
|
||||
@@ -121,7 +121,11 @@ var appHelpFlagGroups = []flagGroup{
|
||||
flags.WeakSubjectivityCheckpt,
|
||||
flags.Eth1HeaderReqLimit,
|
||||
flags.GenesisStatePath,
|
||||
flags.MinPeersPerSubnet,
|
||||
flags.EnableMerge,
|
||||
flags.TerminalTotalDifficultyOverride,
|
||||
flags.TerminalBlockHashOverride,
|
||||
flags.TerminalBlockHashActivationEpochOverride,
|
||||
flags.FeeRecipient,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -37,6 +37,7 @@ const disabledFeatureFlag = "Disabled feature flag"
|
||||
type Flags struct {
|
||||
// Testnet Flags.
|
||||
PyrmontTestnet bool // PyrmontTestnet defines the flag through which we can enable the node to run on the Pyrmont testnet.
|
||||
MergeTestnet bool // MergeTestnet defines the flag through which we can enable the node to run on the merge testnet.
|
||||
|
||||
// Feature related flags.
|
||||
RemoteSlasherProtection bool // RemoteSlasherProtection utilizes a beacon node with --slasher mode for validator slashing protection.
|
||||
@@ -130,6 +131,10 @@ func configureTestnet(ctx *cli.Context, cfg *Flags) {
|
||||
log.Warn("Running on the Prater Testnet")
|
||||
params.UsePraterConfig()
|
||||
params.UsePraterNetworkConfig()
|
||||
} else if ctx.Bool(MergeTestnet.Name) {
|
||||
log.Warn("Running on the Merge Testnet")
|
||||
params.UseMergeTestConfig()
|
||||
params.UseMergeTestNetworkConfig()
|
||||
} else {
|
||||
log.Warn("Running on Ethereum Consensus Mainnet")
|
||||
params.UseMainnetConfig()
|
||||
@@ -223,6 +228,10 @@ func ConfigureBeaconChain(ctx *cli.Context) {
|
||||
logEnabled(enableBatchGossipVerification)
|
||||
cfg.EnableBatchVerification = true
|
||||
}
|
||||
if ctx.Bool(MergeTestnet.Name) {
|
||||
cfg.MergeTestnet = true
|
||||
}
|
||||
|
||||
Init(cfg)
|
||||
}
|
||||
|
||||
|
||||
@@ -17,6 +17,11 @@ var (
|
||||
Name: "prater",
|
||||
Usage: "Run Prysm configured for the Prater test network",
|
||||
}
|
||||
// MergeTestnet flag for the multiclient Ethereum consensus testnet.
|
||||
MergeTestnet = &cli.BoolFlag{
|
||||
Name: "merge-testnet",
|
||||
Usage: "Run Prysm configured for the Merge test network",
|
||||
}
|
||||
// Mainnet flag for easier tooling, no-op
|
||||
Mainnet = &cli.BoolFlag{
|
||||
Value: true,
|
||||
@@ -156,6 +161,7 @@ var ValidatorFlags = append(deprecatedFlags, []cli.Flag{
|
||||
disableAttestingHistoryDBCache,
|
||||
PyrmontTestnet,
|
||||
PraterTestnet,
|
||||
MergeTestnet,
|
||||
Mainnet,
|
||||
dynamicKeyReloadDebounceInterval,
|
||||
attestTimely,
|
||||
@@ -176,6 +182,7 @@ var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{
|
||||
attestationAggregationStrategy,
|
||||
PyrmontTestnet,
|
||||
PraterTestnet,
|
||||
MergeTestnet,
|
||||
Mainnet,
|
||||
enablePeerScorer,
|
||||
enableLargerGossipHistory,
|
||||
|
||||
@@ -12,6 +12,7 @@ go_library(
|
||||
"minimal_config.go",
|
||||
"network_config.go",
|
||||
"testnet_e2e_config.go",
|
||||
"testnet_merge_config.go",
|
||||
"testnet_prater_config.go",
|
||||
"testnet_pyrmont_config.go",
|
||||
"testutils.go",
|
||||
@@ -22,6 +23,7 @@ go_library(
|
||||
deps = [
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//params:go_default_library",
|
||||
"@com_github_mohae_deepcopy//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
|
||||
@@ -4,6 +4,7 @@ package params
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
)
|
||||
@@ -120,6 +121,7 @@ type BeaconChainConfig struct {
|
||||
GenesisCountdownInterval time.Duration // How often to log the countdown until the genesis time is reached.
|
||||
BeaconStateFieldCount int // BeaconStateFieldCount defines how many fields are in beacon state.
|
||||
BeaconStateAltairFieldCount int // BeaconStateAltairFieldCount defines how many fields are in beacon state hard fork 1.
|
||||
BeaconStateMergeFieldCount int // BeaconStateMergeFieldCount defines how many fields are in the beacon state for the merge hard fork.
|
||||
|
||||
// Slasher constants.
|
||||
WeakSubjectivityPeriod types.Epoch // WeakSubjectivityPeriod defines the time period expressed in number of epochs were proof of stake network should validate block headers and attestations for slashable events.
|
||||
@@ -129,16 +131,14 @@ type BeaconChainConfig struct {
|
||||
SlashingProtectionPruningEpochs types.Epoch // SlashingProtectionPruningEpochs defines a period after which all prior epochs are pruned in the validator database.
|
||||
|
||||
// Fork-related values.
|
||||
GenesisForkVersion []byte `yaml:"GENESIS_FORK_VERSION" spec:"true"` // GenesisForkVersion is used to track fork version between state transitions.
|
||||
AltairForkVersion []byte `yaml:"ALTAIR_FORK_VERSION" spec:"true"` // AltairForkVersion is used to represent the fork version for altair.
|
||||
AltairForkEpoch types.Epoch `yaml:"ALTAIR_FORK_EPOCH" spec:"true"` // AltairForkEpoch is used to represent the assigned fork epoch for altair.
|
||||
MergeForkVersion []byte `yaml:"MERGE_FORK_VERSION" spec:"true"` // MergeForkVersion is used to represent the fork version for the merge.
|
||||
MergeForkEpoch types.Epoch `yaml:"MERGE_FORK_EPOCH" spec:"true"` // MergeForkEpoch is used to represent the assigned fork epoch for the merge.
|
||||
ShardingForkVersion []byte `yaml:"SHARDING_FORK_VERSION" spec:"true"` // ShardingForkVersion is used to represent the fork version for sharding.
|
||||
ShardingForkEpoch types.Epoch `yaml:"SHARDING_FORK_EPOCH" spec:"true"` // ShardingForkEpoch is used to represent the assigned fork epoch for sharding.
|
||||
ForkVersionSchedule map[[4]byte]types.Epoch // Schedule of fork epochs by version.
|
||||
MinAnchorPowBlockDifficulty uint64 `yaml:"MIN_ANCHOR_POW_BLOCK_DIFFICULTY" spec:"true"` // MinAnchorPowBlockDifficulty specifies the target chain difficulty at the time of the merge.
|
||||
TransitionTotalDifficulty uint64 `yaml:"TRANSITION_TOTAL_DIFFICULTY" spec:"true"` // TransitionTotalDifficulty is part of the experimental merge spec. This value is not used (yet) and is expected to be a uint256.
|
||||
GenesisForkVersion []byte `yaml:"GENESIS_FORK_VERSION" spec:"true"` // GenesisForkVersion is used to track fork version between state transitions.
|
||||
AltairForkVersion []byte `yaml:"ALTAIR_FORK_VERSION" spec:"true"` // AltairForkVersion is used to represent the fork version for altair.
|
||||
AltairForkEpoch types.Epoch `yaml:"ALTAIR_FORK_EPOCH" spec:"true"` // AltairForkEpoch is used to represent the assigned fork epoch for altair.
|
||||
MergeForkVersion []byte `yaml:"MERGE_FORK_VERSION" spec:"true"` // MergeForkVersion is used to represent the fork version for the merge.
|
||||
MergeForkEpoch types.Epoch `yaml:"MERGE_FORK_EPOCH" spec:"true"` // MergeForkEpoch is used to represent the assigned fork epoch for the merge.
|
||||
ShardingForkVersion []byte `yaml:"SHARDING_FORK_VERSION" spec:"true"` // ShardingForkVersion is used to represent the fork version for sharding.
|
||||
ShardingForkEpoch types.Epoch `yaml:"SHARDING_FORK_EPOCH" spec:"true"` // ShardingForkEpoch is used to represent the assigned fork epoch for sharding.
|
||||
ForkVersionSchedule map[[4]byte]types.Epoch // Schedule of fork epochs by version.
|
||||
|
||||
// Weak subjectivity values.
|
||||
SafetyDecay uint64 // SafetyDecay is defined as the loss in the 1/3 consensus safety margin of the casper FFG mechanism.
|
||||
@@ -167,11 +167,26 @@ type BeaconChainConfig struct {
|
||||
InactivityScoreRecoveryRate uint64 `yaml:"INACTIVITY_SCORE_RECOVERY_RATE" spec:"true"` // InactivityScoreRecoveryRate for recovering score bias penalties during inactivity.
|
||||
EpochsPerSyncCommitteePeriod types.Epoch `yaml:"EPOCHS_PER_SYNC_COMMITTEE_PERIOD" spec:"true"` // EpochsPerSyncCommitteePeriod defines how many epochs per sync committee period.
|
||||
|
||||
// Updated penalty values. This moves penalty parameters toward their final, maximum security values.
|
||||
// Updated Altair penalty values. This moves penalty parameters toward their final, maximum security values.
|
||||
// Note: We do not override previous configuration values but instead create new values and replace usage throughout.
|
||||
InactivityPenaltyQuotientAltair uint64 `yaml:"INACTIVITY_PENALTY_QUOTIENT_ALTAIR" spec:"true"` // InactivityPenaltyQuotientAltair for penalties during inactivity post Altair hard fork.
|
||||
MinSlashingPenaltyQuotientAltair uint64 `yaml:"MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR" spec:"true"` // MinSlashingPenaltyQuotientAltair for slashing penalties post Altair hard fork.
|
||||
ProportionalSlashingMultiplierAltair uint64 `yaml:"PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR" spec:"true"` // ProportionalSlashingMultiplierAltair for slashing penalties multiplier post Alair hard fork.
|
||||
ProportionalSlashingMultiplierAltair uint64 `yaml:"PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR" spec:"true"` // ProportionalSlashingMultiplierAltair for slashing penalties' multiplier post Altair hard fork.
|
||||
|
||||
// Merge.
|
||||
EnabledMerge bool // EnabledMerge is true if merge feature is enabled.
|
||||
MaxTransactionsPerPayload uint64 `yaml:"MAX_TRANSACTIONS_PER_PAYLOAD" spec:"true"` // MaxTransactionsPerPayload of beacon chain.
|
||||
MaxBytesPerTransaction uint64 `yaml:"MAX_BYTES_PER_TRANSACTION" spec:"true"` // MaxBytesPerTransaction of beacon chain.
|
||||
TerminalBlockHash common.Hash `yaml:"TERMINAL_BLOCK_HASH" spec:"true"` // TerminalBlockHash of beacon chain.
|
||||
TerminalBlockHashActivationEpoch types.Epoch `yaml:"TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH" spec:"true"` // TerminalBlockHashActivationEpoch of beacon chain.
|
||||
TerminalTotalDifficulty uint64 `yaml:"TERMINAL_TOTAL_DIFFICULTY" spec:"true"` // TerminalTotalDifficulty is part of the experimental merge spec. This value is not used (yet) and is expected to be a uint256.
|
||||
FeeRecipient common.Address // FeeRecipient where the transaction fee goes to.
|
||||
|
||||
// Updated Merge penalty values. This moves penalty parameters toward their final, maximum security values.
|
||||
// Note: We do not override previous configuration values but instead create new values and replace usage throughout.
|
||||
InactivityPenaltyQuotientMerge uint64 `yaml:"INACTIVITY_PENALTY_QUOTIENT_MERGE" spec:"true"` // InactivityPenaltyQuotientMerge for penalties during inactivity post Merge hard fork.
|
||||
MinSlashingPenaltyQuotientMerge uint64 `yaml:"MIN_SLASHING_PENALTY_QUOTIENT_MERGE" spec:"true"` // MinSlashingPenaltyQuotientMerge for slashing penalties post Merge hard fork.
|
||||
ProportionalSlashingMultiplierMerge uint64 `yaml:"PROPORTIONAL_SLASHING_MULTIPLIER_MERGE" spec:"true"` // ProportionalSlashingMultiplierMerge for slashing penalties' multiplier post Merge hard fork.
|
||||
|
||||
// Light client
|
||||
MinSyncCommitteeParticipants uint64 `yaml:"MIN_SYNC_COMMITTEE_PARTICIPANTS" spec:"true"` // MinSyncCommitteeParticipants defines the minimum amount of sync committee participants for which the light client acknowledges the signature.
|
||||
@@ -185,4 +200,6 @@ func (b *BeaconChainConfig) InitializeForkSchedule() {
|
||||
b.ForkVersionSchedule[bytesutil.ToBytes4(b.GenesisForkVersion)] = b.GenesisEpoch
|
||||
// Set Altair fork data.
|
||||
b.ForkVersionSchedule[bytesutil.ToBytes4(b.AltairForkVersion)] = b.AltairForkEpoch
|
||||
// Set Merge fork data.
|
||||
b.ForkVersionSchedule[bytesutil.ToBytes4(b.MergeForkVersion)] = b.MergeForkEpoch
|
||||
}
|
||||
|
||||
@@ -23,6 +23,8 @@ const (
|
||||
genesisForkEpoch = 0
|
||||
// Altair Fork Epoch for mainnet config.
|
||||
mainnetAltairForkEpoch = 74240 // Oct 27, 2021, 10:56:23am UTC
|
||||
// Placeholder for the merge epoch until it is decided
|
||||
mainnetMergeForkEpoch = math.MaxUint64
|
||||
)
|
||||
|
||||
var mainnetNetworkConfig = &NetworkConfig{
|
||||
@@ -178,6 +180,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
|
||||
PresetBase: "mainnet",
|
||||
BeaconStateFieldCount: 21,
|
||||
BeaconStateAltairFieldCount: 24,
|
||||
BeaconStateMergeFieldCount: 25,
|
||||
|
||||
// Slasher related values.
|
||||
WeakSubjectivityPeriod: 54000,
|
||||
@@ -188,17 +191,17 @@ var mainnetBeaconConfig = &BeaconChainConfig{
|
||||
SafetyDecay: 10,
|
||||
|
||||
// Fork related values.
|
||||
GenesisForkVersion: []byte{0, 0, 0, 0},
|
||||
AltairForkVersion: []byte{1, 0, 0, 0},
|
||||
AltairForkEpoch: mainnetAltairForkEpoch,
|
||||
MergeForkVersion: []byte{2, 0, 0, 0},
|
||||
MergeForkEpoch: math.MaxUint64,
|
||||
ShardingForkVersion: []byte{3, 0, 0, 0},
|
||||
ShardingForkEpoch: math.MaxUint64,
|
||||
MinAnchorPowBlockDifficulty: 4294967296,
|
||||
GenesisForkVersion: []byte{0, 0, 0, 0},
|
||||
AltairForkVersion: []byte{1, 0, 0, 0},
|
||||
AltairForkEpoch: mainnetAltairForkEpoch,
|
||||
MergeForkVersion: []byte{2, 0, 0, 0},
|
||||
MergeForkEpoch: mainnetMergeForkEpoch,
|
||||
ShardingForkVersion: []byte{3, 0, 0, 0},
|
||||
ShardingForkEpoch: math.MaxUint64,
|
||||
ForkVersionSchedule: map[[4]byte]types.Epoch{
|
||||
{0, 0, 0, 0}: genesisForkEpoch,
|
||||
{1, 0, 0, 0}: mainnetAltairForkEpoch,
|
||||
{2, 0, 0, 0}: mainnetMergeForkEpoch,
|
||||
// Any further forks must be specified here by their epoch number.
|
||||
},
|
||||
|
||||
@@ -226,11 +229,22 @@ var mainnetBeaconConfig = &BeaconChainConfig{
|
||||
InactivityScoreRecoveryRate: 16,
|
||||
EpochsPerSyncCommitteePeriod: 256,
|
||||
|
||||
// Updated penalty values.
|
||||
// Updated Altair penalty values.
|
||||
InactivityPenaltyQuotientAltair: 3 * 1 << 24, //50331648
|
||||
MinSlashingPenaltyQuotientAltair: 64,
|
||||
ProportionalSlashingMultiplierAltair: 2,
|
||||
|
||||
// Merge.
|
||||
MaxTransactionsPerPayload: 1 << 20,
|
||||
MaxBytesPerTransaction: 1 << 30,
|
||||
TerminalBlockHash: [32]byte{},
|
||||
TerminalBlockHashActivationEpoch: 1<<64 - 1,
|
||||
|
||||
// Updated Merge penalty values.
|
||||
InactivityPenaltyQuotientMerge: 1 << 24,
|
||||
MinSlashingPenaltyQuotientMerge: 32,
|
||||
ProportionalSlashingMultiplierMerge: 3,
|
||||
|
||||
// Light client
|
||||
MinSyncCommitteeParticipants: 1,
|
||||
}
|
||||
|
||||
@@ -96,6 +96,7 @@ func MinimalSpecConfig() *BeaconChainConfig {
|
||||
minimalConfig.ForkVersionSchedule = map[[4]byte]types.Epoch{
|
||||
{0, 0, 0, 1}: 0,
|
||||
{1, 0, 0, 1}: math.MaxUint64,
|
||||
{2, 0, 0, 1}: math.MaxUint64,
|
||||
}
|
||||
minimalConfig.SyncCommitteeSize = 32
|
||||
minimalConfig.InactivityScoreBias = 4
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
package params
|
||||
|
||||
const altairE2EForkEpoch = 6
|
||||
import "math"
|
||||
|
||||
const (
|
||||
altairE2EForkEpoch = 6
|
||||
MergeE2EForkEpoch = math.MaxUint64
|
||||
)
|
||||
|
||||
// UseE2EConfig for beacon chain services.
|
||||
func UseE2EConfig() {
|
||||
@@ -37,6 +42,9 @@ func E2ETestConfig() *BeaconChainConfig {
|
||||
// Altair Fork Parameters.
|
||||
e2eConfig.AltairForkEpoch = altairE2EForkEpoch
|
||||
|
||||
// Merge Fork Parameters.
|
||||
//e2eConfig.MergeForkVersion = MergeE2EForkEpoch // TODO_MERGE: Add a proper merge fork version when e2e is ready for it.
|
||||
|
||||
// Prysm constants.
|
||||
e2eConfig.ConfigName = ConfigNames[EndToEnd]
|
||||
|
||||
|
||||
config/params/testnet_merge_config.go (new file, 42 lines)
@@ -0,0 +1,42 @@
|
||||
package params
|
||||
|
||||
import "math"
|
||||
|
||||
// UseMergeTestNetworkConfig uses the Merge specific
|
||||
// network config.
|
||||
func UseMergeTestNetworkConfig() {
|
||||
cfg := BeaconNetworkConfig().Copy()
|
||||
cfg.ContractDeploymentBlock = 0
|
||||
cfg.BootstrapNodes = []string{
|
||||
"enr:-Iq4QKuNB_wHmWon7hv5HntHiSsyE1a6cUTK1aT7xDSU_hNTLW3R4mowUboCsqYoh1kN9v3ZoSu_WuvW9Aw0tQ0Dxv6GAXxQ7Nv5gmlkgnY0gmlwhLKAlv6Jc2VjcDI1NmsxoQK6S-Cii_KmfFdUJL2TANL3ksaKUnNXvTCv1tLwXs0QgIN1ZHCCIyk",
|
||||
}
|
||||
OverrideBeaconNetworkConfig(cfg)
|
||||
}
|
||||
|
||||
// UseMergeTestConfig sets the main beacon chain
|
||||
// config for Merge testnet.
|
||||
func UseMergeTestConfig() {
|
||||
beaconConfig = MergeTestnetConfig()
|
||||
}
|
||||
|
||||
// MergeTestnetConfig defines the config for the
|
||||
// Merge testnet.
|
||||
func MergeTestnetConfig() *BeaconChainConfig {
|
||||
cfg := MainnetConfig().Copy()
|
||||
cfg.MinGenesisTime = 1634212800
|
||||
cfg.GenesisDelay = 300
|
||||
cfg.ConfigName = "Merge"
|
||||
cfg.GenesisForkVersion = []byte{0x10, 0x00, 0x00, 0x69}
|
||||
cfg.AltairForkVersion = []byte{0x11, 0x00, 0x00, 0x70}
|
||||
cfg.AltairForkEpoch = 4
|
||||
cfg.MergeForkVersion = []byte{0x12, 0x00, 0x00, 0x71}
|
||||
cfg.MergeForkEpoch = 10
|
||||
cfg.TerminalTotalDifficulty = 60000000
|
||||
cfg.ShardingForkVersion = []byte{0x03, 0x00, 0x00, 0x00}
|
||||
cfg.ShardingForkEpoch = math.MaxUint64
|
||||
cfg.SecondsPerETH1Block = 14
|
||||
cfg.DepositChainID = 1337202
|
||||
cfg.DepositNetworkID = 1337202
|
||||
cfg.DepositContractAddress = "0x4242424242424242424242424242424242424242"
|
||||
return cfg
|
||||
}
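A short usage sketch for the helpers defined in this new file, mirroring what the --merge-testnet flag wiring elsewhere in this change does at startup. The params.BeaconConfig() accessor and ConfigName field are assumed from the surrounding package; treat this as illustrative, not the node's actual boot path.

// Illustrative only: selecting the merge testnet configuration before the node starts.
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/config/params"
)

func main() {
	params.UseMergeTestConfig()        // beacon chain config defined above
	params.UseMergeTestNetworkConfig() // network config defined above
	fmt.Println(params.BeaconConfig().ConfigName) // expected to print "Merge"
}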
|
||||
@@ -1,6 +1,8 @@
|
||||
package params
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
eth1Params "github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
@@ -40,10 +42,10 @@ func PraterConfig() *BeaconChainConfig {
|
||||
cfg.DepositChainID = eth1Params.GoerliChainConfig.ChainID.Uint64()
|
||||
cfg.DepositNetworkID = eth1Params.GoerliChainConfig.ChainID.Uint64()
|
||||
cfg.AltairForkEpoch = 36660
|
||||
cfg.MergeForkEpoch = math.MaxUint64 // TODO_MERGE: Add Prater merge epoch when merge is tried in it.
|
||||
cfg.AltairForkVersion = []byte{0x1, 0x0, 0x10, 0x20}
|
||||
cfg.ShardingForkVersion = []byte{0x3, 0x0, 0x10, 0x20}
|
||||
cfg.MergeForkVersion = []byte{0x2, 0x0, 0x10, 0x20}
|
||||
cfg.TransitionTotalDifficulty = 4294967296
|
||||
//cfg.TerminalTotalDifficulty = common.Hex2Bytes("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC00")
|
||||
cfg.DepositContractAddress = "0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b"
|
||||
return cfg
|
||||
}
|
||||
|
||||
@@ -31,7 +31,7 @@ func PyrmontConfig() *BeaconChainConfig {
|
||||
cfg.AltairForkVersion = []byte{0x01, 0x00, 0x20, 0x09}
|
||||
cfg.AltairForkEpoch = 61650
|
||||
cfg.MergeForkVersion = []byte{0x02, 0x00, 0x20, 0x09}
|
||||
cfg.MergeForkEpoch = math.MaxUint64
|
||||
cfg.MergeForkEpoch = math.MaxUint64 // TODO_MERGE: Add Pyrmont merge epoch if merge is ever tried in it.
|
||||
cfg.ShardingForkVersion = []byte{0x03, 0x00, 0x20, 0x09}
|
||||
cfg.ShardingForkEpoch = math.MaxUint64
|
||||
cfg.SecondsPerETH1Block = 14
|
||||
|
||||
deps.bzl (23 lines changed)
@@ -818,8 +818,9 @@ def prysm_deps():
|
||||
importpath = "github.com/ethereum/go-ethereum",
|
||||
patch_args = ["-p1"],
|
||||
patches = ["//third_party:com_github_ethereum_go_ethereum_secp256k1.patch"],
|
||||
sum = "h1:Ft2GcLQrr2M89l49g9NoqgNtJZ9AahzMb7N6VXKZy5U=",
|
||||
version = "v1.10.10",
|
||||
replace = "github.com/MariusVanDerWijden/go-ethereum",
|
||||
sum = "h1:Zq7waRROmrrCpR3L12lfEcGcDzASNtDVgm/jXpVuVRM=",
|
||||
version = "v1.8.22-0.20211106132821-98240256ee51",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
@@ -837,10 +838,14 @@ def prysm_deps():
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_ferranbt_fastssz",
|
||||
importpath = "github.com/ferranbt/fastssz",
|
||||
importpath = "github.com/ferranbt/fastssz", # keep
|
||||
commit = "fa514f0ef27e963d281ecebff240b36169c2b9e3", # keep
|
||||
remote = "https://github.com/kasey/fastssz", # keep
|
||||
nofuzz = True,
|
||||
sum = "h1:6dVcS0LktRSyEEgldFY4N9J17WjUoiJStttH+RZj0Wo=",
|
||||
version = "v0.0.0-20210905181407-59cf6761a7d5",
|
||||
replace = None, # keep
|
||||
sum = None, # keep
|
||||
vcs = "git", # keep
|
||||
version = None, # keep
|
||||
)
|
||||
|
||||
go_repository(
|
||||
@@ -1153,6 +1158,7 @@ def prysm_deps():
|
||||
sum = "h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=",
|
||||
version = "v0.0.4",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_golangci_lint_1",
|
||||
importpath = "github.com/golangci/lint-1",
|
||||
@@ -2348,6 +2354,7 @@ def prysm_deps():
|
||||
sum = "h1:3l11YT8tm9MnwGFQ4kETwkzpAwY2Jt9lCrumCUW4+z4=",
|
||||
version = "v0.7.0",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_marten_seemann_qpack",
|
||||
importpath = "github.com/marten-seemann/qpack",
|
||||
@@ -2685,12 +2692,6 @@ def prysm_deps():
|
||||
version = "v1.0.1",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_nbutton23_zxcvbn_go",
|
||||
importpath = "github.com/nbutton23/zxcvbn-go",
|
||||
sum = "h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E=",
|
||||
version = "v0.0.0-20180912185939-ae427f1e4c1d",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_neelance_astrewrite",
|
||||
importpath = "github.com/neelance/astrewrite",
|
||||
|
||||
@@ -17,6 +17,7 @@ go_library(
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ferranbt_fastssz//:go_default_library",
|
||||
"@com_github_minio_sha256_simd//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
|
||||
ssz "github.com/ferranbt/fastssz"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
@@ -90,3 +92,34 @@ func SlashingsRoot(slashings []uint64) ([32]byte, error) {
	}
	return BitwiseMerkleize(hash.CustomSHA256Hasher(), slashingChunks, uint64(len(slashingChunks)), uint64(len(slashingChunks)))
}

const (
	maxBytesPerTransaction    = 1073741824
	maxTransactionsPerPayload = 1048576
)

// TransactionsRoot computes the HTR for the Transactions property of the ExecutionPayload
// The code was largely copy/pasted from the code generated to compute the HTR of the entire
// ExecutionPayload.
func TransactionsRoot(txs [][]byte) ([32]byte, error) {
	var root [32]byte
	hh := ssz.DefaultHasherPool.Get()
	defer ssz.DefaultHasherPool.Put(hh)
	idx := hh.Index()
	num := uint64(len(txs))
	if num > maxTransactionsPerPayload {
		return root, ssz.ErrIncorrectListSize
	}
	for _, elem := range txs {
		elemIndx := hh.Index()
		byteLen := uint64(len(elem))
		if byteLen > maxBytesPerTransaction {
			return root, ssz.ErrIncorrectListSize
		}
		hh.Append(elem)
		hh.MerkleizeWithMixin(elemIndx, byteLen, (maxBytesPerTransaction+31)/32)
	}
	hh.MerkleizeWithMixin(idx, num, maxTransactionsPerPayload)

	return hh.HashRoot()
}

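For reference, a minimal usage sketch of the TransactionsRoot helper added above, not part of the diff: the two constants match the consensus-spec preset values MAX_BYTES_PER_TRANSACTION (2**30) and MAX_TRANSACTIONS_PER_PAYLOAD (2**20), while the import path and transaction bytes below are placeholders I have assumed for illustration.

package main

import (
	"fmt"
	"log"

	// Hypothetical import path; the diff does not show which package the helper lives in.
	ssz "github.com/prysmaticlabs/prysm/encoding/ssz"
)

func main() {
	// Opaque transaction payloads; in practice these are the encoded
	// transactions carried by an ExecutionPayload.
	txs := [][]byte{
		{0x01, 0x02, 0x03},
		{0x0a, 0x0b},
	}
	// Each transaction is merkleized as a byte list with a length mixin, and
	// the per-transaction roots are then merkleized with the list-length
	// mixin, as in the function added in this commit.
	root, err := ssz.TransactionsRoot(txs)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("transactions hash tree root: %#x\n", root)
}
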
6
go.mod
@@ -40,6 +40,7 @@ require (
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.0.1
	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
	github.com/herumi/bls-eth-go-binary v0.0.0-20210917013441-d37c07cfda4e
	github.com/holiman/uint256 v1.2.0
	github.com/ianlancetaylor/cgosymbolizer v0.0.0-20200424224625-be1b05b0b279
	github.com/ipfs/go-ipfs-addr v0.0.1
	github.com/ipfs/go-log v1.0.5 // indirect
@@ -123,7 +124,12 @@ require (
	k8s.io/utils v0.0.0-20200520001619-278ece378a50 // indirect
)

replace github.com/ethereum/go-ethereum => github.com/MariusVanDerWijden/go-ethereum v1.8.22-0.20211106132821-98240256ee51

replace github.com/json-iterator/go => github.com/prestonvanloon/go v1.1.7-0.20190722034630-4f2e55fcf87b

// See https://github.com/prysmaticlabs/grpc-gateway/issues/2
replace github.com/grpc-ecosystem/grpc-gateway/v2 => github.com/prysmaticlabs/grpc-gateway/v2 v2.3.1-0.20210702154020-550e1cd83ec1

// fa514f0ef27e963d281ecebff240b36169c2b9e3
replace github.com/ferranbt/fastssz => github.com/kasey/fastssz v0.0.0-20211108224242-fa514f0ef27e

9
go.sum
@@ -67,6 +67,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y=
github.com/MariusVanDerWijden/go-ethereum v1.8.22-0.20211106132821-98240256ee51 h1:Zq7waRROmrrCpR3L12lfEcGcDzASNtDVgm/jXpVuVRM=
github.com/MariusVanDerWijden/go-ethereum v1.8.22-0.20211106132821-98240256ee51/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -267,15 +269,10 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/go-ethereum v1.10.10 h1:Ft2GcLQrr2M89l49g9NoqgNtJZ9AahzMb7N6VXKZy5U=
github.com/ethereum/go-ethereum v1.10.10/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/ferranbt/fastssz v0.0.0-20210120143747-11b9eff30ea9/go.mod h1:DyEu2iuLBnb/T51BlsiO3yLYdJC6UbGMrIkqK1KmQxM=
github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5 h1:6dVcS0LktRSyEEgldFY4N9J17WjUoiJStttH+RZj0Wo=
github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5/go.mod h1:S8yiDeAXy8f88W4Ul+0dBMPx49S05byYbmZD6Uv94K4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
@@ -634,6 +631,8 @@ github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1
github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0=
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559 h1:0VWDXPNE0brOek1Q8bLfzKkvOzwbQE/snjGojlCr8CY=
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kasey/fastssz v0.0.0-20211108224242-fa514f0ef27e h1:eLhcqPvSCiBeryzyEJ5SwNE6jzJFxDTb2Hph0HoOs9Y=
github.com/kasey/fastssz v0.0.0-20211108224242-fa514f0ef27e/go.mod h1:S8yiDeAXy8f88W4Ul+0dBMPx49S05byYbmZD6Uv94K4=
github.com/kevinms/leakybucket-go v0.0.0-20200115003610-082473db97ca h1:qNtd6alRqd3qOdPrKXMZImV192ngQ0WSh1briEO33Tk=
github.com/kevinms/leakybucket-go v0.0.0-20200115003610-082473db97ca/go.mod h1:ph+C5vpnCcQvKBwJwKLTK3JLNGnBXYlG7m7JjoC/zYA=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=

@@ -132,6 +132,14 @@
      "tools/analyzers/slicedirect/testdata/slice.go": "Analyzer testdata has to break rules"
    }
  },
  "loopclosure": {
    "only_files": {
      "beacon-chain/.*": "",
      "shared/.*": "",
      "slasher/.*": "",
      "validator/.*": ""
    }
  },
  "ineffassign": {
    "only_files": {
      "beacon-chain/.*": "",

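The hunk above enables the loopclosure analyzer (via nogo) for the beacon-chain, shared, slasher, and validator directories. Purely as an illustration, and not code taken from this repository, the sketch below shows the class of bug that analyzer reports: a function literal started at the end of a loop body that captures the loop variable, which is shared across iterations in the Go versions current at the time of this change.

package main

import (
	"fmt"
	"sync"
)

func main() {
	values := []string{"a", "b", "c"}
	var wg sync.WaitGroup
	for _, v := range values {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Flagged by loopclosure: v is the shared loop variable, so all
			// three goroutines may observe its final value.
			fmt.Println(v)
		}()
	}
	wg.Wait()
	// Typical fixes: shadow the variable (v := v) before starting the
	// goroutine, or pass v to the closure as an argument.
}
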
@@ -18,7 +18,7 @@ import (
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
	github_com_prysmaticlabs_eth2_types "github.com/prysmaticlabs/eth2-types"
	v1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
	"github.com/prysmaticlabs/prysm/proto/eth/v1"
	"github.com/prysmaticlabs/prysm/proto/eth/v2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"

@@ -18,7 +18,7 @@ import (
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
	github_com_prysmaticlabs_eth2_types "github.com/prysmaticlabs/eth2-types"
	v1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
	"github.com/prysmaticlabs/prysm/proto/eth/v1"
	"github.com/prysmaticlabs/prysm/proto/eth/v2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"

Some files were not shown because too many files have changed in this diff.