Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 22:07:59 -05:00)

Compare commits: race-condi...mick-bazel (23 commits)
| SHA1 |
|---|
| 3bbaa5e9c5 |
| 9fab9df61e |
| eedafac822 |
| e9ad7aeff8 |
| 3cfef20938 |
| e90284bc00 |
| 01e15a033f |
| f09b06d6f6 |
| 16e66ee1b8 |
| 3d3890205f |
| a90335b15e |
| 8cd43d216f |
| d25c0ec1a5 |
| e1c4427ea5 |
| 7042791e31 |
| e771585b77 |
| 98622a052f |
| 61033ebea1 |
| e808025b17 |
| 7db0435ee0 |
| 1f086e4333 |
| 184e5be9de |
| e33850bf51 |
@@ -1 +0,0 @@
5.0.0

README_WINDOWS.md (new file, 59 lines)

@@ -0,0 +1,59 @@
- Install WSL 2 on your Windows machine and verify that it is WSL 2, not WSL 1.
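- One way to double-check (from PowerShell, not the WSL shell): `wsl -l -v` should show VERSION 2 for your distro; if it shows 1, the second command converts it. The distro name below is only an example.

> wsl -l -v
> wsl --set-version Ubuntu 2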
> sudo apt install npm

- If npm can't be installed, reset your PATH and try again:

> export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
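- Optional: to make that PATH fix persist across new WSL shells, one common approach is to append the same export to ~/.bashrc:

> echo 'export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' >> ~/.bashrc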
- Then follow the guidance here: https://docs.prylabs.network/docs/install/install-with-bazel/
- If you hit a Bazel version mismatch error, even when forcing the version from .bazelversion to be installed:
- Delete .bazelversion; just make sure you keep WSL's Bazel version in sync with the one specified in .bazelversion.
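- A quick sanity check for that sync, assuming you run it from the repo root:

> cat .bazelversion
> bazel --version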
> sudo apt install bazel --version=5.0.0
> bazel build //beacon-chain:beacon-chain --config=release
- workspace_status.sh not found:
- Use --workspace_status_command=/bin/true, per https://docs.bazel.build/versions/main/user-manual.html#workspace_status

> bazel build //cmd/beacon-chain:beacon-chain --config=release --workspace_status_command=/bin/true
- clang error while loading shared libraries (libtinfo.so.5), see https://stackoverflow.com/questions/48674104/clang-error-while-loading-shared-libraries-libtinfo-so-5-cannot-open-shared-o

> sudo apt install libncurses5
> bazel build //cmd/beacon-chain:beacon-chain --config=release --workspace_status_command=/bin/true
- 'gmpxx.h' file not found:

> sudo apt-get install libgmp-dev
> bazel build //cmd/beacon-chain:beacon-chain --config=release --workspace_status_command=/bin/true
Works!

Start geth on localhost:8545.
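One possible way to do that, assuming a recent go-ethereum install (exact flags vary by geth version):

> geth --http --http.addr 127.0.0.1 --http.port 8545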
> bazel run //beacon-chain --config=release -- --http-web3provider=http://localhost:8545

Works!
Open VS Code with the Prysm code.

If you see errors, make sure you install MinGW-w64: https://www.mingw-w64.org/downloads/#mingw-builds
I used https://www.msys2.org/ and followed the instructions on that page,
in combination with the instructions on this page: https://code.visualstudio.com/docs/cpp/config-mingw#_prerequisites
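For reference, the MinGW-w64 toolchain those pages walk you through boils down to roughly the following pacman command, run inside the MSYS2 shell (treat this as a sketch and defer to the linked instructions):

> pacman -S --needed base-devel mingw-w64-x86_64-toolchain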
blst errors? Try --define=blst_modern=true:

> bazel build //validator:validator --config=release --workspace_status_command=/bin/true --define=blst_modern=true
-----

> bazel build //cmd/beacon-chain:beacon-chain --workspace_status_command=/bin/true --define=blst_modern=true
> bazel build //cmd/validator:validator --workspace_status_command=/bin/true --define=blst_modern=true

---
Then issue this command:

> bazel run //beacon-chain -- --datadir /tmp/chaindata --force-clear-db --interop-genesis-state /tmp/genesis.ssz --interop-eth1data-votes --min-sync-peers=0 --http-web3provider=http://localhost:8545 --deposit-contract 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 --bootstrap-node= --chain-config-file=/tmp/merge.yml
@@ -137,6 +137,7 @@ go_test(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",

@@ -188,6 +189,7 @@ go_test(
"//beacon-chain/powchain/testing:go_default_library",
"//config/params:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
@@ -20,7 +20,7 @@ import (
)

// ChainInfoFetcher defines a common interface for methods in blockchain service which
// directly retrieves chain info related data.
// directly retrieve chain info related data.
type ChainInfoFetcher interface {
HeadFetcher
FinalizationFetcher

@@ -49,7 +49,7 @@ type GenesisFetcher interface {
}

// HeadFetcher defines a common interface for methods in blockchain service which
// directly retrieves head related data.
// directly retrieve head related data.
type HeadFetcher interface {
HeadSlot() types.Slot
HeadRoot(ctx context.Context) ([]byte, error)

@@ -61,8 +61,6 @@ type HeadFetcher interface {
HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool)
HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
ChainHeads() ([][32]byte, []types.Slot)
IsOptimistic(ctx context.Context) (bool, error)
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
HeadSyncCommitteeFetcher
HeadDomainFetcher
}

@@ -79,7 +77,7 @@ type CanonicalFetcher interface {
}

// FinalizationFetcher defines a common interface for methods in blockchain service which
// directly retrieves finalization and justification related data.
// directly retrieve finalization and justification related data.
type FinalizationFetcher interface {
FinalizedCheckpt() *ethpb.Checkpoint
CurrentJustifiedCheckpt() *ethpb.Checkpoint

@@ -87,6 +85,12 @@ type FinalizationFetcher interface {
VerifyFinalizedBlkDescendant(ctx context.Context, blockRoot [32]byte) error
}

// OptimisticModeFetcher retrieves information about optimistic status of the node.
type OptimisticModeFetcher interface {
IsOptimistic(ctx context.Context) (bool, error)
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
}

// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
cp := s.store.FinalizedCheckpt()

@@ -238,7 +242,7 @@ func (s *Service) GenesisTime() time.Time {
return s.genesisTime
}

// GenesisValidatorsRoot returns the genesis validator
// GenesisValidatorsRoot returns the genesis validators
// root of the chain.
func (s *Service) GenesisValidatorsRoot() [32]byte {
s.headLock.RLock()

@@ -305,7 +309,7 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.V
return v.PublicKey(), nil
}

// ForkChoicer returns the forkchoice interface
// ForkChoicer returns the forkchoice interface.
func (s *Service) ForkChoicer() forkchoice.ForkChoicer {
return s.cfg.ForkChoiceStore
}

@@ -321,7 +325,7 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
return s.IsOptimisticForRoot(ctx, s.head.root)
}

// IsOptimisticForRoot takes the root and slot as arguments instead of the current head
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)

@@ -351,7 +355,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return false, nil
}

// checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.
// Checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(validatedCheckpoint.Root)))
if err != nil {
return false, err

@@ -369,7 +373,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return false, err
}

// historical non-canonical blocks here are returned as optimistic for safety.
// Historical non-canonical blocks here are returned as optimistic for safety.
return !isCanonical, nil
}

@@ -378,7 +382,7 @@ func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t
}

// ForkChoiceStore returns the fork choice store in the service
// ForkChoiceStore returns the fork choice store in the service.
func (s *Service) ForkChoiceStore() forkchoice.ForkChoicer {
return s.cfg.ForkChoiceStore
}
@@ -11,8 +11,6 @@ var (
errNilFinalizedInStore = errors.New("nil finalized checkpoint returned from store")
// errInvalidNilSummary is returned when a nil summary is returned from the DB.
errInvalidNilSummary = errors.New("nil summary returned from the DB")
// errNilParentInDB is returned when a nil parent block is returned from the DB.
errNilParentInDB = errors.New("nil parent block in DB")
// errWrongBlockCount is returned when the wrong number of blocks or
// block roots is used
errWrongBlockCount = errors.New("wrong number of blocks or block roots")

@@ -235,14 +235,10 @@ func (s *Service) optimisticCandidateBlock(ctx context.Context, blk interfaces.B
if blk.Slot()+params.BeaconConfig().SafeSlotsToImportOptimistically <= s.CurrentSlot() {
return nil
}

parent, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(blk.ParentRoot()))
parent, err := s.getBlock(ctx, bytesutil.ToBytes32(blk.ParentRoot()))
if err != nil {
return err
}
if parent == nil || parent.IsNil() {
return errNilParentInDB
}
parentIsExecutionBlock, err := blocks.IsExecutionBlock(parent.Block().Body())
if err != nil {
return err

@@ -282,7 +278,10 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
logrus.WithFields(logrus.Fields{
"validatorIndex": proposerID,
"burnAddress": fieldparams.EthBurnAddressHex,
}).Error("Fee recipient not set. Using burn address")
}).Warn("Fee recipient is currently using the burn address, " +
"you will not be rewarded transaction fees on this setting. " +
"Please set a different eth address as the fee recipient. " +
"Please refer to our documentation for instructions")
}
case err != nil:
return false, nil, 0, errors.Wrap(err, "could not get fee recipient in db")

@@ -770,7 +770,7 @@ func Test_GetPayloadAttribute(t *testing.T) {
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, fieldparams.EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
require.LogsContain(t, hook, "Fee recipient not set. Using burn address")
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")

// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
@@ -40,7 +40,7 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not update head")
}
headBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
headBlock, err := s.getBlock(ctx, headRoot)
if err != nil {
return err
}

@@ -86,7 +86,7 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,
// re-initiate fork choice store using the latest justified info.
// This recovers a fatal condition and should not happen in run time.
if !s.cfg.ForkChoiceStore.HasNode(headStartRoot) {
jb, err := s.cfg.BeaconDB.Block(ctx, headStartRoot)
jb, err := s.getBlock(ctx, headStartRoot)
if err != nil {
return [32]byte{}, err
}

@@ -355,7 +355,7 @@ func (s *Service) notifyNewHeadEvent(
// attestation pool. It also filters out the attestations that is one epoch older as a
// defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte) error {
orphanedBlk, err := s.cfg.BeaconDB.Block(ctx, orphanedRoot)
orphanedBlk, err := s.getBlock(ctx, orphanedRoot)
if err != nil {
return err
}

@@ -21,7 +21,7 @@ func testServiceOptsWithDB(t *testing.T) []Option {
}
}

// warning: only use these opts when you are certain there are no db calls
// WARNING: only use these opts when you are certain there are no db calls
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
// initialization requirements w/o the overhead of db init.
func testServiceOptsNoDB() []Option {
@@ -123,7 +123,9 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}
}

if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}

@@ -146,9 +148,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}

if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
// If slasher is configured, forward the attestations in the block via
// an event feed for processing.
if features.Get().EnableSlasher {

@@ -97,13 +97,10 @@ func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byt
return errNilFinalizedInStore
}
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
finalizedBlkSigned, err := s.cfg.BeaconDB.Block(ctx, fRoot)
finalizedBlkSigned, err := s.getBlock(ctx, fRoot)
if err != nil {
return err
}
if finalizedBlkSigned == nil || finalizedBlkSigned.IsNil() || finalizedBlkSigned.Block().IsNil() {
return errors.New("nil finalized block")
}
finalizedBlk := finalizedBlkSigned.Block()
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot())
if err != nil {

@@ -358,7 +355,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
higherThanFinalized := slot > fSlot
// As long as parent node is not in fork choice store, and parent node is in DB.
for !s.cfg.ForkChoiceStore.HasNode(parentRoot) && s.cfg.BeaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
b, err := s.cfg.BeaconDB.Block(ctx, parentRoot)
b, err := s.getBlock(ctx, parentRoot)
if err != nil {
return err
}
@@ -5,6 +5,7 @@ import (
"fmt"
"math/big"
"strconv"
"sync"
"testing"
"time"

@@ -39,6 +40,7 @@ import (
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
prysmTime "github.com/prysmaticlabs/prysm/time"
logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestStore_OnBlock_ProtoArray(t *testing.T) {

@@ -1325,7 +1327,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
args: args{
finalizedRoot: [32]byte{'a'},
},
wantedErr: "nil finalized block",
wantedErr: "block not found in cache or db",
},
{
name: "could not get finalized block root in DB",

@@ -1891,3 +1893,82 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
require.NoError(t, err)
service.insertSlashingsToForkChoiceStore(ctx, wb.Block().Body().AttesterSlashings())
}

func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})

blk1, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
wsb1, err := wrapper.WrappedSignedBeaconBlock(blk1)
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
wsb2, err := wrapper.WrappedSignedBeaconBlock(blk2)
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
wsb3, err := wrapper.WrappedSignedBeaconBlock(blk3)
require.NoError(t, err)
blk4, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 4)
require.NoError(t, err)
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
wsb4, err := wrapper.WrappedSignedBeaconBlock(blk4)
require.NoError(t, err)

logHook := logTest.NewGlobal()
for i := 0; i < 10; i++ {
var wg sync.WaitGroup
wg.Add(4)
go func() {
require.NoError(t, service.onBlock(ctx, wsb1, r1))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb2, r2))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb3, r3))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb4, r4))
wg.Done()
}()
wg.Wait()
require.LogsDoNotContain(t, logHook, "New head does not exist in DB. Do nothing")
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r1))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r2))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r3))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r4))
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'a'})
}
}
@@ -176,7 +176,7 @@ func (s *Service) UpdateHead(ctx context.Context) error {

// This calls notify Forkchoice Update in the event that the head has changed
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
if s.headRoot() == newHeadRoot {
if newHeadRoot == [32]byte{} || s.headRoot() == newHeadRoot {
return
}

@@ -199,6 +199,11 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
require.Equal(t, true, has)
require.Equal(t, types.ValidatorIndex(1), vId)
require.Equal(t, [8]byte{1}, payloadID)

// Test zero headRoot returns immediately.
headRoot := service.headRoot()
service.notifyEngineIfChangedHead(ctx, [32]byte{})
require.Equal(t, service.headRoot(), headRoot)
}

func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {

@@ -201,13 +201,10 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
forkChoicer = protoarray.New(justified.Epoch, finalized.Epoch, fRoot)
}
s.cfg.ForkChoiceStore = forkChoicer
fb, err := s.cfg.BeaconDB.Block(s.ctx, s.ensureRootNotZeros(fRoot))
fb, err := s.getBlock(s.ctx, s.ensureRootNotZeros(fRoot))
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint block")
}
if fb == nil || fb.IsNil() {
return errNilFinalizedInStore
}
payloadHash, err := getBlockPayloadHash(fb.Block())
if err != nil {
return errors.Wrap(err, "could not get execution payload hash")

@@ -339,14 +336,13 @@ func (s *Service) initializeHeadFromDB(ctx context.Context) error {
finalizedState.Slot(), flags.HeadSync.Name)
}
}

finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block from db")
if finalizedState == nil || finalizedState.IsNil() {
return errors.New("finalized state can't be nil")
}

if finalizedState == nil || finalizedState.IsNil() || finalizedBlock == nil || finalizedBlock.IsNil() {
return errors.New("finalized state and block can't be nil")
finalizedBlock, err := s.getBlock(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block")
}
s.setHead(finalizedRoot, finalizedBlock, finalizedState)
@@ -31,6 +31,7 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/container/trie"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"

@@ -86,9 +87,11 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
bState, _ := util.DeterministicGenesisState(t, 10)
pbState, err := v1.ProtobufBeaconState(bState.InnerStateUnsafe())
require.NoError(t, err)
mockTrie, err := trie.NewTrie(0)
require.NoError(t, err)
err = beaconDB.SavePowchainData(ctx, &ethpb.ETH1ChainData{
BeaconState: pbState,
Trie: &ethpb.SparseMerkleTrie{},
Trie: mockTrie.ToProto(),
CurrentEth1Data: &ethpb.LatestETH1Data{
BlockHash: make([]byte, 32),
},
@@ -37,7 +37,7 @@ func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
// the previously read value. This cache assumes we only want to cache one
// set of balances for a single root (the current justified root).
//
// warning: this is not thread-safe on its own, relies on get() for locking
// WARNING: this is not thread-safe on its own, relies on get() for locking
func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
stateBalanceCacheMiss.Inc()
justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot)
@@ -203,3 +203,26 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
f.store.proposerBoostLock.RUnlock()
}

// This is a regression test (10565)
//    ----- C
//   /
// A <- B
//   \
//    ----------D
// D is invalid

func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)

require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'D'}, 1, 1))

_, err := f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'})
require.NoError(t, err)
require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))

}
@@ -51,7 +51,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),

@@ -75,7 +75,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),

@@ -101,7 +101,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),

@@ -111,35 +111,37 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")

// Insert a second block at slot 3 into the tree and boost its score.
// Insert a second block at slot 4 into the tree and boost its score.
// 0
// |
// 1
// |
// 2
// / \
// 3 4 <- HEAD
slot = types.Slot(3)
// 3 |
// 4 <- HEAD
slot = types.Slot(4)
newRoot = indexToHash(4)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
indexToHash(2),
zeroHash,
jEpoch,
fEpoch,
),
)
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
clockSlot := types.Slot(3)
clockSlot := types.Slot(4)
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: newRoot,
BlockSlot: slot,
CurrentSlot: clockSlot,
SecondsIntoSlot: 0,
}

require.NoError(t, f.BoostProposerRoot(ctx, args))
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)

@@ -166,17 +168,27 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
//
// In this case, we have a small fork:
//
// (A: 54) -> (B: 44) -> (C: 34)
// (A: 54) -> (B: 44) -> (C: 10)
// \_->(D: 24)
//
// So B has its own weight, 10, and the sum of both C and D. That's why we see weight 54 in the
// middle instead of the normal progression of (44 -> 34 -> 24).
// middle instead of the normal progression of (54 -> 44 -> 24).
node1 := f.store.nodeByRoot[indexToHash(1)]
require.Equal(t, node1.weight, uint64(54))
node2 := f.store.nodeByRoot[indexToHash(2)]
require.Equal(t, node2.weight, uint64(44))
node3 := f.store.nodeByRoot[indexToHash(4)]
require.Equal(t, node3.weight, uint64(24))
node3 := f.store.nodeByRoot[indexToHash(3)]
require.Equal(t, node3.weight, uint64(10))
node4 := f.store.nodeByRoot[indexToHash(4)]
require.Equal(t, node4.weight, uint64(24))

// Regression: process attestations for C, check that it
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight
f.ProcessAttestation(ctx, []uint64{4, 5}, indexToHash(3), fEpoch)
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), headRoot, "Incorrect head for justified epoch at slot 4")

})
t.Run("vanilla ex ante attack", func(t *testing.T) {
f := setup(jEpoch, fEpoch)
@@ -24,6 +24,7 @@ func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {

// SetOptimisticToValid is called with the root of a block that was returned as
// VALID by the EL.
//
// WARNING: This method returns an error if the root is not found in forkchoice
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) error {
f.store.nodesLock.Lock()
@@ -51,7 +51,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),

@@ -75,7 +75,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),

@@ -101,7 +101,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),

@@ -111,29 +111,30 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")

// Insert a second block at slot 3 into the tree and boost its score.
// Insert a second block at slot 4 into the tree and boost its score.
// 0
// |
// 1
// |
// 2
// / \
// 3 4 <- HEAD
slot = types.Slot(3)
// 3 |
// 4 <- HEAD
slot = types.Slot(4)
newRoot = indexToHash(4)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
indexToHash(2),
zeroHash,
jEpoch,
fEpoch,
),
)
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
clockSlot := types.Slot(3)
clockSlot := types.Slot(4)
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: newRoot,
BlockSlot: slot,

@@ -166,14 +167,22 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
//
// In this case, we have a small fork:
//
// (A: 54) -> (B: 44) -> (C: 24)
// \_->(D: 10)
// (A: 54) -> (B: 44) -> (C: 10)
// \_->(D: 24)
//
// So B has its own weight, 10, and the sum of both C and D. That's why we see weight 54 in the
// middle instead of the normal progression of (44 -> 34 -> 24).
// middle instead of the normal progression of (54 -> 44 -> 24).
require.Equal(t, f.store.nodes[1].weight, uint64(54))
require.Equal(t, f.store.nodes[2].weight, uint64(44))
require.Equal(t, f.store.nodes[3].weight, uint64(34))
require.Equal(t, f.store.nodes[3].weight, uint64(10))
require.Equal(t, f.store.nodes[4].weight, uint64(24))

// Regression: process attestations for C, check that it
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight
f.ProcessAttestation(ctx, []uint64{4, 5}, indexToHash(3), fEpoch)
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), headRoot, "Incorrect head for justified epoch at slot 4")
})
t.Run("vanilla ex ante attack", func(t *testing.T) {
f := setup(jEpoch, fEpoch)
@@ -4,6 +4,7 @@ import (
"fmt"

"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/params"

@@ -117,7 +118,18 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
if !common.IsHexAddress(ha) {
return fmt.Errorf("%s is not a valid fee recipient address", ha)
}
c.DefaultFeeRecipient = common.HexToAddress(ha)
mixedcaseAddress, err := common.NewMixedcaseAddressFromString(ha)
if err != nil {
return errors.Wrapf(err, "could not decode fee recipient %s", ha)
}
checksumAddress := common.HexToAddress(ha)
if !mixedcaseAddress.ValidChecksum() {
log.Warnf("Fee recipient %s is not a checksum Ethereum address. "+
"The checksummed address is %s and will be used as the fee recipient. "+
"We recommend using a mixed-case address (checksum) "+
"to prevent spelling mistakes in your fee recipient Ethereum address", ha, checksumAddress.Hex())
}
c.DefaultFeeRecipient = checksumAddress
params.OverrideBeaconConfig(c)
return nil
}
@@ -87,6 +87,7 @@ func TestConfigureProofOfWork(t *testing.T) {

func TestConfigureExecutionSetting(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()

app := cli.App{}
set := flag.NewFlagSet("test", 0)

@@ -102,11 +103,15 @@ func TestConfigureExecutionSetting(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)

require.NoError(t, set.Set(flags.SuggestedFeeRecipient.Name, "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"))
assert.LogsContain(t, hook,
"is not a checksum Ethereum address",
)
require.NoError(t, set.Set(flags.SuggestedFeeRecipient.Name, "0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"))
cliCtx = cli.NewContext(&app, set, nil)
err = configureExecutionSetting(cliCtx)
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
assert.Equal(t, common.HexToAddress("0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"), params.BeaconConfig().DefaultFeeRecipient)

}

func TestConfigureNetwork(t *testing.T) {
@@ -788,6 +788,7 @@ func (b *BeaconNode) registerRPCService() error {
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
OptimisticModeFetcher: chainService,
AttestationsPool: b.attestationPool,
ExitPool: b.exitPool,
SlashingsPool: b.slashingsPool,
@@ -54,7 +54,9 @@ func init() {
for k, v := range gossipTopicMappings {
GossipTypeMapping[reflect.TypeOf(v)] = k
}
// Specially handle Altair Objects.
// Specially handle Altair objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockAltair{})] = BlockSubnetTopicFormat
// Specially handle Bellatrix objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockBellatrix{})] = BlockSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBlindedBeaconBlockBellatrix{})] = BlockSubnetTopicFormat
}
@@ -81,8 +81,6 @@ func (s *Service) NewPayload(ctx context.Context, payload *pb.ExecutionPayload)
switch result.Status {
case pb.PayloadStatus_INVALID_BLOCK_HASH:
return nil, fmt.Errorf("could not validate block hash: %v", result.ValidationError)
case pb.PayloadStatus_INVALID_TERMINAL_BLOCK:
return nil, fmt.Errorf("could not satisfy terminal block condition: %v", result.ValidationError)
case pb.PayloadStatus_ACCEPTED, pb.PayloadStatus_SYNCING:
return nil, ErrAcceptedSyncingPayloadStatus
case pb.PayloadStatus_INVALID:

@@ -119,8 +117,6 @@ func (s *Service) ForkchoiceUpdated(
}
resp := result.Status
switch resp.Status {
case pb.PayloadStatus_INVALID_TERMINAL_BLOCK:
return nil, nil, fmt.Errorf("could not satisfy terminal block condition: %v", resp.ValidationError)
case pb.PayloadStatus_SYNCING:
return nil, nil, ErrAcceptedSyncingPayloadStatus
case pb.PayloadStatus_INVALID:
@@ -217,27 +217,6 @@ func TestClient_HTTP(t *testing.T) {
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, []byte(nil), validHash)
})
t.Run(ForkchoiceUpdatedMethod+" INVALID_TERMINAL_BLOCK status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
SafeBlockHash: []byte("safe"),
FinalizedBlockHash: []byte("finalized"),
}
payloadAttributes := &pb.PayloadAttributes{
Timestamp: 1,
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
want, ok := fix["ForkchoiceUpdatedInvalidTerminalBlockResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)

// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
require.ErrorContains(t, "could not satisfy terminal block condition", err)
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, []byte(nil), validHash)
})
t.Run(NewPayloadMethod+" VALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)

@@ -274,18 +253,6 @@ func TestClient_HTTP(t *testing.T) {
require.ErrorContains(t, "could not validate block hash", err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethod+" INVALID_TERMINAL_BLOCK status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
want, ok := fix["InvalidTerminalBlockStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadSetup(t, want, execPayload)

// We call the RPC method via HTTP and expect a proper result.
resp, err := client.NewPayload(ctx, execPayload)
require.ErrorContains(t, "could not satisfy terminal block condition", err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethod+" INVALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)

@@ -819,13 +786,6 @@ func fixtures() map[string]interface{} {
},
PayloadId: &id,
}
forkChoiceInvalidTerminalBlockResp := &ForkchoiceUpdatedResponse{
Status: &pb.PayloadStatus{
Status: pb.PayloadStatus_INVALID_TERMINAL_BLOCK,
LatestValidHash: nil,
},
PayloadId: &id,
}
forkChoiceAcceptedResp := &ForkchoiceUpdatedResponse{
Status: &pb.PayloadStatus{
Status: pb.PayloadStatus_ACCEPTED,

@@ -856,10 +816,6 @@ func fixtures() map[string]interface{} {
Status: pb.PayloadStatus_INVALID_BLOCK_HASH,
LatestValidHash: nil,
}
inValidTerminalBlockStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_INVALID_TERMINAL_BLOCK,
LatestValidHash: nil,
}
acceptedStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_ACCEPTED,
LatestValidHash: nil,

@@ -877,21 +833,19 @@ func fixtures() map[string]interface{} {
LatestValidHash: foo[:],
}
return map[string]interface{}{
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"ValidPayloadStatus": validStatus,
"InvalidBlockHashStatus": inValidBlockHashStatus,
"InvalidTerminalBlockStatus": inValidTerminalBlockStatus,
"AcceptedStatus": acceptedStatus,
"SyncingStatus": syncingStatus,
"InvalidStatus": invalidStatus,
"UnknownStatus": unknownStatus,
"ForkchoiceUpdatedResponse": forkChoiceResp,
"ForkchoiceUpdatedSyncingResponse": forkChoiceSyncingResp,
"ForkchoiceUpdatedInvalidTerminalBlockResponse": forkChoiceInvalidTerminalBlockResp,
"ForkchoiceUpdatedAcceptedResponse": forkChoiceAcceptedResp,
"ForkchoiceUpdatedInvalidResponse": forkChoiceInvalidResp,
"TransitionConfiguration": transitionCfg,
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"ValidPayloadStatus": validStatus,
"InvalidBlockHashStatus": inValidBlockHashStatus,
"AcceptedStatus": acceptedStatus,
"SyncingStatus": syncingStatus,
"InvalidStatus": invalidStatus,
"UnknownStatus": unknownStatus,
"ForkchoiceUpdatedResponse": forkChoiceResp,
"ForkchoiceUpdatedSyncingResponse": forkChoiceSyncingResp,
"ForkchoiceUpdatedAcceptedResponse": forkChoiceAcceptedResp,
"ForkchoiceUpdatedInvalidResponse": forkChoiceInvalidResp,
"TransitionConfiguration": transitionCfg,
}
}
@@ -767,9 +767,12 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
if eth1DataInDB == nil {
return nil
}
s.depositTrie = trie.CreateTrieFromProto(eth1DataInDB.Trie)
s.chainStartData = eth1DataInDB.ChainstartData
var err error
s.depositTrie, err = trie.CreateTrieFromProto(eth1DataInDB.Trie)
if err != nil {
return err
}
s.chainStartData = eth1DataInDB.ChainstartData
if !reflect.ValueOf(eth1DataInDB.BeaconState).IsZero() {
s.preGenesisState, err = v1.InitializeFromProto(eth1DataInDB.BeaconState)
if err != nil {
@@ -16,44 +16,78 @@ import (
"github.com/r3labs/sse"
)

const (
versionHeader = "Eth-Consensus-Version"
grpcVersionHeader = "Grpc-metadata-Eth-Consensus-Version"
)

type sszConfig struct {
sszPath string
fileName string
responseJson sszResponseJson
responseJson sszResponse
}

func handleGetBeaconStateSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
sszPath: "/eth/v1/debug/beacon/states/{state_id}/ssz",
fileName: "beacon_state.ssz",
responseJson: &beaconStateSSZResponseJson{},
responseJson: &sszResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}

func handleGetBeaconBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
sszPath: "/eth/v1/beacon/blocks/{block_id}/ssz",
fileName: "beacon_block.ssz",
responseJson: &blockSSZResponseJson{},
responseJson: &sszResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}

func handleGetBeaconStateSSZV2(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
sszPath: "/eth/v2/debug/beacon/states/{state_id}/ssz",
fileName: "beacon_state.ssz",
responseJson: &beaconStateSSZResponseV2Json{},
responseJson: &versionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}

func handleGetBeaconBlockSSZV2(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
sszPath: "/eth/v2/beacon/blocks/{block_id}/ssz",
fileName: "beacon_block.ssz",
responseJson: &blockSSZResponseV2Json{},
responseJson: &versionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}

func handleSubmitBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
return handlePostSSZ(m, endpoint, w, req, sszConfig{})
}

func handleSubmitBlindedBlockSSZ(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
w http.ResponseWriter,
req *http.Request,
) (handled bool) {
return handlePostSSZ(m, endpoint, w, req, sszConfig{})
}

func handleProduceBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
fileName: "produce_beacon_block.ssz",
responseJson: &versionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}

func handleProduceBlindedBlockSSZ(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
w http.ResponseWriter,
req *http.Request,
) (handled bool) {
config := sszConfig{
fileName: "produce_blinded_beacon_block.ssz",
responseJson: &versionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}

@@ -69,7 +103,7 @@ func handleGetSSZ(
return false
}

if errJson := prepareSSZRequestForProxying(m, endpoint, req, config.sszPath); errJson != nil {
if errJson := prepareSSZRequestForProxying(m, endpoint, req); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}

@@ -112,6 +146,53 @@ func handleGetSSZ(
return true
}

func handlePostSSZ(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
w http.ResponseWriter,
req *http.Request,
config sszConfig,
) (handled bool) {
if !sszPosted(req) {
return false
}

if errJson := prepareSSZRequestForProxying(m, endpoint, req); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
prepareCustomHeaders(req)
if errJson := preparePostedSSZData(req); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}

grpcResponse, errJson := m.ProxyRequest(req)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
grpcResponseBody, errJson := apimiddleware.ReadGrpcResponseBody(grpcResponse.Body)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
respHasError, errJson := apimiddleware.HandleGrpcResponseError(endpoint.Err, grpcResponse, grpcResponseBody, w)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return
}
if respHasError {
return
}
if errJson := apimiddleware.Cleanup(grpcResponse.Body); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}

return true
}

func sszRequested(req *http.Request) bool {
accept, ok := req.Header["Accept"]
if !ok {

@@ -125,24 +206,57 @@ func sszRequested(req *http.Request) bool {
return false
}

func prepareSSZRequestForProxying(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
req *http.Request, sszPath string,
) apimiddleware.ErrorJson {
func sszPosted(req *http.Request) bool {
ct, ok := req.Header["Content-Type"]
if !ok {
return false
}
if len(ct) != 1 {
return false
}
return ct[0] == "application/octet-stream"
}

func prepareSSZRequestForProxying(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, req *http.Request) apimiddleware.ErrorJson {
req.URL.Scheme = "http"
req.URL.Host = m.GatewayAddress
req.RequestURI = ""
req.URL.Path = sszPath
if errJson := apimiddleware.HandleURLParameters(endpoint.Path, req, []string{}); errJson != nil {
if errJson := apimiddleware.HandleURLParameters(endpoint.Path, req, endpoint.RequestURLLiterals); errJson != nil {
return errJson
}
// We have to add the prefix after handling parameters because adding the prefix changes URL segment indexing.
req.URL.Path = "/internal" + req.URL.Path
if errJson := apimiddleware.HandleQueryParameters(req, endpoint.RequestQueryParams); errJson != nil {
return errJson
}
// We have to add new segments after handling parameters because it changes URL segment indexing.
req.URL.Path = "/internal" + req.URL.Path + "/ssz"
return nil
}

func serializeMiddlewareResponseIntoSSZ(respJson sszResponseJson) (version string, ssz []byte, errJson apimiddleware.ErrorJson) {
func prepareCustomHeaders(req *http.Request) {
ver := req.Header.Get(versionHeader)
if ver != "" {
req.Header.Del(versionHeader)
req.Header.Add(grpcVersionHeader, ver)
}
}

func preparePostedSSZData(req *http.Request) apimiddleware.ErrorJson {
buf, err := io.ReadAll(req.Body)
if err != nil {
return apimiddleware.InternalServerErrorWithMessage(err, "could not read body")
}
j := sszRequestJson{Data: base64.StdEncoding.EncodeToString(buf)}
data, err := json.Marshal(j)
if err != nil {
return apimiddleware.InternalServerErrorWithMessage(err, "could not prepare POST data")
}
req.Body = io.NopCloser(bytes.NewBuffer(data))
req.ContentLength = int64(len(data))
req.Header.Set("Content-Type", "application/json")
return nil
}

func serializeMiddlewareResponseIntoSSZ(respJson sszResponse) (version string, ssz []byte, errJson apimiddleware.ErrorJson) {
// Serialize the SSZ part of the deserialized value.
data, err := base64.StdEncoding.DecodeString(respJson.SSZData())
if err != nil {

@@ -168,7 +282,7 @@ func writeSSZResponseHeaderAndBody(grpcResp *http.Response, w http.ResponseWrite
w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", "attachment; filename="+fileName)
w.Header().Set("Eth-Consensus-Version", respVersion)
w.Header().Set(versionHeader, respVersion)
if statusCodeHeader != "" {
code, err := strconv.Atoi(statusCodeHeader)
if err != nil {
@@ -70,11 +70,21 @@ func TestPrepareSSZRequestForProxying(t *testing.T) {
var body bytes.Buffer
request := httptest.NewRequest("GET", "http://foo.example", &body)

errJson := prepareSSZRequestForProxying(middleware, endpoint, request, "/ssz")
errJson := prepareSSZRequestForProxying(middleware, endpoint, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, "/internal/ssz", request.URL.Path)
}

func TestPreparePostedSszData(t *testing.T) {
var body bytes.Buffer
body.Write([]byte("body"))
request := httptest.NewRequest("POST", "http://foo.example", &body)

preparePostedSSZData(request)
assert.Equal(t, int64(19), request.ContentLength)
assert.Equal(t, "application/json", request.Header.Get("Content-Type"))
}

func TestSerializeMiddlewareResponseIntoSSZ(t *testing.T) {
t.Run("ok", func(t *testing.T) {
j := testSSZResponseJson{

@@ -133,7 +143,7 @@ func TestWriteSSZResponseHeaderAndBody(t *testing.T) {
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "attachment; filename=test.ssz", v[0])
v, ok = writer.Header()["Eth-Consensus-Version"]
v, ok = writer.Header()[versionHeader]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "version", v[0])
@@ -110,11 +110,13 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
OnPreDeserializeRequestBodyIntoContainer: setInitialPublishBlockPostRequest,
OnPostDeserializeRequestBodyIntoContainer: preparePublishedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleSubmitBlockSSZ}
case "/eth/v1/beacon/blinded_blocks":
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: setInitialPublishBlindedBlockPostRequest,
OnPostDeserializeRequestBodyIntoContainer: preparePublishedBlindedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleSubmitBlindedBlockSSZ}
case "/eth/v1/beacon/blocks/{block_id}":
endpoint.GetResponse = &blockResponseJson{}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBeaconBlockSSZ}

@@ -221,6 +223,7 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.Hooks = apimiddleware.HookCollection{
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedV2Block,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlockSSZ}
case "/eth/v1/validator/blinded_blocks/{slot}":
endpoint.GetResponse = &produceBlindedBlockResponseJson{}
endpoint.RequestURLLiterals = []string{"slot"}

@@ -228,6 +231,7 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.Hooks = apimiddleware.HookCollection{
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedBlindedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlindedBlockSSZ}
case "/eth/v1/validator/attestation_data":
endpoint.GetResponse = &produceAttestationDataResponseJson{}
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "committee_index"}}
@@ -7,6 +7,10 @@ import (
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
|
||||
)
|
||||
|
||||
//----------------
|
||||
// Requests and responses.
|
||||
//----------------
|
||||
|
||||
// genesisResponseJson is used in /beacon/genesis API endpoint.
|
||||
type genesisResponseJson struct {
|
||||
Data *genesisResponse_GenesisJson `json:"data"`
|
||||
@@ -457,7 +461,7 @@ type blindedBeaconBlockBodyBellatrixJson struct {
|
||||
Deposits []*depositJson `json:"deposits"`
|
||||
VoluntaryExits []*signedVoluntaryExitJson `json:"voluntary_exits"`
|
||||
SyncAggregate *syncAggregateJson `json:"sync_aggregate"`
|
||||
ExecutionPayloadHeader *executionPayloadHeaderJson `json:"execution_payload"`
|
||||
ExecutionPayloadHeader *executionPayloadHeaderJson `json:"execution_payload_header"`
|
||||
}
|
||||
|
||||
type executionPayloadJson struct {
|
||||
@@ -489,7 +493,7 @@ type executionPayloadHeaderJson struct {
|
||||
GasUsed string `json:"gas_used"`
|
||||
TimeStamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data" hex:"true"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas" hex:"true"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
|
||||
BlockHash string `json:"block_hash" hex:"true"`
|
||||
TransactionsRoot string `json:"transactions_root" hex:"true"`
|
||||
}
|
||||
@@ -833,63 +837,38 @@ type syncCommitteeContributionJson struct {
// SSZ
// ---------------

// sszResponseJson is a common abstraction over all SSZ responses.
type sszResponseJson interface {
type sszRequestJson struct {
Data string `json:"data"`
}

// sszResponse is a common abstraction over all SSZ responses.
type sszResponse interface {
SSZVersion() string
SSZData() string
}

// blockSSZResponseJson is used in /beacon/blocks/{block_id} API endpoint.
type blockSSZResponseJson struct {
type sszResponseJson struct {
Data string `json:"data"`
}

func (ssz *blockSSZResponseJson) SSZData() string {
func (ssz *sszResponseJson) SSZData() string {
return ssz.Data
}

func (*blockSSZResponseJson) SSZVersion() string {
func (*sszResponseJson) SSZVersion() string {
return strings.ToLower(ethpbv2.Version_PHASE0.String())
}

// blockSSZResponseV2Json is used in /v2/beacon/blocks/{block_id} API endpoint.
type blockSSZResponseV2Json struct {
type versionedSSZResponseJson struct {
Version string `json:"version"`
Data string `json:"data"`
}

func (ssz *blockSSZResponseV2Json) SSZData() string {
func (ssz *versionedSSZResponseJson) SSZData() string {
return ssz.Data
}

func (ssz *blockSSZResponseV2Json) SSZVersion() string {
return ssz.Version
}

// beaconStateSSZResponseJson is used in /debug/beacon/states/{state_id} API endpoint.
type beaconStateSSZResponseJson struct {
Data string `json:"data"`
}

func (ssz *beaconStateSSZResponseJson) SSZData() string {
return ssz.Data
}

func (*beaconStateSSZResponseJson) SSZVersion() string {
return strings.ToLower(ethpbv2.Version_PHASE0.String())
}

// beaconStateSSZResponseV2Json is used in /v2/debug/beacon/states/{state_id} API endpoint.
type beaconStateSSZResponseV2Json struct {
Version string `json:"version"`
Data string `json:"data"`
}

func (ssz *beaconStateSSZResponseV2Json) SSZData() string {
return ssz.Data
}

func (ssz *beaconStateSSZResponseV2Json) SSZVersion() string {
func (ssz *versionedSSZResponseJson) SSZVersion() string {
return ssz.Version
}

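The refactor above collapses the per-endpoint SSZ response types into two shapes, both satisfying the sszResponse interface. A minimal sketch of the resulting contract, using only names taken from the diff; the compile-time assertions are illustrative and are not part of the change:

// Illustrative only: both consolidated response types satisfy the sszResponse
// abstraction introduced in this diff.
var (
	_ sszResponse = (*sszResponseJson)(nil)          // version is implied (phase0)
	_ sszResponse = (*versionedSSZResponseJson)(nil) // version travels with the payload
)
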
@@ -44,6 +44,7 @@ go_library(
"//consensus-types/wrapper:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//network/forks:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
@@ -56,6 +57,7 @@ go_library(
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
@@ -114,6 +116,7 @@ go_test(
"@com_github_wealdtech_go_bytesutil//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
],

@@ -18,16 +18,21 @@ import (
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/network/forks"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
"github.com/prysmaticlabs/prysm/proto/migration"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
)

const versionHeader = "eth-consensus-version"

// blockIdParseError represents an error scenario where a block ID could not be parsed.
type blockIdParseError struct {
message string
@@ -109,7 +114,7 @@ func (bs *Server) GetBlockHeader(ctx context.Context, req *ethpbv1.BlockRequest)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine if block root is canonical: %v", err)
}
isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, blkRoot)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
@@ -175,7 +180,7 @@ func (bs *Server) ListBlockHeaders(ctx context.Context, req *ethpbv1.BlockHeader
return nil, status.Errorf(codes.Internal, "Could not determine if block root is canonical: %v", err)
}
if !isOptimistic {
isOptimistic, err = bs.HeadFetcher.IsOptimisticForRoot(ctx, blkRoots[i])
isOptimistic, err = bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoots[i])
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
@@ -223,6 +228,45 @@ func (bs *Server) SubmitBlock(ctx context.Context, req *ethpbv2.SignedBeaconBloc
return &emptypb.Empty{}, nil
}

// SubmitBlockSSZ instructs the beacon node to broadcast a newly signed beacon block to the beacon network, to be
// included in the beacon chain. The beacon node is not required to validate the signed BeaconBlock, and a successful
// response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the
// new block into its state, and therefore validate the block internally, however blocks which fail the validation are
// still broadcast but a different status code is returned (202).
//
// The provided block must be SSZ-serialized.
func (bs *Server) SubmitBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlockSSZ")
defer span.End()

md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
}
ver := md.Get(versionHeader)
if len(ver) == 0 {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
}
schedule := forks.NewOrderedSchedule(params.BeaconConfig())
forkVer, err := schedule.VersionForName(ver[0])
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not determine fork version: %v", err)
}
unmarshaler, err := detect.FromForkVersion(forkVer)
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not create unmarshaler: %v", err)
}
block, err := unmarshaler.UnmarshalBeaconBlock(req.Data)
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
}
root, err := block.Block().HashTreeRoot()
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not compute block's hash tree root: %v", err)
}
return &emptypb.Empty{}, bs.submitBlock(ctx, root, block)
}

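For orientation, the handler above expects the fork name to arrive as gRPC metadata under the eth-consensus-version key, alongside the raw SSZ bytes in an SSZContainer. A minimal caller sketch, assuming a generated client that exposes SubmitBlockSSZ; the blockSubmitter interface below is a placeholder and not part of this diff:

package example

import (
	"context"

	ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
	"google.golang.org/protobuf/types/known/emptypb"
)

// blockSubmitter is a hypothetical stand-in for whichever generated gRPC client
// exposes the SubmitBlockSSZ RPC shown above.
type blockSubmitter interface {
	SubmitBlockSSZ(ctx context.Context, in *ethpbv2.SSZContainer, opts ...grpc.CallOption) (*emptypb.Empty, error)
}

// submitSSZ attaches the fork name as outgoing metadata and sends the SSZ-encoded block.
func submitSSZ(ctx context.Context, client blockSubmitter, forkName string, sszBytes []byte) error {
	ctx = metadata.AppendToOutgoingContext(ctx, "eth-consensus-version", forkName)
	_, err := client.SubmitBlockSSZ(ctx, &ethpbv2.SSZContainer{Data: sszBytes})
	return err
}
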
// SubmitBlindedBlock instructs the beacon node to use the components of the `SignedBlindedBeaconBlock` to construct
// and publish a `SignedBeaconBlock` by swapping out the `transactions_root` for the corresponding full list of `transactions`.
// The beacon node should broadcast a newly constructed `SignedBeaconBlock` to the beacon network,
@@ -259,6 +303,48 @@ func (bs *Server) SubmitBlindedBlock(ctx context.Context, req *ethpbv2.SignedBli
return &emptypb.Empty{}, nil
}

// SubmitBlindedBlockSSZ instructs the beacon node to use the components of the `SignedBlindedBeaconBlock` to construct
// and publish a `SignedBeaconBlock` by swapping out the `transactions_root` for the corresponding full list of `transactions`.
// The beacon node should broadcast a newly constructed `SignedBeaconBlock` to the beacon network,
// to be included in the beacon chain. The beacon node is not required to validate the signed
// `BeaconBlock`, and a successful response (20X) only indicates that the broadcast has been
// successful. The beacon node is expected to integrate the new block into its state, and
// therefore validate the block internally, however blocks which fail the validation are still
// broadcast but a different status code is returned (202).
//
// The provided block must be SSZ-serialized.
func (bs *Server) SubmitBlindedBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlindedBlockSSZ")
defer span.End()

md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
}
ver := md.Get(versionHeader)
if len(ver) == 0 {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
}
schedule := forks.NewOrderedSchedule(params.BeaconConfig())
forkVer, err := schedule.VersionForName(ver[0])
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not determine fork version: %v", err)
}
unmarshaler, err := detect.FromForkVersion(forkVer)
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not create unmarshaler: %v", err)
}
block, err := unmarshaler.UnmarshalBlindedBeaconBlock(req.Data)
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
}
root, err := block.Block().HashTreeRoot()
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not compute block's hash tree root: %v", err)
}
return &emptypb.Empty{}, bs.submitBlock(ctx, root, block)
}

// GetBlock retrieves block details for given block ID.
func (bs *Server) GetBlock(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv1.BlockResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.GetBlock")
@@ -371,7 +457,7 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}
isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, root)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
@@ -393,7 +479,7 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
}

// GetBlockSSZV2 returns the SSZ-serialized version of the beacon block for given block ID.
func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (*ethpbv2.BlockSSZResponseV2, error) {
func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (*ethpbv2.SSZContainer, error) {
ctx, span := trace.StartSpan(ctx, "beacon.GetBlockSSZV2")
defer span.End()

@@ -413,7 +499,7 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ: %v", err)
}
return &ethpbv2.BlockSSZResponseV2{Version: ethpbv2.Version_PHASE0, Data: sszBlock}, nil
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_PHASE0, Data: sszBlock}, nil
}
// ErrUnsupportedPhase0Block means that we have another block type
if !errors.Is(err, wrapper.ErrUnsupportedPhase0Block) {
@@ -437,7 +523,7 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ: %v", err)
}
return &ethpbv2.BlockSSZResponseV2{Version: ethpbv2.Version_ALTAIR, Data: sszData}, nil
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_ALTAIR, Data: sszData}, nil
}
// ErrUnsupportedAltairBlock means that we have another block type
if !errors.Is(err, wrapper.ErrUnsupportedAltairBlock) {
@@ -461,7 +547,7 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ: %v", err)
}
return &ethpbv2.BlockSSZResponseV2{Version: ethpbv2.Version_BELLATRIX, Data: sszData}, nil
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_BELLATRIX, Data: sszData}, nil
}
// ErrUnsupportedBellatrixBlock means that we have another block type
if !errors.Is(err, wrapper.ErrUnsupportedBellatrixBlock) {
@@ -543,7 +629,7 @@ func (bs *Server) GetBlockRoot(ctx context.Context, req *ethpbv1.BlockRequest) (
}
}

isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, bytesutil.ToBytes32(root))
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, bytesutil.ToBytes32(root))
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
@@ -616,7 +702,7 @@ func (bs *Server) ListBlockAttestations(ctx context.Context, req *ethpbv1.BlockR
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}
isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, root)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/db"
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
mockp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
@@ -21,6 +22,7 @@ import (
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"google.golang.org/grpc/metadata"
)

func fillDBTestBlocks(ctx context.Context, t *testing.T, beaconDB db.Database) (*ethpbalpha.SignedBeaconBlock, []*ethpbalpha.BeaconBlockContainer) {
@@ -187,9 +189,10 @@ func TestServer_GetBlockHeader(t *testing.T) {
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
@@ -287,9 +290,10 @@ func TestServer_GetBlockHeader(t *testing.T) {
|
||||
Optimistic: true,
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
header, err := bs.GetBlockHeader(ctx, ðpbv1.BlockRequest{BlockId: []byte("head")})
|
||||
require.NoError(t, err)
|
||||
@@ -312,9 +316,10 @@ func TestServer_ListBlockHeaders(t *testing.T) {
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
HeadFetcher: mockChainFetcher,
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
HeadFetcher: mockChainFetcher,
|
||||
OptimisticModeFetcher: mockChainFetcher,
|
||||
}
|
||||
|
||||
b2 := util.NewBeaconBlock()
|
||||
@@ -416,9 +421,10 @@ func TestServer_ListBlockHeaders(t *testing.T) {
|
||||
Optimistic: true,
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
HeadFetcher: mockChainFetcher,
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
HeadFetcher: mockChainFetcher,
|
||||
OptimisticModeFetcher: mockChainFetcher,
|
||||
}
|
||||
slot := types.Slot(30)
|
||||
headers, err := bs.ListBlockHeaders(ctx, ðpbv1.BlockHeadersRequest{
|
||||
@@ -557,6 +563,289 @@ func TestServer_SubmitBlock_OK(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_SubmitBlockSSZ_OK(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := util.NewBeaconBlock()
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb), "Could not save genesis block")
|
||||
|
||||
numDeposits := uint64(64)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
|
||||
bsRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
beaconChainServer := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
BlockReceiver: c,
|
||||
ChainInfoFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
Broadcaster: mockp2p.NewTestP2P(t),
|
||||
HeadFetcher: c,
|
||||
}
|
||||
req := util.NewBeaconBlock()
|
||||
req.Block.Slot = 5
|
||||
req.Block.ParentRoot = bsRoot[:]
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(req)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
blockSsz, err := req.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: blockSsz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "phase0")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = beaconChainServer.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err, "Could not propose block correctly")
|
||||
})
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := util.NewBeaconBlockAltair()
|
||||
wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")
|
||||
|
||||
numDeposits := uint64(64)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
|
||||
bsRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
beaconChainServer := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
BlockReceiver: c,
|
||||
ChainInfoFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
Broadcaster: mockp2p.NewTestP2P(t),
|
||||
HeadFetcher: c,
|
||||
}
|
||||
req := util.NewBeaconBlockAltair()
|
||||
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().AltairForkEpoch))
|
||||
req.Block.ParentRoot = bsRoot[:]
|
||||
wrapped, err = wrapper.WrappedSignedBeaconBlock(req)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
|
||||
blockSsz, err := req.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: blockSsz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "altair")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = beaconChainServer.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err, "Could not propose block correctly")
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
// INFO: This code block can be removed once Bellatrix
|
||||
// fork epoch is set to a value other than math.MaxUint64
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.BellatrixForkEpoch = cfg.AltairForkEpoch + 1000
|
||||
cfg.ForkVersionSchedule[bytesutil.ToBytes4(cfg.BellatrixForkVersion)] = cfg.AltairForkEpoch + 1000
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := util.NewBeaconBlockBellatrix()
|
||||
wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")
|
||||
|
||||
numDeposits := uint64(64)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
|
||||
bsRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
beaconChainServer := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
BlockReceiver: c,
|
||||
ChainInfoFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
Broadcaster: mockp2p.NewTestP2P(t),
|
||||
HeadFetcher: c,
|
||||
}
|
||||
req := util.NewBeaconBlockBellatrix()
|
||||
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
|
||||
req.Block.ParentRoot = bsRoot[:]
|
||||
wrapped, err = wrapper.WrappedSignedBeaconBlock(req)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
|
||||
blockSsz, err := req.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: blockSsz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "bellatrix")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = beaconChainServer.SubmitBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err, "Could not propose block correctly")
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_SubmitBlindedBlockSSZ_OK(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := util.NewBeaconBlock()
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb), "Could not save genesis block")
|
||||
|
||||
numDeposits := uint64(64)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
|
||||
bsRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
beaconChainServer := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
BlockReceiver: c,
|
||||
ChainInfoFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
Broadcaster: mockp2p.NewTestP2P(t),
|
||||
HeadFetcher: c,
|
||||
}
|
||||
req := util.NewBeaconBlock()
|
||||
req.Block.Slot = 5
|
||||
req.Block.ParentRoot = bsRoot[:]
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(req)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
blockSsz, err := req.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: blockSsz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "phase0")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err, "Could not propose block correctly")
|
||||
})
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := util.NewBeaconBlockAltair()
|
||||
wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")
|
||||
|
||||
numDeposits := uint64(64)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
|
||||
bsRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
beaconChainServer := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
BlockReceiver: c,
|
||||
ChainInfoFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
Broadcaster: mockp2p.NewTestP2P(t),
|
||||
HeadFetcher: c,
|
||||
}
|
||||
req := util.NewBeaconBlockAltair()
|
||||
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().AltairForkEpoch))
|
||||
req.Block.ParentRoot = bsRoot[:]
|
||||
wrapped, err = wrapper.WrappedSignedBeaconBlock(req)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
|
||||
blockSsz, err := req.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: blockSsz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "altair")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err, "Could not propose block correctly")
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
// INFO: This code block can be removed once Bellatrix
|
||||
// fork epoch is set to a value other than math.MaxUint64
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.BellatrixForkEpoch = cfg.AltairForkEpoch + 1000
|
||||
cfg.ForkVersionSchedule[bytesutil.ToBytes4(cfg.BellatrixForkVersion)] = cfg.AltairForkEpoch + 1000
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := util.NewBeaconBlockBellatrix()
|
||||
wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")
|
||||
|
||||
numDeposits := uint64(64)
|
||||
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
|
||||
bsRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
beaconChainServer := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
BlockReceiver: c,
|
||||
ChainInfoFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
Broadcaster: mockp2p.NewTestP2P(t),
|
||||
HeadFetcher: c,
|
||||
}
|
||||
req := util.NewBlindedBeaconBlockBellatrix()
|
||||
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
|
||||
req.Block.ParentRoot = bsRoot[:]
|
||||
wrapped, err = wrapper.WrappedSignedBeaconBlock(req)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
|
||||
blockSsz, err := req.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
blockReq := ðpbv2.SSZContainer{
|
||||
Data: blockSsz,
|
||||
}
|
||||
md := metadata.MD{}
|
||||
md.Set(versionHeader, "bellatrix")
|
||||
sszCtx := metadata.NewIncomingContext(ctx, md)
|
||||
_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
|
||||
assert.NoError(t, err, "Could not propose block correctly")
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubmitBlindedBlock(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
@@ -836,9 +1125,10 @@ func TestServer_GetBlockV2(t *testing.T) {
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
|
||||
@@ -955,9 +1245,10 @@ func TestServer_GetBlockV2(t *testing.T) {
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
genBlk, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
|
||||
@@ -1074,9 +1365,10 @@ func TestServer_GetBlockV2(t *testing.T) {
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
genBlk, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
|
||||
@@ -1194,9 +1486,10 @@ func TestServer_GetBlockV2(t *testing.T) {
|
||||
Optimistic: true,
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
blk, err := bs.GetBlockV2(ctx, ðpbv2.BlockRequestV2{
|
||||
@@ -1395,9 +1688,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
HeadFetcher: mockChainFetcher,
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
HeadFetcher: mockChainFetcher,
|
||||
OptimisticModeFetcher: mockChainFetcher,
|
||||
}
|
||||
|
||||
root, err := genBlk.Block.HashTreeRoot()
|
||||
@@ -1485,9 +1779,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
Optimistic: true,
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
HeadFetcher: mockChainFetcher,
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
HeadFetcher: mockChainFetcher,
|
||||
OptimisticModeFetcher: mockChainFetcher,
|
||||
}
|
||||
blockRootResp, err := bs.GetBlockRoot(ctx, ðpbv1.BlockRequest{
|
||||
BlockId: []byte("head"),
|
||||
@@ -1513,9 +1808,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
|
||||
@@ -1615,9 +1911,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
genBlk, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
|
||||
@@ -1717,9 +2014,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
genBlk, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
|
||||
@@ -1820,9 +2118,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
|
||||
Optimistic: true,
|
||||
}
|
||||
bs := &Server{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
resp, err := bs.ListBlockAttestations(ctx, ðpbv1.BlockRequest{
|
||||
BlockId: []byte("head"),
|
||||
|
||||
@@ -34,6 +34,7 @@ type Server struct {
StateGenService stategen.StateManager
StateFetcher statefetcher.Fetcher
HeadFetcher blockchain.HeadFetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
V1Alpha1ValidatorServer *v1alpha1validator.Server
SyncChecker sync.Checker
CanonicalHistory *stategen.CanonicalHistory

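The new OptimisticModeFetcher field is what the handlers below switch to for optimistic-status checks. In the accompanying test updates, a single mock chain service is assigned to both fetcher fields; a sketch of that wiring, where the helper name and the omitted fields are assumptions rather than part of the diff:

// Illustrative test wiring only: one mock satisfies both fetcher interfaces.
// Field names come from this diff; newTestServer does not.
func newTestServer(optimistic bool) *Server {
	chainService := &chainMock.ChainService{Optimistic: optimistic}
	return &Server{
		HeadFetcher:           chainService,
		OptimisticModeFetcher: chainService,
		// other fields (StateFetcher, BeaconDB, ...) omitted for brevity
	}
}
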
@@ -75,7 +75,7 @@ func (bs *Server) GetStateRoot(ctx context.Context, req *ethpb.StateRequest) (*e
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -103,7 +103,7 @@ func (bs *Server) GetStateFork(ctx context.Context, req *ethpb.StateRequest) (*e
return nil, helpers.PrepareStateFetchGRPCError(err)
}
fork := st.Fork()
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -133,7 +133,7 @@ func (bs *Server) GetFinalityCheckpoints(ctx context.Context, req *ethpb.StateRe
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}

@@ -80,13 +80,15 @@ func TestGetStateRoot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
db := dbTest.SetupDB(t)
|
||||
|
||||
chainService := &chainMock.ChainService{}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconStateRoot: stateRoot[:],
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
resp, err := server.GetStateRoot(context.Background(), ð.StateRequest{
|
||||
@@ -107,13 +109,15 @@ func TestGetStateRoot(t *testing.T) {
|
||||
require.NoError(t, db.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
|
||||
|
||||
chainService := &chainMock.ChainService{Optimistic: true}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconStateRoot: stateRoot[:],
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{Optimistic: true},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
resp, err := server.GetStateRoot(context.Background(), ð.StateRequest{
|
||||
StateId: make([]byte, 0),
|
||||
@@ -138,12 +142,14 @@ func TestGetStateFork(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
db := dbTest.SetupDB(t)
|
||||
|
||||
chainService := &chainMock.ChainService{}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
resp, err := server.GetStateFork(ctx, ð.StateRequest{
|
||||
@@ -167,12 +173,14 @@ func TestGetStateFork(t *testing.T) {
|
||||
require.NoError(t, db.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
|
||||
|
||||
chainService := &chainMock.ChainService{Optimistic: true}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{Optimistic: true},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
resp, err := server.GetStateFork(context.Background(), ð.StateRequest{
|
||||
StateId: make([]byte, 0),
|
||||
@@ -204,12 +212,14 @@ func TestGetFinalityCheckpoints(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
db := dbTest.SetupDB(t)
|
||||
|
||||
chainService := &chainMock.ChainService{}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
resp, err := server.GetFinalityCheckpoints(ctx, ð.StateRequest{
|
||||
@@ -235,12 +245,14 @@ func TestGetFinalityCheckpoints(t *testing.T) {
|
||||
require.NoError(t, db.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
|
||||
|
||||
chainService := &chainMock.ChainService{Optimistic: true}
|
||||
server := &Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{Optimistic: true},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
resp, err := server.GetFinalityCheckpoints(context.Background(), ð.StateRequest{
|
||||
StateId: make([]byte, 0),
|
||||
|
||||
@@ -91,7 +91,7 @@ func (bs *Server) ListSyncCommittees(ctx context.Context, req *ethpbv2.StateSync
|
||||
return nil, status.Errorf(codes.Internal, "Could not extract sync subcommittees: %v", err)
|
||||
}
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
|
||||
}
|
||||
|
||||
@@ -162,6 +162,7 @@ func TestListSyncCommittees(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
db := dbTest.SetupDB(t)
|
||||
|
||||
chainService := &mock.ChainService{}
|
||||
s := &Server{
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
@@ -169,8 +170,9 @@ func TestListSyncCommittees(t *testing.T) {
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &mock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
req := ðpbv2.StateSyncCommitteesRequest{StateId: stRoot[:]}
|
||||
resp, err := s.ListSyncCommittees(ctx, req)
|
||||
@@ -205,6 +207,7 @@ func TestListSyncCommittees(t *testing.T) {
|
||||
require.NoError(t, db.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
|
||||
|
||||
chainService := &mock.ChainService{Optimistic: true}
|
||||
s := &Server{
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
@@ -212,8 +215,9 @@ func TestListSyncCommittees(t *testing.T) {
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &mock.ChainService{Optimistic: true},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
resp, err := s.ListSyncCommittees(ctx, req)
|
||||
require.NoError(t, err)
|
||||
@@ -261,6 +265,7 @@ func TestListSyncCommitteesFuture(t *testing.T) {
|
||||
}))
|
||||
db := dbTest.SetupDB(t)
|
||||
|
||||
chainService := &mock.ChainService{}
|
||||
s := &Server{
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
@@ -268,8 +273,9 @@ func TestListSyncCommitteesFuture(t *testing.T) {
|
||||
StateFetcher: &futureSyncMockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &mock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
req := ðpbv2.StateSyncCommitteesRequest{}
|
||||
epoch := 2 * params.BeaconConfig().EpochsPerSyncCommitteePeriod
|
||||
|
||||
@@ -57,7 +57,7 @@ func (bs *Server) GetValidator(ctx context.Context, req *ethpb.StateValidatorReq
|
||||
return nil, status.Error(codes.NotFound, "Could not find validator")
|
||||
}
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
|
||||
}
|
||||
@@ -80,7 +80,7 @@ func (bs *Server) ListValidators(ctx context.Context, req *ethpb.StateValidators
|
||||
return nil, handleValContainerErr(err)
|
||||
}
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
|
||||
}
|
||||
@@ -143,7 +143,7 @@ func (bs *Server) ListValidatorBalances(ctx context.Context, req *ethpb.Validato
|
||||
}
|
||||
}
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
|
||||
}
|
||||
@@ -202,7 +202,7 @@ func (bs *Server) ListCommittees(ctx context.Context, req *ethpb.StateCommittees
|
||||
}
|
||||
}
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
|
||||
}
|
||||
|
||||
@@ -33,12 +33,14 @@ func TestGetValidator(t *testing.T) {
|
||||
st, _ = util.DeterministicGenesisState(t, 8192)
|
||||
|
||||
t.Run("Head Get Validator by index", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
resp, err := s.GetValidator(ctx, ðpb.StateValidatorRequest{
|
||||
@@ -50,12 +52,14 @@ func TestGetValidator(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Head Get Validator by pubkey", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
pubKey := st.PubkeyAtIndex(types.ValidatorIndex(20))
|
||||
@@ -93,12 +97,14 @@ func TestGetValidator(t *testing.T) {
|
||||
require.NoError(t, db.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
|
||||
|
||||
chainService := &chainMock.ChainService{Optimistic: true}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{Optimistic: true},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
resp, err := s.GetValidator(ctx, ðpb.StateValidatorRequest{
|
||||
StateId: []byte("head"),
|
||||
@@ -117,12 +123,14 @@ func TestListValidators(t *testing.T) {
|
||||
st, _ = util.DeterministicGenesisState(t, 8192)
|
||||
|
||||
t.Run("Head List All Validators", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
resp, err := s.ListValidators(ctx, ðpb.StateValidatorsRequest{
|
||||
@@ -136,12 +144,14 @@ func TestListValidators(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Head List Validators by index", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
ids := [][]byte{[]byte("15"), []byte("26"), []byte("400")}
|
||||
@@ -158,12 +168,14 @@ func TestListValidators(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Head List Validators by pubkey", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
idNums := []types.ValidatorIndex{20, 66, 90, 100}
|
||||
pubkey1 := st.PubkeyAtIndex(types.ValidatorIndex(20))
|
||||
@@ -184,12 +196,14 @@ func TestListValidators(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Head List Validators by both index and pubkey", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
idNums := []types.ValidatorIndex{20, 90, 170, 129}
|
||||
@@ -212,12 +226,14 @@ func TestListValidators(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Unknown public key is ignored", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
existingKey := st.PubkeyAtIndex(types.ValidatorIndex(1))
|
||||
@@ -232,12 +248,14 @@ func TestListValidators(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Unknown index is ignored", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
ids := [][]byte{[]byte("1"), []byte("99999")}
|
||||
@@ -261,12 +279,14 @@ func TestListValidators(t *testing.T) {
|
||||
require.NoError(t, db.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
|
||||
|
||||
chainService := &chainMock.ChainService{Optimistic: true}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{Optimistic: true},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
resp, err := s.ListValidators(ctx, ðpb.StateValidatorsRequest{
|
||||
StateId: []byte("head"),
|
||||
@@ -349,12 +369,14 @@ func TestListValidators_Status(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Run("Head List All ACTIVE Validators", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &statefetcher.StateProvider{
|
||||
ChainInfoFetcher: &chainMock.ChainService{State: st},
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
resp, err := s.ListValidators(ctx, ðpb.StateValidatorsRequest{
|
||||
@@ -384,12 +406,14 @@ func TestListValidators_Status(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Head List All ACTIVE_ONGOING Validators", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &statefetcher.StateProvider{
|
||||
ChainInfoFetcher: &chainMock.ChainService{State: st},
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
resp, err := s.ListValidators(ctx, ðpb.StateValidatorsRequest{
|
||||
@@ -418,12 +442,14 @@ func TestListValidators_Status(t *testing.T) {
|
||||
|
||||
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*35))
|
||||
t.Run("Head List All EXITED Validators", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &statefetcher.StateProvider{
|
||||
ChainInfoFetcher: &chainMock.ChainService{State: st},
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
resp, err := s.ListValidators(ctx, ðpb.StateValidatorsRequest{
|
||||
@@ -451,12 +477,14 @@ func TestListValidators_Status(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Head List All PENDING_INITIALIZED and EXITED_UNSLASHED Validators", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &statefetcher.StateProvider{
|
||||
ChainInfoFetcher: &chainMock.ChainService{State: st},
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
resp, err := s.ListValidators(ctx, ðpb.StateValidatorsRequest{
|
||||
@@ -484,12 +512,14 @@ func TestListValidators_Status(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Head List All PENDING and EXITED Validators", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &statefetcher.StateProvider{
|
||||
ChainInfoFetcher: &chainMock.ChainService{State: st},
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
resp, err := s.ListValidators(ctx, ðpb.StateValidatorsRequest{
|
||||
@@ -532,12 +562,14 @@ func TestListValidatorBalances(t *testing.T) {
|
||||
require.NoError(t, st.SetBalances(balances))
|
||||
|
||||
t.Run("Head List Validators Balance by index", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
ids := [][]byte{[]byte("15"), []byte("26"), []byte("400")}
|
||||
@@ -554,12 +586,14 @@ func TestListValidatorBalances(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Head List Validators Balance by pubkey", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
idNums := []types.ValidatorIndex{20, 66, 90, 100}
|
||||
pubkey1 := st.PubkeyAtIndex(types.ValidatorIndex(20))
|
||||
@@ -579,12 +613,14 @@ func TestListValidatorBalances(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Head List Validators Balance by both index and pubkey", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
idNums := []types.ValidatorIndex{20, 90, 170, 129}
|
||||
@@ -613,12 +649,14 @@ func TestListValidatorBalances(t *testing.T) {
|
||||
require.NoError(t, db.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
|
||||
|
||||
chainService := &chainMock.ChainService{Optimistic: true}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{Optimistic: true},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
ids := [][]byte{[]byte("15"), []byte("26"), []byte("400")}
|
||||
@@ -640,12 +678,14 @@ func TestListCommittees(t *testing.T) {
|
||||
epoch := slots.ToEpoch(st.Slot())
|
||||
|
||||
t.Run("Head All Committees", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
resp, err := s.ListCommittees(ctx, &ethpb.StateCommitteesRequest{
|
||||
@@ -660,12 +700,14 @@ func TestListCommittees(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Head All Committees of Epoch 10", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
epoch := types.Epoch(10)
|
||||
resp, err := s.ListCommittees(ctx, &ethpb.StateCommitteesRequest{
|
||||
@@ -679,12 +721,14 @@ func TestListCommittees(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Head All Committees of Slot 4", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
slot := types.Slot(4)
|
||||
@@ -704,12 +748,14 @@ func TestListCommittees(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Head All Committees of Index 1", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
index := types.CommitteeIndex(1)
|
||||
@@ -729,12 +775,14 @@ func TestListCommittees(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Head All Committees of Slot 2, Index 1", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
index := types.CommitteeIndex(1)
|
||||
@@ -764,12 +812,14 @@ func TestListCommittees(t *testing.T) {
|
||||
require.NoError(t, db.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
|
||||
|
||||
chainService := &chainMock.ChainService{Optimistic: true}
|
||||
s := Server{
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: &chainMock.ChainService{Optimistic: true},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
resp, err := s.ListCommittees(ctx, &ethpb.StateCommitteesRequest{
|
||||
|
||||
@@ -39,7 +39,7 @@ func (ds *Server) GetBeaconState(ctx context.Context, req *ethpbv1.StateRequest)
|
||||
}
|
||||
|
||||
// GetBeaconStateSSZ returns the SSZ-serialized version of the full beacon state object for given state ID.
|
||||
func (ds *Server) GetBeaconStateSSZ(ctx context.Context, req *ethpbv1.StateRequest) (*ethpbv1.BeaconStateSSZResponse, error) {
|
||||
func (ds *Server) GetBeaconStateSSZ(ctx context.Context, req *ethpbv1.StateRequest) (*ethpbv2.SSZContainer, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "debug.GetBeaconStateSSZ")
|
||||
defer span.End()
|
||||
|
||||
@@ -53,7 +53,7 @@ func (ds *Server) GetBeaconStateSSZ(ctx context.Context, req *ethpbv1.StateReque
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal state into SSZ: %v", err)
|
||||
}
|
||||
|
||||
return &ethpbv1.BeaconStateSSZResponse{Data: sszState}, nil
return &ethpbv2.SSZContainer{Data: sszState}, nil
|
||||
}
|
||||
|
||||
// GetBeaconStateV2 returns the full beacon state for a given state ID.
|
||||
@@ -65,7 +65,7 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.StateReques
|
||||
if err != nil {
|
||||
return nil, helpers.PrepareStateFetchGRPCError(err)
|
||||
}
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, beaconSt, ds.HeadFetcher)
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, beaconSt, ds.OptimisticModeFetcher)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
|
||||
}
|
||||
@@ -121,21 +121,32 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.StateReques
|
||||
}
|
||||
|
||||
// GetBeaconStateSSZV2 returns the SSZ-serialized version of the full beacon state object for given state ID.
|
||||
func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.StateRequestV2) (*ethpbv2.BeaconStateSSZResponseV2, error) {
|
||||
func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.StateRequestV2) (*ethpbv2.SSZContainer, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "debug.GetBeaconStateSSZV2")
|
||||
defer span.End()
|
||||
|
||||
state, err := ds.StateFetcher.State(ctx, req.StateId)
|
||||
st, err := ds.StateFetcher.State(ctx, req.StateId)
|
||||
if err != nil {
|
||||
return nil, helpers.PrepareStateFetchGRPCError(err)
|
||||
}
|
||||
|
||||
sszState, err := state.MarshalSSZ()
|
||||
sszState, err := st.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal state into SSZ: %v", err)
|
||||
}
|
||||
var ver ethpbv2.Version
|
||||
switch st.Version() {
|
||||
case version.Phase0:
|
||||
ver = ethpbv2.Version_PHASE0
|
||||
case version.Altair:
|
||||
ver = ethpbv2.Version_ALTAIR
|
||||
case version.Bellatrix:
|
||||
ver = ethpbv2.Version_BELLATRIX
|
||||
default:
|
||||
return nil, status.Error(codes.Internal, "Unsupported state version")
|
||||
}
|
||||
|
||||
return &ethpbv2.BeaconStateSSZResponseV2{Data: sszState}, nil
return &ethpbv2.SSZContainer{Data: sszState, Version: ver}, nil
|
||||
}
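The hunk above changes GetBeaconStateSSZV2 to return a version-tagged SSZ container instead of a bare byte payload. Below is a minimal, standalone sketch of that response shape; the Version and SSZContainer types and the marshalState helper are local stand-ins, not Prysm's generated protobuf types.

```go
package main

import "fmt"

// Version tags which fork the serialized state belongs to.
// These names mirror the enum used in the diff but are local stand-ins.
type Version int

const (
	Phase0 Version = iota
	Altair
	Bellatrix
)

// SSZContainer pairs the raw SSZ bytes with the fork version,
// so a client knows which schema to decode against.
type SSZContainer struct {
	Version Version
	Data    []byte
}

// marshalState is a placeholder for the state's MarshalSSZ call in the diff.
func marshalState(stateVersion Version) (SSZContainer, error) {
	data := []byte{0x01, 0x02} // stand-in for the real SSZ encoding
	switch stateVersion {
	case Phase0, Altair, Bellatrix:
		return SSZContainer{Version: stateVersion, Data: data}, nil
	default:
		return SSZContainer{}, fmt.Errorf("unsupported state version: %d", stateVersion)
	}
}

func main() {
	c, err := marshalState(Altair)
	if err != nil {
		panic(err)
	}
	fmt.Printf("version=%d bytes=%d\n", c.Version, len(c.Data))
}
```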
|
||||
|
||||
// ListForkChoiceHeads retrieves the leaves of the current fork choice tree.
|
||||
@@ -167,7 +178,7 @@ func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (
|
||||
Data: make([]*ethpbv2.ForkChoiceHead, len(headRoots)),
|
||||
}
|
||||
for i := range headRoots {
|
||||
isOptimistic, err := ds.HeadFetcher.IsOptimisticForRoot(ctx, headRoots[i])
|
||||
isOptimistic, err := ds.OptimisticModeFetcher.IsOptimisticForRoot(ctx, headRoots[i])
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if head is optimistic: %v", err)
|
||||
}
|
||||
|
||||
@@ -44,8 +44,9 @@ func TestGetBeaconStateV2(t *testing.T) {
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: &blockchainmock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: &blockchainmock.ChainService{},
|
||||
OptimisticModeFetcher: &blockchainmock.ChainService{},
|
||||
BeaconDB: db,
|
||||
}
|
||||
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
|
||||
StateId: make([]byte, 0),
|
||||
@@ -60,8 +61,9 @@ func TestGetBeaconStateV2(t *testing.T) {
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: &blockchainmock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: &blockchainmock.ChainService{},
|
||||
OptimisticModeFetcher: &blockchainmock.ChainService{},
|
||||
BeaconDB: db,
|
||||
}
|
||||
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
|
||||
StateId: make([]byte, 0),
|
||||
@@ -76,8 +78,9 @@ func TestGetBeaconStateV2(t *testing.T) {
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: &blockchainmock.ChainService{},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: &blockchainmock.ChainService{},
|
||||
OptimisticModeFetcher: &blockchainmock.ChainService{},
|
||||
BeaconDB: db,
|
||||
}
|
||||
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
|
||||
StateId: make([]byte, 0),
|
||||
@@ -102,8 +105,9 @@ func TestGetBeaconStateV2(t *testing.T) {
|
||||
StateFetcher: &testutil.MockFetcher{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: &blockchainmock.ChainService{Optimistic: true},
|
||||
BeaconDB: db,
|
||||
HeadFetcher: &blockchainmock.ChainService{},
|
||||
OptimisticModeFetcher: &blockchainmock.ChainService{Optimistic: true},
|
||||
BeaconDB: db,
|
||||
}
|
||||
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
|
||||
StateId: make([]byte, 0),
|
||||
@@ -153,6 +157,7 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
|
||||
assert.NotNil(t, resp)
|
||||
|
||||
assert.DeepEqual(t, sszState, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_PHASE0, resp.Version)
|
||||
})
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
fakeState, _ := util.DeterministicGenesisStateAltair(t, 1)
|
||||
@@ -171,6 +176,7 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
|
||||
assert.NotNil(t, resp)
|
||||
|
||||
assert.DeepEqual(t, sszState, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_ALTAIR, resp.Version)
|
||||
})
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
fakeState, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
@@ -189,6 +195,7 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
|
||||
assert.NotNil(t, resp)
|
||||
|
||||
assert.DeepEqual(t, sszState, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -238,8 +245,10 @@ func TestListForkChoiceHeadsV2(t *testing.T) {
|
||||
Root: bytesutil.ToBytes32(bytesutil.PadTo([]byte("bar"), 32)),
|
||||
}}
|
||||
|
||||
chainService := &blockchainmock.ChainService{}
|
||||
server := &Server{
|
||||
HeadFetcher: &blockchainmock.ChainService{},
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
resp, err := server.ListForkChoiceHeadsV2(ctx, &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
@@ -257,8 +266,10 @@ func TestListForkChoiceHeadsV2(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Run("optimistic head", func(t *testing.T) {
|
||||
chainService := &blockchainmock.ChainService{Optimistic: true}
|
||||
server := &Server{
|
||||
HeadFetcher: &blockchainmock.ChainService{Optimistic: true},
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
resp, err := server.ListForkChoiceHeadsV2(ctx, &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -12,7 +12,8 @@ import (
// Server defines a server implementation of the gRPC Beacon Chain service,
// providing RPC endpoints to access data relevant to the Ethereum Beacon Chain.
type Server struct {
BeaconDB db.ReadOnlyDatabase
HeadFetcher blockchain.HeadFetcher
StateFetcher statefetcher.Fetcher
BeaconDB db.ReadOnlyDatabase
HeadFetcher blockchain.HeadFetcher
StateFetcher statefetcher.Fetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
}
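This struct change splits the chain dependencies so that optimistic-sync checks go through a dedicated OptimisticModeFetcher field rather than piggybacking on HeadFetcher. A hedged sketch of the idea, with hypothetical interfaces and a shared mock that satisfies both, much like the single chainService reused across fields in the updated tests:

```go
package main

import (
	"context"
	"fmt"
)

// Narrow, purpose-specific interfaces; the method sets here are illustrative only.
type HeadFetcher interface {
	HeadRoot(ctx context.Context) ([32]byte, error)
}

type OptimisticModeFetcher interface {
	IsOptimistic(ctx context.Context) (bool, error)
}

// mockChain satisfies both interfaces, so one value can be wired into
// both server fields, as the updated tests in the diff do.
type mockChain struct{ optimistic bool }

func (m *mockChain) HeadRoot(context.Context) ([32]byte, error) { return [32]byte{}, nil }
func (m *mockChain) IsOptimistic(context.Context) (bool, error) { return m.optimistic, nil }

type Server struct {
	HeadFetcher           HeadFetcher
	OptimisticModeFetcher OptimisticModeFetcher
}

func main() {
	chain := &mockChain{optimistic: true}
	s := Server{HeadFetcher: chain, OptimisticModeFetcher: chain}
	opt, _ := s.OptimisticModeFetcher.IsOptimistic(context.Background())
	fmt.Println("optimistic:", opt)
}
```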
@@ -39,7 +39,7 @@ func ValidateSync(ctx context.Context, syncChecker sync.Checker, headFetcher blo
}

// IsOptimistic checks whether the latest block header of the passed in beacon state is the header of an optimistic block.
func IsOptimistic(ctx context.Context, st state.BeaconState, headFetcher blockchain.HeadFetcher) (bool, error) {
func IsOptimistic(ctx context.Context, st state.BeaconState, optimisticSyncFetcher blockchain.OptimisticModeFetcher) (bool, error) {
root, err := st.HashTreeRoot(ctx)
if err != nil {
return false, errors.Wrap(err, "could not get state root")
@@ -50,7 +50,7 @@ func IsOptimistic(ctx context.Context, st state.BeaconState, headFetcher blockch
if err != nil {
return false, errors.Wrap(err, "could not get header root")
}
isOptimistic, err := headFetcher.IsOptimisticForRoot(ctx, headRoot)
isOptimistic, err := optimisticSyncFetcher.IsOptimisticForRoot(ctx, headRoot)
if err != nil {
return false, errors.Wrap(err, "could not check if block is optimistic")
}
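The helper now takes an OptimisticModeFetcher and resolves the optimistic flag for the state's header root. A simplified, self-contained sketch of that flow; the state and fetcher types are illustrative stand-ins, and the root derivation is collapsed into a single call:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// OptimisticModeFetcher is a stand-in for the interface used in the diff.
type OptimisticModeFetcher interface {
	IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
}

// fakeState mimics the single lookup the helper needs from a beacon state.
type fakeState struct{ headerRoot [32]byte }

func (s fakeState) LatestBlockHeaderRoot() ([32]byte, error) { return s.headerRoot, nil }

type fakeFetcher struct{ optimisticRoots map[[32]byte]bool }

func (f fakeFetcher) IsOptimisticForRoot(_ context.Context, root [32]byte) (bool, error) {
	return f.optimisticRoots[root], nil
}

// isOptimistic follows the shape of the helper in the diff: resolve the
// state's header root, then delegate the optimistic check to the fetcher.
func isOptimistic(ctx context.Context, st fakeState, f OptimisticModeFetcher) (bool, error) {
	root, err := st.LatestBlockHeaderRoot()
	if err != nil {
		return false, errors.New("could not get header root")
	}
	return f.IsOptimisticForRoot(ctx, root)
}

func main() {
	root := [32]byte{1}
	f := fakeFetcher{optimisticRoots: map[[32]byte]bool{root: true}}
	ok, _ := isOptimistic(context.Background(), fakeState{headerRoot: root}, f)
	fmt.Println(ok)
}
```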
|
||||
|
||||
@@ -58,14 +58,14 @@ func TestIsOptimistic(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("optimistic", func(t *testing.T) {
|
||||
mockHeadFetcher := &chainmock.ChainService{Optimistic: true}
|
||||
o, err := IsOptimistic(ctx, st, mockHeadFetcher)
|
||||
mockOptSyncFetcher := &chainmock.ChainService{Optimistic: true}
|
||||
o, err := IsOptimistic(ctx, st, mockOptSyncFetcher)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, o)
|
||||
})
|
||||
t.Run("not optimistic", func(t *testing.T) {
|
||||
mockHeadFetcher := &chainmock.ChainService{Optimistic: false}
|
||||
o, err := IsOptimistic(ctx, st, mockHeadFetcher)
|
||||
mockOptSyncFetcher := &chainmock.ChainService{Optimistic: false}
|
||||
o, err := IsOptimistic(ctx, st, mockOptSyncFetcher)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, o)
|
||||
})
|
||||
|
||||
@@ -13,14 +13,15 @@ import (
|
||||
// Server defines a server implementation of the gRPC Validator service,
|
||||
// providing RPC endpoints intended for validator clients.
|
||||
type Server struct {
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
HeadUpdater blockchain.HeadUpdater
|
||||
TimeFetcher blockchain.TimeFetcher
|
||||
SyncChecker sync.Checker
|
||||
AttestationsPool attestations.Pool
|
||||
PeerManager p2p.PeerManager
|
||||
Broadcaster p2p.Broadcaster
|
||||
StateFetcher statefetcher.Fetcher
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
V1Alpha1Server *v1alpha1validator.Server
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
HeadUpdater blockchain.HeadUpdater
|
||||
TimeFetcher blockchain.TimeFetcher
|
||||
SyncChecker sync.Checker
|
||||
AttestationsPool attestations.Pool
|
||||
PeerManager p2p.PeerManager
|
||||
Broadcaster p2p.Broadcaster
|
||||
StateFetcher statefetcher.Fetcher
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
V1Alpha1Server *v1alpha1validator.Server
|
||||
}
|
||||
|
||||
@@ -58,7 +58,7 @@ func (vs *Server) GetAttesterDuties(ctx context.Context, req *ethpbv1.AttesterDu
|
||||
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
|
||||
}
|
||||
|
||||
isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.HeadFetcher)
|
||||
isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.OptimisticModeFetcher)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
|
||||
}
|
||||
@@ -142,7 +142,7 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
|
||||
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
|
||||
}
|
||||
|
||||
isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.HeadFetcher)
|
||||
isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.OptimisticModeFetcher)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
|
||||
}
|
||||
@@ -258,7 +258,7 @@ func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncC
|
||||
return nil, status.Errorf(codes.Internal, "Could not get duties: %v", err)
|
||||
}
|
||||
|
||||
isOptimistic, err := rpchelpers.IsOptimistic(ctx, st, vs.HeadFetcher)
|
||||
isOptimistic, err := rpchelpers.IsOptimistic(ctx, st, vs.OptimisticModeFetcher)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
|
||||
}
|
||||
@@ -348,6 +348,76 @@ func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockR
|
||||
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
|
||||
}
|
||||
|
||||
// ProduceBlockV2SSZ requests the beacon node to produce a valid unsigned beacon block, which can then be signed by a proposer and submitted.
|
||||
//
|
||||
// The produced block is in SSZ form.
|
||||
func (vs *Server) ProduceBlockV2SSZ(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.SSZContainer, error) {
|
||||
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV2SSZ")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
v1alpha1req := &ethpbalpha.BlockRequest{
|
||||
Slot: req.Slot,
|
||||
RandaoReveal: req.RandaoReveal,
|
||||
Graffiti: req.Graffiti,
|
||||
}
|
||||
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
|
||||
if err != nil {
|
||||
// We simply return err because it's already of a gRPC error type.
|
||||
return nil, err
|
||||
}
|
||||
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := block.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
|
||||
}
|
||||
return &ethpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_PHASE0,
|
||||
Data: sszBlock,
|
||||
}, nil
|
||||
}
|
||||
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := block.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
|
||||
}
|
||||
return &ethpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_ALTAIR,
|
||||
Data: sszBlock,
|
||||
}, nil
|
||||
}
|
||||
bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1BeaconBlockBellatrixToV2(bellatrixBlock.Bellatrix)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := block.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
|
||||
}
|
||||
return &ethpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_BELLATRIX,
|
||||
Data: sszBlock,
|
||||
}, nil
|
||||
}
|
||||
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
|
||||
}
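ProduceBlockV2SSZ handles each known block variant of the v1alpha1 response, marshals it to SSZ, and tags the container with the fork version. The sketch below mirrors that type-switch pattern with hypothetical local types standing in for the GenericBeaconBlock oneof and MarshalSSZ:

```go
package main

import (
	"errors"
	"fmt"
)

// genericBlock stands in for the oneof wrapper returned by the v1alpha1
// server in the diff; the concrete variants below are illustrative.
type genericBlock interface{ isGenericBlock() }

type phase0Block struct{ slot uint64 }
type altairBlock struct{ slot uint64 }
type bellatrixBlock struct{ slot uint64 }

func (phase0Block) isGenericBlock()    {}
func (altairBlock) isGenericBlock()    {}
func (bellatrixBlock) isGenericBlock() {}

type sszContainer struct {
	Version string
	Data    []byte
}

// toSSZ mirrors the pattern in ProduceBlockV2SSZ: handle each known
// variant, marshal it, and reject anything else.
func toSSZ(b genericBlock) (*sszContainer, error) {
	marshal := func(slot uint64) []byte { return []byte{byte(slot)} } // stand-in for MarshalSSZ
	switch blk := b.(type) {
	case phase0Block:
		return &sszContainer{Version: "phase0", Data: marshal(blk.slot)}, nil
	case altairBlock:
		return &sszContainer{Version: "altair", Data: marshal(blk.slot)}, nil
	case bellatrixBlock:
		return &sszContainer{Version: "bellatrix", Data: marshal(blk.slot)}, nil
	default:
		return nil, errors.New("unsupported block type")
	}
}

func main() {
	c, err := toSSZ(altairBlock{slot: 7})
	if err != nil {
		panic(err)
	}
	fmt.Println(c.Version, len(c.Data))
}
```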
|
||||
|
||||
// ProduceBlindedBlock requests the beacon node to produce a valid unsigned blinded beacon block,
|
||||
// which can then be signed by a proposer and submitted.
|
||||
//
|
||||
@@ -413,6 +483,79 @@ func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceB
|
||||
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
|
||||
}
|
||||
|
||||
// ProduceBlindedBlockSSZ requests the beacon node to produce a valid unsigned blinded beacon block,
|
||||
// which can then be signed by a proposer and submitted.
|
||||
//
|
||||
// The produced block is in SSZ form.
|
||||
//
|
||||
// Pre-Bellatrix, this endpoint will return a regular block.
|
||||
func (vs *Server) ProduceBlindedBlockSSZ(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.SSZContainer, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlindedBlockSSZ")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
v1alpha1req := &ethpbalpha.BlockRequest{
|
||||
Slot: req.Slot,
|
||||
RandaoReveal: req.RandaoReveal,
|
||||
Graffiti: req.Graffiti,
|
||||
}
|
||||
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
|
||||
if err != nil {
|
||||
// We simply return err because it's already of a gRPC error type.
|
||||
return nil, err
|
||||
}
|
||||
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := block.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
|
||||
}
|
||||
return &ethpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_PHASE0,
|
||||
Data: sszBlock,
|
||||
}, nil
|
||||
}
|
||||
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := block.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
|
||||
}
|
||||
return &ethpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_ALTAIR,
|
||||
Data: sszBlock,
|
||||
}, nil
|
||||
}
|
||||
bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
|
||||
if ok {
|
||||
block, err := migration.V1Alpha1BeaconBlockBellatrixToV2Blinded(bellatrixBlock.Bellatrix)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := block.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
|
||||
}
|
||||
return &ethpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_BELLATRIX,
|
||||
Data: sszBlock,
|
||||
}, nil
|
||||
}
|
||||
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
|
||||
}
|
||||
|
||||
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
|
||||
func (vs *Server) PrepareBeaconProposer(
|
||||
ctx context.Context, request *ethpbv1.PrepareBeaconProposerRequest,
|
||||
|
||||
(File diff suppressed because it is too large.)
@@ -178,9 +178,10 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
as := &Server{
|
||||
SyncChecker: &mockSync.Sync{},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
HeadFetcher: &mock.ChainService{Optimistic: true},
|
||||
SyncChecker: &mockSync.Sync{},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
HeadFetcher: &mock.ChainService{},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
|
||||
}
|
||||
_, err := as.GetAttestationData(context.Background(), &ethpb.AttestationDataRequest{})
|
||||
s, ok := status.FromError(err)
|
||||
@@ -191,10 +192,11 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
as = &Server{
|
||||
SyncChecker: &mockSync.Sync{},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState},
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
SyncChecker: &mockSync.Sync{},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
|
||||
AttestationCache: cache.NewAttestationCache(),
|
||||
}
|
||||
_, err = as.GetAttestationData(context.Background(), &ethpb.AttestationDataRequest{})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -120,18 +120,20 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx

feeRecipient := params.BeaconConfig().DefaultFeeRecipient
recipient, err := vs.BeaconDB.FeeRecipientByValidatorID(ctx, vIdx)
burnAddr := bytesutil.PadTo([]byte{}, fieldparams.FeeRecipientLength)
switch err == nil {
case true:
feeRecipient = recipient
case errors.As(err, kv.ErrNotFoundFeeRecipient):
// If fee recipient is not found in DB and not set from beacon node CLI,
// use the burn address.
if bytes.Equal(feeRecipient.Bytes(), burnAddr) {
if feeRecipient.String() == fieldparams.EthBurnAddressHex {
logrus.WithFields(logrus.Fields{
"validatorIndex": vIdx,
"burnAddress": burnAddr,
}).Error("Fee recipient not set. Using burn address")
"burnAddress": fieldparams.EthBurnAddressHex,
}).Warn("Fee recipient is currently using the burn address, " +
"you will not be rewarded transaction fees on this setting. " +
"Please set a different eth address as the fee recipient. " +
"Please refer to our documentation for instructions")
}
default:
return nil, errors.Wrap(err, "could not get fee recipient in db")
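The updated branch compares the configured fee recipient against the burn address by its hex string and downgrades the log from an error to an operator warning. A small sketch of that check, using a local constant in place of fieldparams.EthBurnAddressHex:

```go
package main

import (
	"log"
	"strings"
)

// ethBurnAddressHex is the conventional zero address; the constant name
// mirrors fieldparams.EthBurnAddressHex from the diff but is local here.
const ethBurnAddressHex = "0x0000000000000000000000000000000000000000"

// warnIfBurnAddress reproduces the intent of the updated check: comparing
// the configured fee recipient against the burn address by its hex string
// rather than by raw bytes, and warning the operator instead of erroring.
func warnIfBurnAddress(feeRecipient string, validatorIndex uint64) {
	if strings.EqualFold(feeRecipient, ethBurnAddressHex) {
		log.Printf("validator %d: fee recipient is the burn address; "+
			"transaction fees will not be credited, set a real address", validatorIndex)
	}
}

func main() {
	warnIfBurnAddress("0x0000000000000000000000000000000000000000", 42)
}
```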
|
||||
|
||||
@@ -2413,7 +2413,7 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
|
||||
assert.DeepEqual(t, randaoReveal, bellatrixBlk.Bellatrix.Body.RandaoReveal, "Expected block to have correct randao reveal")
|
||||
assert.DeepEqual(t, req.Graffiti, bellatrixBlk.Bellatrix.Body.Graffiti, "Expected block to have correct Graffiti")
|
||||
|
||||
require.LogsContain(t, hook, "Fee recipient not set. Using burn address")
|
||||
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
|
||||
require.DeepEqual(t, payload, bellatrixBlk.Bellatrix.Body.ExecutionPayload) // Payload should equal.
|
||||
|
||||
// Operator sets default fee recipient to not be burned through beacon node cli.
|
||||
@@ -2424,7 +2424,7 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
_, err = proposerServer.GetBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.LogsDoNotContain(t, newHook, "Fee recipient not set. Using burn address")
|
||||
require.LogsDoNotContain(t, newHook, "Fee recipient is currently using the burn address")
|
||||
}
|
||||
|
||||
func TestProposer_GetBeaconBlock_Optimistic(t *testing.T) {
|
||||
@@ -2437,7 +2437,7 @@ func TestProposer_GetBeaconBlock_Optimistic(t *testing.T) {
|
||||
bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
proposerServer := &Server{HeadFetcher: &mock.ChainService{Optimistic: true}, TimeFetcher: &mock.ChainService{}}
|
||||
proposerServer := &Server{OptimisticModeFetcher: &mock.ChainService{Optimistic: true}, TimeFetcher: &mock.ChainService{}}
|
||||
req := &ethpb.BlockRequest{
|
||||
Slot: bellatrixSlot + 1,
|
||||
}
|
||||
|
||||
@@ -51,6 +51,7 @@ type Server struct {
|
||||
DepositFetcher depositcache.DepositFetcher
|
||||
ChainStartFetcher powchain.ChainStartFetcher
|
||||
Eth1InfoFetcher powchain.ChainInfoFetcher
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
SyncChecker sync.Checker
|
||||
StateNotifier statefeed.Notifier
|
||||
BlockNotifier blockfeed.Notifier
|
||||
|
||||
@@ -255,7 +255,7 @@ func (vs *Server) optimisticStatus(ctx context.Context) error {
if slots.ToEpoch(vs.TimeFetcher.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
return nil
}
optimistic, err := vs.HeadFetcher.IsOptimistic(ctx)
optimistic, err := vs.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
return status.Errorf(codes.Internal, "Could not determine if the node is a optimistic node: %v", err)
}
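optimisticStatus is a no-op before the Bellatrix fork epoch and otherwise refuses to serve while the head is optimistic, now asking OptimisticModeFetcher instead of HeadFetcher. A standalone sketch of that gate with hypothetical inputs:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

var errOptimisticMode = errors.New("node is currently optimistic and cannot serve validator duties")

// optimisticStatus sketches the gate in the diff: before the Bellatrix fork
// epoch the check is a no-op, afterwards an optimistic head blocks the call.
// currentEpoch, bellatrixEpoch and isOptimistic are hypothetical inputs.
func optimisticStatus(ctx context.Context, currentEpoch, bellatrixEpoch uint64,
	isOptimistic func(context.Context) (bool, error)) error {
	if currentEpoch < bellatrixEpoch {
		return nil
	}
	optimistic, err := isOptimistic(ctx)
	if err != nil {
		return fmt.Errorf("could not determine optimistic status: %w", err)
	}
	if optimistic {
		return errOptimisticMode
	}
	return nil
}

func main() {
	err := optimisticStatus(context.Background(), 5, 2,
		func(context.Context) (bool, error) { return true, nil })
	fmt.Println(err)
}
```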
|
||||
|
||||
@@ -603,7 +603,7 @@ func TestActivationStatus_OK(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestOptimisticStatus(t *testing.T) {
|
||||
server := &Server{HeadFetcher: &mockChain.ChainService{}, TimeFetcher: &mockChain.ChainService{}}
|
||||
server := &Server{OptimisticModeFetcher: &mockChain.ChainService{}, TimeFetcher: &mockChain.ChainService{}}
|
||||
err := server.optimisticStatus(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -612,14 +612,14 @@ func TestOptimisticStatus(t *testing.T) {
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
server = &Server{HeadFetcher: &mockChain.ChainService{Optimistic: true}, TimeFetcher: &mockChain.ChainService{}}
|
||||
server = &Server{OptimisticModeFetcher: &mockChain.ChainService{Optimistic: true}, TimeFetcher: &mockChain.ChainService{}}
|
||||
err = server.optimisticStatus(context.Background())
|
||||
s, ok := status.FromError(err)
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, codes.Unavailable, s.Code())
|
||||
require.ErrorContains(t, errOptimisticMode.Error(), err)
|
||||
|
||||
server = &Server{HeadFetcher: &mockChain.ChainService{Optimistic: false}, TimeFetcher: &mockChain.ChainService{}}
|
||||
server = &Server{OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false}, TimeFetcher: &mockChain.ChainService{}}
|
||||
err = server.optimisticStatus(context.Background())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -42,8 +42,9 @@ func TestGetSyncMessageBlockRoot_Optimistic(t *testing.T) {
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
server := &Server{
|
||||
HeadFetcher: &mock.ChainService{Optimistic: true},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
HeadFetcher: &mock.ChainService{},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
|
||||
}
|
||||
_, err := server.GetSyncMessageBlockRoot(context.Background(), &emptypb.Empty{})
|
||||
s, ok := status.FromError(err)
|
||||
@@ -52,8 +53,9 @@ func TestGetSyncMessageBlockRoot_Optimistic(t *testing.T) {
|
||||
require.ErrorContains(t, errOptimisticMode.Error(), err)
|
||||
|
||||
server = &Server{
|
||||
HeadFetcher: &mock.ChainService{Optimistic: false},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
HeadFetcher: &mock.ChainService{},
|
||||
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
|
||||
}
|
||||
_, err = server.GetSyncMessageBlockRoot(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -111,6 +111,7 @@ type Config struct {
|
||||
MaxMsgSize int
|
||||
ExecutionEngineCaller powchain.EngineCaller
|
||||
ProposerIdsCache *cache.ProposerPayloadIDsCache
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
}
|
||||
|
||||
// NewService instantiates a new RPC service instance that will
|
||||
@@ -198,6 +199,7 @@ func (s *Service) Start() {
|
||||
DepositFetcher: s.cfg.DepositFetcher,
|
||||
ChainStartFetcher: s.cfg.ChainStartFetcher,
|
||||
Eth1InfoFetcher: s.cfg.POWChainService,
|
||||
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
|
||||
SyncChecker: s.cfg.SyncService,
|
||||
StateNotifier: s.cfg.StateNotifier,
|
||||
BlockNotifier: s.cfg.BlockNotifier,
|
||||
@@ -231,7 +233,8 @@ func (s *Service) Start() {
|
||||
StateGenService: s.cfg.StateGen,
|
||||
ReplayerBuilder: ch,
|
||||
},
|
||||
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
|
||||
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
|
||||
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
|
||||
}
|
||||
|
||||
nodeServer := &nodev1alpha1.Server{
|
||||
@@ -301,6 +304,7 @@ func (s *Service) Start() {
|
||||
StateGenService: s.cfg.StateGen,
|
||||
ReplayerBuilder: ch,
|
||||
},
|
||||
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
VoluntaryExitsPool: s.cfg.ExitPool,
|
||||
V1Alpha1ValidatorServer: validatorServer,
|
||||
@@ -339,6 +343,7 @@ func (s *Service) Start() {
|
||||
StateGenService: s.cfg.StateGen,
|
||||
ReplayerBuilder: ch,
|
||||
},
|
||||
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
|
||||
}
|
||||
ethpbv1alpha1.RegisterDebugServer(s.grpcServer, debugServer)
|
||||
ethpbservice.RegisterBeaconDebugServer(s.grpcServer, debugServerV1)
|
||||
|
||||
@@ -154,7 +154,8 @@ func (b *BeaconState) NumValidators() int {
|
||||
}
|
||||
|
||||
// ReadFromEveryValidator reads values from every validator and applies it to the provided function.
|
||||
// Warning: This method is potentially unsafe, as it exposes the actual validator registry.
|
||||
//
|
||||
// WARNING: This method is potentially unsafe, as it exposes the actual validator registry.
|
||||
func (b *BeaconState) ReadFromEveryValidator(f func(idx int, val state.ReadOnlyValidator) error) error {
|
||||
if b.validators == nil {
|
||||
return errors.New("nil validators in state")
|
||||
|
||||
@@ -541,6 +541,7 @@ func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
|
||||
}
|
||||
|
||||
// Initializes the Merkle layers for the beacon state if they are empty.
|
||||
//
|
||||
// WARNING: Caller must acquire the mutex before using.
|
||||
func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
|
||||
if len(b.merkleLayers) > 0 {
|
||||
@@ -565,6 +566,7 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// Recomputes the Merkle layers for the dirty fields in the state.
|
||||
//
|
||||
// WARNING: Caller must acquire the mutex before using.
|
||||
func (b *BeaconState) recomputeDirtyFields(ctx context.Context) error {
|
||||
for field := range b.dirtyFields {
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
// PreviousEpochAttestations corresponding to blocks on the beacon chain.
|
||||
func (b *BeaconState) PreviousEpochAttestations() ([]*ethpb.PendingAttestation, error) {
|
||||
if !b.hasInnerState() {
|
||||
return nil, nil
|
||||
return nil, ErrNilInnerState
|
||||
}
|
||||
if b.state.PreviousEpochAttestations == nil {
|
||||
return nil, nil
|
||||
@@ -32,7 +32,7 @@ func (b *BeaconState) previousEpochAttestations() []*ethpb.PendingAttestation {
|
||||
// CurrentEpochAttestations corresponding to blocks on the beacon chain.
|
||||
func (b *BeaconState) CurrentEpochAttestations() ([]*ethpb.PendingAttestation, error) {
|
||||
if !b.hasInnerState() {
|
||||
return nil, nil
|
||||
return nil, ErrNilInnerState
|
||||
}
|
||||
if b.state.CurrentEpochAttestations == nil {
|
||||
return nil, nil
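These getters stop returning (nil, nil) for a nil inner state and return the ErrNilInnerState sentinel instead, which the updated tests match with require.ErrorIs. A minimal sketch of the sentinel-error pattern with a hypothetical state type:

```go
package main

import (
	"errors"
	"fmt"
)

// errNilInnerState plays the role of ErrNilInnerState in the diff: a
// sentinel callers can match with errors.Is instead of receiving (nil, nil).
var errNilInnerState = errors.New("nil inner state")

type beaconState struct {
	inner *struct{ attestations [][]byte }
}

// PreviousEpochAttestations now fails loudly when the wrapped state is nil,
// rather than silently returning a nil slice and a nil error.
func (b *beaconState) PreviousEpochAttestations() ([][]byte, error) {
	if b.inner == nil {
		return nil, errNilInnerState
	}
	return b.inner.attestations, nil
}

func main() {
	var st beaconState
	_, err := st.PreviousEpochAttestations()
	fmt.Println(errors.Is(err, errNilInnerState)) // true
}
```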
|
||||
|
||||
@@ -56,9 +56,9 @@ func TestNilState_NoPanic(t *testing.T) {
|
||||
_ = st.RandaoMixesLength()
|
||||
_ = st.Slashings()
|
||||
_, err = st.PreviousEpochAttestations()
|
||||
require.NoError(t, err)
|
||||
require.ErrorIs(t, ErrNilInnerState, err)
|
||||
_, err = st.CurrentEpochAttestations()
|
||||
require.NoError(t, err)
|
||||
require.ErrorIs(t, ErrNilInnerState, err)
|
||||
_ = st.JustificationBits()
|
||||
_ = st.PreviousJustifiedCheckpoint()
|
||||
_ = st.CurrentJustifiedCheckpoint()
|
||||
|
||||
@@ -174,7 +174,8 @@ func (b *BeaconState) NumValidators() int {
|
||||
}
|
||||
|
||||
// ReadFromEveryValidator reads values from every validator and applies it to the provided function.
|
||||
// Warning: This method is potentially unsafe, as it exposes the actual validator registry.
|
||||
//
|
||||
// WARNING: This method is potentially unsafe, as it exposes the actual validator registry.
|
||||
func (b *BeaconState) ReadFromEveryValidator(f func(idx int, val state.ReadOnlyValidator) error) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
|
||||
@@ -213,6 +213,7 @@ func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
|
||||
}
|
||||
|
||||
// Initializes the Merkle layers for the beacon state if they are empty.
|
||||
//
|
||||
// WARNING: Caller must acquire the mutex before using.
|
||||
func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
|
||||
if len(b.merkleLayers) > 0 {
|
||||
@@ -229,6 +230,7 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// Recomputes the Merkle layers for the dirty fields in the state.
|
||||
//
|
||||
// WARNING: Caller must acquire the mutex before using.
|
||||
func (b *BeaconState) recomputeDirtyFields(ctx context.Context) error {
|
||||
for field := range b.dirtyFields {
|
||||
|
||||
@@ -3,7 +3,7 @@ package v2
|
||||
// CurrentEpochParticipation corresponding to participation bits on the beacon chain.
|
||||
func (b *BeaconState) CurrentEpochParticipation() ([]byte, error) {
|
||||
if !b.hasInnerState() {
|
||||
return nil, nil
|
||||
return nil, ErrNilInnerState
|
||||
}
|
||||
if b.state.CurrentEpochParticipation == nil {
|
||||
return nil, nil
|
||||
@@ -18,7 +18,7 @@ func (b *BeaconState) CurrentEpochParticipation() ([]byte, error) {
|
||||
// PreviousEpochParticipation corresponding to participation bits on the beacon chain.
|
||||
func (b *BeaconState) PreviousEpochParticipation() ([]byte, error) {
|
||||
if !b.hasInnerState() {
|
||||
return nil, nil
|
||||
return nil, ErrNilInnerState
|
||||
}
|
||||
if b.state.PreviousEpochParticipation == nil {
|
||||
return nil, nil
|
||||
|
||||
@@ -28,7 +28,7 @@ func (b *BeaconState) nextSyncCommittee() *ethpb.SyncCommittee {
|
||||
// CurrentSyncCommittee of the current sync committee in beacon chain state.
|
||||
func (b *BeaconState) CurrentSyncCommittee() (*ethpb.SyncCommittee, error) {
|
||||
if !b.hasInnerState() {
|
||||
return nil, nil
|
||||
return nil, ErrNilInnerState
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
@@ -44,7 +44,7 @@ func (b *BeaconState) CurrentSyncCommittee() (*ethpb.SyncCommittee, error) {
|
||||
// NextSyncCommittee of the next sync committee in beacon chain state.
|
||||
func (b *BeaconState) NextSyncCommittee() (*ethpb.SyncCommittee, error) {
|
||||
if !b.hasInnerState() {
|
||||
return nil, nil
|
||||
return nil, ErrNilInnerState
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
testtmpl "github.com/prysmaticlabs/prysm/beacon-chain/state/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func TestBeaconState_SlotDataRace(t *testing.T) {
|
||||
@@ -51,28 +52,28 @@ func TestNilState_NoPanic(t *testing.T) {
|
||||
_ = st.BalancesLength()
|
||||
_ = st.RandaoMixes()
|
||||
_, err = st.RandaoMixAtIndex(0)
|
||||
_ = err
|
||||
require.ErrorIs(t, ErrNilInnerState, err)
|
||||
_ = st.RandaoMixesLength()
|
||||
_ = st.Slashings()
|
||||
_, err = st.CurrentEpochParticipation()
|
||||
_ = err
|
||||
require.ErrorIs(t, ErrNilInnerState, err)
|
||||
_, err = st.PreviousEpochParticipation()
|
||||
_ = err
|
||||
require.ErrorIs(t, ErrNilInnerState, err)
|
||||
_ = st.JustificationBits()
|
||||
_ = err
|
||||
_ = st.PreviousJustifiedCheckpoint()
|
||||
_ = st.CurrentJustifiedCheckpoint()
|
||||
_ = st.FinalizedCheckpoint()
|
||||
_, err = st.CurrentEpochParticipation()
|
||||
_ = err
|
||||
require.ErrorIs(t, ErrNilInnerState, err)
|
||||
_, err = st.PreviousEpochParticipation()
|
||||
_ = err
|
||||
require.ErrorIs(t, ErrNilInnerState, err)
|
||||
_, err = st.InactivityScores()
|
||||
_ = err
|
||||
_, err = st.CurrentSyncCommittee()
|
||||
_ = err
|
||||
require.ErrorIs(t, ErrNilInnerState, err)
|
||||
_, err = st.NextSyncCommittee()
|
||||
_ = err
|
||||
require.ErrorIs(t, ErrNilInnerState, err)
|
||||
}
|
||||
|
||||
func TestBeaconState_MatchCurrentJustifiedCheckpt(t *testing.T) {
|
||||
|
||||
@@ -175,7 +175,8 @@ func (b *BeaconState) NumValidators() int {
|
||||
}
|
||||
|
||||
// ReadFromEveryValidator reads values from every validator and applies it to the provided function.
|
||||
// Warning: This method is potentially unsafe, as it exposes the actual validator registry.
|
||||
//
|
||||
// WARNING: This method is potentially unsafe, as it exposes the actual validator registry.
|
||||
func (b *BeaconState) ReadFromEveryValidator(f func(idx int, val state.ReadOnlyValidator) error) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
|
||||
@@ -218,6 +218,7 @@ func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
|
||||
}
|
||||
|
||||
// Initializes the Merkle layers for the beacon state if they are empty.
|
||||
//
|
||||
// WARNING: Caller must acquire the mutex before using.
|
||||
func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
|
||||
if len(b.merkleLayers) > 0 {
|
||||
@@ -234,6 +235,7 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// Recomputes the Merkle layers for the dirty fields in the state.
|
||||
//
|
||||
// WARNING: Caller must acquire the mutex before using.
|
||||
func (b *BeaconState) recomputeDirtyFields(ctx context.Context) error {
|
||||
for field := range b.dirtyFields {
|
||||
|
||||
@@ -3,7 +3,7 @@ package v3
|
||||
// CurrentEpochParticipation corresponding to participation bits on the beacon chain.
|
||||
func (b *BeaconState) CurrentEpochParticipation() ([]byte, error) {
|
||||
if !b.hasInnerState() {
|
||||
return nil, nil
|
||||
return nil, ErrNilInnerState
|
||||
}
|
||||
if b.state.CurrentEpochParticipation == nil {
|
||||
return nil, nil
|
||||
@@ -18,7 +18,7 @@ func (b *BeaconState) CurrentEpochParticipation() ([]byte, error) {
|
||||
// PreviousEpochParticipation corresponding to participation bits on the beacon chain.
|
||||
func (b *BeaconState) PreviousEpochParticipation() ([]byte, error) {
|
||||
if !b.hasInnerState() {
|
||||
return nil, nil
|
||||
return nil, ErrNilInnerState
|
||||
}
|
||||
if b.state.PreviousEpochParticipation == nil {
|
||||
return nil, nil
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
// LatestExecutionPayloadHeader of the beacon state.
|
||||
func (b *BeaconState) LatestExecutionPayloadHeader() (*ethpb.ExecutionPayloadHeader, error) {
|
||||
if !b.hasInnerState() {
|
||||
return nil, nil
|
||||
return nil, ErrNilInnerState
|
||||
}
|
||||
if b.state.LatestExecutionPayloadHeader == nil {
|
||||
return nil, nil
|
||||
|
||||
@@ -28,7 +28,7 @@ func (b *BeaconState) nextSyncCommittee() *ethpb.SyncCommittee {
|
||||
// CurrentSyncCommittee of the current sync committee in beacon chain state.
|
||||
func (b *BeaconState) CurrentSyncCommittee() (*ethpb.SyncCommittee, error) {
|
||||
if !b.hasInnerState() {
|
||||
return nil, nil
|
||||
return nil, ErrNilInnerState
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
@@ -44,7 +44,7 @@ func (b *BeaconState) CurrentSyncCommittee() (*ethpb.SyncCommittee, error) {
|
||||
// NextSyncCommittee of the next sync committee in beacon chain state.
|
||||
func (b *BeaconState) NextSyncCommittee() (*ethpb.SyncCommittee, error) {
|
||||
if !b.hasInnerState() {
|
||||
return nil, nil
|
||||
return nil, ErrNilInnerState
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
testtmpl "github.com/prysmaticlabs/prysm/beacon-chain/state/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func TestBeaconState_SlotDataRace(t *testing.T) {
|
||||
@@ -55,23 +56,26 @@ func TestNilState_NoPanic(t *testing.T) {
|
||||
_ = st.RandaoMixesLength()
|
||||
_ = st.Slashings()
|
||||
_, err = st.CurrentEpochParticipation()
|
||||
_ = err
|
||||
require.ErrorIs(t, ErrNilInnerState, err)
|
||||
_, err = st.PreviousEpochParticipation()
|
||||
_ = err
|
||||
require.ErrorIs(t, ErrNilInnerState, err)
|
||||
_ = st.JustificationBits()
|
||||
_ = st.PreviousJustifiedCheckpoint()
|
||||
_ = st.CurrentJustifiedCheckpoint()
|
||||
_ = st.FinalizedCheckpoint()
|
||||
_, err = st.CurrentEpochParticipation()
|
||||
_ = err
|
||||
require.ErrorIs(t, ErrNilInnerState, err)
|
||||
_, err = st.PreviousEpochParticipation()
|
||||
_ = err
|
||||
require.ErrorIs(t, ErrNilInnerState, err)
|
||||
_, err = st.InactivityScores()
|
||||
_ = err
|
||||
_, err = st.CurrentSyncCommittee()
|
||||
_ = err
|
||||
require.ErrorIs(t, ErrNilInnerState, err)
|
||||
_, err = st.NextSyncCommittee()
|
||||
_ = err
|
||||
require.ErrorIs(t, ErrNilInnerState, err)
|
||||
_, err = st.LatestExecutionPayloadHeader()
|
||||
require.ErrorIs(t, ErrNilInnerState, err)
|
||||
|
||||
}
|
||||
|
||||
func TestBeaconState_MatchCurrentJustifiedCheckpt(t *testing.T) {
|
||||
|
||||
@@ -175,7 +175,8 @@ func (b *BeaconState) NumValidators() int {
|
||||
}
|
||||
|
||||
// ReadFromEveryValidator reads values from every validator and applies it to the provided function.
|
||||
// Warning: This method is potentially unsafe, as it exposes the actual validator registry.
|
||||
//
|
||||
// WARNING: This method is potentially unsafe, as it exposes the actual validator registry.
|
||||
func (b *BeaconState) ReadFromEveryValidator(f func(idx int, val state.ReadOnlyValidator) error) error {
|
||||
if !b.hasInnerState() {
|
||||
return ErrNilInnerState
|
||||
|
||||
@@ -218,6 +218,7 @@ func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
|
||||
}
|
||||
|
||||
// Initializes the Merkle layers for the beacon state if they are empty.
|
||||
//
|
||||
// WARNING: Caller must acquire the mutex before using.
|
||||
func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
|
||||
if len(b.merkleLayers) > 0 {
|
||||
@@ -234,6 +235,7 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// Recomputes the Merkle layers for the dirty fields in the state.
|
||||
//
|
||||
// WARNING: Caller must acquire the mutex before using.
|
||||
func (b *BeaconState) recomputeDirtyFields(_ context.Context) error {
|
||||
for field := range b.dirtyFields {
|
||||
|
||||
@@ -223,6 +223,7 @@ go_test(
|
||||
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
|
||||
"@com_github_patrickmn_go_cache//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
|
||||
@@ -142,6 +142,7 @@ func (s *Service) processFetchedDataRegSync(
|
||||
|
||||
blockReceiver := s.cfg.Chain.ReceiveBlock
|
||||
invalidBlocks := 0
|
||||
blksWithoutParentCount := 0
|
||||
for _, blk := range data.blocks {
|
||||
if err := s.processBlock(ctx, genesis, blk, blockReceiver); err != nil {
|
||||
switch {
|
||||
@@ -149,7 +150,7 @@ func (s *Service) processFetchedDataRegSync(
|
||||
log.WithError(err).Debug("Block is not processed")
|
||||
invalidBlocks++
|
||||
case errors.Is(err, errParentDoesNotExist):
|
||||
log.WithError(err).Debug("Block is not processed")
|
||||
blksWithoutParentCount++
|
||||
invalidBlocks++
|
||||
default:
|
||||
log.WithError(err).Warn("Block is not processed")
|
||||
@@ -157,6 +158,13 @@ func (s *Service) processFetchedDataRegSync(
|
||||
continue
|
||||
}
|
||||
}
|
||||
if blksWithoutParentCount > 0 {
|
||||
log.WithFields(logrus.Fields{
|
||||
"missingParent": fmt.Sprintf("%#x", data.blocks[0].Block().ParentRoot()),
|
||||
"firstSlot": data.blocks[0].Block().Slot(),
|
||||
"lastSlot": data.blocks[blksWithoutParentCount-1].Block().Slot(),
|
||||
}).Debug("Could not process batch blocks due to missing parent")
|
||||
}
|
||||
// Add more visible logging if all blocks cannot be processed.
|
||||
if len(data.blocks) == invalidBlocks {
|
||||
log.WithField("error", "Range had no valid blocks to process").Warn("Range is not processed")
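The sync change counts blocks whose parent is missing and emits one summary log line for the batch instead of warning per block. A rough sketch of that tally-and-summarize pattern; the sentinel error and the block representation here are hypothetical:

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

var errParentDoesNotExist = errors.New("parent does not exist") // stand-in sentinel

// processBatch sketches the counting in the diff: per-block failures are
// tallied, missing-parent failures get their own counter, and a single
// summary line is logged instead of one warning per block.
func processBatch(blocks []int, process func(int) error) {
	invalid, withoutParent := 0, 0
	for _, blk := range blocks {
		if err := process(blk); err != nil {
			switch {
			case errors.Is(err, errParentDoesNotExist):
				withoutParent++
				invalid++
			default:
				invalid++
			}
		}
	}
	if withoutParent > 0 {
		log.Printf("could not process %d blocks due to missing parent", withoutParent)
	}
	if invalid == len(blocks) {
		log.Print("range had no valid blocks to process")
	}
	fmt.Println("invalid:", invalid)
}

func main() {
	processBatch([]int{1, 2, 3}, func(int) error { return errParentDoesNotExist })
}
```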
|
||||
|
||||
@@ -95,6 +95,7 @@ type blockchainService interface {
|
||||
blockchain.TimeFetcher
|
||||
blockchain.GenesisFetcher
|
||||
blockchain.CanonicalFetcher
|
||||
blockchain.OptimisticModeFetcher
|
||||
blockchain.SlashingReceiver
|
||||
}
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
}

if err := s.cfg.chain.ReceiveBlock(ctx, signed, root); err != nil {
if !errors.Is(err, powchain.ErrHTTPTimeout) && !errors.Is(blockchain.ErrUndefinedExecutionEngineError, err) {
if !errors.Is(err, powchain.ErrHTTPTimeout) && !errors.Is(err, blockchain.ErrUndefinedExecutionEngineError) {
interop.WriteBlockToDisk(signed, true /*failed*/)
s.setBadBlock(ctx, root)
}
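The small fix above is about argument order: errors.Is(err, target) unwraps its first argument, so a wrapped ErrUndefinedExecutionEngineError only matches when the received error comes first. A short demonstration with a local sentinel:

```go
package main

import (
	"errors"
	"fmt"
)

var errUndefinedEngine = errors.New("undefined execution engine error") // stand-in sentinel

func main() {
	// A wrapped error, as produced by errors.WithMessage or fmt.Errorf with %w.
	wrapped := fmt.Errorf("processing block: %w", errUndefinedEngine)

	// errors.Is unwraps its first argument while matching against the second,
	// so the argument order fixed in the diff changes the result for wrapped errors.
	fmt.Println(errors.Is(wrapped, errUndefinedEngine)) // true
	fmt.Println(errors.Is(errUndefinedEngine, wrapped)) // false
}
```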
|
||||
|
||||
@@ -4,7 +4,9 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
@@ -125,3 +127,21 @@ func TestService_BeaconBlockSubscribe_ExecutionEngineTimesOut(t *testing.T) {
|
||||
require.Equal(t, 0, len(s.badBlockCache.Keys()))
|
||||
require.Equal(t, 1, len(s.seenBlockCache.Keys()))
|
||||
}
|
||||
|
||||
func TestService_BeaconBlockSubscribe_UndefinedEeError(t *testing.T) {
|
||||
msg := "timeout"
|
||||
err := errors.WithMessage(blockchain.ErrUndefinedExecutionEngineError, msg)
|
||||
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
chain: &chainMock.ChainService{
|
||||
ReceiveBlockMockErr: err,
|
||||
},
|
||||
},
|
||||
seenBlockCache: lruwrpr.New(10),
|
||||
badBlockCache: lruwrpr.New(10),
|
||||
}
|
||||
require.ErrorIs(t, s.beaconBlockSubscriber(context.Background(), util.NewBeaconBlock()), blockchain.ErrUndefinedExecutionEngineError)
|
||||
require.Equal(t, 0, len(s.badBlockCache.Keys()))
|
||||
require.Equal(t, 1, len(s.seenBlockCache.Keys()))
|
||||
}
|
||||
|
||||
@@ -4,8 +4,9 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -13,41 +14,265 @@ import (
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
gcache "github.com/patrickmn/go-cache"
|
||||
chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
|
||||
dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
|
||||
lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
)
|
||||
|
||||
func FuzzValidateBeaconBlockPubSub(f *testing.F) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
func FuzzValidateBeaconBlockPubSub_Phase0(f *testing.F) {
|
||||
db := dbtest.SetupDB(f)
|
||||
p := p2ptest.NewFuzzTestP2P()
|
||||
ctx := context.Background()
|
||||
beaconState, privKeys := util.DeterministicGenesisState(f, 100)
|
||||
parentBlock := util.NewBeaconBlock()
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(parentBlock)
|
||||
require.NoError(f, err)
|
||||
require.NoError(f, db.SaveBlock(ctx, wsb))
|
||||
bRoot, err := parentBlock.Block.HashTreeRoot()
|
||||
require.NoError(f, err)
|
||||
require.NoError(f, db.SaveState(ctx, beaconState, bRoot))
|
||||
require.NoError(f, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
|
||||
copied := beaconState.Copy()
|
||||
require.NoError(f, copied.SetSlot(1))
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(ctx, copied)
|
||||
require.NoError(f, err)
|
||||
msg := util.NewBeaconBlock()
|
||||
msg.Block.ParentRoot = bRoot[:]
|
||||
msg.Block.Slot = 1
|
||||
msg.Block.ProposerIndex = proposerIdx
|
||||
msg.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, msg.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
|
||||
require.NoError(f, err)
|
||||
|
||||
stateGen := stategen.New(db)
|
||||
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0),
|
||||
State: beaconState,
|
||||
FinalizedCheckPoint: &ethpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
DB: db,
|
||||
}
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: &chainMock.ChainService{},
|
||||
beaconDB: db,
|
||||
p2p: p,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
blockNotifier: chainService.BlockNotifier(),
|
||||
stateGen: stateGen,
|
||||
},
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
seenBlockCache: lruwrpr.New(10),
|
||||
badBlockCache: lruwrpr.New(10),
|
||||
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
}
|
||||
validTopic := fmt.Sprintf(p2p.BlockSubnetTopicFormat, []byte{0xb5, 0x30, 0x3f, 0x2a}) + "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
f.Add("junk", []byte("junk"), []byte("junk"), []byte("junk"), []byte(validTopic), []byte("junk"), []byte("junk"))
|
||||
f.Fuzz(func(t *testing.T, pid string, from, data, seqno, topic, signature, key []byte) {
|
||||
buf := new(bytes.Buffer)
|
||||
_, err = p.Encoding().EncodeGossip(buf, msg)
|
||||
require.NoError(f, err)
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
|
||||
digest, err := r.currentForkDigest()
|
||||
assert.NoError(f, err)
|
||||
topic = r.addDigestToTopic(topic, digest)
|
||||
|
||||
f.Add("junk", []byte("junk"), buf.Bytes(), []byte(topic))
|
||||
f.Fuzz(func(t *testing.T, pid string, from, data, topic []byte) {
|
||||
r.cfg.p2p = p2ptest.NewFuzzTestP2P()
|
||||
r.rateLimiter = newRateLimiter(r.cfg.p2p)
|
||||
cService := &mock.ChainService{
|
||||
Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot*10000000), 0),
|
||||
State: beaconState,
|
||||
FinalizedCheckPoint: &ethpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
DB: db,
|
||||
}
|
||||
r.cfg.chain = cService
|
||||
r.cfg.blockNotifier = cService.BlockNotifier()
|
||||
strTop := string(topic)
|
||||
msg := &pubsub.Message{
|
||||
Message: &pb.Message{
|
||||
From: from,
|
||||
Data: data,
|
||||
Seqno: seqno,
|
||||
Topic: &strTop,
|
||||
Signature: signature,
|
||||
Key: key,
|
||||
From: from,
|
||||
Data: data,
|
||||
Topic: &strTop,
|
||||
},
|
||||
}
|
||||
_, err := r.validateBeaconBlockPubSub(ctx, peer.ID(pid), msg)
|
||||
_ = err
|
||||
})
|
||||
}
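The fuzz targets are rewritten around Go's native fuzzing: seed the corpus with one valid encoded message via f.Add, then let f.Fuzz mutate the raw inputs fed to the validator. A bare-bones skeleton of that structure with a hypothetical decoder in place of validateBeaconBlockPubSub:

```go
package fuzzsketch

import (
	"errors"
	"testing"
)

// decodeMessage is a hypothetical stand-in for the gossip validator under test.
func decodeMessage(topic string, data []byte) error {
	if len(data) == 0 {
		return errors.New("empty message")
	}
	if topic == "" {
		return errors.New("missing topic")
	}
	return nil
}

// FuzzDecodeMessage sketches the structure of the rewritten fuzz targets:
// seed the corpus with one known-good input via f.Add, then let the fuzzer
// mutate the raw values passed to the handler.
func FuzzDecodeMessage(f *testing.F) {
	validTopic := "/eth2/beacon_block/ssz_snappy" // illustrative seed topic
	f.Add(validTopic, []byte{0x01, 0x02, 0x03})   // known-good seed corpus entry

	f.Fuzz(func(t *testing.T, topic string, data []byte) {
		// The property under test: the decoder must never panic on
		// arbitrary inputs; errors are acceptable and ignored.
		_ = decodeMessage(topic, data)
	})
}
```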
|
||||
|
||||
func FuzzValidateBeaconBlockPubSub_Altair(f *testing.F) {
|
||||
db := dbtest.SetupDB(f)
|
||||
p := p2ptest.NewFuzzTestP2P()
|
||||
ctx := context.Background()
|
||||
beaconState, privKeys := util.DeterministicGenesisStateAltair(f, 100)
|
||||
parentBlock := util.NewBeaconBlockAltair()
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(parentBlock)
|
||||
require.NoError(f, err)
|
||||
require.NoError(f, db.SaveBlock(ctx, wsb))
|
||||
bRoot, err := parentBlock.Block.HashTreeRoot()
|
||||
require.NoError(f, err)
|
||||
require.NoError(f, db.SaveState(ctx, beaconState, bRoot))
|
||||
require.NoError(f, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
|
||||
copied := beaconState.Copy()
|
||||
require.NoError(f, copied.SetSlot(1))
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(ctx, copied)
|
||||
require.NoError(f, err)
|
||||
msg := util.NewBeaconBlock()
|
||||
msg.Block.ParentRoot = bRoot[:]
|
||||
msg.Block.Slot = 1
|
||||
msg.Block.ProposerIndex = proposerIdx
|
||||
msg.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, msg.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
|
||||
require.NoError(f, err)
|
||||
|
||||
stateGen := stategen.New(db)
|
||||
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0),
|
||||
State: beaconState,
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
DB: db,
|
||||
}
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
beaconDB: db,
|
||||
p2p: p,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
blockNotifier: chainService.BlockNotifier(),
|
||||
stateGen: stateGen,
|
||||
},
|
||||
seenBlockCache: lruwrpr.New(10),
|
||||
badBlockCache: lruwrpr.New(10),
|
||||
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
_, err = p.Encoding().EncodeGossip(buf, msg)
|
||||
require.NoError(f, err)
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
|
||||
digest, err := r.currentForkDigest()
|
||||
assert.NoError(f, err)
|
||||
topic = r.addDigestToTopic(topic, digest)
|
||||
|
||||
f.Add("junk", []byte("junk"), buf.Bytes(), []byte(topic))
|
||||
f.Fuzz(func(t *testing.T, pid string, from, data, topic []byte) {
|
||||
r.cfg.p2p = p2ptest.NewFuzzTestP2P()
|
||||
r.rateLimiter = newRateLimiter(r.cfg.p2p)
|
||||
cService := &mock.ChainService{
|
||||
Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot*10000000), 0),
|
||||
State: beaconState,
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
DB: db,
|
||||
}
|
||||
r.cfg.chain = cService
|
||||
r.cfg.blockNotifier = cService.BlockNotifier()
|
||||
strTop := string(topic)
|
||||
msg := &pubsub.Message{
|
||||
Message: &pb.Message{
|
||||
From: from,
|
||||
Data: data,
|
||||
Topic: &strTop,
|
||||
},
|
||||
}
|
||||
_, err := r.validateBeaconBlockPubSub(ctx, peer.ID(pid), msg)
|
||||
_ = err
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzValidateBeaconBlockPubSub_Bellatrix(f *testing.F) {
db := dbtest.SetupDB(f)
p := p2ptest.NewFuzzTestP2P()
ctx := context.Background()
beaconState, privKeys := util.DeterministicGenesisStateBellatrix(f, 100)
parentBlock := util.NewBeaconBlockBellatrix()
wsb, err := wrapper.WrappedSignedBeaconBlock(parentBlock)
require.NoError(f, err)
require.NoError(f, db.SaveBlock(ctx, wsb))
bRoot, err := parentBlock.Block.HashTreeRoot()
require.NoError(f, err)
require.NoError(f, db.SaveState(ctx, beaconState, bRoot))
require.NoError(f, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
copied := beaconState.Copy()
require.NoError(f, copied.SetSlot(1))
proposerIdx, err := helpers.BeaconProposerIndex(ctx, copied)
require.NoError(f, err)
msg := util.NewBeaconBlock()
msg.Block.ParentRoot = bRoot[:]
msg.Block.Slot = 1
msg.Block.ProposerIndex = proposerIdx
msg.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, msg.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
require.NoError(f, err)

stateGen := stategen.New(db)
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0),
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
},
DB: db,
}
r := &Service{
cfg: &config{
beaconDB: db,
p2p: p,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
blockNotifier: chainService.BlockNotifier(),
stateGen: stateGen,
},
seenBlockCache: lruwrpr.New(10),
badBlockCache: lruwrpr.New(10),
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
seenPendingBlocks: make(map[[32]byte]bool),
}
buf := new(bytes.Buffer)
_, err = p.Encoding().EncodeGossip(buf, msg)
require.NoError(f, err)
topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
digest, err := r.currentForkDigest()
assert.NoError(f, err)
topic = r.addDigestToTopic(topic, digest)

f.Add("junk", []byte("junk"), buf.Bytes(), []byte(topic))
f.Fuzz(func(t *testing.T, pid string, from, data, topic []byte) {
r.cfg.p2p = p2ptest.NewFuzzTestP2P()
r.rateLimiter = newRateLimiter(r.cfg.p2p)
cService := &mock.ChainService{
Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot*10000000), 0),
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
},
DB: db,
}
r.cfg.chain = cService
r.cfg.blockNotifier = cService.BlockNotifier()
strTop := string(topic)
msg := &pubsub.Message{
Message: &pb.Message{
From: from,
Data: data,
Topic: &strTop,
},
}
_, err := r.validateBeaconBlockPubSub(ctx, peer.ID(pid), msg)

@@ -96,7 +96,7 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms

blockRoot, err := blk.Block().HashTreeRoot()
if err != nil {
log.WithError(err).WithField("blockSlot", blk.Block().Slot()).Debug("Ignored block")
log.WithError(err).WithFields(getBlockFields(blk)).Debug("Ignored block")
return pubsub.ValidationIgnore, nil
}
if s.cfg.beaconDB.HasBlock(ctx, blockRoot) {
@@ -105,8 +105,9 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
// Check if parent is a bad block and then reject the block.
if s.hasBadBlock(bytesutil.ToBytes32(blk.Block().ParentRoot())) {
s.setBadBlock(ctx, blockRoot)
e := fmt.Errorf("received block with root %#x that has an invalid parent %#x", blockRoot, blk.Block().ParentRoot())
return pubsub.ValidationReject, e
err := fmt.Errorf("received block with root %#x that has an invalid parent %#x", blockRoot, blk.Block().ParentRoot())
log.WithError(err).WithFields(getBlockFields(blk)).Debug("Received block with an invalid parent")
return pubsub.ValidationReject, err
}

s.pendingQueueLock.RLock()
@@ -121,24 +122,25 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
// earlier (SECONDS_PER_SLOT * 2 seconds). Queue such blocks and process them at the right slot.
genesisTime := uint64(s.cfg.chain.GenesisTime().Unix())
if err := slots.VerifyTime(genesisTime, blk.Block().Slot(), earlyBlockProcessingTolerance); err != nil {
log.WithError(err).WithField("blockSlot", blk.Block().Slot()).Debug("Ignored block")
log.WithError(err).WithFields(getBlockFields(blk)).Debug("Ignored block: could not verify slot time")
return pubsub.ValidationIgnore, nil
}

// Add metrics for block arrival time subtracts slot start time.
if err := captureArrivalTimeMetric(genesisTime, blk.Block().Slot()); err != nil {
log.WithError(err).WithField("blockSlot", blk.Block().Slot()).Debug("Ignored block")
log.WithError(err).WithFields(getBlockFields(blk)).Debug("Ignored block: could not capture arrival time metric")
return pubsub.ValidationIgnore, nil
}

startSlot, err := slots.EpochStart(s.cfg.chain.FinalizedCheckpt().Epoch)
if err != nil {
log.WithError(err).WithField("blockSlot", blk.Block().Slot()).Debug("Ignored block")
log.WithError(err).WithFields(getBlockFields(blk)).Debug("Ignored block: could not calculate epoch start slot")
return pubsub.ValidationIgnore, nil
}
if startSlot >= blk.Block().Slot() {
e := fmt.Errorf("finalized slot %d greater or equal to block slot %d", startSlot, blk.Block().Slot())
return pubsub.ValidationIgnore, e
err := fmt.Errorf("finalized slot %d greater or equal to block slot %d", startSlot, blk.Block().Slot())
log.WithFields(getBlockFields(blk)).Debug(err)
return pubsub.ValidationIgnore, err
}

// Process the block if the clock jitter is less than MAXIMUM_GOSSIP_CLOCK_DISPARITY.
@@ -147,11 +149,13 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
s.pendingQueueLock.Lock()
if err := s.insertBlockToPendingQueue(blk.Block().Slot(), blk, blockRoot); err != nil {
s.pendingQueueLock.Unlock()
log.WithError(err).WithFields(getBlockFields(blk)).Debug("Could not insert block to pending queue")
return pubsub.ValidationIgnore, err
}
s.pendingQueueLock.Unlock()
e := fmt.Errorf("early block, with current slot %d < block slot %d", s.cfg.chain.CurrentSlot(), blk.Block().Slot())
return pubsub.ValidationIgnore, e
err := fmt.Errorf("early block, with current slot %d < block slot %d", s.cfg.chain.CurrentSlot(), blk.Block().Slot())
log.WithError(err).WithFields(getBlockFields(blk)).Debug("Could not process early block")
return pubsub.ValidationIgnore, err
}

// Handle block when the parent is unknown.
@@ -159,10 +163,13 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
s.pendingQueueLock.Lock()
if err := s.insertBlockToPendingQueue(blk.Block().Slot(), blk, blockRoot); err != nil {
s.pendingQueueLock.Unlock()
log.WithError(err).WithFields(getBlockFields(blk)).Debug("Could not insert block to pending queue")
return pubsub.ValidationIgnore, err
}
s.pendingQueueLock.Unlock()
return pubsub.ValidationIgnore, errors.Errorf("unknown parent for block with slot %d and parent root %#x", blk.Block().Slot(), blk.Block().ParentRoot())
err := errors.Errorf("unknown parent for block with slot %d and parent root %#x", blk.Block().Slot(), blk.Block().ParentRoot())
log.WithError(err).WithFields(getBlockFields(blk)).Debug("Could not process early block")
return pubsub.ValidationIgnore, err
}

err = s.validateBeaconBlock(ctx, blk, blockRoot)
@@ -170,6 +177,7 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
// If the parent is optimistic, process the block as usual
// This also does not penalize a peer which sends optimistic blocks
if !errors.Is(ErrOptimisticParent, err) {
log.WithError(err).WithFields(getBlockFields(blk)).Debug("Could not validate beacon block")
return pubsub.ValidationReject, err
}
}
@@ -328,6 +336,7 @@ func (s *Service) setBadBlock(ctx context.Context, root [32]byte) {
if ctx.Err() != nil { // Do not mark block as bad if it was due to context error.
return
}
log.WithField("root", fmt.Sprintf("%#x", root)).Debug("Inserting in invalid block cache")
s.badBlockCache.Add(string(root[:]), true)
}

@@ -356,3 +365,15 @@ func isBlockQueueable(genesisTime uint64, slot types.Slot, receivedTime time.Tim
currentTimeWithDisparity := receivedTime.Add(params.BeaconNetworkConfig().MaximumGossipClockDisparity)
return currentTimeWithDisparity.Unix() < slotTime.Unix()
}

func getBlockFields(b interfaces.SignedBeaconBlock) logrus.Fields {
if helpers.BeaconBlockIsNil(b) != nil {
return logrus.Fields{}
}
return logrus.Fields{
"slot": b.Block().Slot(),
"proposerIndex": b.Block().ProposerIndex(),
"graffiti": string(b.Block().Body().Graffiti()),
"version": b.Block().Version(),
}
}

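The hunks above replace single-field log calls with a shared logrus.Fields bundle built by getBlockFields. A minimal sketch of that pattern with a hypothetical block type (not the Prysm interfaces):

```go
package main

import "github.com/sirupsen/logrus"

type block struct {
	Slot          uint64
	ProposerIndex uint64
	Graffiti      string
}

// blockFields mirrors the getBlockFields idea: build the fields once and
// reuse them on every log line so entries stay uniform and greppable.
func blockFields(b *block) logrus.Fields {
	if b == nil {
		return logrus.Fields{}
	}
	return logrus.Fields{
		"slot":          b.Slot,
		"proposerIndex": b.ProposerIndex,
		"graffiti":      b.Graffiti,
	}
}

func main() {
	b := &block{Slot: 42, ProposerIndex: 7, Graffiti: "hello"}
	logrus.WithFields(blockFields(b)).Info("Ignored block")
	logrus.WithFields(blockFields(nil)).Info("nil block is safe too")
}
```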
@@ -1388,3 +1388,18 @@ func Test_validateBeaconBlockProcessingWhenParentIsOptimistic(t *testing.T) {
result := res == pubsub.ValidationAccept
assert.Equal(t, true, result)
}

func Test_getBlockFields(t *testing.T) {
hook := logTest.NewGlobal()

// Nil
log.WithFields(getBlockFields(nil)).Info("nil block")
// Good block
b := util.NewBeaconBlockBellatrix()
wb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
log.WithFields(getBlockFields(wb)).Info("bad block")

require.LogsContain(t, hook, "nil block")
require.LogsContain(t, hook, "bad block")
}

@@ -1,10 +1,12 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = [
"accounts.go",
"delete.go",
"list.go",
"wallet_utils.go",
],
importpath = "github.com/prysmaticlabs/prysm/cmd/validator/accounts",
visibility = ["//visibility:public"],
@@ -15,6 +17,7 @@ go_library(
"//runtime/tos:go_default_library",
"//validator/accounts:go_default_library",
"//validator/accounts/iface:go_default_library",
"//validator/accounts/userprompt:go_default_library",
"//validator/accounts/wallet:go_default_library",
"//validator/client:go_default_library",
"//validator/keymanager:go_default_library",
@@ -23,3 +26,25 @@ go_library(
"@com_github_urfave_cli_v2//:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = ["delete_test.go"],
embed = [":go_default_library"],
deps = [
"//cmd/validator/flags:go_default_library",
"//config/params:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//time:go_default_library",
"//validator/accounts:go_default_library",
"//validator/accounts/wallet:go_default_library",
"//validator/keymanager:go_default_library",
"//validator/keymanager/local:go_default_library",
"@com_github_google_uuid//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
"@com_github_wealdtech_go_eth2_wallet_encryptor_keystorev4//:go_default_library",
],
)

@@ -35,11 +35,14 @@ var Commands = &cli.Command{
if err := cmd.LoadFlagsFromConfig(cliCtx, cliCtx.Command.Flags); err != nil {
return err
}
return tos.VerifyTosAcceptedOrPrompt(cliCtx)
if err := tos.VerifyTosAcceptedOrPrompt(cliCtx); err != nil {
return err
}
features.ConfigureValidator(cliCtx)
return nil
},
Action: func(cliCtx *cli.Context) error {
features.ConfigureValidator(cliCtx)
if err := accounts.DeleteAccountCli(cliCtx); err != nil {
if err := accountsDelete(cliCtx); err != nil {
log.Fatalf("Could not delete account: %v", err)
}
return nil

cmd/validator/accounts/delete.go (new file, 63 lines)
@@ -0,0 +1,63 @@
package accounts

import (
"strings"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/cmd/validator/flags"
"github.com/prysmaticlabs/prysm/validator/accounts"
"github.com/prysmaticlabs/prysm/validator/accounts/userprompt"
"github.com/prysmaticlabs/prysm/validator/client"
"github.com/urfave/cli/v2"
)

func accountsDelete(c *cli.Context) error {
w, km, err := walletWithKeymanager(c)
if err != nil {
return err
}
dialOpts := client.ConstructDialOptions(
c.Int(cmd.GrpcMaxCallRecvMsgSizeFlag.Name),
c.String(flags.CertFlag.Name),
c.Uint(flags.GrpcRetriesFlag.Name),
c.Duration(flags.GrpcRetryDelayFlag.Name),
)
grpcHeaders := strings.Split(c.String(flags.GrpcHeadersFlag.Name), ",")

opts := []accounts.Option{
accounts.WithWallet(w),
accounts.WithKeymanager(km),
accounts.WithGRPCDialOpts(dialOpts),
accounts.WithBeaconRPCProvider(c.String(flags.BeaconRPCProviderFlag.Name)),
accounts.WithGRPCHeaders(grpcHeaders),
}

// Get full set of public keys from the keymanager.
validatingPublicKeys, err := km.FetchValidatingPublicKeys(c.Context)
if err != nil {
return err
}
if len(validatingPublicKeys) == 0 {
return errors.New("wallet is empty, no accounts to delete")
}
// Filter keys either from CLI flag or from interactive session.
filteredPubKeys, err := accounts.FilterPublicKeysFromUserInput(
c,
flags.DeletePublicKeysFlag,
validatingPublicKeys,
userprompt.SelectAccountsDeletePromptText,
)
if err != nil {
return errors.Wrap(err, "could not filter public keys for deletion")
}
opts = append(opts, accounts.WithFilteredPubKeys(filteredPubKeys))
opts = append(opts, accounts.WithWalletKeyCount(len(validatingPublicKeys)))
opts = append(opts, accounts.WithDeletePublicKeys(c.IsSet(flags.DeletePublicKeysFlag.Name)))

acc, err := accounts.NewCLIManager(opts...)
if err != nil {
return err
}
return acc.Delete(c.Context)
}
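Because accountsDelete above is a plain `func(*cli.Context) error`, it can be wired directly into a urfave/cli v2 Action, which is what the Commands hunk earlier does. A minimal sketch of that wiring with a placeholder action instead of the real Prysm helpers:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "validator-accounts-sketch",
		Commands: []*cli.Command{
			{
				Name:  "delete",
				Usage: "delete selected validator accounts",
				Flags: []cli.Flag{
					&cli.StringFlag{Name: "delete-public-keys"},
				},
				// In the real CLI this slot is filled by accountsDelete(cliCtx);
				// here we only print, to keep the sketch self-contained.
				Action: func(c *cli.Context) error {
					fmt.Println("would delete accounts for keys:", c.String("delete-public-keys"))
					return nil
				},
			},
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```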
cmd/validator/accounts/delete_test.go (new file, 191 lines)
@@ -0,0 +1,191 @@
package accounts

import (
"encoding/hex"
"encoding/json"
"flag"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"testing"
"time"

"github.com/google/uuid"
"github.com/prysmaticlabs/prysm/cmd/validator/flags"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
prysmTime "github.com/prysmaticlabs/prysm/time"
"github.com/prysmaticlabs/prysm/validator/accounts"
"github.com/prysmaticlabs/prysm/validator/accounts/wallet"
"github.com/prysmaticlabs/prysm/validator/keymanager"
"github.com/prysmaticlabs/prysm/validator/keymanager/local"
"github.com/urfave/cli/v2"
keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4"
)

const (
passwordFileName = "password.txt"
password = "OhWOWthisisatest42!$"
)

func setupWalletAndPasswordsDir(t testing.TB) (string, string, string) {
walletDir := filepath.Join(t.TempDir(), "wallet")
passwordsDir := filepath.Join(t.TempDir(), "passwords")
passwordFileDir := filepath.Join(t.TempDir(), "passwordFile")
require.NoError(t, os.MkdirAll(passwordFileDir, params.BeaconIoConfig().ReadWriteExecutePermissions))
passwordFilePath := filepath.Join(passwordFileDir, passwordFileName)
require.NoError(t, os.WriteFile(passwordFilePath, []byte(password), os.ModePerm))
return walletDir, passwordsDir, passwordFilePath
}

// Returns the fullPath to the newly created keystore file.
func createKeystore(t *testing.T, path string) (*keymanager.Keystore, string) {
validatingKey, err := bls.RandKey()
require.NoError(t, err)
encryptor := keystorev4.New()
cryptoFields, err := encryptor.Encrypt(validatingKey.Marshal(), password)
require.NoError(t, err)
id, err := uuid.NewRandom()
require.NoError(t, err)
keystoreFile := &keymanager.Keystore{
Crypto: cryptoFields,
ID: id.String(),
Pubkey: fmt.Sprintf("%x", validatingKey.PublicKey().Marshal()),
Version: encryptor.Version(),
Name: encryptor.Name(),
}
encoded, err := json.MarshalIndent(keystoreFile, "", "\t")
require.NoError(t, err)
// Write the encoded keystore to disk with the timestamp appended
createdAt := prysmTime.Now().Unix()
fullPath := filepath.Join(path, fmt.Sprintf(local.KeystoreFileNameFormat, createdAt))
require.NoError(t, os.WriteFile(fullPath, encoded, os.ModePerm))
return keystoreFile, fullPath
}

type testWalletConfig struct {
exitAll bool
skipDepositConfirm bool
keymanagerKind keymanager.Kind
numAccounts int64
grpcHeaders string
privateKeyFile string
accountPasswordFile string
walletPasswordFile string
backupPasswordFile string
backupPublicKeys string
voluntaryExitPublicKeys string
deletePublicKeys string
keysDir string
backupDir string
walletDir string
}

func setupWalletCtx(
tb testing.TB,
cfg *testWalletConfig,
) *cli.Context {
app := cli.App{}
set := flag.NewFlagSet("test", 0)
set.String(flags.WalletDirFlag.Name, cfg.walletDir, "")
set.String(flags.KeysDirFlag.Name, cfg.keysDir, "")
set.String(flags.KeymanagerKindFlag.Name, cfg.keymanagerKind.String(), "")
set.String(flags.DeletePublicKeysFlag.Name, cfg.deletePublicKeys, "")
set.String(flags.VoluntaryExitPublicKeysFlag.Name, cfg.voluntaryExitPublicKeys, "")
set.String(flags.BackupDirFlag.Name, cfg.backupDir, "")
set.String(flags.BackupPasswordFile.Name, cfg.backupPasswordFile, "")
set.String(flags.BackupPublicKeysFlag.Name, cfg.backupPublicKeys, "")
set.String(flags.WalletPasswordFileFlag.Name, cfg.walletPasswordFile, "")
set.String(flags.AccountPasswordFileFlag.Name, cfg.accountPasswordFile, "")
set.Int64(flags.NumAccountsFlag.Name, cfg.numAccounts, "")
set.Bool(flags.SkipDepositConfirmationFlag.Name, cfg.skipDepositConfirm, "")
set.Bool(flags.SkipMnemonic25thWordCheckFlag.Name, true, "")
set.Bool(flags.ExitAllFlag.Name, cfg.exitAll, "")
set.String(flags.GrpcHeadersFlag.Name, cfg.grpcHeaders, "")

if cfg.privateKeyFile != "" {
set.String(flags.ImportPrivateKeyFileFlag.Name, cfg.privateKeyFile, "")
assert.NoError(tb, set.Set(flags.ImportPrivateKeyFileFlag.Name, cfg.privateKeyFile))
}
assert.NoError(tb, set.Set(flags.WalletDirFlag.Name, cfg.walletDir))
assert.NoError(tb, set.Set(flags.SkipMnemonic25thWordCheckFlag.Name, "true"))
assert.NoError(tb, set.Set(flags.KeysDirFlag.Name, cfg.keysDir))
assert.NoError(tb, set.Set(flags.KeymanagerKindFlag.Name, cfg.keymanagerKind.String()))
assert.NoError(tb, set.Set(flags.DeletePublicKeysFlag.Name, cfg.deletePublicKeys))
assert.NoError(tb, set.Set(flags.VoluntaryExitPublicKeysFlag.Name, cfg.voluntaryExitPublicKeys))
assert.NoError(tb, set.Set(flags.BackupDirFlag.Name, cfg.backupDir))
assert.NoError(tb, set.Set(flags.BackupPublicKeysFlag.Name, cfg.backupPublicKeys))
assert.NoError(tb, set.Set(flags.BackupPasswordFile.Name, cfg.backupPasswordFile))
assert.NoError(tb, set.Set(flags.WalletPasswordFileFlag.Name, cfg.walletPasswordFile))
assert.NoError(tb, set.Set(flags.AccountPasswordFileFlag.Name, cfg.accountPasswordFile))
assert.NoError(tb, set.Set(flags.NumAccountsFlag.Name, strconv.Itoa(int(cfg.numAccounts))))
assert.NoError(tb, set.Set(flags.SkipDepositConfirmationFlag.Name, strconv.FormatBool(cfg.skipDepositConfirm)))
assert.NoError(tb, set.Set(flags.ExitAllFlag.Name, strconv.FormatBool(cfg.exitAll)))
assert.NoError(tb, set.Set(flags.GrpcHeadersFlag.Name, cfg.grpcHeaders))
return cli.NewContext(&app, set, nil)
}

func TestDeleteAccounts_Noninteractive(t *testing.T) {
walletDir, _, passwordFilePath := setupWalletAndPasswordsDir(t)
// Write a directory where we will import keys from.
keysDir := filepath.Join(t.TempDir(), "keysDir")
require.NoError(t, os.MkdirAll(keysDir, os.ModePerm))

// Create 3 keystore files in the keys directory we can then
// import from in our wallet.
k1, _ := createKeystore(t, keysDir)
time.Sleep(time.Second)
k2, _ := createKeystore(t, keysDir)
time.Sleep(time.Second)
k3, _ := createKeystore(t, keysDir)
generatedPubKeys := []string{k1.Pubkey, k2.Pubkey, k3.Pubkey}
// Only delete keys 0 and 1.
deletePublicKeys := strings.Join(generatedPubKeys[0:2], ",")

// We initialize a wallet with a local keymanager.
cliCtx := setupWalletCtx(t, &testWalletConfig{
// Wallet configuration flags.
walletDir: walletDir,
keymanagerKind: keymanager.Local,
walletPasswordFile: passwordFilePath,
accountPasswordFile: passwordFilePath,
// Flags required for ImportAccounts to work.
keysDir: keysDir,
// Flags required for DeleteAccounts to work.
deletePublicKeys: deletePublicKeys,
})
w, err := accounts.CreateWalletWithKeymanager(cliCtx.Context, &accounts.CreateWalletConfig{
WalletCfg: &wallet.Config{
WalletDir: walletDir,
KeymanagerKind: keymanager.Local,
WalletPassword: password,
},
})
require.NoError(t, err)

// We attempt to import accounts.
require.NoError(t, accounts.ImportAccountsCli(cliCtx))

// We attempt to delete the accounts specified.
require.NoError(t, accountsDelete(cliCtx))

keymanager, err := local.NewKeymanager(
cliCtx.Context,
&local.SetupConfig{
Wallet: w,
ListenForChanges: false,
},
)
require.NoError(t, err)
remainingAccounts, err := keymanager.FetchValidatingPublicKeys(cliCtx.Context)
require.NoError(t, err)
require.Equal(t, len(remainingAccounts), 1)
remainingPublicKey, err := hex.DecodeString(k3.Pubkey)
require.NoError(t, err)
assert.DeepEqual(t, remainingAccounts[0], bytesutil.ToBytes48(remainingPublicKey))
}
@@ -3,14 +3,10 @@ package accounts
import (
"strings"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/cmd/validator/flags"
"github.com/prysmaticlabs/prysm/validator/accounts"
"github.com/prysmaticlabs/prysm/validator/accounts/iface"
"github.com/prysmaticlabs/prysm/validator/accounts/wallet"
"github.com/prysmaticlabs/prysm/validator/client"
"github.com/prysmaticlabs/prysm/validator/keymanager"
"github.com/urfave/cli/v2"
)

@@ -51,22 +47,3 @@ func accountsList(c *cli.Context) error {
c.Context,
)
}

func walletWithKeymanager(c *cli.Context) (*wallet.Wallet, keymanager.IKeymanager, error) {
w, err := wallet.OpenWalletOrElseCli(c, func(cliCtx *cli.Context) (*wallet.Wallet, error) {
return nil, wallet.ErrNoWalletFound
})
if err != nil {
return nil, nil, errors.Wrap(err, "could not open wallet")
}
// TODO(#9883) - Remove this when we have a better way to handle this. this is fine.
// genesis root is not set here which is used for sign function, but fetch keys should be fine.
km, err := w.InitializeKeymanager(c.Context, iface.InitKeymanagerConfig{ListenForChanges: false})
if err != nil && strings.Contains(err.Error(), keymanager.IncorrectPasswordErrMsg) {
return nil, nil, errors.New("wrong wallet password entered")
}
if err != nil {
return nil, nil, errors.Wrap(err, accounts.ErrCouldNotInitializeKeymanager)
}
return w, km, nil
}

cmd/validator/accounts/wallet_utils.go (new file, 31 lines)
@@ -0,0 +1,31 @@
package accounts

import (
"strings"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/validator/accounts"
"github.com/prysmaticlabs/prysm/validator/accounts/iface"
"github.com/prysmaticlabs/prysm/validator/accounts/wallet"
"github.com/prysmaticlabs/prysm/validator/keymanager"
"github.com/urfave/cli/v2"
)

func walletWithKeymanager(c *cli.Context) (*wallet.Wallet, keymanager.IKeymanager, error) {
w, err := wallet.OpenWalletOrElseCli(c, func(cliCtx *cli.Context) (*wallet.Wallet, error) {
return nil, wallet.ErrNoWalletFound
})
if err != nil {
return nil, nil, errors.Wrap(err, "could not open wallet")
}
// TODO(#9883) - Remove this when we have a better way to handle this. this is fine.
// genesis root is not set here which is used for sign function, but fetch keys should be fine.
km, err := w.InitializeKeymanager(c.Context, iface.InitKeymanagerConfig{ListenForChanges: false})
if err != nil && strings.Contains(err.Error(), keymanager.IncorrectPasswordErrMsg) {
return nil, nil, errors.New("wrong wallet password entered")
}
if err != nil {
return nil, nil, errors.Wrap(err, accounts.ErrCouldNotInitializeKeymanager)
}
return w, km, nil
}
@@ -144,6 +144,7 @@ type BeaconChainConfig struct {
ShardingForkVersion []byte `yaml:"SHARDING_FORK_VERSION" spec:"true"` // ShardingForkVersion is used to represent the fork version for sharding.
ShardingForkEpoch types.Epoch `yaml:"SHARDING_FORK_EPOCH" spec:"true"` // ShardingForkEpoch is used to represent the assigned fork epoch for sharding.
ForkVersionSchedule map[[fieldparams.VersionLength]byte]types.Epoch // Schedule of fork epochs by version.
ForkVersionNames map[[fieldparams.VersionLength]byte]string // Human-readable names of fork versions.

// Weak subjectivity values.
SafetyDecay uint64 // SafetyDecay is defined as the loss in the 1/3 consensus safety margin of the casper FFG mechanism.
@@ -195,6 +196,7 @@ type BeaconChainConfig struct {
func (b *BeaconChainConfig) InitializeForkSchedule() {
// Reset Fork Version Schedule.
b.ForkVersionSchedule = configForkSchedule(b)
b.ForkVersionNames = configForkNames(b)
}

func configForkSchedule(b *BeaconChainConfig) map[[fieldparams.VersionLength]byte]types.Epoch {
@@ -207,3 +209,14 @@ func configForkSchedule(b *BeaconChainConfig) map[[fieldparams.VersionLength]byt
fvs[bytesutil.ToBytes4(b.BellatrixForkVersion)] = b.BellatrixForkEpoch
return fvs
}

func configForkNames(b *BeaconChainConfig) map[[fieldparams.VersionLength]byte]string {
fvn := map[[fieldparams.VersionLength]byte]string{}
// Set Genesis fork data.
fvn[bytesutil.ToBytes4(b.GenesisForkVersion)] = "phase0"
// Set Altair fork data.
fvn[bytesutil.ToBytes4(b.AltairForkVersion)] = "altair"
// Set Bellatrix fork data.
fvn[bytesutil.ToBytes4(b.BellatrixForkVersion)] = "bellatrix"
return fvn
}

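configForkNames above keys human-readable fork names by their 4-byte fork version. A standalone sketch of that lookup with made-up version bytes (the real values come from the BeaconChainConfig), and a stand-in for Prysm's bytesutil.ToBytes4 helper:

```go
package main

import "fmt"

// toBytes4 is a stand-in for Prysm's bytesutil.ToBytes4 helper.
func toBytes4(b []byte) [4]byte {
	var out [4]byte
	copy(out[:], b)
	return out
}

func main() {
	// Hypothetical fork versions for illustration only.
	genesis := []byte{0x00, 0x00, 0x00, 0x00}
	altair := []byte{0x01, 0x00, 0x00, 0x00}
	bellatrix := []byte{0x02, 0x00, 0x00, 0x00}

	names := map[[4]byte]string{
		toBytes4(genesis):   "phase0",
		toBytes4(altair):    "altair",
		toBytes4(bellatrix): "bellatrix",
	}
	fmt.Println(names[toBytes4(altair)]) // altair
}
```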
@@ -22,7 +22,8 @@ func UseE2EMainnetConfig() {
}

// E2ETestConfig retrieves the configurations made specifically for E2E testing.
// Warning: This config is only for testing, it is not meant for use outside of E2E.
//
// WARNING: This config is only for testing, it is not meant for use outside of E2E.
func E2ETestConfig() *BeaconChainConfig {
e2eConfig := MinimalSpecConfig()

@@ -13,6 +13,7 @@ go_library(
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

@@ -4,9 +4,9 @@ package trie
import (
"bytes"
"encoding/binary"
"errors"
"fmt"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/math"
@@ -29,7 +29,7 @@ func NewTrie(depth uint64) (*SparseMerkleTrie, error) {
}

// CreateTrieFromProto creates a Sparse Merkle Trie from its corresponding merkle trie.
func CreateTrieFromProto(trieObj *protodb.SparseMerkleTrie) *SparseMerkleTrie {
func CreateTrieFromProto(trieObj *protodb.SparseMerkleTrie) (*SparseMerkleTrie, error) {
trie := &SparseMerkleTrie{
depth: uint(trieObj.Depth),
originalItems: trieObj.OriginalItems,
@@ -39,7 +39,29 @@ func CreateTrieFromProto(trieObj *protodb.SparseMerkleTrie) *SparseMerkleTrie {
branches[i] = layer.Layer
}
trie.branches = branches
return trie

if err := trie.validate(); err != nil {
return nil, errors.Wrap(err, "invalid sparse merkle trie")
}

return trie, nil
}

func (m *SparseMerkleTrie) validate() error {
if len(m.branches) == 0 {
return errors.New("no branches")
}
if len(m.branches[len(m.branches)-1]) == 0 {
return errors.New("invalid branches provided")
}
if m.depth >= uint(len(m.branches)) {
return errors.New("depth is greater than or equal to number of branches")
}
if m.depth >= 64 {
return errors.New("depth exceeds 64") // PowerOf2 would overflow.
}

return nil
}

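The validate method above guards four invariants before a trie received over protobuf is trusted: non-empty branches, a non-empty top layer, depth strictly below the number of layers, and depth under 64 so the power-of-two arithmetic cannot overflow. A standalone sketch of the same checks over a bare slice-of-layers, independent of the Prysm types:

```go
package main

import (
	"errors"
	"fmt"
)

// validateTrieShape mirrors the invariants checked by the validate method
// in the diff above, expressed over plain slices.
func validateTrieShape(branches [][][]byte, depth uint) error {
	if len(branches) == 0 {
		return errors.New("no branches")
	}
	if len(branches[len(branches)-1]) == 0 {
		return errors.New("invalid branches provided")
	}
	if depth >= uint(len(branches)) {
		return errors.New("depth is greater than or equal to number of branches")
	}
	if depth >= 64 {
		return errors.New("depth exceeds 64")
	}
	return nil
}

func main() {
	leaf := []byte{0x01}
	branches := [][][]byte{{leaf}, {leaf}, {leaf}}
	fmt.Println(validateTrieShape(branches, 2))  // nil: shape is consistent
	fmt.Println(validateTrieShape(branches, 12)) // error: depth too large
}
```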
// GenerateTrieFromItems constructs a Merkle trie from a sequence of byte slices.
@@ -82,10 +104,6 @@ func (m *SparseMerkleTrie) Items() [][]byte {
// Spec Definition:
//   sha256(concat(node, self.to_little_endian_64(self.deposit_count), slice(zero_bytes32, start=0, len=24)))
func (m *SparseMerkleTrie) HashTreeRoot() ([32]byte, error) {
if len(m.branches) == 0 || len(m.branches[len(m.branches)-1]) == 0 {
return [32]byte{}, errors.New("invalid branches provided to compute root")
}

enc := [32]byte{}
depositCount := uint64(len(m.originalItems))
if len(m.originalItems) == 1 && bytes.Equal(m.originalItems[0], ZeroHashes[0][:]) {
@@ -101,12 +119,6 @@ func (m *SparseMerkleTrie) Insert(item []byte, index int) error {
if index < 0 {
return fmt.Errorf("negative index provided: %d", index)
}
if len(m.branches) == 0 {
return errors.New("invalid trie: no branches")
}
if m.depth > uint(len(m.branches)) {
return errors.New("invalid trie: depth is greater than number of branches")
}
for index >= len(m.branches[0]) {
m.branches[0] = append(m.branches[0], ZeroHashes[0][:])
}
@@ -153,16 +165,10 @@ func (m *SparseMerkleTrie) MerkleProof(index int) ([][]byte, error) {
if index < 0 {
return nil, fmt.Errorf("merkle index is negative: %d", index)
}
if len(m.branches) == 0 {
return nil, errors.New("invalid trie: no branches")
}
leaves := m.branches[0]
if index >= len(leaves) {
return nil, fmt.Errorf("merkle index out of range in trie, max range: %d, received: %d", len(leaves), index)
}
if m.depth > uint(len(m.branches)) {
return nil, errors.New("invalid trie: depth is greater than number of branches")
}
merkleIndex := uint(index)
proof := make([][]byte, m.depth+1)
for i := uint(0); i < m.depth; i++ {

@@ -16,6 +16,68 @@ import (
"github.com/prysmaticlabs/prysm/testing/require"
)

func TestCreateTrieFromProto_Validation(t *testing.T) {
h := hash.Hash([]byte("hi"))
genValidLayers := func(num int) []*ethpb.TrieLayer {
l := make([]*ethpb.TrieLayer, num)
for i := 0; i < num; i++ {
l[i] = &ethpb.TrieLayer{
Layer: [][]byte{h[:]},
}
}
return l
}
tests := []struct {
trie *ethpb.SparseMerkleTrie
errString string
}{
{
trie: &ethpb.SparseMerkleTrie{
Layers: []*ethpb.TrieLayer{},
Depth: 0,
},
errString: "no branches",
},
{
trie: &ethpb.SparseMerkleTrie{
Layers: []*ethpb.TrieLayer{
{
Layer: [][]byte{h[:]},
},
{
Layer: [][]byte{h[:]},
},
{
Layer: [][]byte{},
},
},
Depth: 2,
},
errString: "invalid branches provided",
},
{
trie: &ethpb.SparseMerkleTrie{
Layers: genValidLayers(3),
Depth: 12,
},
errString: "depth is greater than or equal to number of branches",
},
{
trie: &ethpb.SparseMerkleTrie{
Layers: genValidLayers(66),
Depth: 65,
},
errString: "depth exceeds 64",
},
}
for _, tt := range tests {
t.Run(tt.errString, func(t *testing.T) {
_, err := trie.CreateTrieFromProto(tt.trie)
require.ErrorContains(t, tt.errString, err)
})
}
}

func TestMarshalDepositWithProof(t *testing.T) {
items := [][]byte{
[]byte("A"),
@@ -52,7 +114,7 @@ func TestMarshalDepositWithProof(t *testing.T) {

func TestMerkleTrie_MerkleProofOutOfRange(t *testing.T) {
h := hash.Hash([]byte("hi"))
m := trie.CreateTrieFromProto(&ethpb.SparseMerkleTrie{
m, err := trie.CreateTrieFromProto(&ethpb.SparseMerkleTrie{
Layers: []*ethpb.TrieLayer{
{
Layer: [][]byte{h[:]},
@@ -61,11 +123,12 @@ func TestMerkleTrie_MerkleProofOutOfRange(t *testing.T) {
Layer: [][]byte{h[:]},
},
{
Layer: [][]byte{},
Layer: [][]byte{h[:]},
},
},
Depth: 4,
Depth: 2,
})
require.NoError(t, err)
if _, err := m.MerkleProof(6); err == nil {
t.Error("Expected out of range failure, received nil", err)
}
@@ -128,6 +191,7 @@ func TestMerkleTrie_VerifyMerkleProof(t *testing.T) {
[]byte("G"),
[]byte("H"),
}

m, err := trie.GenerateTrieFromItems(items, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err)
proof, err := m.MerkleProof(0)
@@ -209,7 +273,8 @@ func TestRoundtripProto_OK(t *testing.T) {
depositRoot, err := m.HashTreeRoot()
require.NoError(t, err)

newTrie := trie.CreateTrieFromProto(protoTrie)
newTrie, err := trie.CreateTrieFromProto(protoTrie)
require.NoError(t, err)
root, err := newTrie.HashTreeRoot()
require.NoError(t, err)
require.DeepEqual(t, depositRoot, root)

@@ -22,10 +22,10 @@ func FuzzSparseMerkleTrie_HashTreeRoot(f *testing.F) {
Layer: [][]byte{h[:]},
},
{
Layer: [][]byte{},
Layer: [][]byte{h[:]},
},
},
Depth: 4,
Depth: 2,
}
b, err := proto.Marshal(pb)
require.NoError(f, err)
@@ -36,10 +36,13 @@ func FuzzSparseMerkleTrie_HashTreeRoot(f *testing.F) {
if err := proto.Unmarshal(b, pb); err != nil {
return
}
_, err := trie.CreateTrieFromProto(pb).HashTreeRoot()
smt, err := trie.CreateTrieFromProto(pb)
if err != nil {
return
}
if _, err := smt.HashTreeRoot(); err != nil {
return
}
})
}

@@ -54,10 +57,10 @@ func FuzzSparseMerkleTrie_MerkleProof(f *testing.F) {
Layer: [][]byte{h[:]},
},
{
Layer: [][]byte{},
Layer: [][]byte{h[:]},
},
},
Depth: 4,
Depth: 2,
}
b, err := proto.Marshal(pb)
require.NoError(f, err)
@@ -68,10 +71,13 @@ func FuzzSparseMerkleTrie_MerkleProof(f *testing.F) {
if err := proto.Unmarshal(b, pb); err != nil {
return
}
_, err := trie.CreateTrieFromProto(pb).MerkleProof(i)
smt, err := trie.CreateTrieFromProto(pb)
if err != nil {
return
}
if _, err := smt.MerkleProof(i); err != nil {
return
}
})
}

@@ -86,10 +92,10 @@ func FuzzSparseMerkleTrie_Insert(f *testing.F) {
Layer: [][]byte{h[:]},
},
{
Layer: [][]byte{},
Layer: [][]byte{h[:]},
},
},
Depth: 4,
Depth: 2,
}
b, err := proto.Marshal(pb)
require.NoError(f, err)
@@ -100,7 +106,11 @@ func FuzzSparseMerkleTrie_Insert(f *testing.F) {
if err := proto.Unmarshal(b, pb); err != nil {
return
}
if err := trie.CreateTrieFromProto(pb).Insert(item, i); err != nil {
smt, err := trie.CreateTrieFromProto(pb)
if err != nil {
return
}
if err := smt.Insert(item, i); err != nil {
return
}
})

@@ -0,0 +1,4 @@
go test fuzz v1
[]byte("\b\x03\x12\"2 00000000000000000000000000000000\x12\"2 00000000000000000000000000000000\x12\x00")
[]byte("")
int(0)
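The four lines above are a Go native-fuzzing corpus entry: a `go test fuzz v1` header followed by one Go literal per argument of the fuzz target, so this seed decodes to two []byte values and an int. A sketch of a target whose signature would accept such a seed; this is a hypothetical, simplified stand-in for FuzzSparseMerkleTrie_Insert, not the real function:

```go
package fuzzexample

import "testing"

// FuzzInsertShape has the argument shape the corpus entry above encodes:
// two byte slices followed by an int.
func FuzzInsertShape(f *testing.F) {
	f.Add([]byte("proto-bytes"), []byte(""), 0)
	f.Fuzz(func(t *testing.T, encoded, item []byte, i int) {
		// A real target would unmarshal `encoded` into a trie proto and
		// call Insert(item, i); here we only guard against obviously bad
		// indexes to keep the sketch self-contained.
		if i < 0 {
			return
		}
		_ = encoded
		_ = item
	})
}
```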
@@ -42,7 +42,6 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",

@@ -136,7 +136,7 @@ func slotFromBlock(marshaled []byte) (types.Slot, error) {
return types.Slot(slot), nil
}

var errBlockForkMismatch = errors.New("fork or config detected from state is different than block")
var errBlockForkMismatch = errors.New("fork or config detected in unmarshaler is different than block")

// UnmarshalBeaconBlock uses internal knowledge in the VersionedUnmarshaler to pick the right concrete SignedBeaconBlock type,
// then Unmarshal()s the type and returns an instance of block.SignedBeaconBlock if successful.
@@ -145,19 +145,9 @@ func (cf *VersionedUnmarshaler) UnmarshalBeaconBlock(marshaled []byte) (interfac
if err != nil {
return nil, err
}

// heuristic to make sure block is from the same version as the VersionedUnmarshaler.
// Look up the version for the epoch that the block is from, then ensure that it matches the Version in the
// VersionedUnmarshaler.
epoch := slots.ToEpoch(slot)
fs := forks.NewOrderedSchedule(cf.Config)
ver, err := fs.VersionForEpoch(epoch)
if err != nil {
if err := cf.validateVersion(slot); err != nil {
return nil, err
}
if ver != cf.Version {
return nil, errors.Wrapf(errBlockForkMismatch, "slot=%d, epoch=%d, version=%#x", slot, epoch, ver)
}

var blk ssz.Unmarshaler
switch cf.Fork {
@@ -177,3 +167,50 @@ func (cf *VersionedUnmarshaler) UnmarshalBeaconBlock(marshaled []byte) (interfac
}
return wrapper.WrappedSignedBeaconBlock(blk)
}

// UnmarshalBlindedBeaconBlock uses internal knowledge in the VersionedUnmarshaler to pick the right concrete blinded SignedBeaconBlock type,
// then Unmarshal()s the type and returns an instance of block.SignedBeaconBlock if successful.
// For Phase0 and Altair it works exactly like UnmarshalBeaconBlock.
func (cf *VersionedUnmarshaler) UnmarshalBlindedBeaconBlock(marshaled []byte) (interfaces.SignedBeaconBlock, error) {
slot, err := slotFromBlock(marshaled)
if err != nil {
return nil, err
}
if err := cf.validateVersion(slot); err != nil {
return nil, err
}

var blk ssz.Unmarshaler
switch cf.Fork {
case version.Phase0:
blk = &ethpb.SignedBeaconBlock{}
case version.Altair:
blk = &ethpb.SignedBeaconBlockAltair{}
case version.Bellatrix:
blk = &ethpb.SignedBlindedBeaconBlockBellatrix{}
default:
forkName := version.String(cf.Fork)
return nil, fmt.Errorf("unable to initialize BeaconBlock for fork version=%s at slot=%d", forkName, slot)
}
err = blk.UnmarshalSSZ(marshaled)
if err != nil {
return nil, errors.Wrap(err, "failed to unmarshal SignedBeaconBlock in UnmarshalSSZ")
}
return wrapper.WrappedSignedBeaconBlock(blk)
}

// Heuristic to make sure block is from the same version as the VersionedUnmarshaler.
// Look up the version for the epoch that the block is from, then ensure that it matches the Version in the
// VersionedUnmarshaler.
func (cf *VersionedUnmarshaler) validateVersion(slot types.Slot) error {
epoch := slots.ToEpoch(slot)
fs := forks.NewOrderedSchedule(cf.Config)
ver, err := fs.VersionForEpoch(epoch)
if err != nil {
return err
}
if ver != cf.Version {
return errors.Wrapf(errBlockForkMismatch, "slot=%d, epoch=%d, version=%#x", slot, epoch, ver)
}
return nil
}

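validateVersion above maps the block's slot to an epoch, looks up the fork version scheduled for that epoch, and rejects the payload if it differs from the unmarshaler's own version. A standalone sketch of that check with hypothetical versions and epochs (the real schedule comes from the BeaconChainConfig via forks.NewOrderedSchedule):

```go
package main

import (
	"errors"
	"fmt"
)

const slotsPerEpoch = 32

var errForkMismatch = errors.New("fork detected in unmarshaler is different than block")

// versionForEpoch is a stand-in for OrderedSchedule.VersionForEpoch: pick
// the entry with the highest activation epoch that is <= the given epoch.
func versionForEpoch(schedule map[uint64][4]byte, epoch uint64) [4]byte {
	var best [4]byte
	var bestEpoch uint64
	for e, v := range schedule {
		if e <= epoch && e >= bestEpoch {
			best, bestEpoch = v, e
		}
	}
	return best
}

func validateVersion(schedule map[uint64][4]byte, unmarshalerVersion [4]byte, slot uint64) error {
	epoch := slot / slotsPerEpoch
	if versionForEpoch(schedule, epoch) != unmarshalerVersion {
		return fmt.Errorf("slot=%d epoch=%d: %w", slot, epoch, errForkMismatch)
	}
	return nil
}

func main() {
	schedule := map[uint64][4]byte{
		0:  {0x00, 0x00, 0x00, 0x00}, // phase0 (hypothetical versions)
		74: {0x01, 0x00, 0x00, 0x00}, // altair
	}
	fmt.Println(validateVersion(schedule, [4]byte{0x01, 0, 0, 0}, 74*slotsPerEpoch)) // nil
	fmt.Println(validateVersion(schedule, [4]byte{0x01, 0, 0, 0}, 10))               // mismatch
}
```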
@@ -16,13 +16,12 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"

types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)

func TestSlotFromBlock(t *testing.T) {
b := testBlockGenesis()
b := util.NewBeaconBlock()
var slot types.Slot = 3
b.Block.Slot = slot
bb, err := b.MarshalSSZ()
@@ -31,7 +30,7 @@ func TestSlotFromBlock(t *testing.T) {
require.NoError(t, err)
require.Equal(t, slot, sfb)

ba := testBlockAltair()
ba := util.NewBeaconBlockAltair()
ba.Block.Slot = slot
bab, err := ba.MarshalSSZ()
require.NoError(t, err)
@@ -39,7 +38,7 @@ func TestSlotFromBlock(t *testing.T) {
require.NoError(t, err)
require.Equal(t, slot, sfba)

bm := testBlockBellatrix()
bm := util.NewBeaconBlockBellatrix()
bm.Block.Slot = slot
bmb, err := ba.MarshalSSZ()
require.NoError(t, err)
@@ -277,125 +276,123 @@ func TestUnmarshalBlock(t *testing.T) {
}
}

func TestUnmarshalBlindedBlock(t *testing.T) {
bc, cleanup := hackBellatrixMaxuint()
defer cleanup()
require.Equal(t, types.Epoch(math.MaxUint32), params.KnownConfigs[params.MainnetName]().BellatrixForkEpoch)
genv := bytesutil.ToBytes4(bc.GenesisForkVersion)
altairv := bytesutil.ToBytes4(bc.AltairForkVersion)
bellav := bytesutil.ToBytes4(bc.BellatrixForkVersion)
altairS, err := slots.EpochStart(bc.AltairForkEpoch)
bellaS, err := slots.EpochStart(bc.BellatrixForkEpoch)
require.NoError(t, err)
cases := []struct {
b func(*testing.T, types.Slot) interfaces.SignedBeaconBlock
name string
version [4]byte
slot types.Slot
err error
}{
{
name: "genesis - slot 0",
b: signedTestBlockGenesis,
version: genv,
},
{
name: "last slot of phase 0",
b: signedTestBlockGenesis,
version: genv,
slot: altairS - 1,
},
{
name: "first slot of altair",
b: signedTestBlockAltair,
version: altairv,
slot: altairS,
},
{
name: "last slot of altair",
b: signedTestBlockAltair,
version: altairv,
slot: bellaS - 1,
},
{
name: "first slot of bellatrix",
b: signedTestBlindedBlockBellatrix,
version: bellav,
slot: bellaS,
},
{
name: "bellatrix block in altair slot",
b: signedTestBlindedBlockBellatrix,
version: bellav,
slot: bellaS - 1,
err: errBlockForkMismatch,
},
{
name: "genesis block in altair slot",
b: signedTestBlockGenesis,
version: genv,
slot: bellaS - 1,
err: errBlockForkMismatch,
},
{
name: "altair block in genesis slot",
b: signedTestBlockAltair,
version: altairv,
err: errBlockForkMismatch,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
b := c.b(t, c.slot)
marshaled, err := b.MarshalSSZ()
require.NoError(t, err)
cf, err := FromForkVersion(c.version)
require.NoError(t, err)
bcf, err := cf.UnmarshalBlindedBeaconBlock(marshaled)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
expected, err := b.Block().HashTreeRoot()
require.NoError(t, err)
actual, err := bcf.Block().HashTreeRoot()
require.NoError(t, err)
require.Equal(t, expected, actual)
})
}
}

func signedTestBlockGenesis(t *testing.T, slot types.Slot) interfaces.SignedBeaconBlock {
b := testBlockGenesis()
b := util.NewBeaconBlock()
b.Block.Slot = slot
s, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
return s
}

func testBlockGenesis() *ethpb.SignedBeaconBlock {
return &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
ProposerIndex: types.ValidatorIndex(0),
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
Body: &ethpb.BeaconBlockBody{
RandaoReveal: make([]byte, 96),
Graffiti: make([]byte, 32),
ProposerSlashings: []*ethpb.ProposerSlashing{},
AttesterSlashings: []*ethpb.AttesterSlashing{},
Attestations: []*ethpb.Attestation{},
Deposits: []*ethpb.Deposit{},
VoluntaryExits: []*ethpb.SignedVoluntaryExit{},
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
DepositCount: 0,
BlockHash: make([]byte, 32),
},
},
},
Signature: make([]byte, 96),
}
}

func signedTestBlockAltair(t *testing.T, slot types.Slot) interfaces.SignedBeaconBlock {
b := testBlockAltair()
b := util.NewBeaconBlockAltair()
b.Block.Slot = slot
s, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
return s
}

func testBlockAltair() *ethpb.SignedBeaconBlockAltair {
return &ethpb.SignedBeaconBlockAltair{
Block: &ethpb.BeaconBlockAltair{
ProposerIndex: types.ValidatorIndex(0),
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
Body: &ethpb.BeaconBlockBodyAltair{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
DepositCount: 0,
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
ProposerSlashings: []*ethpb.ProposerSlashing{},
AttesterSlashings: []*ethpb.AttesterSlashing{},
Attestations: []*ethpb.Attestation{},
Deposits: []*ethpb.Deposit{},
VoluntaryExits: []*ethpb.SignedVoluntaryExit{},
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, 64),
SyncCommitteeSignature: make([]byte, 96),
},
},
},
Signature: make([]byte, 96),
}
}

func signedTestBlockBellatrix(t *testing.T, slot types.Slot) interfaces.SignedBeaconBlock {
b := testBlockBellatrix()
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = slot
s, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
return s
}

func testBlockBellatrix() *ethpb.SignedBeaconBlockBellatrix {
return &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
ProposerIndex: types.ValidatorIndex(0),
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
Body: &ethpb.BeaconBlockBodyBellatrix{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
DepositCount: 0,
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
ProposerSlashings: []*ethpb.ProposerSlashing{},
AttesterSlashings: []*ethpb.AttesterSlashing{},
Attestations: []*ethpb.Attestation{},
Deposits: []*ethpb.Deposit{},
VoluntaryExits: []*ethpb.SignedVoluntaryExit{},
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, 64),
SyncCommitteeSignature: make([]byte, 96),
},
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
BlockNumber: 0,
GasLimit: 0,
GasUsed: 0,
Timestamp: 0,
ExtraData: make([]byte, 32),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: make([][]byte, 0),
PrevRandao: make([]byte, 32),
},
},
},
Signature: make([]byte, 96),
}
func signedTestBlindedBlockBellatrix(t *testing.T, slot types.Slot) interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = slot
s, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
return s
}

@@ -2,6 +2,7 @@ package forks

import (
"sort"
"strings"

"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
@@ -13,10 +14,11 @@ import (
type ForkScheduleEntry struct {
Version [fieldparams.VersionLength]byte
Epoch types.Epoch
Name string
}

// OrderedSchedule provides a type that can be used to sort the fork schedule and find the Version
// the chain should be at for a given epoch (via VersionForEpoch).
// the chain should be at for a given epoch (via VersionForEpoch) or name (via VersionForName).
type OrderedSchedule []ForkScheduleEntry

// Len implements the Len method of sort.Interface
@@ -38,6 +40,17 @@ func (o OrderedSchedule) VersionForEpoch(epoch types.Epoch) ([fieldparams.Versio
return [fieldparams.VersionLength]byte{}, errors.Wrapf(ErrVersionNotFound, "no epoch in list <= %d", epoch)
}

// VersionForName finds the Version corresponding to the lowercase version of the provided name.
func (o OrderedSchedule) VersionForName(name string) ([fieldparams.VersionLength]byte, error) {
lower := strings.ToLower(name)
for _, e := range o {
if e.Name == lower {
return e.Version, nil
}
}
return [4]byte{}, errors.Wrapf(ErrVersionNotFound, "no version with name %s", lower)
}

func (o OrderedSchedule) Previous(version [fieldparams.VersionLength]byte) ([fieldparams.VersionLength]byte, error) {
for i := len(o) - 1; i >= 0; i-- {
if o[i].Version == version {
@@ -51,7 +64,7 @@ func (o OrderedSchedule) Previous(version [fieldparams.VersionLength]byte) ([fie
return [fieldparams.VersionLength]byte{}, errors.Wrapf(ErrVersionNotFound, "no version in list == %#x", version)
}

// Converts the ForkVersionSchedule map into a list of Version+Epoch values, ordered by Epoch from lowest to highest.
// NewOrderedSchedule Converts fork version maps into a list of Version+Epoch+Name values, ordered by Epoch from lowest to highest.
// See docs for OrderedSchedule for more detail on what you can do with this type.
func NewOrderedSchedule(b *params.BeaconChainConfig) OrderedSchedule {
ofs := make(OrderedSchedule, 0)
@@ -59,6 +72,7 @@ func NewOrderedSchedule(b *params.BeaconChainConfig) OrderedSchedule {
fse := ForkScheduleEntry{
Version: version,
Epoch: epoch,
Name: b.ForkVersionNames[version],
}
ofs = append(ofs, fse)
}

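NewOrderedSchedule and VersionForName above turn the fork maps into an epoch-ordered list that can be searched by epoch or by lowercase name. A compact sketch of that shape using sort.Slice and hypothetical entries (real versions and epochs come from the chain config):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

type forkEntry struct {
	Version [4]byte
	Epoch   uint64
	Name    string
}

// versionForName mirrors OrderedSchedule.VersionForName: linear scan for a
// case-insensitive match on the fork name.
func versionForName(entries []forkEntry, name string) ([4]byte, bool) {
	lower := strings.ToLower(name)
	for _, e := range entries {
		if e.Name == lower {
			return e.Version, true
		}
	}
	return [4]byte{}, false
}

func main() {
	// Hypothetical schedule entries for illustration only.
	entries := []forkEntry{
		{Version: [4]byte{0x02, 0, 0, 0}, Epoch: 144896, Name: "bellatrix"},
		{Version: [4]byte{0x00, 0, 0, 0}, Epoch: 0, Name: "phase0"},
		{Version: [4]byte{0x01, 0, 0, 0}, Epoch: 74240, Name: "altair"},
	}
	// Order by activation epoch, lowest first, as NewOrderedSchedule does.
	sort.Slice(entries, func(i, j int) bool { return entries[i].Epoch < entries[j].Epoch })

	v, ok := versionForName(entries, "Altair")
	fmt.Println(v, ok) // [1 0 0 0] true
}
```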
Some files were not shown because too many files have changed in this diff.