Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: fuzz-conve...mick-bazel (35 commits)
3bbaa5e9c5
9fab9df61e
eedafac822
e9ad7aeff8
3cfef20938
e90284bc00
01e15a033f
f09b06d6f6
16e66ee1b8
3d3890205f
a90335b15e
8cd43d216f
d25c0ec1a5
e1c4427ea5
7042791e31
e771585b77
98622a052f
61033ebea1
e808025b17
7db0435ee0
1f086e4333
184e5be9de
e33850bf51
cc643ac4cc
abefe1e9d5
b4e89fb28b
4ad1c4df01
6a197b47d9
d102421a25
7b1490429c
bbdf19cfd0
5d94030b4f
16273a2040
97663548a1
7d9d8454b1
@@ -1 +0,0 @@
5.0.0

README_WINDOWS.md (new file, 59 lines)
@@ -0,0 +1,59 @@
- install WSL 2 on your Windows machine; verify it's WSL 2 and not WSL 1
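- to verify, run this from PowerShell (the standard WSL CLI flags):

> wsl -l -v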
> sudo apt install npm

- if npm can't be installed, reset your PATH to the standard locations and retry:

> export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
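- then confirm npm resolves:

> which npm
> npm --version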
- then follow the guidance here: https://docs.prylabs.network/docs/install/install-with-bazel/
- if you hit a version mismatch error, even when forcing the version pinned in .bazelversion to be installed:
- delete .bazelversion; just make sure you keep WSL's bazel version in sync with the one that was specified there
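- you can compare the two versions before deleting anything:

> bazel --version
> cat .bazelversion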
> sudo apt install bazel=5.0.0
> bazel build //beacon-chain:beacon-chain --config=release

- workspace_status.sh not found
- use --workspace_status_command=/bin/true per https://docs.bazel.build/versions/main/user-manual.html#workspace_status

> bazel build //cmd/beacon-chain:beacon-chain --config=release --workspace_status_command=/bin/true

- error related to https://stackoverflow.com/questions/48674104/clang-error-while-loading-shared-libraries-libtinfo-so-5-cannot-open-shared-o

> sudo apt install libncurses5
> bazel build //cmd/beacon-chain:beacon-chain --config=release --workspace_status_command=/bin/true

- 'gmpxx.h' file not found

> sudo apt-get install libgmp-dev
> bazel build //cmd/beacon-chain:beacon-chain --config=release --workspace_status_command=/bin/true

works!
start geth on localhost:8545
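- for example (HTTP-RPC flag names on recent geth versions; adjust for yours):

> geth --http --http.addr 127.0.0.1 --http.port 8545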
> bazel run //beacon-chain --config=release -- --http-web3provider=http://localhost:8545

works!
open VS Code with the prysm code

- if you see errors, make sure you install https://www.mingw-w64.org/downloads/#mingw-builds
- I used https://www.msys2.org/ and followed the instructions on that page, in combination with the instructions on this page: https://code.visualstudio.com/docs/cpp/config-mingw#_prerequisites
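- from the prysm checkout, this opens the editor (assuming the VS Code CLI is on your PATH):

> code .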
- blst errors: try --define=blst_modern=true

> bazel build //validator:validator --config=release --workspace_status_command=/bin/true --define=blst_modern=true
-----

> bazel build //cmd/beacon-chain:beacon-chain --workspace_status_command=/bin/true --define=blst_modern=true
> bazel build //cmd/validator:validator --workspace_status_command=/bin/true --define=blst_modern=true

---
then issue this command:

> bazel run //beacon-chain -- --datadir /tmp/chaindata --force-clear-db --interop-genesis-state /tmp/genesis.ssz --interop-eth1data-votes --min-sync-peers=0 --http-web3provider=http://localhost:8545 --deposit-contract 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 --bootstrap-node= --chain-config-file=/tmp/merge.yml
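- --chain-config-file points at a YAML file that overrides spec config values by name; an illustrative sketch (field names follow the consensus-spec config format, the zeroed values are placeholders for a local merge devnet):

> cat /tmp/merge.yml
ALTAIR_FORK_EPOCH: 0
BELLATRIX_FORK_EPOCH: 0
TERMINAL_TOTAL_DIFFICULTY: "0"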
@@ -137,6 +137,7 @@ go_test(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
@@ -188,6 +189,7 @@ go_test(
"//beacon-chain/powchain/testing:go_default_library",
"//config/params:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
@@ -20,7 +20,7 @@ import (
)

// ChainInfoFetcher defines a common interface for methods in blockchain service which
// directly retrieves chain info related data.
// directly retrieve chain info related data.
type ChainInfoFetcher interface {
HeadFetcher
FinalizationFetcher
@@ -31,6 +31,12 @@ type ChainInfoFetcher interface {
HeadDomainFetcher
}

// HeadUpdater defines a common interface for methods in blockchain service
// which allow to update the head info
type HeadUpdater interface {
UpdateHead(context.Context) error
}

// TimeFetcher retrieves the Ethereum consensus data that's related to time.
type TimeFetcher interface {
GenesisTime() time.Time
@@ -43,7 +49,7 @@ type GenesisFetcher interface {
}

// HeadFetcher defines a common interface for methods in blockchain service which
// directly retrieves head related data.
// directly retrieve head related data.
type HeadFetcher interface {
HeadSlot() types.Slot
HeadRoot(ctx context.Context) ([]byte, error)
@@ -55,8 +61,6 @@ type HeadFetcher interface {
HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool)
HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
ChainHeads() ([][32]byte, []types.Slot)
IsOptimistic(ctx context.Context) (bool, error)
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
HeadSyncCommitteeFetcher
HeadDomainFetcher
}
@@ -73,7 +77,7 @@ type CanonicalFetcher interface {
}

// FinalizationFetcher defines a common interface for methods in blockchain service which
// directly retrieves finalization and justification related data.
// directly retrieve finalization and justification related data.
type FinalizationFetcher interface {
FinalizedCheckpt() *ethpb.Checkpoint
CurrentJustifiedCheckpt() *ethpb.Checkpoint
@@ -81,6 +85,12 @@ type FinalizationFetcher interface {
VerifyFinalizedBlkDescendant(ctx context.Context, blockRoot [32]byte) error
}

// OptimisticModeFetcher retrieves information about optimistic status of the node.
type OptimisticModeFetcher interface {
IsOptimistic(ctx context.Context) (bool, error)
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
}

// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
cp := s.store.FinalizedCheckpt()
@@ -232,7 +242,7 @@ func (s *Service) GenesisTime() time.Time {
return s.genesisTime
}

// GenesisValidatorsRoot returns the genesis validator
// GenesisValidatorsRoot returns the genesis validators
// root of the chain.
func (s *Service) GenesisValidatorsRoot() [32]byte {
s.headLock.RLock()
@@ -299,7 +309,7 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.V
return v.PublicKey(), nil
}

// ForkChoicer returns the forkchoice interface
// ForkChoicer returns the forkchoice interface.
func (s *Service) ForkChoicer() forkchoice.ForkChoicer {
return s.cfg.ForkChoiceStore
}
@@ -315,7 +325,7 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
return s.IsOptimisticForRoot(ctx, s.head.root)
}

// IsOptimisticForRoot takes the root and slot as arguments instead of the current head
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
@@ -345,7 +355,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return false, nil
}

// checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.
// Checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(validatedCheckpoint.Root)))
if err != nil {
return false, err
@@ -363,7 +373,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return false, err
}

// historical non-canonical blocks here are returned as optimistic for safety.
// Historical non-canonical blocks here are returned as optimistic for safety.
return !isCanonical, nil
}

@@ -372,7 +382,7 @@ func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t
}

// ForkChoiceStore returns the fork choice store in the service
// ForkChoiceStore returns the fork choice store in the service.
func (s *Service) ForkChoiceStore() forkchoice.ForkChoicer {
return s.cfg.ForkChoiceStore
}
@@ -51,7 +51,7 @@ func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {

cp := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte("foo"), 32)}
c := setupBeaconChain(t, beaconDB)
c.store.SetFinalizedCheckpt(cp)
c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})

assert.Equal(t, cp.Epoch, c.FinalizedCheckpt().Epoch, "Unexpected finalized epoch")
}
@@ -62,7 +62,7 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
genesisRoot := [32]byte{'A'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, beaconDB)
c.store.SetFinalizedCheckpt(cp)
c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})
c.originBlockRoot = genesisRoot
assert.DeepEqual(t, c.originBlockRoot[:], c.FinalizedCheckpt().Root)
}
@@ -73,7 +73,7 @@ func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
c := setupBeaconChain(t, beaconDB)
assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
cp := &ethpb.Checkpoint{Epoch: 6, Root: bytesutil.PadTo([]byte("foo"), 32)}
c.store.SetJustifiedCheckpt(cp)
c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
assert.Equal(t, cp.Epoch, c.CurrentJustifiedCheckpt().Epoch, "Unexpected justified epoch")
}

@@ -83,7 +83,7 @@ func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
c := setupBeaconChain(t, beaconDB)
genesisRoot := [32]byte{'B'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c.store.SetJustifiedCheckpt(cp)
c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
c.originBlockRoot = genesisRoot
assert.DeepEqual(t, c.originBlockRoot[:], c.CurrentJustifiedCheckpt().Root)
}
@@ -11,8 +11,6 @@ var (
errNilFinalizedInStore = errors.New("nil finalized checkpoint returned from store")
// errInvalidNilSummary is returned when a nil summary is returned from the DB.
errInvalidNilSummary = errors.New("nil summary returned from the DB")
// errNilParentInDB is returned when a nil parent block is returned from the DB.
errNilParentInDB = errors.New("nil parent block in DB")
// errWrongBlockCount is returned when the wrong number of blocks or
// block roots is used
errWrongBlockCount = errors.New("wrong number of blocks or block roots")

@@ -31,11 +31,9 @@ var (

// notifyForkchoiceUpdateArg is the argument for the forkchoice update notification `notifyForkchoiceUpdate`.
type notifyForkchoiceUpdateArg struct {
headState state.BeaconState
headRoot [32]byte
headBlock interfaces.BeaconBlock
finalizedRoot [32]byte
justifiedRoot [32]byte
headState state.BeaconState
headRoot [32]byte
headBlock interfaces.BeaconBlock
}
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
@@ -61,18 +59,12 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
}
finalizedHash, err := s.getPayloadHash(ctx, arg.finalizedRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block hash")
}
justifiedHash, err := s.getPayloadHash(ctx, arg.justifiedRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get justified block hash")
}
finalizedHash := s.store.FinalizedPayloadBlockHash()
justifiedHash := s.store.JustifiedPayloadBlockHash()
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash,
SafeBlockHash: justifiedHash,
FinalizedBlockHash: finalizedHash,
SafeBlockHash: justifiedHash[:],
FinalizedBlockHash: finalizedHash[:],
}

nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
@@ -89,10 +81,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
}).Info("Called fork choice updated with optimistic block")
return payloadID, s.optimisticCandidateBlock(ctx, headBlk)
case powchain.ErrInvalidPayloadStatus:
newPayloadInvalidNodeCount.Inc()
headRoot := arg.headRoot
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, headRoot, bytesutil.ToBytes32(headBlk.ParentRoot()), bytesutil.ToBytes32(lastValidHash))
if err != nil {
@@ -101,12 +94,35 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return nil, err
}

r, err := s.updateHead(ctx, s.justifiedBalances.balances)
if err != nil {
return nil, err
}
b, err := s.getBlock(ctx, r)
if err != nil {
return nil, err
}
st, err := s.cfg.StateGen.StateByRoot(ctx, r)
if err != nil {
return nil, err
}
pid, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: st,
headRoot: r,
headBlock: b.Block(),
})
if err != nil {
return nil, err
}

log.WithFields(logrus.Fields{
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", headRoot),
"invalidCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return nil, ErrInvalidPayload
return pid, ErrInvalidPayload

default:
return nil, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
@@ -125,19 +141,19 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho

// getPayloadHash returns the payload hash given the block root.
// if the block is before bellatrix fork epoch, it returns the zero hash.
func (s *Service) getPayloadHash(ctx context.Context, root [32]byte) ([]byte, error) {
finalizedBlock, err := s.getBlock(ctx, s.ensureRootNotZeros(root))
func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, error) {
blk, err := s.getBlock(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(root)))
if err != nil {
return nil, err
return [32]byte{}, err
}
if blocks.IsPreBellatrixVersion(finalizedBlock.Block().Version()) {
return params.BeaconConfig().ZeroHash[:], nil
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
return params.BeaconConfig().ZeroHash, nil
}
payload, err := finalizedBlock.Block().Body().ExecutionPayload()
payload, err := blk.Block().Body().ExecutionPayload()
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
return [32]byte{}, errors.Wrap(err, "could not get execution payload")
}
return payload.BlockHash, nil
return bytesutil.ToBytes32(payload.BlockHash), nil
}

// notifyForkchoiceUpdate signals execution engine on a new payload.
@@ -219,14 +235,10 @@ func (s *Service) optimisticCandidateBlock(ctx context.Context, blk interfaces.B
if blk.Slot()+params.BeaconConfig().SafeSlotsToImportOptimistically <= s.CurrentSlot() {
return nil
}

parent, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(blk.ParentRoot()))
parent, err := s.getBlock(ctx, bytesutil.ToBytes32(blk.ParentRoot()))
if err != nil {
return err
}
if parent == nil || parent.IsNil() {
return errNilParentInDB
}
parentIsExecutionBlock, err := blocks.IsExecutionBlock(parent.Block().Body())
if err != nil {
return err
@@ -266,7 +278,10 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
logrus.WithFields(logrus.Fields{
"validatorIndex": proposerID,
"burnAddress": fieldparams.EthBurnAddressHex,
}).Error("Fee recipient not set. Using burn address")
}).Warn("Fee recipient is currently using the burn address, " +
"you will not be rewarded transaction fees on this setting. " +
"Please set a different eth address as the fee recipient. " +
"Please refer to our documentation for instructions")
}
case err != nil:
return false, nil, 0, errors.Wrap(err, "could not get fee recipient in db")
@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
@@ -174,12 +175,15 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: tt.newForkchoiceErr}
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, beaconDB.SaveState(ctx, st, tt.finalizedRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, tt.finalizedRoot))
fc := &ethpb.Checkpoint{Epoch: 1, Root: tt.finalizedRoot[:]}
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
arg := &notifyForkchoiceUpdateArg{
headState: st,
headRoot: tt.headRoot,
headBlock: tt.blk,
finalizedRoot: tt.finalizedRoot,
justifiedRoot: tt.justifiedRoot,
headState: st,
headRoot: tt.headRoot,
headBlock: tt.blk,
}
_, err := service.notifyForkchoiceUpdate(ctx, arg)
if tt.errString != "" {
@@ -191,6 +195,147 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
}
}
//
//
//  A <- B <- C <- D
//        \
//          ---------- E <- F
//                      \
//                        ------ G
// D is the current head; attestations for F and G arrive late, and both blocks are invalid.
// The head switches to G, then recursively to F, and finally back to D.
//
// We test:
// 1. forkchoice removes blocks F and G from the forkchoice implementation
// 2. forkchoice removes the weights of these blocks
// 3. the blockchain package calls fcu to obtain heads G -> F -> D.

func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

// Prepare blocks
ba := util.NewBeaconBlockBellatrix()
ba.Block.Body.ExecutionPayload.BlockNumber = 1
wba, err := wrapper.WrappedSignedBeaconBlock(ba)
require.NoError(t, err)
bra, err := wba.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wba))

bb := util.NewBeaconBlockBellatrix()
bb.Block.Body.ExecutionPayload.BlockNumber = 2
wbb, err := wrapper.WrappedSignedBeaconBlock(bb)
require.NoError(t, err)
brb, err := wbb.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbb))

bc := util.NewBeaconBlockBellatrix()
bc.Block.Body.ExecutionPayload.BlockNumber = 3
wbc, err := wrapper.WrappedSignedBeaconBlock(bc)
require.NoError(t, err)
brc, err := wbc.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbc))

bd := util.NewBeaconBlockBellatrix()
pd := [32]byte{'D'}
bd.Block.Body.ExecutionPayload.BlockHash = pd[:]
bd.Block.Body.ExecutionPayload.BlockNumber = 4
wbd, err := wrapper.WrappedSignedBeaconBlock(bd)
require.NoError(t, err)
brd, err := wbd.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbd))

be := util.NewBeaconBlockBellatrix()
pe := [32]byte{'E'}
be.Block.Body.ExecutionPayload.BlockHash = pe[:]
be.Block.Body.ExecutionPayload.BlockNumber = 5
wbe, err := wrapper.WrappedSignedBeaconBlock(be)
require.NoError(t, err)
bre, err := wbe.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbe))

bf := util.NewBeaconBlockBellatrix()
pf := [32]byte{'F'}
bf.Block.Body.ExecutionPayload.BlockHash = pf[:]
bf.Block.Body.ExecutionPayload.BlockNumber = 6
bf.Block.ParentRoot = bre[:]
wbf, err := wrapper.WrappedSignedBeaconBlock(bf)
require.NoError(t, err)
brf, err := wbf.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbf))

bg := util.NewBeaconBlockBellatrix()
bg.Block.Body.ExecutionPayload.BlockNumber = 7
pg := [32]byte{'G'}
bg.Block.Body.ExecutionPayload.BlockHash = pg[:]
bg.Block.ParentRoot = bre[:]
wbg, err := wrapper.WrappedSignedBeaconBlock(bg)
require.NoError(t, err)
brg, err := wbg.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbg))

// Insert blocks into forkchoice
fcs := doublylinkedtree.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
service.justifiedBalances.balances = []uint64{50, 100, 200}
require.NoError(t, err)
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 2, brb, bra, [32]byte{'B'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 3, brc, brb, [32]byte{'C'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 4, brd, brc, [32]byte{'D'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 5, bre, brb, [32]byte{'E'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 6, brf, bre, [32]byte{'F'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 7, brg, bre, [32]byte{'G'}, 0, 0))

// Insert attestations for D, F and G so that F and G each have higher weight than D
// Ensure G is head
fcs.ProcessAttestation(ctx, []uint64{0}, brd, 1)
fcs.ProcessAttestation(ctx, []uint64{1}, brf, 1)
fcs.ProcessAttestation(ctx, []uint64{2}, brg, 1)
headRoot, err := fcs.Head(ctx, 0, bra, []uint64{50, 100, 200}, 0)
require.NoError(t, err)
require.Equal(t, brg, headRoot)

// Prepare Engine Mock to return invalid unless head is D, LVH = E
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: powchain.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: pe[:], OverrideValidHash: [32]byte{'D'}}
st, _ := util.DeterministicGenesisState(t, 1)

require.NoError(t, beaconDB.SaveState(ctx, st, bra))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
fc := &ethpb.Checkpoint{Epoch: 0, Root: bra[:]}
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
a := &notifyForkchoiceUpdateArg{
headState: st,
headBlock: wbg.Block(),
headRoot: brg,
}
_, err = service.notifyForkchoiceUpdate(ctx, a)
require.ErrorIs(t, ErrInvalidPayload, err)
// Ensure head is D
headRoot, err = fcs.Head(ctx, 0, bra, service.justifiedBalances.balances, 0)
require.NoError(t, err)
require.Equal(t, brd, headRoot)

// Ensure F and G were removed but their parent E wasn't
require.Equal(t, false, fcs.HasNode(brf))
require.Equal(t, false, fcs.HasNode(brg))
require.Equal(t, true, fcs.HasNode(bre))
}
func Test_NotifyNewPayload(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
@@ -537,11 +682,11 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
jRoot, err := tt.justified.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, tt.justified))
service.store.SetJustifiedCheckpt(
service.store.SetJustifiedCheckptAndPayloadHash(
&ethpb.Checkpoint{
Root: jRoot[:],
Epoch: slots.ToEpoch(tt.justified.Block().Slot()),
})
}, [32]byte{'a'})
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedParentBlock))

err = service.optimisticCandidateBlock(ctx, tt.blk)
@@ -625,7 +770,7 @@ func Test_GetPayloadAttribute(t *testing.T) {
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, fieldparams.EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
require.LogsContain(t, hook, "Fee recipient not set. Using burn address")
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")

// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
@@ -803,7 +948,7 @@ func TestService_getPayloadHash(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)

_, err = service.getPayloadHash(ctx, [32]byte{})
_, err = service.getPayloadHash(ctx, []byte{})
require.ErrorIs(t, errBlockNotFoundInCacheOrDB, err)

b := util.NewBeaconBlock()
@@ -813,20 +958,20 @@ func TestService_getPayloadHash(t *testing.T) {
require.NoError(t, err)
service.saveInitSyncBlock(r, wsb)

h, err := service.getPayloadHash(ctx, r)
h, err := service.getPayloadHash(ctx, r[:])
require.NoError(t, err)
require.DeepEqual(t, params.BeaconConfig().ZeroHash[:], h)
require.DeepEqual(t, params.BeaconConfig().ZeroHash, h)

bb := util.NewBeaconBlockBellatrix()
h = []byte{'a'}
bb.Block.Body.ExecutionPayload.BlockHash = h
h = [32]byte{'a'}
bb.Block.Body.ExecutionPayload.BlockHash = h[:]
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(bb)
require.NoError(t, err)
service.saveInitSyncBlock(r, wsb)

h, err = service.getPayloadHash(ctx, r)
h, err = service.getPayloadHash(ctx, r[:])
require.NoError(t, err)
require.DeepEqual(t, []byte{'a'}, h)
require.DeepEqual(t, [32]byte{'a'}, h)
}
@@ -40,7 +40,7 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not update head")
}
headBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
headBlock, err := s.getBlock(ctx, headRoot)
if err != nil {
return err
}
@@ -86,7 +86,7 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,
// re-initiate fork choice store using the latest justified info.
// This recovers a fatal condition and should not happen in run time.
if !s.cfg.ForkChoiceStore.HasNode(headStartRoot) {
jb, err := s.cfg.BeaconDB.Block(ctx, headStartRoot)
jb, err := s.getBlock(ctx, headStartRoot)
if err != nil {
return [32]byte{}, err
}
@@ -355,7 +355,7 @@ func (s *Service) notifyNewHeadEvent(
// attestation pool. It also filters out the attestations that is one epoch older as a
// defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte) error {
orphanedBlk, err := s.cfg.BeaconDB.Block(ctx, orphanedRoot)
orphanedBlk, err := s.getBlock(ctx, orphanedRoot)
if err != nil {
return err
}
@@ -154,8 +154,8 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)

service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{'b'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
headRoot, err := service.updateHead(context.Background(), []uint64{})
require.NoError(t, err)
@@ -298,8 +298,8 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
Root: bellatrixBlkRoot[:],
Epoch: 1,
}
service.store.SetFinalizedCheckpt(fcp)
service.store.SetJustifiedCheckpt(fcp)
service.store.SetFinalizedCheckptAndPayloadHash(fcp, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fcp, [32]byte{'b'})
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))

bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)

@@ -21,7 +21,7 @@ func testServiceOptsWithDB(t *testing.T) []Option {
}
}

// warning: only use these opts when you are certain there are no db calls
// WARNING: only use these opts when you are certain there are no db calls
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
// initialization requirements w/o the overhead of db init.
func testServiceOptsNoDB() []Option {
@@ -64,7 +64,11 @@ func (s *Service) NewSlot(ctx context.Context, slot types.Slot) error {
return err
}
if bytes.Equal(r, f.Root) {
s.store.SetJustifiedCheckpt(bj)
h, err := s.getPayloadHash(ctx, bj.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(bj, h)
}
}
return nil

@@ -324,9 +324,9 @@ func TestStore_SaveCheckpointState(t *testing.T) {
r := [32]byte{'g'}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))

service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})

r = bytesutil.ToBytes32([]byte{'A'})
@@ -358,9 +358,9 @@ func TestStore_SaveCheckpointState(t *testing.T) {
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")

require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
@@ -500,7 +500,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)

service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
@@ -535,7 +535,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)

service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
@@ -564,7 +564,7 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)

service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r32[:], Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})

b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
@@ -591,7 +591,7 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)

service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r32[:], Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})

b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
@@ -123,7 +123,9 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}
}

if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
@@ -146,9 +148,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}

if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
// If slasher is configured, forward the attestations in the block via
// an event feed for processing.
if features.Get().EnableSlasher {
@@ -195,9 +194,19 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
newFinalized := postState.FinalizedCheckpointEpoch() > finalized.Epoch
if newFinalized {
s.store.SetPrevFinalizedCheckpt(finalized)
s.store.SetFinalizedCheckpt(postState.FinalizedCheckpoint())
cp := postState.FinalizedCheckpoint()
h, err := s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(cp, h)
s.store.SetPrevJustifiedCheckpt(justified)
s.store.SetJustifiedCheckpt(postState.CurrentJustifiedCheckpoint())
cp = postState.CurrentJustifiedCheckpoint()
h, err = s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(postState.CurrentJustifiedCheckpoint(), h)
}

balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
@@ -413,6 +422,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
}
s.saveInitSyncBlock(blockRoots[i], b)
if err = s.handleBlockAfterBatchVerify(ctx, b, blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return nil, nil, err
}
}

for r, st := range boundaries {
@@ -426,14 +439,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return nil, nil, err
}
f := fCheckpoints[len(fCheckpoints)-1]
j := jCheckpoints[len(jCheckpoints)-1]
arg := &notifyForkchoiceUpdateArg{
headState: preState,
headRoot: lastBR,
headBlock: lastB.Block(),
finalizedRoot: bytesutil.ToBytes32(f.Root),
justifiedRoot: bytesutil.ToBytes32(j.Root),
headState: preState,
headRoot: lastBR,
headBlock: lastB.Block(),
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return nil, nil, err
@@ -484,7 +493,11 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interf
return err
}
s.store.SetPrevFinalizedCheckpt(finalized)
s.store.SetFinalizedCheckpt(fCheckpoint)
h, err := s.getPayloadHash(ctx, fCheckpoint.Root)
if err != nil {
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(fCheckpoint, h)
}
return nil
}
@@ -97,13 +97,10 @@ func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byt
return errNilFinalizedInStore
}
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
finalizedBlkSigned, err := s.cfg.BeaconDB.Block(ctx, fRoot)
finalizedBlkSigned, err := s.getBlock(ctx, fRoot)
if err != nil {
return err
}
if finalizedBlkSigned == nil || finalizedBlkSigned.IsNil() || finalizedBlkSigned.Block().IsNil() {
return errors.New("nil finalized block")
}
finalizedBlk := finalizedBlkSigned.Block()
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot())
if err != nil {
@@ -208,7 +205,11 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
return errNilJustifiedInStore
}
s.store.SetPrevJustifiedCheckpt(justified)
s.store.SetJustifiedCheckpt(cpt)
h, err := s.getPayloadHash(ctx, cpt.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cpt, h)
}

return nil
@@ -227,7 +228,11 @@ func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpo
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp); err != nil {
return err
}
s.store.SetJustifiedCheckpt(cp)
h, err := s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cp, h)

return nil
}
@@ -350,7 +355,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
higherThanFinalized := slot > fSlot
// As long as parent node is not in fork choice store, and parent node is in DB.
for !s.cfg.ForkChoiceStore.HasNode(parentRoot) && s.cfg.BeaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
b, err := s.cfg.BeaconDB.Block(ctx, parentRoot)
b, err := s.getBlock(ctx, parentRoot)
if err != nil {
return err
}
@@ -5,6 +5,7 @@ import (
"fmt"
"math/big"
"strconv"
"sync"
"testing"
"time"

@@ -39,6 +40,7 @@ import (
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
prysmTime "github.com/prysmaticlabs/prysm/time"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestStore_OnBlock_ProtoArray(t *testing.T) {
@@ -129,9 +131,9 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: validGenesisRoot[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: roots[0]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})

root, err := tt.blk.Block.HashTreeRoot()
@@ -232,9 +234,9 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: validGenesisRoot[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: roots[0]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})

root, err := tt.blk.Block.HashTreeRoot()
@@ -289,7 +291,8 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})

service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
@@ -353,7 +356,8 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})

service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
@@ -415,7 +419,9 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})

service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.saveInitSyncBlock(gRoot, wsb)
st, keys := util.DeterministicGenesisState(t, 64)
@@ -484,7 +490,7 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {

diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})
update, err = service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
assert.Equal(t, true, update, "Should be able to update justified")
@@ -516,7 +522,7 @@ func TestShouldUpdateJustified_ReturnFalse_ProtoArray(t *testing.T) {

diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})

update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
@@ -549,7 +555,7 @@ func TestShouldUpdateJustified_ReturnFalse_DoublyLinkedTree(t *testing.T) {

diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})

update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
@@ -577,7 +583,7 @@ func TestCachedPreState_CanGetFromStateSummary_ProtoArray(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -614,7 +620,7 @@ func TestCachedPreState_CanGetFromStateSummary_DoublyLinkedTree(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -648,7 +654,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -656,7 +662,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {

b := util.NewBeaconBlock()
b.Block.Slot = 1
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
wb, err := wrapper.WrappedBeaconBlock(b.Block)
require.NoError(t, err)
err = service.verifyBlkPreState(ctx, wb)
@@ -691,7 +697,7 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
r, err := signedBlock.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: []byte{'A'}}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}})
st, err := util.NewBeaconState()
require.NoError(t, err)
@@ -723,7 +729,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -768,7 +774,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -814,7 +820,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -863,7 +869,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -913,7 +919,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
// Set finalized epoch to 1.
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -974,7 +980,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
// Set finalized epoch to 1.
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -1321,7 +1327,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
args: args{
finalizedRoot: [32]byte{'a'},
},
wantedErr: "nil finalized block",
wantedErr: "block not found in cache or db",
},
{
name: "could not get finalized block root in DB",
@@ -1350,7 +1356,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
for _, tt := range tests {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: tt.args.finalizedRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: tt.args.finalizedRoot[:]}, [32]byte{})
err = service.VerifyFinalizedBlkDescendant(ctx, tt.args.parentRoot)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
@@ -1378,7 +1384,7 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, beaconState, gRoot))
service.originBlockRoot = gRoot
currentCp := &ethpb.Checkpoint{Epoch: 1}
service.store.SetJustifiedCheckpt(currentCp)
service.store.SetJustifiedCheckptAndPayloadHash(currentCp, [32]byte{'a'})
newCp := &ethpb.Checkpoint{Epoch: 2, Root: gRoot[:]}

require.NoError(t, service.updateJustifiedInitSync(ctx, newCp))
@@ -1439,7 +1445,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})

testState := gs.Copy()
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
@@ -1493,7 +1499,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})

testState := gs.Copy()
for i := types.Slot(1); i < params.BeaconConfig().SlotsPerEpoch; i++ {
@@ -1524,7 +1530,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
assert.NoError(t, gs.SetEth1DepositIndex(8))
@@ -1563,7 +1569,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 7}))
assert.NoError(t, gs.SetEth1DepositIndex(6))
@@ -1887,3 +1893,82 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
service.insertSlashingsToForkChoiceStore(ctx, wb.Block().Body().AttesterSlashings())
|
||||
}
|
||||
|
||||
func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})

blk1, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
wsb1, err := wrapper.WrappedSignedBeaconBlock(blk1)
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
wsb2, err := wrapper.WrappedSignedBeaconBlock(blk2)
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
wsb3, err := wrapper.WrappedSignedBeaconBlock(blk3)
require.NoError(t, err)
blk4, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 4)
require.NoError(t, err)
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
wsb4, err := wrapper.WrappedSignedBeaconBlock(blk4)
require.NoError(t, err)

logHook := logTest.NewGlobal()
for i := 0; i < 10; i++ {
var wg sync.WaitGroup
wg.Add(4)
go func() {
require.NoError(t, service.onBlock(ctx, wsb1, r1))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb2, r2))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb3, r3))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb4, r4))
wg.Done()
}()
wg.Wait()
require.LogsDoNotContain(t, logHook, "New head does not exist in DB. Do nothing")
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r1))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r2))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r3))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r4))
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'a'})
}
}

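The test above fans four onBlock calls out across goroutines and barriers on sync.WaitGroup before checking the logs. A minimal standalone sketch of that fan-out-and-wait pattern (the names here are illustrative, not from the diff):

package main

import (
	"fmt"
	"sync"
)

func main() {
	jobs := []int{1, 2, 3, 4}
	var wg sync.WaitGroup
	wg.Add(len(jobs))
	for _, j := range jobs {
		j := j // capture the loop variable per goroutine (pre-Go 1.22 semantics)
		go func() {
			defer wg.Done() // mark this worker finished even on early return
			fmt.Println("processed", j)
		}()
	}
	wg.Wait() // barrier: continue only after all four workers report done
}
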
@@ -128,42 +128,55 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
return
}

// Continue when there's no fork choice attestation, there's nothing to process and update head.
// This covers the condition when the node is still initial syncing to the head of the chain.
if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
continue
if err := s.UpdateHead(s.ctx); err != nil {
log.WithError(err).Error("Could not process attestations and update head")
return
}
s.processAttestations(s.ctx)

justified := s.store.JustifiedCheckpt()
if justified == nil {
log.WithError(errNilJustifiedInStore).Error("Could not get justified checkpoint")
continue
}
balances, err := s.justifiedBalances.get(s.ctx, bytesutil.ToBytes32(justified.Root))
if err != nil {
log.WithError(err).Errorf("Unable to get justified balances for root %v", justified.Root)
continue
}
newHeadRoot, err := s.updateHead(s.ctx, balances)
if err != nil {
log.WithError(err).Warn("Resolving fork due to new attestation")
}
if s.headRoot() != newHeadRoot {
log.WithFields(logrus.Fields{
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
}).Debug("Head changed due to attestations")
}
s.notifyEngineIfChangedHead(s.ctx, newHeadRoot)
}
}
}()
}

// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
// It requires no external inputs.
func (s *Service) UpdateHead(ctx context.Context) error {
// Continue when there's no fork choice attestation, there's nothing to process and update head.
// This covers the condition when the node is still initial syncing to the head of the chain.
if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
return nil
}

// Only one process can process attestations and update head at a time.
s.processAttestationsLock.Lock()
defer s.processAttestationsLock.Unlock()

s.processAttestations(ctx)

justified := s.store.JustifiedCheckpt()
if justified == nil {
return errNilJustifiedInStore
}
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
if err != nil {
return err
}
newHeadRoot, err := s.updateHead(ctx, balances)
if err != nil {
log.WithError(err).Warn("Resolving fork due to new attestation")
}
if s.headRoot() != newHeadRoot {
log.WithFields(logrus.Fields{
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
}).Debug("Head changed due to attestations")
}
s.notifyEngineIfChangedHead(ctx, newHeadRoot)
return nil
}

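UpdateHead above guards its whole body with processAttestationsLock so that the attestation routine and block processing can call it concurrently without racing on fork-choice state. A minimal sketch of that serialization pattern, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

// headTracker serializes head updates the way UpdateHead does with
// processAttestationsLock: only one goroutine recomputes at a time.
type headTracker struct {
	mu   sync.Mutex
	head string
}

func (h *headTracker) update(newHead string) {
	h.mu.Lock() // concurrent callers queue here instead of racing
	defer h.mu.Unlock()
	h.head = newHead
}

func main() {
	h := &headTracker{}
	var wg sync.WaitGroup
	for _, r := range []string{"a", "b", "c"} {
		r := r
		wg.Add(1)
		go func() { defer wg.Done(); h.update(r) }()
	}
	wg.Wait()
	fmt.Println("final head:", h.head)
}
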
// This calls notify Forkchoice Update in the event that the head has changed
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
if s.headRoot() == newHeadRoot {
if newHeadRoot == [32]byte{} || s.headRoot() == newHeadRoot {
return
}

@@ -172,12 +185,6 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
return // We don't have the block, don't notify the engine and update head.
}

finalized := s.store.FinalizedCheckpt()
if finalized == nil {
log.WithError(errNilFinalizedInStore).Error("could not get finalized checkpoint")
return
}

newHeadBlock, err := s.getBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get new head block")
@@ -189,11 +196,9 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
return
}
arg := &notifyForkchoiceUpdateArg{
headState: headState,
headRoot: newHeadRoot,
headBlock: newHeadBlock.Block(),
finalizedRoot: bytesutil.ToBytes32(finalized.Root),
justifiedRoot: bytesutil.ToBytes32(s.store.JustifiedCheckpt().Root),
headState: headState,
headRoot: newHeadRoot,
headBlock: newHeadBlock.Block(),
}
_, err = s.notifyForkchoiceUpdate(s.ctx, arg)
if err != nil {

@@ -137,14 +137,14 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
service.notifyEngineIfChangedHead(ctx, service.headRoot())
hookErr := "could not notify forkchoice update"
finalizedErr := "could not get finalized checkpoint"
require.LogsDoNotContain(t, hook, finalizedErr)
invalidStateErr := "Could not get state from db"
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
gb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
service.saveInitSyncBlock([32]byte{'a'}, gb)
service.notifyEngineIfChangedHead(ctx, [32]byte{'a'})
require.LogsContain(t, hook, finalizedErr)
require.LogsContain(t, hook, invalidStateErr)

hook.Reset()
service.head = &head{
@@ -169,9 +169,9 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.store.SetFinalizedCheckpt(finalized)
service.store.SetFinalizedCheckptAndPayloadHash(finalized, [32]byte{})
service.notifyEngineIfChangedHead(ctx, r1)
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)

// Block in DB
@@ -191,12 +191,51 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.store.SetFinalizedCheckpt(finalized)
service.store.SetFinalizedCheckptAndPayloadHash(finalized, [32]byte{})
service.notifyEngineIfChangedHead(ctx, r1)
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2)
require.Equal(t, true, has)
require.Equal(t, types.ValidatorIndex(1), vId)
require.Equal(t, [8]byte{1}, payloadID)

// Test zero headRoot returns immediately.
headRoot := service.headRoot()
service.notifyEngineIfChangedHead(ctx, [32]byte{})
require.Equal(t, service.headRoot(), headRoot)
}

func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
opts = append(opts, WithAttestationPool(attestations.NewPool()), WithStateNotifier(&mockBeaconNode{}))

service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: tRoot[:]}))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
b := util.NewBeaconBlock()
wb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wb))
service.head.root = r // Old head
require.Equal(t, 1, len(service.cfg.AttPool.ForkchoiceAttestations()))
require.NoError(t, err, service.UpdateHead(ctx))
require.Equal(t, tRoot, service.head.root) // Validate head is the new one
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations())) // Validate att pool is empty
}

@@ -30,9 +30,9 @@ type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashings *ethpb.AttesterSlashing)
}

// ReceiveBlock is a function that defines the the operations (minus pubsub)
// that are performed on blocks that is received from regular sync service. The operations consists of:
// 1. Validate block, apply state transition and update check points
// ReceiveBlock is a function that defines the operations (minus pubsub)
// that are performed on a received block. The operations consist of:
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error {
@@ -85,7 +85,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
defer span.End()

// Apply state transition on the incoming newly received block batches, one by one.
fCheckpoints, jCheckpoints, err := s.onBlockBatch(ctx, blocks, blkRoots)
_, _, err := s.onBlockBatch(ctx, blocks, blkRoots)
if err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)
@@ -94,10 +94,6 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig

for i, b := range blocks {
blockCopy := b.Copy()
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,

@@ -141,7 +141,8 @@ func TestService_ReceiveBlock(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
h := [32]byte{'a'}
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, h)
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
@@ -181,7 +182,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wg := sync.WaitGroup{}
@@ -262,7 +263,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {

gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
@@ -312,7 +313,7 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})

require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
@@ -323,7 +324,7 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
s.genesisTime = time.Now()

@@ -336,7 +337,7 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 10000000})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
s.genesisTime = time.Now()

require.NoError(t, s.checkSaveHotStateDB(context.Background()))

@@ -50,21 +50,22 @@ const headSyncMinEpochsAfterCheckpoint = 128
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot types.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
justifiedBalances *stateBalanceCache
wsVerifier *WeakSubjectivityVerifier
store *store.Store
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot types.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
justifiedBalances *stateBalanceCache
wsVerifier *WeakSubjectivityVerifier
store *store.Store
processAttestationsLock sync.Mutex
}

// config options for the service.
@@ -143,6 +144,7 @@ func (s *Service) Stop() error {
defer s.cancel()

if s.cfg.StateGen != nil && s.head != nil && s.head.state != nil {
// Save the last finalized state so that starting up in the following run will be much faster.
if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, s.head.state.FinalizedCheckpoint().Root); err != nil {
return err
}
@@ -199,13 +201,10 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
forkChoicer = protoarray.New(justified.Epoch, finalized.Epoch, fRoot)
}
s.cfg.ForkChoiceStore = forkChoicer
fb, err := s.cfg.BeaconDB.Block(s.ctx, s.ensureRootNotZeros(fRoot))
fb, err := s.getBlock(s.ctx, s.ensureRootNotZeros(fRoot))
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint block")
}
if fb == nil || fb.IsNil() {
return errNilFinalizedInStore
}
payloadHash, err := getBlockPayloadHash(fb.Block())
if err != nil {
return errors.Wrap(err, "could not get execution payload hash")
@@ -337,14 +336,13 @@ func (s *Service) initializeHeadFromDB(ctx context.Context) error {
finalizedState.Slot(), flags.HeadSync.Name)
}
}

finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block from db")
if finalizedState == nil || finalizedState.IsNil() {
return errors.New("finalized state can't be nil")
}

if finalizedState == nil || finalizedState.IsNil() || finalizedBlock == nil || finalizedBlock.IsNil() {
return errors.New("finalized state and block can't be nil")
finalizedBlock, err := s.getBlock(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block")
}
s.setHead(finalizedRoot, finalizedBlock, finalizedState)

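Both hunks above swap direct BeaconDB.Block reads for s.getBlock, which also consults the in-memory init-sync block set, so blocks seen during initial sync but not yet persisted still resolve (this is also why the test error string earlier changed to "block not found in cache or db"). A sketch of that cache-before-DB lookup, with all names assumed:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("block not found in cache or db")

type blockStore struct {
	cache map[string]string // init-sync blocks held in memory
	db    map[string]string // persisted blocks
}

// getBlock prefers the in-memory cache and falls back to the database,
// mirroring the cache-or-db lookup the diff switches to.
func (s *blockStore) getBlock(root string) (string, error) {
	if b, ok := s.cache[root]; ok {
		return b, nil
	}
	if b, ok := s.db[root]; ok {
		return b, nil
	}
	return "", errNotFound
}

func main() {
	s := &blockStore{cache: map[string]string{"a": "blockA"}, db: map[string]string{"b": "blockB"}}
	fmt.Println(s.getBlock("a"))
	fmt.Println(s.getBlock("c"))
}
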
@@ -31,6 +31,7 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/container/trie"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -86,9 +87,11 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
bState, _ := util.DeterministicGenesisState(t, 10)
pbState, err := v1.ProtobufBeaconState(bState.InnerStateUnsafe())
require.NoError(t, err)
mockTrie, err := trie.NewTrie(0)
require.NoError(t, err)
err = beaconDB.SavePowchainData(ctx, &ethpb.ETH1ChainData{
BeaconState: pbState,
Trie: &ethpb.SparseMerkleTrie{},
Trie: mockTrie.ToProto(),
CurrentEth1Data: &ethpb.LatestETH1Data{
BlockHash: make([]byte, 32),
},
@@ -221,7 +224,8 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
require.NoError(t, err)
trie, _, err := util.DepositTrieFromDeposits(deposits)
require.NoError(t, err)
hashTreeRoot := trie.HashTreeRoot()
hashTreeRoot, err := trie.HashTreeRoot()
require.NoError(t, err)
genState, err := transition.EmptyGenesisState()
require.NoError(t, err)
err = genState.SetEth1Data(&ethpb.Eth1Data{
@@ -500,7 +504,7 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -521,7 +525,7 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -594,7 +598,7 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
@@ -617,7 +621,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)

@@ -37,7 +37,7 @@ func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
// the previously read value. This cache assumes we only want to cache one
// set of balances for a single root (the current justified root).
//
// warning: this is not thread-safe on its own, relies on get() for locking
// WARNING: this is not thread-safe on its own, relies on get() for locking
func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
stateBalanceCacheMiss.Inc()
justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot)

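The balance cache above deliberately holds balances for a single root (the current justified root), so an update overwrites the previous entry wholesale, and, per the WARNING, locking belongs to the caller (get). A standalone sketch of that single-slot design, all names hypothetical:

package main

import (
	"fmt"
	"sync"
)

// singleSlotCache keeps values for exactly one key at a time; a miss
// recomputes and replaces the slot rather than growing a map.
type singleSlotCache struct {
	mu   sync.Mutex // locking lives in get(), matching the WARNING note
	root [32]byte
	vals []uint64
}

func (c *singleSlotCache) get(root [32]byte, load func([32]byte) []uint64) []uint64 {
	c.mu.Lock()
	defer c.mu.Unlock()
	if root == c.root && c.vals != nil {
		return c.vals // hit: same justified root as the previous call
	}
	c.root, c.vals = root, load(root) // miss: recompute and overwrite
	return c.vals
}

func main() {
	calls := 0
	loader := func([32]byte) []uint64 { calls++; return []uint64{32, 32} }
	c := &singleSlotCache{}
	c.get([32]byte{'a'}, loader)
	c.get([32]byte{'a'}, loader) // served from the single slot
	fmt.Println("loads:", calls) // loads: 1
}
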
@@ -23,6 +23,13 @@ func (s *Store) JustifiedCheckpt() *ethpb.Checkpoint {
return s.justifiedCheckpt
}

// JustifiedPayloadBlockHash returns the justified payload block hash reflecting justified check point.
func (s *Store) JustifiedPayloadBlockHash() [32]byte {
s.RLock()
defer s.RUnlock()
return s.justifiedPayloadBlockHash
}

// PrevFinalizedCheckpt returns the previous finalized checkpoint in the Store.
func (s *Store) PrevFinalizedCheckpt() *ethpb.Checkpoint {
s.RLock()
@@ -37,6 +44,13 @@ func (s *Store) FinalizedCheckpt() *ethpb.Checkpoint {
return s.finalizedCheckpt
}

// FinalizedPayloadBlockHash returns the finalized payload block hash reflecting finalized check point.
func (s *Store) FinalizedPayloadBlockHash() [32]byte {
s.RLock()
defer s.RUnlock()
return s.finalizedPayloadBlockHash
}

// SetPrevJustifiedCheckpt sets the previous justified checkpoint in the Store.
func (s *Store) SetPrevJustifiedCheckpt(cp *ethpb.Checkpoint) {
s.Lock()
@@ -51,18 +65,20 @@ func (s *Store) SetBestJustifiedCheckpt(cp *ethpb.Checkpoint) {
s.bestJustifiedCheckpt = cp
}

// SetJustifiedCheckpt sets the justified checkpoint in the Store.
func (s *Store) SetJustifiedCheckpt(cp *ethpb.Checkpoint) {
// SetJustifiedCheckptAndPayloadHash sets the justified checkpoint and blockhash in the Store.
func (s *Store) SetJustifiedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
s.Lock()
defer s.Unlock()
s.justifiedCheckpt = cp
s.justifiedPayloadBlockHash = h
}

// SetFinalizedCheckpt sets the finalized checkpoint in the Store.
func (s *Store) SetFinalizedCheckpt(cp *ethpb.Checkpoint) {
// SetFinalizedCheckptAndPayloadHash sets the finalized checkpoint and blockhash in the Store.
func (s *Store) SetFinalizedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
s.Lock()
defer s.Unlock()
s.finalizedCheckpt = cp
s.finalizedPayloadBlockHash = h
}

// SetPrevFinalizedCheckpt sets the previous finalized checkpoint in the Store.

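The combined setters update a checkpoint and its payload block hash under one lock acquisition, so a reader can never observe a checkpoint paired with a stale hash. A hedged usage sketch against the API shown in this diff (s is a *store.Store; payloadHash is whatever execution payload hash accompanies the checkpoint):

cp := &ethpb.Checkpoint{Epoch: 1, Root: root[:]}
s.SetFinalizedCheckptAndPayloadHash(cp, payloadHash) // atomic pair update
_ = s.FinalizedCheckpt()          // returns cp
_ = s.FinalizedPayloadBlockHash() // returns payloadHash
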
@@ -30,8 +30,10 @@ func Test_store_JustifiedCheckpt(t *testing.T) {
var cp *ethpb.Checkpoint
require.Equal(t, cp, s.JustifiedCheckpt())
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
s.SetJustifiedCheckpt(cp)
h := [32]byte{'b'}
s.SetJustifiedCheckptAndPayloadHash(cp, h)
require.Equal(t, cp, s.JustifiedCheckpt())
require.Equal(t, h, s.JustifiedPayloadBlockHash())
}

func Test_store_FinalizedCheckpt(t *testing.T) {
@@ -39,8 +41,10 @@ func Test_store_FinalizedCheckpt(t *testing.T) {
var cp *ethpb.Checkpoint
require.Equal(t, cp, s.FinalizedCheckpt())
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
s.SetFinalizedCheckpt(cp)
h := [32]byte{'b'}
s.SetFinalizedCheckptAndPayloadHash(cp, h)
require.Equal(t, cp, s.FinalizedCheckpt())
require.Equal(t, h, s.FinalizedPayloadBlockHash())
}

func Test_store_PrevFinalizedCheckpt(t *testing.T) {

@@ -17,9 +17,11 @@ import (
// best_justified_checkpoint: Checkpoint
// proposerBoostRoot: Root
type Store struct {
justifiedCheckpt *ethpb.Checkpoint
finalizedCheckpt *ethpb.Checkpoint
bestJustifiedCheckpt *ethpb.Checkpoint
justifiedCheckpt *ethpb.Checkpoint
justifiedPayloadBlockHash [32]byte
finalizedCheckpt *ethpb.Checkpoint
finalizedPayloadBlockHash [32]byte
bestJustifiedCheckpt *ethpb.Checkpoint
sync.RWMutex
// These are not part of the consensus spec, but we do use them to return gRPC API requests.
// TODO(10094): Consider removing in v3.

@@ -451,6 +451,8 @@ func (s *ChainService) IsOptimisticForRoot(_ context.Context, _ [32]byte) (bool,
return s.Optimistic, nil
}

// UpdateHead mocks the same method in the chain service.
func (s *ChainService) UpdateHead(_ context.Context) error { return nil }

// ReceiveAttesterSlashing mocks the same method in the chain service.
func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {
}
func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {}

@@ -79,7 +79,7 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
store: &store.Store{},
wsVerifier: wv,
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: tt.finalizedEpoch})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: tt.finalizedEpoch}, [32]byte{})
err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), s.store.FinalizedCheckpt().Epoch)
if tt.wantErr == nil {
require.NoError(t, err)

@@ -430,7 +430,11 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
}
trie, err := trie.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate deposit trie")
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
rootA, err := trie.HashTreeRoot()
require.NoError(t, err)
rootB, err := cachedDeposits.Deposits.HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, rootA, rootB)
}

func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
@@ -488,7 +492,11 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
}
trie, err := trie.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate deposit trie")
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
rootA, err := trie.HashTreeRoot()
require.NoError(t, err)
rootB, err := cachedDeposits.Deposits.HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, rootA, rootB)
}

func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) {

@@ -143,7 +143,8 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
},
}
balances := []uint64{0, 50}
root := depositTrie.HashTreeRoot()
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
beaconState, err := stateAltair.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: registry,
Balances: balances,
@@ -202,7 +203,8 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
dep[0].Data.Signature = make([]byte, 96)
trie, _, err := util.DepositTrieFromDeposits(dep)
require.NoError(t, err)
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositRoot: root[:],
DepositCount: 1,

@@ -173,7 +173,8 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
},
}
balances := []uint64{0, 50}
root := depositTrie.HashTreeRoot()
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: registry,
Balances: balances,
@@ -233,7 +234,8 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
dep[0].Data.Signature = make([]byte, 96)
trie, _, err := util.DepositTrieFromDeposits(dep)
require.NoError(t, err)
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositRoot: root[:],
DepositCount: 1,
@@ -289,7 +291,9 @@ func TestPreGenesisDeposits_SkipInvalidDeposit(t *testing.T) {
require.NoError(t, err)
dep[i].Proof = proof
}
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)

eth1Data := &ethpb.Eth1Data{
DepositRoot: root[:],
DepositCount: 1,
@@ -376,7 +380,9 @@ func TestProcessDeposit_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
},
}
balances := []uint64{0, 50}
root := depositTrie.HashTreeRoot()
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)

beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: registry,
Balances: balances,

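All of the hunks above track one signature change: the trie's HashTreeRoot now returns (root, error) instead of just the root, so every call site gains an error check. A standalone sketch of what that shift looks like at a call site (hashTreeRoot here is a stand-in, not the real trie implementation):

package main

import (
	"errors"
	"fmt"
)

// hashTreeRoot stands in for the new trie.HashTreeRoot signature, which
// surfaces an error instead of silently producing a root on failure.
func hashTreeRoot(items [][]byte) ([32]byte, error) {
	if items == nil {
		return [32]byte{}, errors.New("nil trie")
	}
	return [32]byte{1}, nil
}

func main() {
	root, err := hashTreeRoot([][]byte{{0x01}})
	if err != nil { // every call site now has to handle this explicitly
		panic(err)
	}
	fmt.Printf("%x\n", root[:4])
}
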
@@ -41,7 +41,10 @@ func UpdateGenesisEth1Data(state state.BeaconState, deposits []*ethpb.Deposit, e
}
}

depositRoot := t.HashTreeRoot()
depositRoot, err := t.HashTreeRoot()
if err != nil {
return nil, err
}
eth1Data.DepositRoot = depositRoot[:]
err = state.SetEth1Data(eth1Data)
if err != nil {

@@ -81,7 +81,7 @@ func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error)
if i != len(children)-1 {
children[i] = children[len(children)-1]
}
node.parent.children = children[:len(children)-2]
node.parent.children = children[:len(children)-1]
break
}
}

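The one-line fix above corrects an off-by-one in the swap-remove idiom: after swapping the victim with the last element, the slice must shrink by exactly one (len-1), not two, or a sibling gets silently dropped, which is what the regression test below exercises. A standalone sketch of the idiom with assumed types:

package main

import "fmt"

// removeAt deletes children[i] in O(1) without preserving order:
// swap the victim with the last element, then drop the last slot.
func removeAt(children []int, i int) []int {
	children[i] = children[len(children)-1]
	return children[:len(children)-1]
}

func main() {
	fmt.Println(removeAt([]int{10, 20, 30, 40}, 1)) // [10 40 30]
}
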
@@ -203,3 +203,26 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
f.store.proposerBoostLock.RUnlock()
}

// This is a regression test (10565)
// ----- C
// /
// A <- B
// \
// ----------D
// D is invalid

func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)

require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'D'}, 1, 1))

_, err := f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'})
require.NoError(t, err)
require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))

}

@@ -51,7 +51,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -75,7 +75,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -101,7 +101,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -111,35 +111,37 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")

// Insert a second block at slot 3 into the tree and boost its score.
// Insert a second block at slot 4 into the tree and boost its score.
// 0
// |
// 1
// |
// 2
// / \
// 3 4 <- HEAD
slot = types.Slot(3)
// 3 |
// 4 <- HEAD
slot = types.Slot(4)
newRoot = indexToHash(4)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
indexToHash(2),
zeroHash,
jEpoch,
fEpoch,
),
)
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
clockSlot := types.Slot(3)
clockSlot := types.Slot(4)
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: newRoot,
BlockSlot: slot,
CurrentSlot: clockSlot,
SecondsIntoSlot: 0,
}

require.NoError(t, f.BoostProposerRoot(ctx, args))
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
@@ -166,17 +168,27 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
//
// In this case, we have a small fork:
//
// (A: 54) -> (B: 44) -> (C: 34)
// (A: 54) -> (B: 44) -> (C: 10)
// \_->(D: 24)
//
// So B has its own weight, 10, and the sum of both C and D. That's why we see weight 54 in the
// middle instead of the normal progression of (44 -> 34 -> 24).
// middle instead of the normal progression of (54 -> 44 -> 24).
node1 := f.store.nodeByRoot[indexToHash(1)]
require.Equal(t, node1.weight, uint64(54))
node2 := f.store.nodeByRoot[indexToHash(2)]
require.Equal(t, node2.weight, uint64(44))
node3 := f.store.nodeByRoot[indexToHash(4)]
require.Equal(t, node3.weight, uint64(24))
node3 := f.store.nodeByRoot[indexToHash(3)]
require.Equal(t, node3.weight, uint64(10))
node4 := f.store.nodeByRoot[indexToHash(4)]
require.Equal(t, node4.weight, uint64(24))

// Regression: process attestations for C, check that it
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight
f.ProcessAttestation(ctx, []uint64{4, 5}, indexToHash(3), fEpoch)
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), headRoot, "Incorrect head for justified epoch at slot 4")

})
t.Run("vanilla ex ante attack", func(t *testing.T) {
f := setup(jEpoch, fEpoch)

@@ -24,6 +24,7 @@ func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {

// SetOptimisticToValid is called with the root of a block that was returned as
// VALID by the EL.
//
// WARNING: This method returns an error if the root is not found in forkchoice
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) error {
f.store.nodesLock.Lock()

@@ -51,7 +51,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -75,7 +75,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -101,7 +101,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -111,29 +111,30 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")

// Insert a second block at slot 3 into the tree and boost its score.
// Insert a second block at slot 4 into the tree and boost its score.
// 0
// |
// 1
// |
// 2
// / \
// 3 4 <- HEAD
slot = types.Slot(3)
// 3 |
// 4 <- HEAD
slot = types.Slot(4)
newRoot = indexToHash(4)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
params.BeaconConfig().ZeroHash,
indexToHash(2),
zeroHash,
jEpoch,
fEpoch,
),
)
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
clockSlot := types.Slot(3)
clockSlot := types.Slot(4)
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: newRoot,
BlockSlot: slot,
@@ -166,14 +167,22 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
//
// In this case, we have a small fork:
//
// (A: 54) -> (B: 44) -> (C: 24)
// \_->(D: 10)
// (A: 54) -> (B: 44) -> (C: 10)
// \_->(D: 24)
//
// So B has its own weight, 10, and the sum of both C and D. That's why we see weight 54 in the
// middle instead of the normal progression of (44 -> 34 -> 24).
// middle instead of the normal progression of (54 -> 44 -> 24).
require.Equal(t, f.store.nodes[1].weight, uint64(54))
require.Equal(t, f.store.nodes[2].weight, uint64(44))
require.Equal(t, f.store.nodes[3].weight, uint64(34))
require.Equal(t, f.store.nodes[3].weight, uint64(10))
require.Equal(t, f.store.nodes[4].weight, uint64(24))

// Regression: process attestations for C, check that it
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight
f.ProcessAttestation(ctx, []uint64{4, 5}, indexToHash(3), fEpoch)
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), headRoot, "Incorrect head for justified epoch at slot 4")
})
t.Run("vanilla ex ante attack", func(t *testing.T) {
f := setup(jEpoch, fEpoch)

@@ -4,6 +4,7 @@ import (
"fmt"

"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/params"
@@ -117,7 +118,18 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
if !common.IsHexAddress(ha) {
return fmt.Errorf("%s is not a valid fee recipient address", ha)
}
c.DefaultFeeRecipient = common.HexToAddress(ha)
mixedcaseAddress, err := common.NewMixedcaseAddressFromString(ha)
if err != nil {
return errors.Wrapf(err, "could not decode fee recipient %s", ha)
}
checksumAddress := common.HexToAddress(ha)
if !mixedcaseAddress.ValidChecksum() {
log.Warnf("Fee recipient %s is not a checksum Ethereum address. "+
"The checksummed address is %s and will be used as the fee recipient. "+
"We recommend using a mixed-case address (checksum) "+
"to prevent spelling mistakes in your fee recipient Ethereum address", ha, checksumAddress.Hex())
}
c.DefaultFeeRecipient = checksumAddress
params.OverrideBeaconConfig(c)
return nil
}

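The new validation path above warns when the fee recipient is supplied in all-lowercase or all-uppercase form rather than EIP-55 mixed case, since such addresses carry no checksum information against typos. The same check in isolation, using the go-ethereum calls the diff itself relies on:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	ha := "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
	if !common.IsHexAddress(ha) {
		panic("not a hex address")
	}
	mixed, err := common.NewMixedcaseAddressFromString(ha)
	if err != nil {
		panic(err)
	}
	if !mixed.ValidChecksum() {
		// An all-lowercase address cannot be checksum-verified; Hex()
		// prints the canonical EIP-55 mixed-case form.
		fmt.Println("warning: not EIP-55 checksummed; canonical form is", common.HexToAddress(ha).Hex())
	}
}
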
@@ -87,6 +87,7 @@ func TestConfigureProofOfWork(t *testing.T) {

func TestConfigureExecutionSetting(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()

app := cli.App{}
set := flag.NewFlagSet("test", 0)
@@ -102,11 +103,15 @@ func TestConfigureExecutionSetting(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)

require.NoError(t, set.Set(flags.SuggestedFeeRecipient.Name, "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"))
assert.LogsContain(t, hook,
"is not a checksum Ethereum address",
)
require.NoError(t, set.Set(flags.SuggestedFeeRecipient.Name, "0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"))
cliCtx = cli.NewContext(&app, set, nil)
err = configureExecutionSetting(cliCtx)
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
assert.Equal(t, common.HexToAddress("0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"), params.BeaconConfig().DefaultFeeRecipient)

}

func TestConfigureNetwork(t *testing.T) {

@@ -779,6 +779,7 @@ func (b *BeaconNode) registerRPCService() error {
PeerManager: p2pService,
MetadataProvider: p2pService,
ChainInfoFetcher: chainService,
HeadUpdater: chainService,
HeadFetcher: chainService,
CanonicalFetcher: chainService,
ForkFetcher: chainService,
@@ -787,6 +788,7 @@ func (b *BeaconNode) registerRPCService() error {
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
OptimisticModeFetcher: chainService,
AttestationsPool: b.attestationPool,
ExitPool: b.exitPool,
SlashingsPool: b.slashingsPool,

@@ -54,7 +54,9 @@ func init() {
for k, v := range gossipTopicMappings {
GossipTypeMapping[reflect.TypeOf(v)] = k
}
// Specially handle Altair Objects.
// Specially handle Altair objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockAltair{})] = BlockSubnetTopicFormat
// Specially handle Bellatrix objects.
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockBellatrix{})] = BlockSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBlindedBeaconBlockBellatrix{})] = BlockSubnetTopicFormat
}

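The init loop above derives GossipTypeMapping as the reverse of gossipTopicMappings, then patches in the Altair and Bellatrix block types by hand because several message types share one topic format. A toy sketch of that forward-then-reverse construction:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// forward: topic -> prototype message (toy stand-ins for proto types)
	forward := map[string]interface{}{"topic/a": "", "topic/b": 0}
	// reverse: message type -> topic, built by iterating the forward map
	reverse := make(map[reflect.Type]string, len(forward))
	for topic, msg := range forward {
		reverse[reflect.TypeOf(msg)] = topic
	}
	fmt.Println(reverse[reflect.TypeOf("")]) // topic/a
}
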
@@ -22,7 +22,7 @@ const repeatedSearches = 2 * searchThreshold

// BlockExists returns true if the block exists, its height and any possible error encountered.
func (s *Service) BlockExists(ctx context.Context, hash common.Hash) (bool, *big.Int, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockExists")
ctx, span := trace.StartSpan(ctx, "powchain.BlockExists")
defer span.End()

if exists, hdrInfo, err := s.headerCache.HeaderInfoByHash(hash); exists || err != nil {
@@ -45,24 +45,9 @@ func (s *Service) BlockExists(ctx context.Context, hash common.Hash) (bool, *big
return true, new(big.Int).Set(header.Number), nil
}

// BlockExistsWithCache returns true if the block exists in cache, its height and any possible error encountered.
func (s *Service) BlockExistsWithCache(ctx context.Context, hash common.Hash) (bool, *big.Int, error) {
_, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockExistsWithCache")
defer span.End()
if exists, hdrInfo, err := s.headerCache.HeaderInfoByHash(hash); exists || err != nil {
if err != nil {
return false, nil, err
}
span.AddAttributes(trace.BoolAttribute("blockCacheHit", true))
return true, hdrInfo.Number, nil
}
span.AddAttributes(trace.BoolAttribute("blockCacheHit", false))
return false, nil, nil
}

// BlockHashByHeight returns the block hash of the block at the given height.
func (s *Service) BlockHashByHeight(ctx context.Context, height *big.Int) (common.Hash, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockHashByHeight")
ctx, span := trace.StartSpan(ctx, "powchain.BlockHashByHeight")
defer span.End()

if exists, hInfo, err := s.headerCache.HeaderInfoByHeight(height); exists || err != nil {
@@ -90,9 +75,9 @@ func (s *Service) BlockHashByHeight(ctx context.Context, height *big.Int) (commo
return header.Hash(), nil
}

// BlockTimeByHeight fetches an eth1.0 block timestamp by its height.
// BlockTimeByHeight fetches an eth1 block timestamp by its height.
func (s *Service) BlockTimeByHeight(ctx context.Context, height *big.Int) (uint64, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockTimeByHeight")
ctx, span := trace.StartSpan(ctx, "powchain.BlockTimeByHeight")
defer span.End()
if s.eth1DataFetcher == nil {
err := errors.New("nil eth1DataFetcher")
@@ -111,7 +96,7 @@ func (s *Service) BlockTimeByHeight(ctx context.Context, height *big.Int) (uint6
// This is an optimized version with the worst case being O(2*repeatedSearches) number of calls
// while in best case search for the block is performed in O(1).
func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.HeaderInfo, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockByTimestamp")
ctx, span := trace.StartSpan(ctx, "powchain.BlockByTimestamp")
defer span.End()

s.latestEth1DataLock.RLock()
@@ -122,15 +107,14 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
if time > latestBlkTime {
return nil, errors.Errorf("provided time is later than the current eth1 head. %d > %d", time, latestBlkTime)
}
// Initialize a pointer to eth1 chain's history to start our search
// from.
// Initialize a pointer to eth1 chain's history to start our search from.
cursorNum := big.NewInt(0).SetUint64(latestBlkHeight)
cursorTime := latestBlkTime

numOfBlocks := uint64(0)
estimatedBlk := cursorNum.Uint64()
maxTimeBuffer := searchThreshold * params.BeaconConfig().SecondsPerETH1Block
// Terminate if we cant find an acceptable block after
// Terminate if we can't find an acceptable block after
// repeated searches.
for i := 0; i < repeatedSearches; i++ {
if ctx.Err() != nil {
@@ -157,12 +141,12 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
// time - buffer <= head.time <= time + buffer
break
}
hinfo, err := s.retrieveHeaderInfo(ctx, estimatedBlk)
hInfo, err := s.retrieveHeaderInfo(ctx, estimatedBlk)
if err != nil {
return nil, err
}
cursorNum = hinfo.Number
cursorTime = hinfo.Time
cursorNum = hInfo.Number
cursorTime = hInfo.Time
}

// Exit early if we get the desired block.
@@ -170,15 +154,15 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
return s.retrieveHeaderInfo(ctx, cursorNum.Uint64())
}
if cursorTime > time {
return s.findLessTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
return s.findMaxTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
}
return s.findMoreTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
return s.findMinTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
}

// Performs a search to find a target eth1 block which is earlier than or equal to the
// target time. This method is used when head.time > targetTime
func (s *Service) findLessTargetEth1Block(ctx context.Context, startBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := startBlk; ; bn = big.NewInt(0).Sub(bn, big.NewInt(1)) {
func (s *Service) findMaxTargetEth1Block(ctx context.Context, upperBoundBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := upperBoundBlk; ; bn = big.NewInt(0).Sub(bn, big.NewInt(1)) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -194,8 +178,8 @@ func (s *Service) findLessTargetEth1Block(ctx context.Context, startBlk *big.Int

// Performs a search to find a target eth1 block which is just earlier than or equal to the
// target time. This method is used when head.time < targetTime
func (s *Service) findMoreTargetEth1Block(ctx context.Context, startBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := startBlk; ; bn = big.NewInt(0).Add(bn, big.NewInt(1)) {
func (s *Service) findMinTargetEth1Block(ctx context.Context, lowerBoundBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := lowerBoundBlk; ; bn = big.NewInt(0).Add(bn, big.NewInt(1)) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -203,8 +187,7 @@ func (s *Service) findMoreTargetEth1Block(ctx context.Context, startBlk *big.Int
if err != nil {
return nil, err
}
// Return the last block before we hit the threshold
// time.
// Return the last block before we hit the threshold time.
if info.Time > targetTime {
return s.retrieveHeaderInfo(ctx, info.Number.Uint64()-1)
}

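BlockByTimestamp first jumps to an estimated height (scaled by SecondsPerETH1Block) and then the renamed findMaxTargetEth1Block / findMinTargetEth1Block walk linearly down or up to the boundary block, which is what gives the O(1) best case and bounded worst case mentioned in its comment. A compact sketch of that estimate-then-walk search over a hypothetical header lookup (block spacing fixed at 14s for illustration):

package main

import "fmt"

type header struct{ num, time uint64 }

// headerAt stands in for retrieveHeaderInfo; here blocks are 14s apart.
func headerAt(n uint64) header { return header{num: n, time: n * 14} }

// blockByTime estimates a height from the timestamp delta, then walks
// one block at a time to the last block with time <= target.
func blockByTime(head header, target uint64) header {
	est := head.num - (head.time-target)/14 // jump close to the answer
	h := headerAt(est)
	for h.time > target { // overshot: walk down (findMaxTargetEth1Block)
		h = headerAt(h.num - 1)
	}
	for headerAt(h.num+1).time <= target { // undershot: walk up (findMinTargetEth1Block)
		h = headerAt(h.num + 1)
	}
	return h
}

func main() {
	fmt.Println(blockByTime(headerAt(1000), 4200)) // {300 4200}
}
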
@@ -61,7 +61,7 @@ func TestLatestMainchainInfo_OK(t *testing.T) {
require.NoError(t, err)

tickerChan := make(chan time.Time)
web3Service.headTicker = &time.Ticker{C: tickerChan}
web3Service.eth1HeadTicker = &time.Ticker{C: tickerChan}
tickerChan <- time.Now()
web3Service.cancel()
exitRoutine <- true
@@ -207,51 +207,6 @@ func TestBlockExists_UsesCachedBlockInfo(t *testing.T) {
require.Equal(t, 0, height.Cmp(header.Number))
}

func TestBlockExistsWithCache_UsesCachedHeaderInfo(t *testing.T) {
beaconDB := dbutil.SetupDB(t)
server, endpoint, err := mockPOW.SetupRPCServer()
require.NoError(t, err)
t.Cleanup(func() {
server.Stop()
})
web3Service, err := NewService(context.Background(),
WithHttpEndpoints([]string{endpoint}),
WithDatabase(beaconDB),
)
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")

header := &gethTypes.Header{
Number: big.NewInt(0),
}

err = web3Service.headerCache.AddHeader(header)
require.NoError(t, err)

exists, height, err := web3Service.BlockExistsWithCache(context.Background(), header.Hash())
require.NoError(t, err, "Could not get block hash with given height")
require.Equal(t, true, exists)
require.Equal(t, 0, height.Cmp(header.Number))
}

func TestBlockExistsWithCache_HeaderNotCached(t *testing.T) {
beaconDB := dbutil.SetupDB(t)
server, endpoint, err := mockPOW.SetupRPCServer()
require.NoError(t, err)
t.Cleanup(func() {
server.Stop()
})
web3Service, err := NewService(context.Background(),
WithHttpEndpoints([]string{endpoint}),
WithDatabase(beaconDB),
)
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")

exists, height, err := web3Service.BlockExistsWithCache(context.Background(), common.BytesToHash([]byte("hash")))
require.NoError(t, err, "Could not get block hash with given height")
require.Equal(t, false, exists)
require.Equal(t, (*big.Int)(nil), height)
}

func TestService_BlockNumberByTimestamp(t *testing.T) {
beaconDB := dbutil.SetupDB(t)
testAcc, err := mock.Setup()
@@ -313,11 +268,11 @@ func TestService_BlockNumberByTimestampLessTargetTime(t *testing.T) {
defer cancel()

// Provide an unattainable target time
_, err = web3Service.findLessTargetEth1Block(ctx, hd.Number, hd.Time/2)
_, err = web3Service.findMaxTargetEth1Block(ctx, hd.Number, hd.Time/2)
require.ErrorContains(t, context.DeadlineExceeded.Error(), err)

// Provide an attainable target time
blk, err := web3Service.findLessTargetEth1Block(context.Background(), hd.Number, hd.Time-5)
blk, err := web3Service.findMaxTargetEth1Block(context.Background(), hd.Number, hd.Time-5)
require.NoError(t, err)
require.NotEqual(t, hd.Number.Uint64(), blk.Number.Uint64(), "retrieved block is not less than the head")
}
@@ -351,11 +306,11 @@ func TestService_BlockNumberByTimestampMoreTargetTime(t *testing.T) {
defer cancel()

// Provide an unattainable target time with respect to head
_, err = web3Service.findMoreTargetEth1Block(ctx, big.NewInt(0).Div(hd.Number, big.NewInt(2)), hd.Time)
_, err = web3Service.findMinTargetEth1Block(ctx, big.NewInt(0).Div(hd.Number, big.NewInt(2)), hd.Time)
require.ErrorContains(t, context.DeadlineExceeded.Error(), err)

// Provide an attainable target time with respect to head
blk, err := web3Service.findMoreTargetEth1Block(context.Background(), big.NewInt(0).Sub(hd.Number, big.NewInt(5)), hd.Time)
blk, err := web3Service.findMinTargetEth1Block(context.Background(), big.NewInt(0).Sub(hd.Number, big.NewInt(5)), hd.Time)
require.NoError(t, err)
require.Equal(t, hd.Number.Uint64(), blk.Number.Uint64(), "retrieved block is not equal to the head")
}
@@ -138,7 +138,8 @@ func TestProcessDeposit_InvalidPublicKey(t *testing.T) {
deposits[0].Proof, err = trie.MerkleProof(0)
require.NoError(t, err)

root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)

eth1Data := &ethpb.Eth1Data{
DepositCount: 1,
@@ -178,7 +179,8 @@ func TestProcessDeposit_InvalidSignature(t *testing.T) {
trie, err := trie.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err)

root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)

eth1Data := &ethpb.Eth1Data{
DepositCount: 1,
@@ -213,7 +215,8 @@ func TestProcessDeposit_UnableToVerify(t *testing.T) {

trie, _, err := util.DepositTrieFromDeposits(deposits)
require.NoError(t, err)
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositCount: 1,
DepositRoot: root[:],
@@ -265,7 +268,8 @@ func TestProcessDeposit_IncompleteDeposit(t *testing.T) {

trie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err)
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositCount: 1,
DepositRoot: root[:],
@@ -281,7 +285,8 @@ func TestProcessDeposit_IncompleteDeposit(t *testing.T) {
for i := 0; i < int(factor-1); i++ {
assert.NoError(t, trie.Insert(dataRoot[:], i))

trieRoot := trie.HashTreeRoot()
trieRoot, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data.DepositRoot = trieRoot[:]
eth1Data.DepositCount = uint64(i + 1)
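The repeated test change above swaps a bare `trie.HashTreeRoot()` call for a `(root, err)` pair. A small sketch of the call-site pattern that signature change forces, with a hypothetical `merkleizer` standing in for the deposit trie:

```go
package main

import (
	"errors"
	"fmt"
)

// merkleizer is a hypothetical stand-in for the deposit trie: HashTreeRoot
// now returns an error instead of silently producing a root.
type merkleizer struct{ leaves [][]byte }

func (m *merkleizer) HashTreeRoot() ([32]byte, error) {
	if m == nil {
		return [32]byte{}, errors.New("nil trie")
	}
	var root [32]byte // real code would merkleize m.leaves here
	return root, nil
}

func main() {
	t := &merkleizer{}
	// After the change, every call site checks the error before using root[:].
	root, err := t.HashTreeRoot()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", root[:4])
}
```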
@@ -33,6 +33,10 @@ const (
ExecutionBlockByHashMethod = "eth_getBlockByHash"
// ExecutionBlockByNumberMethod request string for JSON-RPC.
ExecutionBlockByNumberMethod = "eth_getBlockByNumber"
// Defines the seconds to wait before timing out engine endpoints with block execution semantics (newPayload, forkchoiceUpdated).
payloadAndForkchoiceUpdatedTimeout = 8 * time.Second
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
defaultEngineTimeout = time.Second
)

// ForkchoiceUpdatedResponse is the response kind received by the
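The two new constants bound every engine API call with a per-request deadline. A runnable sketch of the wrapping pattern the methods below apply; the helper name and durations here are illustrative, not Prysm's API:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Assumed timeouts mirroring the constants above.
const (
	blockExecutionTimeout = 8 * time.Second
	defaultTimeout        = time.Second
)

// callWithDeadline wraps an RPC-style call in a deadline, the same shape the
// engine methods use; call is any function that respects its context.
func callWithDeadline(ctx context.Context, d time.Duration, call func(context.Context) error) error {
	ctx, cancel := context.WithDeadline(ctx, time.Now().Add(d))
	defer cancel() // always release the deadline timer
	return call(ctx)
}

func main() {
	err := callWithDeadline(context.Background(), defaultTimeout, func(ctx context.Context) error {
		select {
		case <-time.After(2 * time.Second): // simulated slow endpoint
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	})
	fmt.Println(err) // context deadline exceeded
}
```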
@@ -65,7 +69,9 @@ func (s *Service) NewPayload(ctx context.Context, payload *pb.ExecutionPayload)
defer func() {
newPayloadLatency.Observe(float64(time.Since(start).Milliseconds()))
}()

d := time.Now().Add(payloadAndForkchoiceUpdatedTimeout)
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
result := &pb.PayloadStatus{}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethod, payload)
if err != nil {
@@ -75,8 +81,6 @@ func (s *Service) NewPayload(ctx context.Context, payload *pb.ExecutionPayload)
switch result.Status {
case pb.PayloadStatus_INVALID_BLOCK_HASH:
return nil, fmt.Errorf("could not validate block hash: %v", result.ValidationError)
case pb.PayloadStatus_INVALID_TERMINAL_BLOCK:
return nil, fmt.Errorf("could not satisfy terminal block condition: %v", result.ValidationError)
case pb.PayloadStatus_ACCEPTED, pb.PayloadStatus_SYNCING:
return nil, ErrAcceptedSyncingPayloadStatus
case pb.PayloadStatus_INVALID:
@@ -99,6 +103,9 @@ func (s *Service) ForkchoiceUpdated(
forkchoiceUpdatedLatency.Observe(float64(time.Since(start).Milliseconds()))
}()

d := time.Now().Add(payloadAndForkchoiceUpdatedTimeout)
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
result := &ForkchoiceUpdatedResponse{}
err := s.rpcClient.CallContext(ctx, result, ForkchoiceUpdatedMethod, state, attrs)
if err != nil {
@@ -110,8 +117,6 @@ func (s *Service) ForkchoiceUpdated(
}
resp := result.Status
switch resp.Status {
case pb.PayloadStatus_INVALID_TERMINAL_BLOCK:
return nil, nil, fmt.Errorf("could not satisfy terminal block condition: %v", resp.ValidationError)
case pb.PayloadStatus_SYNCING:
return nil, nil, ErrAcceptedSyncingPayloadStatus
case pb.PayloadStatus_INVALID:
@@ -132,6 +137,9 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte) (*pb.Execut
getPayloadLatency.Observe(float64(time.Since(start).Milliseconds()))
}()

d := time.Now().Add(defaultEngineTimeout)
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
result := &pb.ExecutionPayload{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethod, pb.PayloadIDBytes(payloadId))
return result, handleRPCError(err)
@@ -147,10 +155,14 @@ func (s *Service) ExchangeTransitionConfiguration(
// We set terminal block number to 0 as the parameter is not set on the consensus layer.
zeroBigNum := big.NewInt(0)
cfg.TerminalBlockNumber = zeroBigNum.Bytes()
d := time.Now().Add(defaultEngineTimeout)
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
result := &pb.TransitionConfiguration{}
if err := s.rpcClient.CallContext(ctx, result, ExchangeTransitionConfigurationMethod, cfg); err != nil {
return handleRPCError(err)
}

// We surface an error to the user if local configuration settings mismatch
// according to the response from the execution node.
cfgTerminalHash := params.BeaconConfig().TerminalBlockHash[:]
@@ -217,27 +217,6 @@ func TestClient_HTTP(t *testing.T) {
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, []byte(nil), validHash)
})
t.Run(ForkchoiceUpdatedMethod+" INVALID_TERMINAL_BLOCK status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
SafeBlockHash: []byte("safe"),
FinalizedBlockHash: []byte("finalized"),
}
payloadAttributes := &pb.PayloadAttributes{
Timestamp: 1,
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
want, ok := fix["ForkchoiceUpdatedInvalidTerminalBlockResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)

// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
require.ErrorContains(t, "could not satisfy terminal block condition", err)
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, []byte(nil), validHash)
})
t.Run(NewPayloadMethod+" VALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
@@ -274,18 +253,6 @@ func TestClient_HTTP(t *testing.T) {
require.ErrorContains(t, "could not validate block hash", err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethod+" INVALID_TERMINAL_BLOCK status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
want, ok := fix["InvalidTerminalBlockStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadSetup(t, want, execPayload)

// We call the RPC method via HTTP and expect a proper result.
resp, err := client.NewPayload(ctx, execPayload)
require.ErrorContains(t, "could not satisfy terminal block condition", err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethod+" INVALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
@@ -819,13 +786,6 @@ func fixtures() map[string]interface{} {
},
PayloadId: &id,
}
forkChoiceInvalidTerminalBlockResp := &ForkchoiceUpdatedResponse{
Status: &pb.PayloadStatus{
Status: pb.PayloadStatus_INVALID_TERMINAL_BLOCK,
LatestValidHash: nil,
},
PayloadId: &id,
}
forkChoiceAcceptedResp := &ForkchoiceUpdatedResponse{
Status: &pb.PayloadStatus{
Status: pb.PayloadStatus_ACCEPTED,
@@ -856,10 +816,6 @@ func fixtures() map[string]interface{} {
Status: pb.PayloadStatus_INVALID_BLOCK_HASH,
LatestValidHash: nil,
}
inValidTerminalBlockStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_INVALID_TERMINAL_BLOCK,
LatestValidHash: nil,
}
acceptedStatus := &pb.PayloadStatus{
Status: pb.PayloadStatus_ACCEPTED,
LatestValidHash: nil,
@@ -877,21 +833,19 @@ func fixtures() map[string]interface{} {
LatestValidHash: foo[:],
}
return map[string]interface{}{
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"ValidPayloadStatus": validStatus,
"InvalidBlockHashStatus": inValidBlockHashStatus,
"InvalidTerminalBlockStatus": inValidTerminalBlockStatus,
"AcceptedStatus": acceptedStatus,
"SyncingStatus": syncingStatus,
"InvalidStatus": invalidStatus,
"UnknownStatus": unknownStatus,
"ForkchoiceUpdatedResponse": forkChoiceResp,
"ForkchoiceUpdatedSyncingResponse": forkChoiceSyncingResp,
"ForkchoiceUpdatedInvalidTerminalBlockResponse": forkChoiceInvalidTerminalBlockResp,
"ForkchoiceUpdatedAcceptedResponse": forkChoiceAcceptedResp,
"ForkchoiceUpdatedInvalidResponse": forkChoiceInvalidResp,
"TransitionConfiguration": transitionCfg,
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"ValidPayloadStatus": validStatus,
"InvalidBlockHashStatus": inValidBlockHashStatus,
"AcceptedStatus": acceptedStatus,
"SyncingStatus": syncingStatus,
"InvalidStatus": invalidStatus,
"UnknownStatus": unknownStatus,
"ForkchoiceUpdatedResponse": forkChoiceResp,
"ForkchoiceUpdatedSyncingResponse": forkChoiceSyncingResp,
"ForkchoiceUpdatedAcceptedResponse": forkChoiceAcceptedResp,
"ForkchoiceUpdatedInvalidResponse": forkChoiceInvalidResp,
"TransitionConfiguration": transitionCfg,
}
}
@@ -19,8 +19,6 @@ var (
ErrUnknownPayload = errors.New("payload does not exist or is not available")
// ErrUnknownPayloadStatus when the payload status is unknown.
ErrUnknownPayloadStatus = errors.New("unknown payload status")
// ErrUnsupportedScheme for unsupported URL schemes.
ErrUnsupportedScheme = errors.New("unsupported url scheme, only http(s) and ipc are supported")
// ErrConfigMismatch when the execution node's terminal total difficulty or
// terminal block hash received via the API mismatches Prysm's configuration value.
ErrConfigMismatch = errors.New("execution client configuration mismatch")

@@ -36,7 +36,7 @@ var (
const eth1DataSavingInterval = 1000
const maxTolerableDifference = 50
const defaultEth1HeaderReqLimit = uint64(1000)
const depositlogRequestLimit = 10000
const depositLogRequestLimit = 10000
const additiveFactorMultiplier = 0.10
const multiplicativeDecreaseDivisor = 2
@@ -57,7 +57,7 @@ func (s *Service) Eth2GenesisPowchainInfo() (uint64, *big.Int) {
return s.chainStartData.GenesisTime, big.NewInt(int64(s.chainStartData.GenesisBlock))
}

// ProcessETH1Block processes the logs from the provided eth1Block.
// ProcessETH1Block processes logs from the provided eth1 block.
func (s *Service) ProcessETH1Block(ctx context.Context, blkNum *big.Int) error {
query := ethereum.FilterQuery{
Addresses: []common.Address{
@@ -80,7 +80,7 @@ func (s *Service) ProcessETH1Block(ctx context.Context, blkNum *big.Int) error {
}
}
if !s.chainStartData.Chainstarted {
if err := s.checkBlockNumberForChainStart(ctx, blkNum); err != nil {
if err := s.processChainStartFromBlockNum(ctx, blkNum); err != nil {
return err
}
}
@@ -88,7 +88,7 @@ func (s *Service) ProcessETH1Block(ctx context.Context, blkNum *big.Int) error {
}

// ProcessLog is the main method which handles the processing of all
// logs from the deposit contract on the ETH1.0 chain.
// logs from the deposit contract on the eth1 chain.
func (s *Service) ProcessLog(ctx context.Context, depositLog gethTypes.Log) error {
s.processingLock.RLock()
defer s.processingLock.RUnlock()
@@ -107,7 +107,7 @@ func (s *Service) ProcessLog(ctx context.Context, depositLog gethTypes.Log) erro
}

// ProcessDepositLog processes the log which had been received from
// the ETH1.0 chain by trying to ascertain which participant deposited
// the eth1 chain by trying to ascertain which participant deposited
// in the contract.
func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Log) error {
pubkey, withdrawalCredentials, amount, signature, merkleTreeIndex, err := contracts.UnpackDepositLogData(depositLog.Data)
@@ -140,7 +140,7 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo

depositHash, err := depositData.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "Unable to determine hashed value of deposit")
return errors.Wrap(err, "unable to determine hashed value of deposit")
}

// Defensive check to validate incoming index.
@@ -158,20 +158,27 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
if !s.chainStartData.Chainstarted {
proof, err := s.depositTrie.MerkleProof(int(index))
if err != nil {
return errors.Wrap(err, "Unable to generate merkle proof for deposit")
return errors.Wrap(err, "unable to generate merkle proof for deposit")
}
deposit.Proof = proof
}

// We always store all historical deposits in the DB.
err = s.cfg.depositCache.InsertDeposit(ctx, deposit, depositLog.BlockNumber, index, s.depositTrie.HashTreeRoot())
root, err := s.depositTrie.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "unable to determine root of deposit trie")
}
err = s.cfg.depositCache.InsertDeposit(ctx, deposit, depositLog.BlockNumber, index, root)
if err != nil {
return errors.Wrap(err, "unable to insert deposit into cache")
}
validData := true
if !s.chainStartData.Chainstarted {
s.chainStartData.ChainstartDeposits = append(s.chainStartData.ChainstartDeposits, deposit)
root := s.depositTrie.HashTreeRoot()
root, err := s.depositTrie.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "unable to determine root of deposit trie")
}
eth1Data := &ethpb.Eth1Data{
DepositRoot: root[:],
DepositCount: uint64(len(s.chainStartData.ChainstartDeposits)),
@@ -181,7 +188,11 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
validData = false
}
} else {
s.cfg.depositCache.InsertPendingDeposit(ctx, deposit, depositLog.BlockNumber, index, s.depositTrie.HashTreeRoot())
root, err := s.depositTrie.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "unable to determine root of deposit trie")
}
s.cfg.depositCache.InsertPendingDeposit(ctx, deposit, depositLog.BlockNumber, index, root)
}
if validData {
log.WithFields(logrus.Fields{
@@ -215,7 +226,7 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
}

// ProcessChainStart processes the log which had been received from
// the ETH1.0 chain by trying to determine when to start the beacon chain.
// the eth1 chain by trying to determine when to start the beacon chain.
func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte, blockNumber *big.Int) {
s.chainStartData.Chainstarted = true
s.chainStartData.GenesisBlock = blockNumber.Uint64()
@@ -225,12 +236,16 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
for i := range s.chainStartData.ChainstartDeposits {
proof, err := s.depositTrie.MerkleProof(i)
if err != nil {
log.Errorf("Unable to generate deposit proof %v", err)
log.Errorf("unable to generate deposit proof %v", err)
}
s.chainStartData.ChainstartDeposits[i].Proof = proof
}

root := s.depositTrie.HashTreeRoot()
root, err := s.depositTrie.HashTreeRoot()
if err != nil { // This should never happen.
log.WithError(err).Error("unable to determine root of deposit trie, aborting chain start")
return
}
s.chainStartData.Eth1Data = &ethpb.Eth1Data{
DepositCount: uint64(len(s.chainStartData.ChainstartDeposits)),
DepositRoot: root[:],
@@ -247,15 +262,15 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
},
})
if err := s.savePowchainData(s.ctx); err != nil {
// continue on, if the save fails as this will get re-saved
// continue on if the save fails as this will get re-saved
// in the next interval.
log.Error(err)
}
}

// createGenesisTime adds in the genesis delay to the eth1 block time
// on which it was triggered.
func createGenesisTime(timeStamp uint64) uint64 {
// adds in the genesis delay to the eth1 block time
// on which it was triggered.
return timeStamp + params.BeaconConfig().GenesisDelay
}
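`createGenesisTime` above is pure arithmetic: the genesis timestamp is the triggering eth1 block time plus the configured delay. A tiny worked example, assuming the mainnet `GENESIS_DELAY` of 604800 seconds:

```go
package main

import "fmt"

// genesisDelay is an assumed config value; Ethereum mainnet uses 604800 seconds (7 days).
const genesisDelay = 604800

// createGenesisTime mirrors the function above: genesis is scheduled a fixed
// delay after the timestamp of the eth1 block that triggered chain start.
func createGenesisTime(timeStamp uint64) uint64 {
	return timeStamp + genesisDelay
}

func main() {
	fmt.Println(createGenesisTime(1000)) // 605800
}
```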
@@ -292,7 +307,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
}
return nil
}
latestFollowHeight, err := s.followBlockHeight(ctx)
latestFollowHeight, err := s.followedBlockHeight(ctx)
if err != nil {
return err
}
@@ -318,7 +333,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
remainingLogs := logCount - uint64(s.lastReceivedMerkleIndex+1)
// only change the end block if the remaining logs are below the required log limit.
// reset our query and end block in this case.
withinLimit := remainingLogs < depositlogRequestLimit
withinLimit := remainingLogs < depositLogRequestLimit
aboveFollowHeight := end >= latestFollowHeight
if withinLimit && aboveFollowHeight {
query.ToBlock = big.NewInt(0).SetUint64(latestFollowHeight)
@@ -413,9 +428,9 @@ func (s *Service) processPastLogs(ctx context.Context) error {
// logs from the period last polled to now.
func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
// We request for the nth block behind the current head, in order to have
// stabilized logs when we retrieve it from the 1.0 chain.
// stabilized logs when we retrieve it from the eth1 chain.

requestedBlock, err := s.followBlockHeight(ctx)
requestedBlock, err := s.followedBlockHeight(ctx)
if err != nil {
return err
}
@@ -457,18 +472,17 @@ func (s *Service) retrieveBlockHashAndTime(ctx context.Context, blkNum *big.Int)
return bHash, timeStamp, nil
}

// checkBlockNumberForChainStart checks the given block number for if chainstart has occurred.
func (s *Service) checkBlockNumberForChainStart(ctx context.Context, blkNum *big.Int) error {
func (s *Service) processChainStartFromBlockNum(ctx context.Context, blkNum *big.Int) error {
bHash, timeStamp, err := s.retrieveBlockHashAndTime(ctx, blkNum)
if err != nil {
return err
}
s.checkForChainstart(ctx, bHash, blkNum, timeStamp)
s.processChainStartIfReady(ctx, bHash, blkNum, timeStamp)
return nil
}

func (s *Service) checkHeaderForChainstart(ctx context.Context, header *gethTypes.Header) {
s.checkForChainstart(ctx, header.Hash(), header.Number, header.Time)
func (s *Service) processChainStartFromHeader(ctx context.Context, header *gethTypes.Header) {
s.processChainStartIfReady(ctx, header.Hash(), header.Number, header.Time)
}

func (s *Service) checkHeaderRange(ctx context.Context, start, end uint64, headersMap map[uint64]*gethTypes.Header,
@@ -484,7 +498,7 @@ func (s *Service) checkHeaderRange(ctx context.Context, start, end uint64, heade
i--
continue
}
s.checkHeaderForChainstart(ctx, h)
s.processChainStartFromHeader(ctx, h)
}
}
return nil
@@ -504,7 +518,7 @@ func (s *Service) currentCountAndTime(ctx context.Context, blockTime uint64) (ui
return valCount, createGenesisTime(blockTime)
}

func (s *Service) checkForChainstart(ctx context.Context, blockHash [32]byte, blockNumber *big.Int, blockTime uint64) {
func (s *Service) processChainStartIfReady(ctx context.Context, blockHash [32]byte, blockNumber *big.Int, blockTime uint64) {
valCount, genesisTime := s.currentCountAndTime(ctx, blockTime)
if valCount == 0 {
return
@@ -516,7 +530,7 @@ func (s *Service) checkForChainstart(ctx context.Context, blockHash [32]byte, bl
}
}

// save all powchain related metadata to disk.
// savePowchainData saves all powchain related metadata to disk.
func (s *Service) savePowchainData(ctx context.Context) error {
var pbState *ethpb.BeaconState
var err error

@@ -589,7 +589,7 @@ func TestCheckForChainstart_NoValidator(t *testing.T) {
require.NoError(t, err, "Unable to set up simulated backend")
beaconDB := testDB.SetupDB(t)
s := newPowchainService(t, testAcc, beaconDB)
s.checkForChainstart(context.Background(), [32]byte{}, nil, 0)
s.processChainStartIfReady(context.Background(), [32]byte{}, nil, 0)
require.LogsDoNotContain(t, hook, "Could not determine active validator count from pre genesis state")
}
@@ -13,7 +13,6 @@ import (
"sync"
"time"

"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -98,7 +97,6 @@ type POWBlockFetcher interface {
BlockByTimestamp(ctx context.Context, time uint64) (*types.HeaderInfo, error)
BlockHashByHeight(ctx context.Context, height *big.Int) (common.Hash, error)
BlockExists(ctx context.Context, hash common.Hash) (bool, *big.Int, error)
BlockExistsWithCache(ctx context.Context, hash common.Hash) (bool, *big.Int, error)
}

// Chain defines a standard interface for the powchain service in Prysm.
@@ -114,7 +112,6 @@ type RPCDataFetcher interface {
Close()
HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error)
HeaderByHash(ctx context.Context, hash common.Hash) (*gethTypes.Header, error)
SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error)
}

// RPCClient defines the rpc methods required to interact with the eth1 node.
@@ -139,10 +136,10 @@ type config struct {
}

// Service fetches important information about the canonical
// Ethereum ETH1.0 chain via a web3 endpoint using an ethclient. The Random
// Beacon Chain requires synchronization with the ETH1.0 chain's current
// blockhash, block number, and access to logs within the
// Validator Registration Contract on the ETH1.0 chain to kick off the beacon
// eth1 chain via a web3 endpoint using an ethclient.
// The beacon chain requires synchronization with the eth1 chain's current
// block hash, block number, and access to logs within the
// Validator Registration Contract on the eth1 chain to kick off the beacon
// chain's validator registration process.
type Service struct {
connectedETH1 bool
@@ -152,7 +149,7 @@ type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
headTicker *time.Ticker
eth1HeadTicker *time.Ticker
httpLogger bind.ContractFilterer
eth1DataFetcher RPCDataFetcher
rpcClient RPCClient
@@ -173,11 +170,11 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
if err != nil {
cancel()
return nil, errors.Wrap(err, "could not setup deposit trie")
return nil, errors.Wrap(err, "could not set up deposit trie")
}
genState, err := transition.EmptyGenesisState()
if err != nil {
return nil, errors.Wrap(err, "could not setup genesis state")
return nil, errors.Wrap(err, "could not set up genesis state")
}

s := &Service{
@@ -201,7 +198,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
},
lastReceivedMerkleIndex: -1,
preGenesisState: genState,
headTicker: time.NewTicker(time.Duration(params.BeaconConfig().SecondsPerETH1Block) * time.Second),
eth1HeadTicker: time.NewTicker(time.Duration(params.BeaconConfig().SecondsPerETH1Block) * time.Second),
}

for _, opt := range opts {
@@ -225,7 +222,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
return s, nil
}

// Start a web3 service's main event loop.
// Start the powchain service's main event loop.
func (s *Service) Start() {
if err := s.setupExecutionClientConnections(s.ctx, s.cfg.currHttpEndpoint); err != nil {
log.WithError(err).Error("Could not connect to execution endpoint")
@@ -372,7 +369,7 @@ func (s *Service) ETH1ConnectionErrors() []error {

// refers to the latest eth1 block which follows the condition: eth1_timestamp +
// SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= current_unix_time
func (s *Service) followBlockHeight(_ context.Context) (uint64, error) {
func (s *Service) followedBlockHeight(_ context.Context) (uint64, error) {
latestValidBlock := uint64(0)
if s.latestEth1Data.BlockHeight > params.BeaconConfig().Eth1FollowDistance {
latestValidBlock = s.latestEth1Data.BlockHeight - params.BeaconConfig().Eth1FollowDistance
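The renamed `followedBlockHeight` clamps `BlockHeight - Eth1FollowDistance` at zero. The same computation in isolation, assuming the mainnet follow distance of 2048 blocks:

```go
package main

import "fmt"

// eth1FollowDistance is an assumed config value; mainnet uses 2048 blocks.
const eth1FollowDistance = 2048

// followedHeight clamps at zero so a young chain never underflows.
func followedHeight(latestBlockHeight uint64) uint64 {
	if latestBlockHeight > eth1FollowDistance {
		return latestBlockHeight - eth1FollowDistance
	}
	return 0
}

func main() {
	fmt.Println(followedHeight(100))    // 0: still inside the follow distance
	fmt.Println(followedHeight(10_000)) // 7952
}
```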
@@ -386,8 +383,7 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
}
s.cfg.depositCache.InsertDepositContainers(ctx, ctrs)
if !s.chainStartData.Chainstarted {
// do not add to pending cache
// if no genesis state exists.
// Do not add to pending cache if no genesis state exists.
validDepositsCount.Add(float64(s.preGenesisState.Eth1DepositIndex()))
return nil
}
@@ -395,7 +391,7 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
if err != nil {
return err
}
// Default to all deposits post-genesis deposits in
// Default to all post-genesis deposits in
// the event we cannot find a finalized state.
currIndex := genesisState.Eth1DepositIndex()
chkPt, err := s.cfg.beaconDB.FinalizedCheckpoint(ctx)
@@ -411,17 +407,17 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
// Set deposit index to the one in the current archived state.
currIndex = fState.Eth1DepositIndex()

// when a node pauses for some time and starts again, the deposits to finalize
// accumulates. we finalize them here before we are ready to receive a block.
// When a node pauses for some time and starts again, the deposits to finalize
// accumulates. We finalize them here before we are ready to receive a block.
// Otherwise, the first few blocks will be slower to compute as we will
// hold the lock and be busy finalizing the deposits.
// The deposit index in the state is always the index of the next deposit
// to be included(rather than the last one to be processed). This was most likely
// to be included (rather than the last one to be processed). This was most likely
// done as the state cannot represent signed integers.
actualIndex := int64(currIndex) - 1 // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
s.cfg.depositCache.InsertFinalizedDeposits(ctx, actualIndex)
// Deposit proofs are only used during state transition and can be safely removed to save space.

// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.depositCache.PruneProofs(ctx, actualIndex); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
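As the comment block above explains, the state's deposit index names the next deposit to include, so the last finalized deposit is that index minus one. A one-line sketch of the `actualIndex` conversion (hypothetical helper, not the cache's API):

```go
package main

import "fmt"

// lastProcessedDeposit converts the state's Eth1DepositIndex (the next deposit
// to be included) into the index of the last deposit already processed,
// mirroring the actualIndex computation above. Returns -1 when nothing has
// been processed yet.
func lastProcessedDeposit(eth1DepositIndex uint64) int64 {
	return int64(eth1DepositIndex) - 1
}

func main() {
	fmt.Println(lastProcessedDeposit(0)) // -1: no deposits processed
	fmt.Println(lastProcessedDeposit(7)) // 6
}
```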
@@ -498,8 +494,7 @@ func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*gethTypes
return headers, nil
}

// safelyHandleHeader will recover and log any panic that occurs from the
// block
// safelyHandleHeader will recover and log any panic that occurs from the block
func safelyHandlePanic() {
if r := recover(); r != nil {
log.WithFields(logrus.Fields{
@@ -522,7 +517,7 @@ func (s *Service) handleETH1FollowDistance() {
log.Warn("Execution client is not syncing")
}
if !s.chainStartData.Chainstarted {
if err := s.checkBlockNumberForChainStart(ctx, big.NewInt(int64(s.latestEth1Data.LastRequestedBlock))); err != nil {
if err := s.processChainStartFromBlockNum(ctx, big.NewInt(int64(s.latestEth1Data.LastRequestedBlock))); err != nil {
s.runError = err
log.Error(err)
return
@@ -530,9 +525,8 @@ func (s *Service) handleETH1FollowDistance() {
}
// If the last requested block has not changed,
// we do not request batched logs as this means there are no new
// logs for the powchain service to process. Also is a potential
// failure condition as would mean we have not respected the protocol
// threshold.
// logs for the powchain service to process. Also it is a potential
// failure condition as would mean we have not respected the protocol threshold.
if s.latestEth1Data.LastRequestedBlock == s.latestEth1Data.BlockHeight {
log.Error("Beacon node is not respecting the follow distance")
return
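`safelyHandlePanic` relies on a deferred `recover()` so one bad header cannot crash the whole service. The pattern in miniature:

```go
package main

import "fmt"

// safely runs fn and converts a panic into a logged recovery, the same shape
// safelyHandlePanic uses with a deferred recover().
func safely(fn func()) {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered from panic:", r)
		}
	}()
	fn()
}

func main() {
	safely(func() { panic("bad header") })
	fmt.Println("still running")
}
```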
@@ -595,7 +589,7 @@ func (s *Service) initPOWService() {
if s.chainStartData.Chainstarted && s.chainStartData.GenesisBlock == 0 {
genHash := common.BytesToHash(s.chainStartData.Eth1Data.BlockHash)
genBlock := s.chainStartData.GenesisBlock
// In the event our provided chainstart data references a non-existent blockhash
// In the event our provided chainstart data references a non-existent block hash,
// we assume the genesis block to be 0.
if genHash != [32]byte{} {
genHeader, err := s.eth1DataFetcher.HeaderByHash(ctx, genHash)
@@ -618,7 +612,7 @@ func (s *Service) initPOWService() {
}
}

// run subscribes to all the services for the ETH1.0 chain.
// run subscribes to all the services for the eth1 chain.
func (s *Service) run(done <-chan struct{}) {
s.runError = nil

@@ -636,7 +630,7 @@ func (s *Service) run(done <-chan struct{}) {
s.updateConnectedETH1(false)
log.Debug("Context closed, exiting goroutine")
return
case <-s.headTicker.C:
case <-s.eth1HeadTicker.C:
head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
if err != nil {
s.pollConnectionStatus(s.ctx)
@@ -692,10 +686,10 @@ func (s *Service) logTillChainStart(ctx context.Context) {
}

// cacheHeadersForEth1DataVote makes sure that voting for eth1data after startup utilizes cached headers
// instead of making multiple RPC requests to the ETH1 endpoint.
// instead of making multiple RPC requests to the eth1 endpoint.
func (s *Service) cacheHeadersForEth1DataVote(ctx context.Context) error {
// Find the end block to request from.
end, err := s.followBlockHeight(ctx)
end, err := s.followedBlockHeight(ctx)
if err != nil {
return err
}
@@ -739,12 +733,12 @@ func (s *Service) cacheBlockHeaders(start, end uint64) error {
return nil
}

// determines the earliest voting block from which to start caching all our previous headers from.
// Determines the earliest voting block from which to start caching all our previous headers from.
func (s *Service) determineEarliestVotingBlock(ctx context.Context, followBlock uint64) (uint64, error) {
genesisTime := s.chainStartData.GenesisTime
currSlot := slots.CurrentSlot(genesisTime)

// In the event genesis has not occurred yet, we just request go back follow_distance blocks.
// In the event genesis has not occurred yet, we just request to go back follow_distance blocks.
if genesisTime == 0 || currSlot == 0 {
earliestBlk := uint64(0)
if followBlock > params.BeaconConfig().Eth1FollowDistance {
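The renamed `eth1HeadTicker` drives the `select` in the `run` loop above. A compact sketch of that ticker-plus-done-channel shape; the interval here is made up for the demo:

```go
package main

import (
	"fmt"
	"time"
)

// pollHeads shows the select shape of the run loop above: a ticker drives the
// head poll and a done channel ends the goroutine.
func pollHeads(done <-chan struct{}, poll func()) {
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-done:
			return
		case <-ticker.C:
			poll()
		}
	}
}

func main() {
	done := make(chan struct{})
	go pollHeads(done, func() { fmt.Println("poll eth1 head") })
	time.Sleep(120 * time.Millisecond)
	close(done)
}
```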
@@ -773,9 +767,12 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
if eth1DataInDB == nil {
return nil
}
s.depositTrie = trie.CreateTrieFromProto(eth1DataInDB.Trie)
s.chainStartData = eth1DataInDB.ChainstartData
var err error
s.depositTrie, err = trie.CreateTrieFromProto(eth1DataInDB.Trie)
if err != nil {
return err
}
s.chainStartData = eth1DataInDB.ChainstartData
if !reflect.ValueOf(eth1DataInDB.BeaconState).IsZero() {
s.preGenesisState, err = v1.InitializeFromProto(eth1DataInDB.BeaconState)
if err != nil {
@@ -791,7 +788,7 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
return nil
}

// validates that all deposit containers are valid and have their relevant indices
// Validates that all deposit containers are valid and have their relevant indices
// in order.
func validateDepositContainers(ctrs []*ethpb.DepositContainer) bool {
ctrLen := len(ctrs)
@@ -814,7 +811,7 @@ func validateDepositContainers(ctrs []*ethpb.DepositContainer) bool {
return true
}

// validates the current powchain data saved and makes sure that any
// Validates the current powchain data is saved and makes sure that any
// embedded genesis state is correctly accounted for.
func (s *Service) ensureValidPowchainData(ctx context.Context) error {
genState, err := s.cfg.beaconDB.GenesisState(ctx)
@@ -267,7 +267,7 @@ func TestFollowBlock_OK(t *testing.T) {
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()

h, err := web3Service.followBlockHeight(context.Background())
h, err := web3Service.followedBlockHeight(context.Background())
require.NoError(t, err)
assert.Equal(t, baseHeight, h, "Unexpected block height")
numToForward := uint64(2)
@@ -280,7 +280,7 @@ func TestFollowBlock_OK(t *testing.T) {
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()

h, err = web3Service.followBlockHeight(context.Background())
h, err = web3Service.followedBlockHeight(context.Background())
require.NoError(t, err)
assert.Equal(t, expectedHeight, h, "Unexpected block height")
}
@@ -28,6 +28,7 @@ type EngineClient struct {
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
TerminalBlockHash []byte
TerminalBlockHashExists bool
OverrideValidHash [32]byte
}

// NewPayload --
@@ -37,8 +38,11 @@ func (e *EngineClient) NewPayload(_ context.Context, _ *pb.ExecutionPayload) ([]

// ForkchoiceUpdated --
func (e *EngineClient) ForkchoiceUpdated(
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
_ context.Context, fcs *pb.ForkchoiceState, _ *pb.PayloadAttributes,
) (*pb.PayloadIDBytes, []byte, error) {
if e.OverrideValidHash != [32]byte{} && bytesutil.ToBytes32(fcs.HeadBlockHash) == e.OverrideValidHash {
return e.PayloadIDBytes, e.ForkChoiceUpdatedResp, nil
}
return e.PayloadIDBytes, e.ForkChoiceUpdatedResp, e.ErrForkchoiceUpdated
}
@@ -71,8 +71,3 @@ func (_ *FaultyMockPOWChain) ClearPreGenesisData() {
func (_ *FaultyMockPOWChain) IsConnectedToETH1() bool {
return true
}

// BlockExistsWithCache --
func (f *FaultyMockPOWChain) BlockExistsWithCache(ctx context.Context, hash common.Hash) (bool, *big.Int, error) {
return f.BlockExists(ctx, hash)
}

@@ -179,11 +179,6 @@ func (m *POWChain) InsertBlock(height int, time uint64, hash []byte) *POWChain {
return m
}

// BlockExistsWithCache --
func (m *POWChain) BlockExistsWithCache(ctx context.Context, hash common.Hash) (bool, *big.Int, error) {
return m.BlockExists(ctx, hash)
}

func SetupRPCServer() (*rpc.Server, string, error) {
srv := rpc.NewServer()
if err := srv.RegisterName("eth", &testETHRPC{}); err != nil {
@@ -16,44 +16,78 @@ import (
"github.com/r3labs/sse"
)

const (
versionHeader = "Eth-Consensus-Version"
grpcVersionHeader = "Grpc-metadata-Eth-Consensus-Version"
)

type sszConfig struct {
sszPath string
fileName string
responseJson sszResponseJson
responseJson sszResponse
}

func handleGetBeaconStateSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
sszPath: "/eth/v1/debug/beacon/states/{state_id}/ssz",
fileName: "beacon_state.ssz",
responseJson: &beaconStateSSZResponseJson{},
responseJson: &sszResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}

func handleGetBeaconBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
sszPath: "/eth/v1/beacon/blocks/{block_id}/ssz",
fileName: "beacon_block.ssz",
responseJson: &blockSSZResponseJson{},
responseJson: &sszResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}

func handleGetBeaconStateSSZV2(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
sszPath: "/eth/v2/debug/beacon/states/{state_id}/ssz",
fileName: "beacon_state.ssz",
responseJson: &beaconStateSSZResponseV2Json{},
responseJson: &versionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}

func handleGetBeaconBlockSSZV2(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
sszPath: "/eth/v2/beacon/blocks/{block_id}/ssz",
fileName: "beacon_block.ssz",
responseJson: &blockSSZResponseV2Json{},
responseJson: &versionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}

func handleSubmitBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
return handlePostSSZ(m, endpoint, w, req, sszConfig{})
}

func handleSubmitBlindedBlockSSZ(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
w http.ResponseWriter,
req *http.Request,
) (handled bool) {
return handlePostSSZ(m, endpoint, w, req, sszConfig{})
}

func handleProduceBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
fileName: "produce_beacon_block.ssz",
responseJson: &versionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}

func handleProduceBlindedBlockSSZ(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
w http.ResponseWriter,
req *http.Request,
) (handled bool) {
config := sszConfig{
fileName: "produce_blinded_beacon_block.ssz",
responseJson: &versionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}
@@ -69,7 +103,7 @@ func handleGetSSZ(
return false
}

if errJson := prepareSSZRequestForProxying(m, endpoint, req, config.sszPath); errJson != nil {
if errJson := prepareSSZRequestForProxying(m, endpoint, req); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
@@ -112,6 +146,53 @@ func handleGetSSZ(
return true
}

func handlePostSSZ(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
w http.ResponseWriter,
req *http.Request,
config sszConfig,
) (handled bool) {
if !sszPosted(req) {
return false
}

if errJson := prepareSSZRequestForProxying(m, endpoint, req); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
prepareCustomHeaders(req)
if errJson := preparePostedSSZData(req); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}

grpcResponse, errJson := m.ProxyRequest(req)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
grpcResponseBody, errJson := apimiddleware.ReadGrpcResponseBody(grpcResponse.Body)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
respHasError, errJson := apimiddleware.HandleGrpcResponseError(endpoint.Err, grpcResponse, grpcResponseBody, w)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return
}
if respHasError {
return
}
if errJson := apimiddleware.Cleanup(grpcResponse.Body); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}

return true
}

func sszRequested(req *http.Request) bool {
accept, ok := req.Header["Accept"]
if !ok {
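`handleGetSSZ` and `handlePostSSZ` both bail out early unless the client negotiated SSZ: GETs opt in through the `Accept` header, POSTs through `Content-Type`, as `sszRequested` above and `sszPosted` below show. A sketch combining the two checks, under the simplifying assumption that exactly one header value is set:

```go
package main

import (
	"fmt"
	"net/http"
)

// sszNegotiated mirrors the sszRequested/sszPosted gate: POSTs are judged by
// Content-Type, everything else by Accept, both against the SSZ media type.
func sszNegotiated(req *http.Request) bool {
	if req.Method == http.MethodPost {
		ct := req.Header["Content-Type"]
		return len(ct) == 1 && ct[0] == "application/octet-stream"
	}
	accept := req.Header["Accept"]
	return len(accept) == 1 && accept[0] == "application/octet-stream"
}

func main() {
	req, _ := http.NewRequest(http.MethodPost, "http://example.invalid", nil)
	req.Header.Set("Content-Type", "application/octet-stream")
	fmt.Println(sszNegotiated(req)) // true
}
```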
@@ -125,24 +206,57 @@ func sszRequested(req *http.Request) bool {
return false
}

func prepareSSZRequestForProxying(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
req *http.Request, sszPath string,
) apimiddleware.ErrorJson {
func sszPosted(req *http.Request) bool {
ct, ok := req.Header["Content-Type"]
if !ok {
return false
}
if len(ct) != 1 {
return false
}
return ct[0] == "application/octet-stream"
}

func prepareSSZRequestForProxying(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, req *http.Request) apimiddleware.ErrorJson {
req.URL.Scheme = "http"
req.URL.Host = m.GatewayAddress
req.RequestURI = ""
req.URL.Path = sszPath
if errJson := apimiddleware.HandleURLParameters(endpoint.Path, req, []string{}); errJson != nil {
if errJson := apimiddleware.HandleURLParameters(endpoint.Path, req, endpoint.RequestURLLiterals); errJson != nil {
return errJson
}
// We have to add the prefix after handling parameters because adding the prefix changes URL segment indexing.
req.URL.Path = "/internal" + req.URL.Path
if errJson := apimiddleware.HandleQueryParameters(req, endpoint.RequestQueryParams); errJson != nil {
return errJson
}
// We have to add new segments after handling parameters because it changes URL segment indexing.
req.URL.Path = "/internal" + req.URL.Path + "/ssz"
return nil
}

func serializeMiddlewareResponseIntoSSZ(respJson sszResponseJson) (version string, ssz []byte, errJson apimiddleware.ErrorJson) {
func prepareCustomHeaders(req *http.Request) {
ver := req.Header.Get(versionHeader)
if ver != "" {
req.Header.Del(versionHeader)
req.Header.Add(grpcVersionHeader, ver)
}
}

func preparePostedSSZData(req *http.Request) apimiddleware.ErrorJson {
buf, err := io.ReadAll(req.Body)
if err != nil {
return apimiddleware.InternalServerErrorWithMessage(err, "could not read body")
}
j := sszRequestJson{Data: base64.StdEncoding.EncodeToString(buf)}
data, err := json.Marshal(j)
if err != nil {
return apimiddleware.InternalServerErrorWithMessage(err, "could not prepare POST data")
}
req.Body = io.NopCloser(bytes.NewBuffer(data))
req.ContentLength = int64(len(data))
req.Header.Set("Content-Type", "application/json")
return nil
}

func serializeMiddlewareResponseIntoSSZ(respJson sszResponse) (version string, ssz []byte, errJson apimiddleware.ErrorJson) {
// Serialize the SSZ part of the deserialized value.
data, err := base64.StdEncoding.DecodeString(respJson.SSZData())
if err != nil {
@@ -168,7 +282,7 @@ func writeSSZResponseHeaderAndBody(grpcResp *http.Response, w http.ResponseWrite
w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", "attachment; filename="+fileName)
w.Header().Set("Eth-Consensus-Version", respVersion)
w.Header().Set(versionHeader, respVersion)
if statusCodeHeader != "" {
code, err := strconv.Atoi(statusCodeHeader)
if err != nil {
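`preparePostedSSZData` re-encodes the raw SSZ body as base64 inside a JSON envelope, which is exactly why `TestPreparePostedSszData` below expects a content length of 19: the 4-byte body "body" becomes `{"data":"Ym9keQ=="}`. A verifiable sketch of the wrapping step (the helper name is made up):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// wrapSSZBody base64-encodes a raw body and wraps it in the {"data": ...}
// envelope the gRPC gateway expects.
func wrapSSZBody(raw []byte) ([]byte, error) {
	return json.Marshal(struct {
		Data string `json:"data"`
	}{Data: base64.StdEncoding.EncodeToString(raw)})
}

func main() {
	out, err := wrapSSZBody([]byte("body"))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out), len(out)) // {"data":"Ym9keQ=="} 19
}
```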
@@ -70,11 +70,21 @@ func TestPrepareSSZRequestForProxying(t *testing.T) {
var body bytes.Buffer
request := httptest.NewRequest("GET", "http://foo.example", &body)

errJson := prepareSSZRequestForProxying(middleware, endpoint, request, "/ssz")
errJson := prepareSSZRequestForProxying(middleware, endpoint, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, "/internal/ssz", request.URL.Path)
}

func TestPreparePostedSszData(t *testing.T) {
var body bytes.Buffer
body.Write([]byte("body"))
request := httptest.NewRequest("POST", "http://foo.example", &body)

preparePostedSSZData(request)
assert.Equal(t, int64(19), request.ContentLength)
assert.Equal(t, "application/json", request.Header.Get("Content-Type"))
}

func TestSerializeMiddlewareResponseIntoSSZ(t *testing.T) {
t.Run("ok", func(t *testing.T) {
j := testSSZResponseJson{
@@ -133,7 +143,7 @@ func TestWriteSSZResponseHeaderAndBody(t *testing.T) {
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "attachment; filename=test.ssz", v[0])
v, ok = writer.Header()["Eth-Consensus-Version"]
v, ok = writer.Header()[versionHeader]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "version", v[0])
@@ -110,11 +110,13 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
OnPreDeserializeRequestBodyIntoContainer: setInitialPublishBlockPostRequest,
OnPostDeserializeRequestBodyIntoContainer: preparePublishedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleSubmitBlockSSZ}
case "/eth/v1/beacon/blinded_blocks":
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: setInitialPublishBlindedBlockPostRequest,
OnPostDeserializeRequestBodyIntoContainer: preparePublishedBlindedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleSubmitBlindedBlockSSZ}
case "/eth/v1/beacon/blocks/{block_id}":
endpoint.GetResponse = &blockResponseJson{}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBeaconBlockSSZ}
@@ -221,6 +223,7 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.Hooks = apimiddleware.HookCollection{
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedV2Block,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlockSSZ}
case "/eth/v1/validator/blinded_blocks/{slot}":
endpoint.GetResponse = &produceBlindedBlockResponseJson{}
endpoint.RequestURLLiterals = []string{"slot"}
@@ -228,6 +231,7 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.Hooks = apimiddleware.HookCollection{
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedBlindedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlindedBlockSSZ}
case "/eth/v1/validator/attestation_data":
endpoint.GetResponse = &produceAttestationDataResponseJson{}
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "committee_index"}}
@@ -7,6 +7,10 @@ import (
ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
)

//----------------
// Requests and responses.
//----------------

// genesisResponseJson is used in /beacon/genesis API endpoint.
type genesisResponseJson struct {
Data *genesisResponse_GenesisJson `json:"data"`
@@ -457,7 +461,7 @@ type blindedBeaconBlockBodyBellatrixJson struct {
Deposits []*depositJson `json:"deposits"`
VoluntaryExits []*signedVoluntaryExitJson `json:"voluntary_exits"`
SyncAggregate *syncAggregateJson `json:"sync_aggregate"`
ExecutionPayloadHeader *executionPayloadHeaderJson `json:"execution_payload"`
ExecutionPayloadHeader *executionPayloadHeaderJson `json:"execution_payload_header"`
}

type executionPayloadJson struct {
@@ -489,7 +493,7 @@ type executionPayloadHeaderJson struct {
GasUsed string `json:"gas_used"`
TimeStamp string `json:"timestamp"`
ExtraData string `json:"extra_data" hex:"true"`
BaseFeePerGas string `json:"base_fee_per_gas" hex:"true"`
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
BlockHash string `json:"block_hash" hex:"true"`
TransactionsRoot string `json:"transactions_root" hex:"true"`
}
@@ -833,63 +837,38 @@ type syncCommitteeContributionJson struct {
|
||||
// SSZ
|
||||
// ---------------
|
||||
|
||||
// sszResponseJson is a common abstraction over all SSZ responses.
|
||||
type sszResponseJson interface {
|
||||
type sszRequestJson struct {
|
||||
Data string `json:"data"`
|
||||
}
|
||||
|
||||
// sszResponse is a common abstraction over all SSZ responses.
|
||||
type sszResponse interface {
|
||||
SSZVersion() string
|
||||
SSZData() string
|
||||
}
|
||||
|
||||
// blockSSZResponseJson is used in /beacon/blocks/{block_id} API endpoint.
|
||||
type blockSSZResponseJson struct {
|
||||
type sszResponseJson struct {
|
||||
Data string `json:"data"`
|
||||
}
|
||||
|
||||
func (ssz *blockSSZResponseJson) SSZData() string {
|
||||
func (ssz *sszResponseJson) SSZData() string {
|
||||
return ssz.Data
|
||||
}
|
||||
|
||||
func (*blockSSZResponseJson) SSZVersion() string {
|
||||
func (*sszResponseJson) SSZVersion() string {
|
||||
return strings.ToLower(ethpbv2.Version_PHASE0.String())
|
||||
}
|
||||
|
||||
// blockSSZResponseV2Json is used in /v2/beacon/blocks/{block_id} API endpoint.
|
||||
type blockSSZResponseV2Json struct {
|
||||
type versionedSSZResponseJson struct {
|
||||
Version string `json:"version"`
|
||||
Data string `json:"data"`
|
||||
}
|
||||
|
||||
func (ssz *blockSSZResponseV2Json) SSZData() string {
|
||||
func (ssz *versionedSSZResponseJson) SSZData() string {
|
||||
return ssz.Data
|
||||
}
|
||||
|
||||
func (ssz *blockSSZResponseV2Json) SSZVersion() string {
|
||||
return ssz.Version
|
||||
}
|
||||
|
||||
// beaconStateSSZResponseJson is used in /debug/beacon/states/{state_id} API endpoint.
|
||||
type beaconStateSSZResponseJson struct {
|
||||
Data string `json:"data"`
|
||||
}
|
||||
|
||||
func (ssz *beaconStateSSZResponseJson) SSZData() string {
|
||||
return ssz.Data
|
||||
}
|
||||
|
||||
func (*beaconStateSSZResponseJson) SSZVersion() string {
|
||||
return strings.ToLower(ethpbv2.Version_PHASE0.String())
|
||||
}
|
||||
|
||||
// beaconStateSSZResponseV2Json is used in /v2/debug/beacon/states/{state_id} API endpoint.
|
||||
type beaconStateSSZResponseV2Json struct {
|
||||
Version string `json:"version"`
|
||||
Data string `json:"data"`
|
||||
}
|
||||
|
||||
func (ssz *beaconStateSSZResponseV2Json) SSZData() string {
|
||||
return ssz.Data
|
||||
}
|
||||
|
||||
func (ssz *beaconStateSSZResponseV2Json) SSZVersion() string {
|
||||
func (ssz *versionedSSZResponseJson) SSZVersion() string {
|
||||
return ssz.Version
|
||||
}
|
||||
|
||||
|
||||
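The hunk above collapses four endpoint-specific SSZ response types into two shared ones (`sszResponseJson`, `versionedSSZResponseJson`) behind the `sszResponse` interface. A minimal sketch of why that pays off: one writer can now serve every SSZ endpoint. The handler name and the base64 transport convention below are assumptions for illustration, not code from this diff.

```go
package apimiddleware // illustrative placement

import (
    "encoding/base64"
    "net/http"
)

// writeSSZResponse is a hypothetical consumer of the sszResponse interface:
// one response writer for every SSZ endpoint instead of one per concrete type.
// It assumes the middleware transports SSZ bytes as a base64 string in Data.
func writeSSZResponse(w http.ResponseWriter, resp sszResponse) error {
    b, err := base64.StdEncoding.DecodeString(resp.SSZData())
    if err != nil {
        return err
    }
    w.Header().Set("Eth-Consensus-Version", resp.SSZVersion())
    w.Header().Set("Content-Type", "application/octet-stream")
    _, err = w.Write(b)
    return err
}
```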
@@ -44,6 +44,7 @@ go_library(
     "//consensus-types/wrapper:go_default_library",
     "//crypto/bls:go_default_library",
     "//encoding/bytesutil:go_default_library",
+    "//encoding/ssz/detect:go_default_library",
     "//network/forks:go_default_library",
     "//proto/eth/v1:go_default_library",
     "//proto/eth/v2:go_default_library",
@@ -56,6 +57,7 @@ go_library(
     "@io_bazel_rules_go//proto/wkt:empty_go_proto",
     "@io_opencensus_go//trace:go_default_library",
     "@org_golang_google_grpc//codes:go_default_library",
+    "@org_golang_google_grpc//metadata:go_default_library",
     "@org_golang_google_grpc//status:go_default_library",
     "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
     "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
@@ -114,6 +116,7 @@ go_test(
     "@com_github_wealdtech_go_bytesutil//:go_default_library",
     "@org_golang_google_grpc//:go_default_library",
     "@org_golang_google_grpc//codes:go_default_library",
+    "@org_golang_google_grpc//metadata:go_default_library",
     "@org_golang_google_grpc//status:go_default_library",
     "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
 ],
@@ -18,16 +18,21 @@ import (
     types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
     "github.com/prysmaticlabs/prysm/consensus-types/wrapper"
     "github.com/prysmaticlabs/prysm/encoding/bytesutil"
+    "github.com/prysmaticlabs/prysm/encoding/ssz/detect"
+    "github.com/prysmaticlabs/prysm/network/forks"
     ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
     ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
     "github.com/prysmaticlabs/prysm/proto/migration"
     "github.com/prysmaticlabs/prysm/time/slots"
     "go.opencensus.io/trace"
     "google.golang.org/grpc/codes"
+    "google.golang.org/grpc/metadata"
     "google.golang.org/grpc/status"
     "google.golang.org/protobuf/types/known/emptypb"
 )
 
+const versionHeader = "eth-consensus-version"
+
 // blockIdParseError represents an error scenario where a block ID could not be parsed.
 type blockIdParseError struct {
     message string
@@ -109,7 +114,7 @@ func (bs *Server) GetBlockHeader(ctx context.Context, req *ethpbv1.BlockRequest)
     if err != nil {
         return nil, status.Errorf(codes.Internal, "Could not determine if block root is canonical: %v", err)
     }
-    isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, blkRoot)
+    isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoot)
     if err != nil {
         return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
     }
@@ -175,7 +180,7 @@ func (bs *Server) ListBlockHeaders(ctx context.Context, req *ethpbv1.BlockHeader
         return nil, status.Errorf(codes.Internal, "Could not determine if block root is canonical: %v", err)
     }
     if !isOptimistic {
-        isOptimistic, err = bs.HeadFetcher.IsOptimisticForRoot(ctx, blkRoots[i])
+        isOptimistic, err = bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoots[i])
         if err != nil {
             return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
         }
@@ -223,6 +228,45 @@ func (bs *Server) SubmitBlock(ctx context.Context, req *ethpbv2.SignedBeaconBloc
     return &emptypb.Empty{}, nil
 }
 
+// SubmitBlockSSZ instructs the beacon node to broadcast a newly signed beacon block to the beacon network, to be
+// included in the beacon chain. The beacon node is not required to validate the signed BeaconBlock, and a successful
+// response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the
+// new block into its state, and therefore validate the block internally, however blocks which fail the validation are
+// still broadcast but a different status code is returned (202).
+//
+// The provided block must be SSZ-serialized.
+func (bs *Server) SubmitBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer) (*emptypb.Empty, error) {
+    ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlockSSZ")
+    defer span.End()
+
+    md, ok := metadata.FromIncomingContext(ctx)
+    if !ok {
+        return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
+    }
+    ver := md.Get(versionHeader)
+    if len(ver) == 0 {
+        return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
+    }
+    schedule := forks.NewOrderedSchedule(params.BeaconConfig())
+    forkVer, err := schedule.VersionForName(ver[0])
+    if err != nil {
+        return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not determine fork version: %v", err)
+    }
+    unmarshaler, err := detect.FromForkVersion(forkVer)
+    if err != nil {
+        return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not create unmarshaler: %v", err)
+    }
+    block, err := unmarshaler.UnmarshalBeaconBlock(req.Data)
+    if err != nil {
+        return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
+    }
+    root, err := block.Block().HashTreeRoot()
+    if err != nil {
+        return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not compute block's hash tree root: %v", err)
+    }
+    return &emptypb.Empty{}, bs.submitBlock(ctx, root, block)
+}
+
 // SubmitBlindedBlock instructs the beacon node to use the components of the `SignedBlindedBeaconBlock` to construct
 // and publish a `SignedBeaconBlock` by swapping out the `transactions_root` for the corresponding full list of `transactions`.
 // The beacon node should broadcast a newly constructed `SignedBeaconBlock` to the beacon network,
@@ -259,6 +303,48 @@ func (bs *Server) SubmitBlindedBlock(ctx context.Context, req *ethpbv2.SignedBli
     return &emptypb.Empty{}, nil
 }
 
+// SubmitBlindedBlockSSZ instructs the beacon node to use the components of the `SignedBlindedBeaconBlock` to construct
+// and publish a `SignedBeaconBlock` by swapping out the `transactions_root` for the corresponding full list of `transactions`.
+// The beacon node should broadcast a newly constructed `SignedBeaconBlock` to the beacon network,
+// to be included in the beacon chain. The beacon node is not required to validate the signed
+// `BeaconBlock`, and a successful response (20X) only indicates that the broadcast has been
+// successful. The beacon node is expected to integrate the new block into its state, and
+// therefore validate the block internally, however blocks which fail the validation are still
+// broadcast but a different status code is returned (202).
+//
+// The provided block must be SSZ-serialized.
+func (bs *Server) SubmitBlindedBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer) (*emptypb.Empty, error) {
+    ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlindedBlockSSZ")
+    defer span.End()
+
+    md, ok := metadata.FromIncomingContext(ctx)
+    if !ok {
+        return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
+    }
+    ver := md.Get(versionHeader)
+    if len(ver) == 0 {
+        return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
+    }
+    schedule := forks.NewOrderedSchedule(params.BeaconConfig())
+    forkVer, err := schedule.VersionForName(ver[0])
+    if err != nil {
+        return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not determine fork version: %v", err)
+    }
+    unmarshaler, err := detect.FromForkVersion(forkVer)
+    if err != nil {
+        return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not create unmarshaler: %v", err)
+    }
+    block, err := unmarshaler.UnmarshalBlindedBeaconBlock(req.Data)
+    if err != nil {
+        return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
+    }
+    root, err := block.Block().HashTreeRoot()
+    if err != nil {
+        return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not compute block's hash tree root: %v", err)
+    }
+    return &emptypb.Empty{}, bs.submitBlock(ctx, root, block)
+}
+
 // GetBlock retrieves block details for given block ID.
 func (bs *Server) GetBlock(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv1.BlockResponse, error) {
     ctx, span := trace.StartSpan(ctx, "beacon.GetBlock")
@@ -371,7 +457,7 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
     if err != nil {
         return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
     }
-    isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, root)
+    isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
     if err != nil {
         return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
     }
@@ -393,7 +479,7 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
 }
 
 // GetBlockSSZV2 returns the SSZ-serialized version of the beacon block for given block ID.
-func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (*ethpbv2.BlockSSZResponseV2, error) {
+func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (*ethpbv2.SSZContainer, error) {
     ctx, span := trace.StartSpan(ctx, "beacon.GetBlockSSZV2")
     defer span.End()
 
@@ -413,7 +499,7 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
         if err != nil {
             return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ: %v", err)
         }
-        return &ethpbv2.BlockSSZResponseV2{Version: ethpbv2.Version_PHASE0, Data: sszBlock}, nil
+        return &ethpbv2.SSZContainer{Version: ethpbv2.Version_PHASE0, Data: sszBlock}, nil
     }
     // ErrUnsupportedPhase0Block means that we have another block type
     if !errors.Is(err, wrapper.ErrUnsupportedPhase0Block) {
@@ -437,7 +523,7 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
         if err != nil {
             return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ: %v", err)
         }
-        return &ethpbv2.BlockSSZResponseV2{Version: ethpbv2.Version_ALTAIR, Data: sszData}, nil
+        return &ethpbv2.SSZContainer{Version: ethpbv2.Version_ALTAIR, Data: sszData}, nil
     }
     // ErrUnsupportedAltairBlock means that we have another block type
     if !errors.Is(err, wrapper.ErrUnsupportedAltairBlock) {
@@ -461,7 +547,7 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
         if err != nil {
             return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ: %v", err)
         }
-        return &ethpbv2.BlockSSZResponseV2{Version: ethpbv2.Version_BELLATRIX, Data: sszData}, nil
+        return &ethpbv2.SSZContainer{Version: ethpbv2.Version_BELLATRIX, Data: sszData}, nil
     }
     // ErrUnsupportedBellatrixBlock means that we have another block type
     if !errors.Is(err, wrapper.ErrUnsupportedBellatrixBlock) {
@@ -543,7 +629,7 @@ func (bs *Server) GetBlockRoot(ctx context.Context, req *ethpbv1.BlockRequest) (
         }
     }
 
-    isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, bytesutil.ToBytes32(root))
+    isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, bytesutil.ToBytes32(root))
     if err != nil {
         return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
     }
@@ -616,7 +702,7 @@ func (bs *Server) ListBlockAttestations(ctx context.Context, req *ethpbv1.BlockR
     if err != nil {
         return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
    }
-    isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, root)
+    isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
     if err != nil {
         return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
     }
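Both new SSZ submission endpoints above resolve the fork by reading the eth-consensus-version gRPC header before unmarshaling the raw bytes. A client-side sketch of attaching that header follows; the service stub package and constructor name are assumptions for illustration, while `SSZContainer` and the header name come from this diff.

```go
package main

import (
    "context"

    ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service" // assumed stub package
    ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
    "google.golang.org/grpc"
    "google.golang.org/grpc/metadata"
)

func submitSSZ(ctx context.Context, conn *grpc.ClientConn, sszBlock []byte) error {
    // The server reads the fork name ("phase0", "altair", "bellatrix") from
    // the eth-consensus-version header of the incoming context.
    ctx = metadata.AppendToOutgoingContext(ctx, "eth-consensus-version", "bellatrix")
    client := ethpbservice.NewBeaconChainClient(conn) // assumed constructor name
    _, err := client.SubmitBlockSSZ(ctx, &ethpbv2.SSZContainer{Data: sszBlock})
    return err
}
```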
@@ -9,6 +9,7 @@ import (
     "github.com/prysmaticlabs/prysm/beacon-chain/db"
     dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
     mockp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
+    "github.com/prysmaticlabs/prysm/config/params"
     "github.com/prysmaticlabs/prysm/consensus-types/interfaces"
     types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
     "github.com/prysmaticlabs/prysm/consensus-types/wrapper"
@@ -21,6 +22,7 @@ import (
     "github.com/prysmaticlabs/prysm/testing/assert"
     "github.com/prysmaticlabs/prysm/testing/require"
     "github.com/prysmaticlabs/prysm/testing/util"
+    "google.golang.org/grpc/metadata"
 )
 
 func fillDBTestBlocks(ctx context.Context, t *testing.T, beaconDB db.Database) (*ethpbalpha.SignedBeaconBlock, []*ethpbalpha.BeaconBlockContainer) {
@@ -187,9 +189,10 @@ func TestServer_GetBlockHeader(t *testing.T) {
         FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
     }
     bs := &Server{
-        BeaconDB:         beaconDB,
-        ChainInfoFetcher: mockChainService,
-        HeadFetcher:      mockChainService,
+        BeaconDB:              beaconDB,
+        ChainInfoFetcher:      mockChainService,
+        HeadFetcher:           mockChainService,
+        OptimisticModeFetcher: mockChainService,
     }
 
     tests := []struct {
@@ -287,9 +290,10 @@ func TestServer_GetBlockHeader(t *testing.T) {
         Optimistic: true,
     }
     bs := &Server{
-        BeaconDB:         beaconDB,
-        ChainInfoFetcher: mockChainService,
-        HeadFetcher:      mockChainService,
+        BeaconDB:              beaconDB,
+        ChainInfoFetcher:      mockChainService,
+        HeadFetcher:           mockChainService,
+        OptimisticModeFetcher: mockChainService,
     }
     header, err := bs.GetBlockHeader(ctx, &ethpbv1.BlockRequest{BlockId: []byte("head")})
     require.NoError(t, err)
@@ -312,9 +316,10 @@ func TestServer_ListBlockHeaders(t *testing.T) {
         FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
     }
     bs := &Server{
-        BeaconDB:         beaconDB,
-        ChainInfoFetcher: mockChainFetcher,
-        HeadFetcher:      mockChainFetcher,
+        BeaconDB:              beaconDB,
+        ChainInfoFetcher:      mockChainFetcher,
+        HeadFetcher:           mockChainFetcher,
+        OptimisticModeFetcher: mockChainFetcher,
     }
 
     b2 := util.NewBeaconBlock()
@@ -416,9 +421,10 @@ func TestServer_ListBlockHeaders(t *testing.T) {
         Optimistic: true,
     }
     bs := &Server{
-        BeaconDB:         beaconDB,
-        ChainInfoFetcher: mockChainFetcher,
-        HeadFetcher:      mockChainFetcher,
+        BeaconDB:              beaconDB,
+        ChainInfoFetcher:      mockChainFetcher,
+        HeadFetcher:           mockChainFetcher,
+        OptimisticModeFetcher: mockChainFetcher,
     }
     slot := types.Slot(30)
     headers, err := bs.ListBlockHeaders(ctx, &ethpbv1.BlockHeadersRequest{
@@ -557,6 +563,289 @@ func TestServer_SubmitBlock_OK(t *testing.T) {
     })
 }
 
+func TestServer_SubmitBlockSSZ_OK(t *testing.T) {
+    t.Run("Phase 0", func(t *testing.T) {
+        beaconDB := dbTest.SetupDB(t)
+        ctx := context.Background()
+
+        genesis := util.NewBeaconBlock()
+        wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb), "Could not save genesis block")
+
+        numDeposits := uint64(64)
+        beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
+        bsRoot, err := beaconState.HashTreeRoot(ctx)
+        require.NoError(t, err)
+        genesisRoot, err := genesis.Block.HashTreeRoot()
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
+
+        c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
+        beaconChainServer := &Server{
+            BeaconDB:         beaconDB,
+            BlockReceiver:    c,
+            ChainInfoFetcher: c,
+            BlockNotifier:    c.BlockNotifier(),
+            Broadcaster:      mockp2p.NewTestP2P(t),
+            HeadFetcher:      c,
+        }
+        req := util.NewBeaconBlock()
+        req.Block.Slot = 5
+        req.Block.ParentRoot = bsRoot[:]
+        wsb, err = wrapper.WrappedSignedBeaconBlock(req)
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
+        blockSsz, err := req.MarshalSSZ()
+        require.NoError(t, err)
+        blockReq := &ethpbv2.SSZContainer{
+            Data: blockSsz,
+        }
+        md := metadata.MD{}
+        md.Set(versionHeader, "phase0")
+        sszCtx := metadata.NewIncomingContext(ctx, md)
+        _, err = beaconChainServer.SubmitBlockSSZ(sszCtx, blockReq)
+        assert.NoError(t, err, "Could not propose block correctly")
+    })
+
+    t.Run("Altair", func(t *testing.T) {
+        beaconDB := dbTest.SetupDB(t)
+        ctx := context.Background()
+
+        genesis := util.NewBeaconBlockAltair()
+        wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")
+
+        numDeposits := uint64(64)
+        beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
+        bsRoot, err := beaconState.HashTreeRoot(ctx)
+        require.NoError(t, err)
+        genesisRoot, err := genesis.Block.HashTreeRoot()
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
+
+        c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
+        beaconChainServer := &Server{
+            BeaconDB:         beaconDB,
+            BlockReceiver:    c,
+            ChainInfoFetcher: c,
+            BlockNotifier:    c.BlockNotifier(),
+            Broadcaster:      mockp2p.NewTestP2P(t),
+            HeadFetcher:      c,
+        }
+        req := util.NewBeaconBlockAltair()
+        req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().AltairForkEpoch))
+        req.Block.ParentRoot = bsRoot[:]
+        wrapped, err = wrapper.WrappedSignedBeaconBlock(req)
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
+        blockSsz, err := req.MarshalSSZ()
+        require.NoError(t, err)
+        blockReq := &ethpbv2.SSZContainer{
+            Data: blockSsz,
+        }
+        md := metadata.MD{}
+        md.Set(versionHeader, "altair")
+        sszCtx := metadata.NewIncomingContext(ctx, md)
+        _, err = beaconChainServer.SubmitBlockSSZ(sszCtx, blockReq)
+        assert.NoError(t, err, "Could not propose block correctly")
+    })
+
+    t.Run("Bellatrix", func(t *testing.T) {
+        // INFO: This code block can be removed once Bellatrix
+        // fork epoch is set to a value other than math.MaxUint64
+        cfg := params.BeaconConfig()
+        cfg.BellatrixForkEpoch = cfg.AltairForkEpoch + 1000
+        cfg.ForkVersionSchedule[bytesutil.ToBytes4(cfg.BellatrixForkVersion)] = cfg.AltairForkEpoch + 1000
+        params.OverrideBeaconConfig(cfg)
+
+        beaconDB := dbTest.SetupDB(t)
+        ctx := context.Background()
+
+        genesis := util.NewBeaconBlockBellatrix()
+        wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")
+
+        numDeposits := uint64(64)
+        beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
+        bsRoot, err := beaconState.HashTreeRoot(ctx)
+        require.NoError(t, err)
+        genesisRoot, err := genesis.Block.HashTreeRoot()
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
+
+        c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
+        beaconChainServer := &Server{
+            BeaconDB:         beaconDB,
+            BlockReceiver:    c,
+            ChainInfoFetcher: c,
+            BlockNotifier:    c.BlockNotifier(),
+            Broadcaster:      mockp2p.NewTestP2P(t),
+            HeadFetcher:      c,
+        }
+        req := util.NewBeaconBlockBellatrix()
+        req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
+        req.Block.ParentRoot = bsRoot[:]
+        wrapped, err = wrapper.WrappedSignedBeaconBlock(req)
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
+        blockSsz, err := req.MarshalSSZ()
+        require.NoError(t, err)
+        blockReq := &ethpbv2.SSZContainer{
+            Data: blockSsz,
+        }
+        md := metadata.MD{}
+        md.Set(versionHeader, "bellatrix")
+        sszCtx := metadata.NewIncomingContext(ctx, md)
+        _, err = beaconChainServer.SubmitBlockSSZ(sszCtx, blockReq)
+        assert.NoError(t, err, "Could not propose block correctly")
+    })
+}
+
+func TestServer_SubmitBlindedBlockSSZ_OK(t *testing.T) {
+    t.Run("Phase 0", func(t *testing.T) {
+        beaconDB := dbTest.SetupDB(t)
+        ctx := context.Background()
+
+        genesis := util.NewBeaconBlock()
+        wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb), "Could not save genesis block")
+
+        numDeposits := uint64(64)
+        beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
+        bsRoot, err := beaconState.HashTreeRoot(ctx)
+        require.NoError(t, err)
+        genesisRoot, err := genesis.Block.HashTreeRoot()
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
+
+        c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
+        beaconChainServer := &Server{
+            BeaconDB:         beaconDB,
+            BlockReceiver:    c,
+            ChainInfoFetcher: c,
+            BlockNotifier:    c.BlockNotifier(),
+            Broadcaster:      mockp2p.NewTestP2P(t),
+            HeadFetcher:      c,
+        }
+        req := util.NewBeaconBlock()
+        req.Block.Slot = 5
+        req.Block.ParentRoot = bsRoot[:]
+        wsb, err = wrapper.WrappedSignedBeaconBlock(req)
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
+        blockSsz, err := req.MarshalSSZ()
+        require.NoError(t, err)
+        blockReq := &ethpbv2.SSZContainer{
+            Data: blockSsz,
+        }
+        md := metadata.MD{}
+        md.Set(versionHeader, "phase0")
+        sszCtx := metadata.NewIncomingContext(ctx, md)
+        _, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
+        assert.NoError(t, err, "Could not propose block correctly")
+    })
+
+    t.Run("Altair", func(t *testing.T) {
+        beaconDB := dbTest.SetupDB(t)
+        ctx := context.Background()
+
+        genesis := util.NewBeaconBlockAltair()
+        wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")
+
+        numDeposits := uint64(64)
+        beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
+        bsRoot, err := beaconState.HashTreeRoot(ctx)
+        require.NoError(t, err)
+        genesisRoot, err := genesis.Block.HashTreeRoot()
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
+
+        c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
+        beaconChainServer := &Server{
+            BeaconDB:         beaconDB,
+            BlockReceiver:    c,
+            ChainInfoFetcher: c,
+            BlockNotifier:    c.BlockNotifier(),
+            Broadcaster:      mockp2p.NewTestP2P(t),
+            HeadFetcher:      c,
+        }
+        req := util.NewBeaconBlockAltair()
+        req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().AltairForkEpoch))
+        req.Block.ParentRoot = bsRoot[:]
+        wrapped, err = wrapper.WrappedSignedBeaconBlock(req)
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
+        blockSsz, err := req.MarshalSSZ()
+        require.NoError(t, err)
+        blockReq := &ethpbv2.SSZContainer{
+            Data: blockSsz,
+        }
+        md := metadata.MD{}
+        md.Set(versionHeader, "altair")
+        sszCtx := metadata.NewIncomingContext(ctx, md)
+        _, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
+        assert.NoError(t, err, "Could not propose block correctly")
+    })
+
+    t.Run("Bellatrix", func(t *testing.T) {
+        // INFO: This code block can be removed once Bellatrix
+        // fork epoch is set to a value other than math.MaxUint64
+        params.SetupTestConfigCleanup(t)
+        cfg := params.BeaconConfig()
+        cfg.BellatrixForkEpoch = cfg.AltairForkEpoch + 1000
+        cfg.ForkVersionSchedule[bytesutil.ToBytes4(cfg.BellatrixForkVersion)] = cfg.AltairForkEpoch + 1000
+        params.OverrideBeaconConfig(cfg)
+
+        beaconDB := dbTest.SetupDB(t)
+        ctx := context.Background()
+
+        genesis := util.NewBeaconBlockBellatrix()
+        wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")
+
+        numDeposits := uint64(64)
+        beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
+        bsRoot, err := beaconState.HashTreeRoot(ctx)
+        require.NoError(t, err)
+        genesisRoot, err := genesis.Block.HashTreeRoot()
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
+
+        c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
+        beaconChainServer := &Server{
+            BeaconDB:         beaconDB,
+            BlockReceiver:    c,
+            ChainInfoFetcher: c,
+            BlockNotifier:    c.BlockNotifier(),
+            Broadcaster:      mockp2p.NewTestP2P(t),
+            HeadFetcher:      c,
+        }
+        req := util.NewBlindedBeaconBlockBellatrix()
+        req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
+        req.Block.ParentRoot = bsRoot[:]
+        wrapped, err = wrapper.WrappedSignedBeaconBlock(req)
+        require.NoError(t, err)
+        require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
+        blockSsz, err := req.MarshalSSZ()
+        require.NoError(t, err)
+        blockReq := &ethpbv2.SSZContainer{
+            Data: blockSsz,
+        }
+        md := metadata.MD{}
+        md.Set(versionHeader, "bellatrix")
+        sszCtx := metadata.NewIncomingContext(ctx, md)
+        _, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
+        assert.NoError(t, err, "Could not propose block correctly")
+    })
+}
+
 func TestSubmitBlindedBlock(t *testing.T) {
     t.Run("Phase 0", func(t *testing.T) {
         beaconDB := dbTest.SetupDB(t)
@@ -836,9 +1125,10 @@ func TestServer_GetBlockV2(t *testing.T) {
         FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
     }
     bs := &Server{
-        BeaconDB:         beaconDB,
-        ChainInfoFetcher: mockChainService,
-        HeadFetcher:      mockChainService,
+        BeaconDB:              beaconDB,
+        ChainInfoFetcher:      mockChainService,
+        HeadFetcher:           mockChainService,
+        OptimisticModeFetcher: mockChainService,
     }
 
     genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
@@ -955,9 +1245,10 @@ func TestServer_GetBlockV2(t *testing.T) {
         FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
     }
     bs := &Server{
-        BeaconDB:         beaconDB,
-        ChainInfoFetcher: mockChainService,
-        HeadFetcher:      mockChainService,
+        BeaconDB:              beaconDB,
+        ChainInfoFetcher:      mockChainService,
+        HeadFetcher:           mockChainService,
+        OptimisticModeFetcher: mockChainService,
     }
 
     genBlk, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
@@ -1074,9 +1365,10 @@ func TestServer_GetBlockV2(t *testing.T) {
         FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
     }
     bs := &Server{
-        BeaconDB:         beaconDB,
-        ChainInfoFetcher: mockChainService,
-        HeadFetcher:      mockChainService,
+        BeaconDB:              beaconDB,
+        ChainInfoFetcher:      mockChainService,
+        HeadFetcher:           mockChainService,
+        OptimisticModeFetcher: mockChainService,
     }
 
     genBlk, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
@@ -1194,9 +1486,10 @@ func TestServer_GetBlockV2(t *testing.T) {
         Optimistic: true,
     }
     bs := &Server{
-        BeaconDB:         beaconDB,
-        ChainInfoFetcher: mockChainService,
-        HeadFetcher:      mockChainService,
+        BeaconDB:              beaconDB,
+        ChainInfoFetcher:      mockChainService,
+        HeadFetcher:           mockChainService,
+        OptimisticModeFetcher: mockChainService,
     }
 
     blk, err := bs.GetBlockV2(ctx, &ethpbv2.BlockRequestV2{
@@ -1395,9 +1688,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
         FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
     }
     bs := &Server{
-        BeaconDB:         beaconDB,
-        ChainInfoFetcher: mockChainFetcher,
-        HeadFetcher:      mockChainFetcher,
+        BeaconDB:              beaconDB,
+        ChainInfoFetcher:      mockChainFetcher,
+        HeadFetcher:           mockChainFetcher,
+        OptimisticModeFetcher: mockChainFetcher,
     }
 
     root, err := genBlk.Block.HashTreeRoot()
@@ -1485,9 +1779,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
         Optimistic: true,
     }
     bs := &Server{
-        BeaconDB:         beaconDB,
-        ChainInfoFetcher: mockChainFetcher,
-        HeadFetcher:      mockChainFetcher,
+        BeaconDB:              beaconDB,
+        ChainInfoFetcher:      mockChainFetcher,
+        HeadFetcher:           mockChainFetcher,
+        OptimisticModeFetcher: mockChainFetcher,
     }
     blockRootResp, err := bs.GetBlockRoot(ctx, &ethpbv1.BlockRequest{
         BlockId: []byte("head"),
@@ -1513,9 +1808,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
         FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
     }
     bs := &Server{
-        BeaconDB:         beaconDB,
-        ChainInfoFetcher: mockChainService,
-        HeadFetcher:      mockChainService,
+        BeaconDB:              beaconDB,
+        ChainInfoFetcher:      mockChainService,
+        HeadFetcher:           mockChainService,
+        OptimisticModeFetcher: mockChainService,
     }
 
     genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
@@ -1615,9 +1911,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
         FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
     }
     bs := &Server{
-        BeaconDB:         beaconDB,
-        ChainInfoFetcher: mockChainService,
-        HeadFetcher:      mockChainService,
+        BeaconDB:              beaconDB,
+        ChainInfoFetcher:      mockChainService,
+        HeadFetcher:           mockChainService,
+        OptimisticModeFetcher: mockChainService,
     }
 
     genBlk, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
@@ -1717,9 +2014,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
         FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
     }
     bs := &Server{
-        BeaconDB:         beaconDB,
-        ChainInfoFetcher: mockChainService,
-        HeadFetcher:      mockChainService,
+        BeaconDB:              beaconDB,
+        ChainInfoFetcher:      mockChainService,
+        HeadFetcher:           mockChainService,
+        OptimisticModeFetcher: mockChainService,
    }
 
     genBlk, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
@@ -1820,9 +2118,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
         Optimistic: true,
     }
     bs := &Server{
-        BeaconDB:         beaconDB,
-        ChainInfoFetcher: mockChainService,
-        HeadFetcher:      mockChainService,
+        BeaconDB:              beaconDB,
+        ChainInfoFetcher:      mockChainService,
+        HeadFetcher:           mockChainService,
+        OptimisticModeFetcher: mockChainService,
     }
     resp, err := bs.ListBlockAttestations(ctx, &ethpbv1.BlockRequest{
         BlockId: []byte("head"),
@@ -34,7 +34,9 @@ type Server struct {
     StateGenService stategen.StateManager
     StateFetcher statefetcher.Fetcher
     HeadFetcher blockchain.HeadFetcher
+    OptimisticModeFetcher blockchain.OptimisticModeFetcher
     V1Alpha1ValidatorServer *v1alpha1validator.Server
     SyncChecker sync.Checker
     CanonicalHistory *stategen.CanonicalHistory
+    HeadUpdater blockchain.HeadUpdater
 }
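This is the core of the refactor: optimistic-sync checks move off `HeadFetcher` onto a dedicated `OptimisticModeFetcher` field that every handler now uses. Inferred from the call sites in this diff rather than copied from the blockchain package, the interface plausibly looks like the sketch below.

```go
// Inferred from usage such as bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
// and helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher); treat as a sketch,
// not the actual definition in the blockchain package.
type OptimisticModeFetcher interface {
    // IsOptimistic reports whether the node currently considers its head
    // optimistically imported (execution payload not yet fully verified).
    IsOptimistic(ctx context.Context) (bool, error)
    // IsOptimisticForRoot reports the same for the block with the given root.
    IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
}
```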
@@ -75,7 +75,7 @@ func (bs *Server) GetStateRoot(ctx context.Context, req *ethpb.StateRequest) (*e
     if err != nil {
         return nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
     }
-    isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
+    isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
     if err != nil {
         return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
     }
@@ -103,7 +103,7 @@ func (bs *Server) GetStateFork(ctx context.Context, req *ethpb.StateRequest) (*e
         return nil, helpers.PrepareStateFetchGRPCError(err)
     }
     fork := st.Fork()
-    isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
+    isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
     if err != nil {
         return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
     }
@@ -133,7 +133,7 @@ func (bs *Server) GetFinalityCheckpoints(ctx context.Context, req *ethpb.StateRe
     if err != nil {
         return nil, helpers.PrepareStateFetchGRPCError(err)
     }
-    isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
+    isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
     if err != nil {
         return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
     }
@@ -80,13 +80,15 @@ func TestGetStateRoot(t *testing.T) {
     require.NoError(t, err)
     db := dbTest.SetupDB(t)
 
+    chainService := &chainMock.ChainService{}
     server := &Server{
         StateFetcher: &testutil.MockFetcher{
             BeaconStateRoot: stateRoot[:],
             BeaconState:     fakeState,
         },
-        HeadFetcher: &chainMock.ChainService{},
-        BeaconDB:    db,
+        HeadFetcher:           chainService,
+        OptimisticModeFetcher: chainService,
+        BeaconDB:              db,
     }
 
     resp, err := server.GetStateRoot(context.Background(), &eth.StateRequest{
@@ -107,13 +109,15 @@ func TestGetStateRoot(t *testing.T) {
     require.NoError(t, db.SaveBlock(ctx, wsb))
     require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
 
+    chainService := &chainMock.ChainService{Optimistic: true}
     server := &Server{
         StateFetcher: &testutil.MockFetcher{
             BeaconStateRoot: stateRoot[:],
             BeaconState:     fakeState,
         },
-        HeadFetcher: &chainMock.ChainService{Optimistic: true},
-        BeaconDB:    db,
+        HeadFetcher:           chainService,
+        OptimisticModeFetcher: chainService,
+        BeaconDB:              db,
     }
     resp, err := server.GetStateRoot(context.Background(), &eth.StateRequest{
         StateId: make([]byte, 0),
@@ -138,12 +142,14 @@ func TestGetStateFork(t *testing.T) {
     require.NoError(t, err)
     db := dbTest.SetupDB(t)
 
+    chainService := &chainMock.ChainService{}
     server := &Server{
         StateFetcher: &testutil.MockFetcher{
             BeaconState: fakeState,
         },
-        HeadFetcher: &chainMock.ChainService{},
-        BeaconDB:    db,
+        HeadFetcher:           chainService,
+        OptimisticModeFetcher: chainService,
+        BeaconDB:              db,
     }
 
     resp, err := server.GetStateFork(ctx, &eth.StateRequest{
@@ -167,12 +173,14 @@ func TestGetStateFork(t *testing.T) {
     require.NoError(t, db.SaveBlock(ctx, wsb))
     require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
 
+    chainService := &chainMock.ChainService{Optimistic: true}
     server := &Server{
         StateFetcher: &testutil.MockFetcher{
             BeaconState: fakeState,
         },
-        HeadFetcher: &chainMock.ChainService{Optimistic: true},
-        BeaconDB:    db,
+        HeadFetcher:           chainService,
+        OptimisticModeFetcher: chainService,
+        BeaconDB:              db,
     }
     resp, err := server.GetStateFork(context.Background(), &eth.StateRequest{
         StateId: make([]byte, 0),
@@ -204,12 +212,14 @@ func TestGetFinalityCheckpoints(t *testing.T) {
     require.NoError(t, err)
     db := dbTest.SetupDB(t)
 
+    chainService := &chainMock.ChainService{}
     server := &Server{
         StateFetcher: &testutil.MockFetcher{
             BeaconState: fakeState,
         },
-        HeadFetcher: &chainMock.ChainService{},
-        BeaconDB:    db,
+        HeadFetcher:           chainService,
+        OptimisticModeFetcher: chainService,
+        BeaconDB:              db,
     }
 
     resp, err := server.GetFinalityCheckpoints(ctx, &eth.StateRequest{
@@ -235,12 +245,14 @@ func TestGetFinalityCheckpoints(t *testing.T) {
     require.NoError(t, db.SaveBlock(ctx, wsb))
     require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
 
+    chainService := &chainMock.ChainService{Optimistic: true}
     server := &Server{
         StateFetcher: &testutil.MockFetcher{
             BeaconState: fakeState,
         },
-        HeadFetcher: &chainMock.ChainService{Optimistic: true},
-        BeaconDB:    db,
+        HeadFetcher:           chainService,
+        OptimisticModeFetcher: chainService,
+        BeaconDB:              db,
     }
     resp, err := server.GetFinalityCheckpoints(context.Background(), &eth.StateRequest{
         StateId: make([]byte, 0),
@@ -91,7 +91,7 @@ func (bs *Server) ListSyncCommittees(ctx context.Context, req *ethpbv2.StateSync
         return nil, status.Errorf(codes.Internal, "Could not extract sync subcommittees: %v", err)
     }
 
-    isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
+    isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
     if err != nil {
         return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
     }
@@ -162,6 +162,7 @@ func TestListSyncCommittees(t *testing.T) {
     require.NoError(t, err)
     db := dbTest.SetupDB(t)
 
+    chainService := &mock.ChainService{}
     s := &Server{
         GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
             Genesis: time.Now(),
@@ -169,8 +170,9 @@ func TestListSyncCommittees(t *testing.T) {
         StateFetcher: &testutil.MockFetcher{
             BeaconState: st,
         },
-        HeadFetcher: &mock.ChainService{},
-        BeaconDB:    db,
+        HeadFetcher:           chainService,
+        OptimisticModeFetcher: chainService,
+        BeaconDB:              db,
     }
     req := &ethpbv2.StateSyncCommitteesRequest{StateId: stRoot[:]}
     resp, err := s.ListSyncCommittees(ctx, req)
@@ -205,6 +207,7 @@ func TestListSyncCommittees(t *testing.T) {
     require.NoError(t, db.SaveBlock(ctx, wsb))
     require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
 
+    chainService := &mock.ChainService{Optimistic: true}
     s := &Server{
         GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
             Genesis: time.Now(),
@@ -212,8 +215,9 @@ func TestListSyncCommittees(t *testing.T) {
         StateFetcher: &testutil.MockFetcher{
             BeaconState: st,
         },
-        HeadFetcher: &mock.ChainService{Optimistic: true},
-        BeaconDB:    db,
+        HeadFetcher:           chainService,
+        OptimisticModeFetcher: chainService,
+        BeaconDB:              db,
     }
     resp, err := s.ListSyncCommittees(ctx, req)
     require.NoError(t, err)
@@ -261,6 +265,7 @@ func TestListSyncCommitteesFuture(t *testing.T) {
     }))
     db := dbTest.SetupDB(t)
 
+    chainService := &mock.ChainService{}
     s := &Server{
         GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
             Genesis: time.Now(),
@@ -268,8 +273,9 @@ func TestListSyncCommitteesFuture(t *testing.T) {
         StateFetcher: &futureSyncMockFetcher{
             BeaconState: st,
         },
-        HeadFetcher: &mock.ChainService{},
-        BeaconDB:    db,
+        HeadFetcher:           chainService,
+        OptimisticModeFetcher: chainService,
+        BeaconDB:              db,
     }
     req := &ethpbv2.StateSyncCommitteesRequest{}
     epoch := 2 * params.BeaconConfig().EpochsPerSyncCommitteePeriod
@@ -57,7 +57,7 @@ func (bs *Server) GetValidator(ctx context.Context, req *ethpb.StateValidatorReq
         return nil, status.Error(codes.NotFound, "Could not find validator")
     }
 
-    isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
+    isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
     if err != nil {
         return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
     }
@@ -80,7 +80,7 @@ func (bs *Server) ListValidators(ctx context.Context, req *ethpb.StateValidators
         return nil, handleValContainerErr(err)
     }
 
-    isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
+    isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
     if err != nil {
         return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
     }
@@ -143,7 +143,7 @@ func (bs *Server) ListValidatorBalances(ctx context.Context, req *ethpb.Validato
         }
     }
 
-    isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
+    isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
     if err != nil {
         return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
     }
@@ -202,7 +202,7 @@ func (bs *Server) ListCommittees(ctx context.Context, req *ethpb.StateCommittees
         }
     }
 
-    isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
+    isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
     if err != nil {
         return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
     }
@@ -33,12 +33,14 @@ func TestGetValidator(t *testing.T) {
     st, _ = util.DeterministicGenesisState(t, 8192)
 
     t.Run("Head Get Validator by index", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         resp, err := s.GetValidator(ctx, &ethpb.StateValidatorRequest{
@@ -50,12 +52,14 @@ func TestGetValidator(t *testing.T) {
     })
 
     t.Run("Head Get Validator by pubkey", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         pubKey := st.PubkeyAtIndex(types.ValidatorIndex(20))
@@ -93,12 +97,14 @@ func TestGetValidator(t *testing.T) {
         require.NoError(t, db.SaveBlock(ctx, wsb))
         require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
 
+        chainService := &chainMock.ChainService{Optimistic: true}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{Optimistic: true},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
         resp, err := s.GetValidator(ctx, &ethpb.StateValidatorRequest{
             StateId: []byte("head"),
@@ -117,12 +123,14 @@ func TestListValidators(t *testing.T) {
     st, _ = util.DeterministicGenesisState(t, 8192)
 
     t.Run("Head List All Validators", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -136,12 +144,14 @@ func TestListValidators(t *testing.T) {
     })
 
     t.Run("Head List Validators by index", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         ids := [][]byte{[]byte("15"), []byte("26"), []byte("400")}
@@ -158,12 +168,14 @@ func TestListValidators(t *testing.T) {
     })
 
     t.Run("Head List Validators by pubkey", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
         idNums := []types.ValidatorIndex{20, 66, 90, 100}
         pubkey1 := st.PubkeyAtIndex(types.ValidatorIndex(20))
@@ -184,12 +196,14 @@ func TestListValidators(t *testing.T) {
     })
 
     t.Run("Head List Validators by both index and pubkey", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         idNums := []types.ValidatorIndex{20, 90, 170, 129}
@@ -212,12 +226,14 @@ func TestListValidators(t *testing.T) {
     })
 
     t.Run("Unknown public key is ignored", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
        existingKey := st.PubkeyAtIndex(types.ValidatorIndex(1))
@@ -232,12 +248,14 @@ func TestListValidators(t *testing.T) {
     })
 
     t.Run("Unknown index is ignored", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         ids := [][]byte{[]byte("1"), []byte("99999")}
@@ -261,12 +279,14 @@ func TestListValidators(t *testing.T) {
         require.NoError(t, db.SaveBlock(ctx, wsb))
         require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
 
+        chainService := &chainMock.ChainService{Optimistic: true}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{Optimistic: true},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
         resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
             StateId: []byte("head"),
@@ -349,12 +369,14 @@ func TestListValidators_Status(t *testing.T) {
     }
 
     t.Run("Head List All ACTIVE Validators", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &statefetcher.StateProvider{
                 ChainInfoFetcher: &chainMock.ChainService{State: st},
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -384,12 +406,14 @@ func TestListValidators_Status(t *testing.T) {
     })
 
     t.Run("Head List All ACTIVE_ONGOING Validators", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &statefetcher.StateProvider{
                 ChainInfoFetcher: &chainMock.ChainService{State: st},
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -418,12 +442,14 @@ func TestListValidators_Status(t *testing.T) {
 
     require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*35))
     t.Run("Head List All EXITED Validators", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &statefetcher.StateProvider{
                 ChainInfoFetcher: &chainMock.ChainService{State: st},
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -451,12 +477,14 @@ func TestListValidators_Status(t *testing.T) {
     })
 
     t.Run("Head List All PENDING_INITIALIZED and EXITED_UNSLASHED Validators", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &statefetcher.StateProvider{
                 ChainInfoFetcher: &chainMock.ChainService{State: st},
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -484,12 +512,14 @@ func TestListValidators_Status(t *testing.T) {
     })
 
     t.Run("Head List All PENDING and EXITED Validators", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &statefetcher.StateProvider{
                 ChainInfoFetcher: &chainMock.ChainService{State: st},
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -532,12 +562,14 @@ func TestListValidatorBalances(t *testing.T) {
     require.NoError(t, st.SetBalances(balances))
 
     t.Run("Head List Validators Balance by index", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         ids := [][]byte{[]byte("15"), []byte("26"), []byte("400")}
@@ -554,12 +586,14 @@ func TestListValidatorBalances(t *testing.T) {
     })
 
     t.Run("Head List Validators Balance by pubkey", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
         idNums := []types.ValidatorIndex{20, 66, 90, 100}
         pubkey1 := st.PubkeyAtIndex(types.ValidatorIndex(20))
@@ -579,12 +613,14 @@ func TestListValidatorBalances(t *testing.T) {
     })
 
     t.Run("Head List Validators Balance by both index and pubkey", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         idNums := []types.ValidatorIndex{20, 90, 170, 129}
@@ -613,12 +649,14 @@ func TestListValidatorBalances(t *testing.T) {
         require.NoError(t, db.SaveBlock(ctx, wsb))
         require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
 
+        chainService := &chainMock.ChainService{Optimistic: true}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{Optimistic: true},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
        }
 
         ids := [][]byte{[]byte("15"), []byte("26"), []byte("400")}
@@ -640,12 +678,14 @@ func TestListCommittees(t *testing.T) {
     epoch := slots.ToEpoch(st.Slot())
 
     t.Run("Head All Committees", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         resp, err := s.ListCommittees(ctx, &ethpb.StateCommitteesRequest{
@@ -660,12 +700,14 @@ func TestListCommittees(t *testing.T) {
     })
 
     t.Run("Head All Committees of Epoch 10", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
         epoch := types.Epoch(10)
         resp, err := s.ListCommittees(ctx, &ethpb.StateCommitteesRequest{
@@ -679,12 +721,14 @@ func TestListCommittees(t *testing.T) {
     })
 
     t.Run("Head All Committees of Slot 4", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         slot := types.Slot(4)
@@ -704,12 +748,14 @@ func TestListCommittees(t *testing.T) {
     })
 
     t.Run("Head All Committees of Index 1", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         index := types.CommitteeIndex(1)
@@ -729,12 +775,14 @@ func TestListCommittees(t *testing.T) {
     })
 
     t.Run("Head All Committees of Slot 2, Index 1", func(t *testing.T) {
+        chainService := &chainMock.ChainService{}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
        }
 
         index := types.CommitteeIndex(1)
@@ -764,12 +812,14 @@ func TestListCommittees(t *testing.T) {
         require.NoError(t, db.SaveBlock(ctx, wsb))
         require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
 
+        chainService := &chainMock.ChainService{Optimistic: true}
         s := Server{
             StateFetcher: &testutil.MockFetcher{
                 BeaconState: st,
             },
-            HeadFetcher: &chainMock.ChainService{Optimistic: true},
-            BeaconDB:    db,
+            HeadFetcher:           chainService,
+            OptimisticModeFetcher: chainService,
+            BeaconDB:              db,
         }
 
         resp, err := s.ListCommittees(ctx, &ethpb.StateCommitteesRequest{
@@ -39,7 +39,7 @@ func (ds *Server) GetBeaconState(ctx context.Context, req *ethpbv1.StateRequest)
 }

 // GetBeaconStateSSZ returns the SSZ-serialized version of the full beacon state object for given state ID.
-func (ds *Server) GetBeaconStateSSZ(ctx context.Context, req *ethpbv1.StateRequest) (*ethpbv1.BeaconStateSSZResponse, error) {
+func (ds *Server) GetBeaconStateSSZ(ctx context.Context, req *ethpbv1.StateRequest) (*ethpbv2.SSZContainer, error) {
 	ctx, span := trace.StartSpan(ctx, "debug.GetBeaconStateSSZ")
 	defer span.End()

@@ -53,7 +53,7 @@ func (ds *Server) GetBeaconStateSSZ(ctx context.Context, req *ethpbv1.StateReque
 		return nil, status.Errorf(codes.Internal, "Could not marshal state into SSZ: %v", err)
 	}

-	return &ethpbv1.BeaconStateSSZResponse{Data: sszState}, nil
+	return &ethpbv2.SSZContainer{Data: sszState}, nil
 }

 // GetBeaconStateV2 returns the full beacon state for a given state ID.
@@ -65,7 +65,7 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.StateReques
 	if err != nil {
 		return nil, helpers.PrepareStateFetchGRPCError(err)
 	}
-	isOptimistic, err := helpers.IsOptimistic(ctx, beaconSt, ds.HeadFetcher)
+	isOptimistic, err := helpers.IsOptimistic(ctx, beaconSt, ds.OptimisticModeFetcher)
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
 	}
@@ -121,21 +121,32 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.StateReques
 }

 // GetBeaconStateSSZV2 returns the SSZ-serialized version of the full beacon state object for given state ID.
-func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.StateRequestV2) (*ethpbv2.BeaconStateSSZResponseV2, error) {
+func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.StateRequestV2) (*ethpbv2.SSZContainer, error) {
 	ctx, span := trace.StartSpan(ctx, "debug.GetBeaconStateSSZV2")
 	defer span.End()

-	state, err := ds.StateFetcher.State(ctx, req.StateId)
+	st, err := ds.StateFetcher.State(ctx, req.StateId)
 	if err != nil {
 		return nil, helpers.PrepareStateFetchGRPCError(err)
 	}

-	sszState, err := state.MarshalSSZ()
+	sszState, err := st.MarshalSSZ()
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "Could not marshal state into SSZ: %v", err)
 	}
+	var ver ethpbv2.Version
+	switch st.Version() {
+	case version.Phase0:
+		ver = ethpbv2.Version_PHASE0
+	case version.Altair:
+		ver = ethpbv2.Version_ALTAIR
+	case version.Bellatrix:
+		ver = ethpbv2.Version_BELLATRIX
+	default:
+		return nil, status.Error(codes.Internal, "Unsupported state version")
+	}

-	return &ethpbv2.BeaconStateSSZResponseV2{Data: sszState}, nil
+	return &ethpbv2.SSZContainer{Data: sszState, Version: ver}, nil
 }

 // ListForkChoiceHeads retrieves the leaves of the current fork choice tree.
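The new SSZContainer response tags the raw SSZ bytes with the fork version, so a client knows which schema to decode against instead of guessing. A sketch of the consumer side under that assumption; the types below mirror the enum and container from the diff, while the per-fork decode calls noted in comments are illustrative:

package main

import "fmt"

// Version mirrors the ethpbv2.Version enum used in the diff.
type Version int32

const (
	PHASE0 Version = iota
	ALTAIR
	BELLATRIX
)

// SSZContainer mirrors the new response shape: raw SSZ bytes plus the fork
// version they were encoded under.
type SSZContainer struct {
	Version Version
	Data    []byte
}

// decodeState picks the right schema for the payload. The decode steps are
// hypothetical stand-ins for the per-fork unmarshalers a real client would
// call, e.g. new(ethpb.BeaconState).UnmarshalSSZ(c.Data) for phase0.
func decodeState(c *SSZContainer) (string, error) {
	switch c.Version {
	case PHASE0:
		return "phase0 state", nil
	case ALTAIR:
		return "altair state", nil
	case BELLATRIX:
		return "bellatrix state", nil
	default:
		return "", fmt.Errorf("unsupported version %d", c.Version)
	}
}

func main() {
	out, err := decodeState(&SSZContainer{Version: ALTAIR})
	fmt.Println(out, err) // altair state <nil>
}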
@@ -167,7 +178,7 @@ func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (
 		Data: make([]*ethpbv2.ForkChoiceHead, len(headRoots)),
 	}
 	for i := range headRoots {
-		isOptimistic, err := ds.HeadFetcher.IsOptimisticForRoot(ctx, headRoots[i])
+		isOptimistic, err := ds.OptimisticModeFetcher.IsOptimisticForRoot(ctx, headRoots[i])
 		if err != nil {
 			return nil, status.Errorf(codes.Internal, "Could not check if head is optimistic: %v", err)
 		}
@@ -44,8 +44,9 @@ func TestGetBeaconStateV2(t *testing.T) {
 			StateFetcher: &testutil.MockFetcher{
 				BeaconState: fakeState,
 			},
-			HeadFetcher: &blockchainmock.ChainService{},
-			BeaconDB:    db,
+			HeadFetcher:           &blockchainmock.ChainService{},
+			OptimisticModeFetcher: &blockchainmock.ChainService{},
+			BeaconDB:              db,
 		}
 		resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
 			StateId: make([]byte, 0),
@@ -60,8 +61,9 @@ func TestGetBeaconStateV2(t *testing.T) {
 			StateFetcher: &testutil.MockFetcher{
 				BeaconState: fakeState,
 			},
-			HeadFetcher: &blockchainmock.ChainService{},
-			BeaconDB:    db,
+			HeadFetcher:           &blockchainmock.ChainService{},
+			OptimisticModeFetcher: &blockchainmock.ChainService{},
+			BeaconDB:              db,
 		}
 		resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
 			StateId: make([]byte, 0),
@@ -76,8 +78,9 @@ func TestGetBeaconStateV2(t *testing.T) {
 			StateFetcher: &testutil.MockFetcher{
 				BeaconState: fakeState,
 			},
-			HeadFetcher: &blockchainmock.ChainService{},
-			BeaconDB:    db,
+			HeadFetcher:           &blockchainmock.ChainService{},
+			OptimisticModeFetcher: &blockchainmock.ChainService{},
+			BeaconDB:              db,
 		}
 		resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
 			StateId: make([]byte, 0),
@@ -102,8 +105,9 @@ func TestGetBeaconStateV2(t *testing.T) {
 			StateFetcher: &testutil.MockFetcher{
 				BeaconState: fakeState,
 			},
-			HeadFetcher: &blockchainmock.ChainService{Optimistic: true},
-			BeaconDB:    db,
+			HeadFetcher:           &blockchainmock.ChainService{},
+			OptimisticModeFetcher: &blockchainmock.ChainService{Optimistic: true},
+			BeaconDB:              db,
 		}
 		resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
 			StateId: make([]byte, 0),
@@ -153,6 +157,7 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
 		assert.NotNil(t, resp)

 		assert.DeepEqual(t, sszState, resp.Data)
+		assert.Equal(t, ethpbv2.Version_PHASE0, resp.Version)
 	})
 	t.Run("Altair", func(t *testing.T) {
 		fakeState, _ := util.DeterministicGenesisStateAltair(t, 1)
@@ -171,6 +176,7 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
 		assert.NotNil(t, resp)

 		assert.DeepEqual(t, sszState, resp.Data)
+		assert.Equal(t, ethpbv2.Version_ALTAIR, resp.Version)
 	})
 	t.Run("Bellatrix", func(t *testing.T) {
 		fakeState, _ := util.DeterministicGenesisStateBellatrix(t, 1)
@@ -189,6 +195,7 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
 		assert.NotNil(t, resp)

 		assert.DeepEqual(t, sszState, resp.Data)
+		assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
 	})
 }

@@ -238,8 +245,10 @@ func TestListForkChoiceHeadsV2(t *testing.T) {
 		Root: bytesutil.ToBytes32(bytesutil.PadTo([]byte("bar"), 32)),
 	}}

+	chainService := &blockchainmock.ChainService{}
 	server := &Server{
-		HeadFetcher: &blockchainmock.ChainService{},
+		HeadFetcher:           chainService,
+		OptimisticModeFetcher: chainService,
 	}
 	resp, err := server.ListForkChoiceHeadsV2(ctx, &emptypb.Empty{})
 	require.NoError(t, err)
@@ -257,8 +266,10 @@ func TestListForkChoiceHeadsV2(t *testing.T) {
 	}

 	t.Run("optimistic head", func(t *testing.T) {
+		chainService := &blockchainmock.ChainService{Optimistic: true}
 		server := &Server{
-			HeadFetcher: &blockchainmock.ChainService{Optimistic: true},
+			HeadFetcher:           chainService,
+			OptimisticModeFetcher: chainService,
 		}
 		resp, err := server.ListForkChoiceHeadsV2(ctx, &emptypb.Empty{})
 		require.NoError(t, err)
@@ -12,7 +12,8 @@ import (
 // Server defines a server implementation of the gRPC Beacon Chain service,
 // providing RPC endpoints to access data relevant to the Ethereum Beacon Chain.
 type Server struct {
-	BeaconDB     db.ReadOnlyDatabase
-	HeadFetcher  blockchain.HeadFetcher
-	StateFetcher statefetcher.Fetcher
+	BeaconDB              db.ReadOnlyDatabase
+	HeadFetcher           blockchain.HeadFetcher
+	StateFetcher          statefetcher.Fetcher
+	OptimisticModeFetcher blockchain.OptimisticModeFetcher
 }
@@ -39,7 +39,7 @@ func ValidateSync(ctx context.Context, syncChecker sync.Checker, headFetcher blo
 }

 // IsOptimistic checks whether the latest block header of the passed in beacon state is the header of an optimistic block.
-func IsOptimistic(ctx context.Context, st state.BeaconState, headFetcher blockchain.HeadFetcher) (bool, error) {
+func IsOptimistic(ctx context.Context, st state.BeaconState, optimisticSyncFetcher blockchain.OptimisticModeFetcher) (bool, error) {
 	root, err := st.HashTreeRoot(ctx)
 	if err != nil {
 		return false, errors.Wrap(err, "could not get state root")
@@ -50,7 +50,7 @@ func IsOptimistic(ctx context.Context, st state.BeaconState, headFetcher blockch
 	if err != nil {
 		return false, errors.Wrap(err, "could not get header root")
 	}
-	isOptimistic, err := headFetcher.IsOptimisticForRoot(ctx, headRoot)
+	isOptimistic, err := optimisticSyncFetcher.IsOptimisticForRoot(ctx, headRoot)
 	if err != nil {
 		return false, errors.Wrap(err, "could not check if block is optimistic")
 	}
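Behind the renamed parameter is an interface split: a helper that only asks whether a block is optimistic no longer depends on the full HeadFetcher surface. A minimal sketch of that narrowing, with simplified stand-ins for the blockchain package interfaces (the real OptimisticModeFetcher has additional methods):

package main

import (
	"context"
	"fmt"
)

// OptimisticModeFetcher is a trimmed stand-in for the narrow interface this
// refactor introduces.
type OptimisticModeFetcher interface {
	IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
}

// alwaysOptimistic is a toy implementation used only for this sketch.
type alwaysOptimistic struct{}

func (alwaysOptimistic) IsOptimisticForRoot(context.Context, [32]byte) (bool, error) {
	return true, nil
}

// isOptimistic mirrors the reshaped helper: it needs only the narrow
// interface, which makes the dependency explicit and trivial to mock.
func isOptimistic(ctx context.Context, headRoot [32]byte, f OptimisticModeFetcher) (bool, error) {
	opt, err := f.IsOptimisticForRoot(ctx, headRoot)
	if err != nil {
		return false, fmt.Errorf("could not check if block is optimistic: %w", err)
	}
	return opt, nil
}

func main() {
	opt, _ := isOptimistic(context.Background(), [32]byte{}, alwaysOptimistic{})
	fmt.Println(opt) // true
}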
@@ -58,14 +58,14 @@ func TestIsOptimistic(t *testing.T) {
 	require.NoError(t, err)

 	t.Run("optimistic", func(t *testing.T) {
-		mockHeadFetcher := &chainmock.ChainService{Optimistic: true}
-		o, err := IsOptimistic(ctx, st, mockHeadFetcher)
+		mockOptSyncFetcher := &chainmock.ChainService{Optimistic: true}
+		o, err := IsOptimistic(ctx, st, mockOptSyncFetcher)
 		require.NoError(t, err)
 		assert.Equal(t, true, o)
 	})
 	t.Run("not optimistic", func(t *testing.T) {
-		mockHeadFetcher := &chainmock.ChainService{Optimistic: false}
-		o, err := IsOptimistic(ctx, st, mockHeadFetcher)
+		mockOptSyncFetcher := &chainmock.ChainService{Optimistic: false}
+		o, err := IsOptimistic(ctx, st, mockOptSyncFetcher)
 		require.NoError(t, err)
 		assert.Equal(t, false, o)
 	})
@@ -13,13 +13,15 @@ import (
 // Server defines a server implementation of the gRPC Validator service,
 // providing RPC endpoints intended for validator clients.
 type Server struct {
-	HeadFetcher       blockchain.HeadFetcher
-	TimeFetcher       blockchain.TimeFetcher
-	SyncChecker       sync.Checker
-	AttestationsPool  attestations.Pool
-	PeerManager       p2p.PeerManager
-	Broadcaster       p2p.Broadcaster
-	StateFetcher      statefetcher.Fetcher
-	SyncCommitteePool synccommittee.Pool
-	V1Alpha1Server    *v1alpha1validator.Server
+	HeadFetcher           blockchain.HeadFetcher
+	HeadUpdater           blockchain.HeadUpdater
+	TimeFetcher           blockchain.TimeFetcher
+	SyncChecker           sync.Checker
+	AttestationsPool      attestations.Pool
+	PeerManager           p2p.PeerManager
+	Broadcaster           p2p.Broadcaster
+	StateFetcher          statefetcher.Fetcher
+	OptimisticModeFetcher blockchain.OptimisticModeFetcher
+	SyncCommitteePool     synccommittee.Pool
+	V1Alpha1Server        *v1alpha1validator.Server
 }
@@ -58,7 +58,7 @@ func (vs *Server) GetAttesterDuties(ctx context.Context, req *ethpbv1.AttesterDu
 		return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
 	}

-	isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.HeadFetcher)
+	isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.OptimisticModeFetcher)
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
 	}
@@ -142,7 +142,7 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
 		return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
 	}

-	isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.HeadFetcher)
+	isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.OptimisticModeFetcher)
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
 	}
@@ -258,7 +258,7 @@ func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncC
 		return nil, status.Errorf(codes.Internal, "Could not get duties: %v", err)
 	}

-	isOptimistic, err := rpchelpers.IsOptimistic(ctx, st, vs.HeadFetcher)
+	isOptimistic, err := rpchelpers.IsOptimistic(ctx, st, vs.OptimisticModeFetcher)
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
 	}
@@ -348,6 +348,76 @@ func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockR
 	return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
 }

+// ProduceBlockV2SSZ requests the beacon node to produce a valid unsigned beacon block, which can then be signed by a proposer and submitted.
+//
+// The produced block is in SSZ form.
+func (vs *Server) ProduceBlockV2SSZ(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.SSZContainer, error) {
+	_, span := trace.StartSpan(ctx, "validator.ProduceBlockV2SSZ")
+	defer span.End()
+
+	if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
+		// We simply return the error because it's already a gRPC error.
+		return nil, err
+	}
+
+	v1alpha1req := &ethpbalpha.BlockRequest{
+		Slot:         req.Slot,
+		RandaoReveal: req.RandaoReveal,
+		Graffiti:     req.Graffiti,
+	}
+	v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
+	if err != nil {
+		// We simply return err because it's already of a gRPC error type.
+		return nil, err
+	}
+	phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
+	if ok {
+		block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
+		}
+		sszBlock, err := block.MarshalSSZ()
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
+		}
+		return &ethpbv2.SSZContainer{
+			Version: ethpbv2.Version_PHASE0,
+			Data:    sszBlock,
+		}, nil
+	}
+	altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
+	if ok {
+		block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
+		}
+		sszBlock, err := block.MarshalSSZ()
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
+		}
+		return &ethpbv2.SSZContainer{
+			Version: ethpbv2.Version_ALTAIR,
+			Data:    sszBlock,
+		}, nil
+	}
+	bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
+	if ok {
+		block, err := migration.V1Alpha1BeaconBlockBellatrixToV2(bellatrixBlock.Bellatrix)
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
+		}
+		sszBlock, err := block.MarshalSSZ()
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
+		}
+		return &ethpbv2.SSZContainer{
+			Version: ethpbv2.Version_BELLATRIX,
+			Data:    sszBlock,
+		}, nil
+	}
+	return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
+}
+
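Each fork branch above repeats the same marshal-and-wrap steps; only the migration call and the version tag differ. A hedged sketch of how that repetition could be factored out — the toSSZContainer helper below is illustrative, not part of the change set, and assumes the same ethpbv2/status/codes imports already used in this file:

// sszMarshaler matches the one capability both helpers need; fastssz blocks
// satisfy it via their generated MarshalSSZ methods.
type sszMarshaler interface {
	MarshalSSZ() ([]byte, error)
}

// toSSZContainer wraps any SSZ-marshalable block into the versioned container,
// collapsing the per-fork boilerplate into one place.
func toSSZContainer(block sszMarshaler, ver ethpbv2.Version) (*ethpbv2.SSZContainer, error) {
	sszBlock, err := block.MarshalSSZ()
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
	}
	return &ethpbv2.SSZContainer{Version: ver, Data: sszBlock}, nil
}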
 // ProduceBlindedBlock requests the beacon node to produce a valid unsigned blinded beacon block,
 // which can then be signed by a proposer and submitted.
 //
@@ -413,6 +483,79 @@ func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceB
 	return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
 }

+// ProduceBlindedBlockSSZ requests the beacon node to produce a valid unsigned blinded beacon block,
+// which can then be signed by a proposer and submitted.
+//
+// The produced block is in SSZ form.
+//
+// Pre-Bellatrix, this endpoint will return a regular block.
+func (vs *Server) ProduceBlindedBlockSSZ(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.SSZContainer, error) {
+	ctx, span := trace.StartSpan(ctx, "validator.ProduceBlindedBlockSSZ")
+	defer span.End()
+
+	if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
+		// We simply return the error because it's already a gRPC error.
+		return nil, err
+	}
+
+	v1alpha1req := &ethpbalpha.BlockRequest{
+		Slot:         req.Slot,
+		RandaoReveal: req.RandaoReveal,
+		Graffiti:     req.Graffiti,
+	}
+	v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
+	if err != nil {
+		// We simply return err because it's already of a gRPC error type.
+		return nil, err
+	}
+	phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
+	if ok {
+		block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
+		}
+		sszBlock, err := block.MarshalSSZ()
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
+		}
+		return &ethpbv2.SSZContainer{
+			Version: ethpbv2.Version_PHASE0,
+			Data:    sszBlock,
+		}, nil
+	}
+	altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
+	if ok {
+		block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
+		}
+		sszBlock, err := block.MarshalSSZ()
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
+		}
+		return &ethpbv2.SSZContainer{
+			Version: ethpbv2.Version_ALTAIR,
+			Data:    sszBlock,
+		}, nil
+	}
+	bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
+	if ok {
+		block, err := migration.V1Alpha1BeaconBlockBellatrixToV2Blinded(bellatrixBlock.Bellatrix)
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
+		}
+		sszBlock, err := block.MarshalSSZ()
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ format: %v", err)
+		}
+		return &ethpbv2.SSZContainer{
+			Version: ethpbv2.Version_BELLATRIX,
+			Data:    sszBlock,
+		}, nil
+	}
+	return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
+}
+
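The blinded variant mirrors ProduceBlockV2SSZ almost line for line; the substantive difference is the Bellatrix branch, which routes the block through V1Alpha1BeaconBlockBellatrixToV2Blinded so the response carries an execution payload header rather than the full payload, while pre-Bellatrix forks fall through to regular blocks as the doc comment notes.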
 // PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
 func (vs *Server) PrepareBeaconProposer(
 	ctx context.Context, request *ethpbv1.PrepareBeaconProposerRequest,

[File diff suppressed because it is too large]
@@ -46,4 +46,5 @@ type Server struct {
 	StateGen        stategen.StateManager
 	SyncChecker     sync.Checker
 	ReplayerBuilder stategen.ReplayerBuilder
+	HeadUpdater     blockchain.HeadUpdater
 }
@@ -178,9 +178,10 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
 	params.OverrideBeaconConfig(cfg)

 	as := &Server{
-		SyncChecker: &mockSync.Sync{},
-		TimeFetcher: &mock.ChainService{Genesis: time.Now()},
-		HeadFetcher: &mock.ChainService{Optimistic: true},
+		SyncChecker:           &mockSync.Sync{},
+		TimeFetcher:           &mock.ChainService{Genesis: time.Now()},
+		HeadFetcher:           &mock.ChainService{},
+		OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
 	}
 	_, err := as.GetAttestationData(context.Background(), &ethpb.AttestationDataRequest{})
 	s, ok := status.FromError(err)
@@ -191,10 +192,11 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
 	beaconState, err := util.NewBeaconState()
 	require.NoError(t, err)
 	as = &Server{
-		SyncChecker:      &mockSync.Sync{},
-		TimeFetcher:      &mock.ChainService{Genesis: time.Now()},
-		HeadFetcher:      &mock.ChainService{Optimistic: false, State: beaconState},
-		AttestationCache: cache.NewAttestationCache(),
+		SyncChecker:           &mockSync.Sync{},
+		TimeFetcher:           &mock.ChainService{Genesis: time.Now()},
+		HeadFetcher:           &mock.ChainService{Optimistic: false, State: beaconState},
+		OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
+		AttestationCache:      cache.NewAttestationCache(),
 	}
 	_, err = as.GetAttestationData(context.Background(), &ethpb.AttestationDataRequest{})
 	require.NoError(t, err)
@@ -197,10 +197,16 @@ func (vs *Server) rebuildDepositTrie(ctx context.Context, canonicalEth1Data *eth

 // validate that the provided deposit trie matches up with the canonical eth1 data provided.
 func validateDepositTrie(trie *trie.SparseMerkleTrie, canonicalEth1Data *ethpb.Eth1Data) (bool, error) {
 	if trie == nil || canonicalEth1Data == nil {
 		return false, errors.New("nil trie or eth1data provided")
 	}
 	if trie.NumOfItems() != int(canonicalEth1Data.DepositCount) {
 		return false, errors.Errorf("wanted the canonical count of %d but received %d", canonicalEth1Data.DepositCount, trie.NumOfItems())
 	}
-	rt := trie.HashTreeRoot()
+	rt, err := trie.HashTreeRoot()
+	if err != nil {
+		return false, err
+	}
 	if !bytes.Equal(rt[:], canonicalEth1Data.DepositRoot) {
 		return false, errors.Errorf("wanted the canonical deposit root of %#x but received %#x", canonicalEth1Data.DepositRoot, rt)
 	}
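SparseMerkleTrie.HashTreeRoot now returns an error instead of silently producing a root, which is why every caller in this change set gains an explicit `root, err :=` step. A small self-contained sketch of the validation flow under that new contract, with a simplified trie standing in for the real type:

package main

import (
	"bytes"
	"errors"
	"fmt"
)

// miniTrie is a stand-in for trie.SparseMerkleTrie with the new
// error-returning HashTreeRoot signature.
type miniTrie struct {
	items int
	root  [32]byte
	bad   bool
}

func (t *miniTrie) NumOfItems() int { return t.items }

func (t *miniTrie) HashTreeRoot() ([32]byte, error) {
	if t.bad {
		return [32]byte{}, errors.New("could not compute hash tree root")
	}
	return t.root, nil
}

// validate mirrors the shape of validateDepositTrie after the change:
// count check first, then the root comparison, with the hash error
// propagated instead of ignored.
func validate(t *miniTrie, wantCount int, wantRoot []byte) (bool, error) {
	if t.NumOfItems() != wantCount {
		return false, fmt.Errorf("wanted count %d but got %d", wantCount, t.NumOfItems())
	}
	rt, err := t.HashTreeRoot()
	if err != nil {
		return false, err
	}
	return bytes.Equal(rt[:], wantRoot), nil
}

func main() {
	ok, err := validate(&miniTrie{items: 3}, 3, make([]byte, 32))
	fmt.Println(ok, err) // true <nil>
}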
@@ -120,18 +120,20 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx

 	feeRecipient := params.BeaconConfig().DefaultFeeRecipient
 	recipient, err := vs.BeaconDB.FeeRecipientByValidatorID(ctx, vIdx)
-	burnAddr := bytesutil.PadTo([]byte{}, fieldparams.FeeRecipientLength)
 	switch err == nil {
 	case true:
 		feeRecipient = recipient
 	case errors.As(err, kv.ErrNotFoundFeeRecipient):
 		// If fee recipient is not found in DB and not set from beacon node CLI,
 		// use the burn address.
-		if bytes.Equal(feeRecipient.Bytes(), burnAddr) {
+		if feeRecipient.String() == fieldparams.EthBurnAddressHex {
 			logrus.WithFields(logrus.Fields{
 				"validatorIndex": vIdx,
-				"burnAddress":    burnAddr,
-			}).Error("Fee recipient not set. Using burn address")
+				"burnAddress":    fieldparams.EthBurnAddressHex,
+			}).Warn("Fee recipient is currently using the burn address, " +
+				"you will not be rewarded transaction fees on this setting. " +
+				"Please set a different eth address as the fee recipient. " +
+				"Please refer to our documentation for instructions")
 		}
 	default:
 		return nil, errors.Wrap(err, "could not get fee recipient in db")
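The reworked branch downgrades the log from Error to Warn and compares the recipient against the canonical burn-address constant rather than a zero-padded byte slice. The fallback order it implies, sketched with simplified types (the constant below is assumed to be the all-zero address, matching fieldparams.EthBurnAddressHex):

package main

import "fmt"

const ethBurnAddressHex = "0x0000000000000000000000000000000000000000"

// pickFeeRecipient sketches the decision tree: a per-validator recipient
// stored in the DB wins; otherwise the CLI-configured default is used, with
// a warning if that default is still the burn address.
func pickFeeRecipient(defaultRecipient, dbRecipient string, dbFound bool) string {
	if dbFound {
		return dbRecipient
	}
	if defaultRecipient == ethBurnAddressHex {
		fmt.Println("warning: fee recipient is the burn address; transaction fees will be lost")
	}
	return defaultRecipient
}

func main() {
	fmt.Println(pickFeeRecipient(ethBurnAddressHex, "", false))
}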
@@ -82,6 +82,10 @@ func (vs *Server) buildPhase0BlockData(ctx context.Context, req *ethpb.BlockRequ
 		return nil, fmt.Errorf("syncing to latest head, not ready to respond")
 	}

+	if err := vs.HeadUpdater.UpdateHead(ctx); err != nil {
+		log.WithError(err).Error("Could not process attestations and update head")
+	}
+
 	// Retrieve the parent block as the current head of the canonical chain.
 	parentRoot, err := vs.HeadFetcher.HeadRoot(ctx)
 	if err != nil {
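Note the error handling here: a failed UpdateHead is logged and swallowed, so block production continues against the previous head rather than aborting. A compact illustration of that best-effort pattern (the helper is a generic sketch, not code from the change set):

package main

import "log"

// bestEffort mirrors how UpdateHead is treated above: the preparatory step
// may fail, but the main operation proceeds with slightly stale data.
func bestEffort(step func() error) {
	if err := step(); err != nil {
		log.Printf("could not update head, continuing: %v", err)
	}
}

func main() {
	bestEffort(func() error { return nil })
}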
@@ -72,6 +72,7 @@ func TestProposer_GetBlock_OK(t *testing.T) {
 		HeadFetcher:       &mock.ChainService{State: beaconState, Root: parentRoot[:]},
 		SyncChecker:       &mockSync.Sync{IsSyncing: false},
 		BlockReceiver:     &mock.ChainService{},
+		HeadUpdater:       &mock.ChainService{},
 		ChainStartFetcher: &mockPOW.POWChain{},
 		Eth1InfoFetcher:   &mockPOW.POWChain{},
 		Eth1BlockFetcher:  &mockPOW.POWChain{},
@@ -158,6 +159,7 @@ func TestProposer_GetBlock_AddsUnaggregatedAtts(t *testing.T) {
 		ChainStartFetcher: &mockPOW.POWChain{},
 		Eth1InfoFetcher:   &mockPOW.POWChain{},
 		Eth1BlockFetcher:  &mockPOW.POWChain{},
+		HeadUpdater:       &mock.ChainService{},
 		MockEth1Votes:     true,
 		SlashingsPool:     slashings.NewPool(),
 		AttPool:           attestations.NewPool(),
@@ -503,10 +505,14 @@ func TestProposer_PendingDeposits_OutsideEth1FollowWindow(t *testing.T) {
 		require.NoError(t, err, "Unable to determine hashed value of deposit")

 		assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
-		assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot()))
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root))
 	}
 	for _, dp := range recentDeposits {
-		depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot())
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root)
 	}

 	blk := util.NewBeaconBlock()
@@ -638,10 +644,14 @@ func TestProposer_PendingDeposits_FollowsCorrectEth1Block(t *testing.T) {
 		require.NoError(t, err, "Unable to determine hashed value of deposit")

 		assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
-		assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot()))
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root))
 	}
 	for _, dp := range recentDeposits {
-		depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot())
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root)
 	}

 	bs := &Server{
@@ -737,10 +747,14 @@ func TestProposer_PendingDeposits_CantReturnBelowStateEth1DepositIndex(t *testin
 		require.NoError(t, err, "Unable to determine hashed value of deposit")

 		assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
-		assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot()))
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root))
 	}
 	for _, dp := range recentDeposits {
-		depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot())
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root)
 	}

 	bs := &Server{
@@ -833,10 +847,14 @@ func TestProposer_PendingDeposits_CantReturnMoreThanMax(t *testing.T) {
 		require.NoError(t, err, "Unable to determine hashed value of deposit")

 		assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
-		assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, height.Uint64(), dp.Index, depositTrie.HashTreeRoot()))
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, height.Uint64(), dp.Index, root))
 	}
 	for _, dp := range recentDeposits {
-		depositCache.InsertPendingDeposit(ctx, dp.Deposit, height.Uint64(), dp.Index, depositTrie.HashTreeRoot())
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		depositCache.InsertPendingDeposit(ctx, dp.Deposit, height.Uint64(), dp.Index, root)
 	}

 	bs := &Server{
@@ -927,10 +945,14 @@ func TestProposer_PendingDeposits_CantReturnMoreThanDepositCount(t *testing.T) {
 		require.NoError(t, err, "Unable to determine hashed value of deposit")

 		assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
-		assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot()))
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root))
 	}
 	for _, dp := range recentDeposits {
-		depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot())
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root)
 	}

 	bs := &Server{
@@ -1036,10 +1058,14 @@ func TestProposer_DepositTrie_UtilizesCachedFinalizedDeposits(t *testing.T) {
 		require.NoError(t, err, "Unable to determine hashed value of deposit")

 		assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
-		assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot()))
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root))
 	}
 	for _, dp := range recentDeposits {
-		depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot())
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root)
 	}

 	bs := &Server{
@@ -1055,8 +1081,10 @@ func TestProposer_DepositTrie_UtilizesCachedFinalizedDeposits(t *testing.T) {
 	trie, err := bs.depositTrie(ctx, &ethpb.Eth1Data{}, big.NewInt(int64(params.BeaconConfig().Eth1FollowDistance)))
 	require.NoError(t, err)

-	actualRoot := trie.HashTreeRoot()
-	expectedRoot := depositTrie.HashTreeRoot()
+	actualRoot, err := trie.HashTreeRoot()
+	require.NoError(t, err)
+	expectedRoot, err := depositTrie.HashTreeRoot()
+	require.NoError(t, err)
 	assert.Equal(t, expectedRoot, actualRoot, "Incorrect deposit trie root")
 }

@@ -1146,10 +1174,14 @@ func TestProposer_DepositTrie_RebuildTrie(t *testing.T) {
 		require.NoError(t, err, "Unable to determine hashed value of deposit")

 		assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
-		assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot()))
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root))
 	}
 	for _, dp := range recentDeposits {
-		depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot())
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root)
 	}
 	d := depositCache.AllDepositContainers(ctx)
 	origDeposit, ok := proto.Clone(d[0].Deposit).(*ethpb.Deposit)
@@ -1177,8 +1209,10 @@ func TestProposer_DepositTrie_RebuildTrie(t *testing.T) {
 	trie, err := bs.depositTrie(ctx, &ethpb.Eth1Data{}, big.NewInt(int64(params.BeaconConfig().Eth1FollowDistance)))
 	require.NoError(t, err)

-	expectedRoot := depositTrie.HashTreeRoot()
-	actualRoot := trie.HashTreeRoot()
+	expectedRoot, err := depositTrie.HashTreeRoot()
+	require.NoError(t, err)
+	actualRoot, err := trie.HashTreeRoot()
+	require.NoError(t, err)
 	assert.Equal(t, expectedRoot, actualRoot, "Incorrect deposit trie root")

 }
@@ -1230,7 +1264,8 @@ func TestProposer_ValidateDepositTrie(t *testing.T) {
 			assert.NoError(t, trie.Insert([]byte{'a'}, 0))
 			assert.NoError(t, trie.Insert([]byte{'b'}, 1))
 			assert.NoError(t, trie.Insert([]byte{'c'}, 2))
-			rt := trie.HashTreeRoot()
+			rt, err := trie.HashTreeRoot()
+			require.NoError(t, err)
 			return &ethpb.Eth1Data{DepositRoot: rt[:], DepositCount: 3, BlockHash: []byte{}}
 		},
 		trieCreator: func() *trie.SparseMerkleTrie {
@@ -1274,7 +1309,9 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
 	require.NoError(t, err)
 	depositCache, err := depositcache.New()
 	require.NoError(t, err)
-	assert.NoError(t, depositCache.InsertDeposit(context.Background(), dc.Deposit, dc.Eth1BlockHeight, dc.Index, depositTrie.HashTreeRoot()))
+	root, err := depositTrie.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, depositCache.InsertDeposit(context.Background(), dc.Deposit, dc.Eth1BlockHeight, dc.Index, root))

 	t.Run("choose highest count", func(t *testing.T) {
 		t.Skip()
@@ -1976,10 +2013,14 @@ func TestProposer_Deposits_ReturnsEmptyList_IfLatestEth1DataEqGenesisEth1Block(t
 		require.NoError(t, err, "Unable to determine hashed value of deposit")

 		assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
-		assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot()))
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root))
 	}
 	for _, dp := range recentDeposits {
-		depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot())
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root)
 	}

 	bs := &Server{
@@ -2066,6 +2107,7 @@ func TestProposer_GetBeaconBlock_PreForkEpoch(t *testing.T) {
 		HeadFetcher:       &mock.ChainService{State: beaconState, Root: parentRoot[:]},
 		SyncChecker:       &mockSync.Sync{IsSyncing: false},
 		BlockReceiver:     &mock.ChainService{},
+		HeadUpdater:       &mock.ChainService{},
 		ChainStartFetcher: &mockPOW.POWChain{},
 		Eth1InfoFetcher:   &mockPOW.POWChain{},
 		Eth1BlockFetcher:  &mockPOW.POWChain{},
@@ -2178,6 +2220,7 @@ func TestProposer_GetBeaconBlock_PostForkEpoch(t *testing.T) {
 		HeadFetcher:       &mock.ChainService{State: beaconState, Root: parentRoot[:]},
 		SyncChecker:       &mockSync.Sync{IsSyncing: false},
 		BlockReceiver:     &mock.ChainService{},
+		HeadUpdater:       &mock.ChainService{},
 		ChainStartFetcher: &mockPOW.POWChain{},
 		Eth1InfoFetcher:   &mockPOW.POWChain{},
 		Eth1BlockFetcher:  &mockPOW.POWChain{},
@@ -2331,6 +2374,7 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
 		TimeFetcher:       &mock.ChainService{Genesis: time.Now()},
 		SyncChecker:       &mockSync.Sync{IsSyncing: false},
 		BlockReceiver:     &mock.ChainService{},
+		HeadUpdater:       &mock.ChainService{},
 		ChainStartFetcher: &mockPOW.POWChain{},
 		Eth1InfoFetcher:   &mockPOW.POWChain{},
 		Eth1BlockFetcher:  c,
@@ -2369,7 +2413,7 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
 	assert.DeepEqual(t, randaoReveal, bellatrixBlk.Bellatrix.Body.RandaoReveal, "Expected block to have correct randao reveal")
 	assert.DeepEqual(t, req.Graffiti, bellatrixBlk.Bellatrix.Body.Graffiti, "Expected block to have correct Graffiti")

-	require.LogsContain(t, hook, "Fee recipient not set. Using burn address")
+	require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
 	require.DeepEqual(t, payload, bellatrixBlk.Bellatrix.Body.ExecutionPayload) // Payload should equal.

 	// Operator sets default fee recipient to not be burned through beacon node cli.
@@ -2380,7 +2424,7 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
 	params.OverrideBeaconConfig(cfg)
 	_, err = proposerServer.GetBeaconBlock(ctx, req)
 	require.NoError(t, err)
-	require.LogsDoNotContain(t, newHook, "Fee recipient not set. Using burn address")
+	require.LogsDoNotContain(t, newHook, "Fee recipient is currently using the burn address")
 }

 func TestProposer_GetBeaconBlock_Optimistic(t *testing.T) {
@@ -2393,7 +2437,7 @@ func TestProposer_GetBeaconBlock_Optimistic(t *testing.T) {
 	bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
 	require.NoError(t, err)

-	proposerServer := &Server{HeadFetcher: &mock.ChainService{Optimistic: true}, TimeFetcher: &mock.ChainService{}}
+	proposerServer := &Server{OptimisticModeFetcher: &mock.ChainService{Optimistic: true}, TimeFetcher: &mock.ChainService{}}
 	req := &ethpb.BlockRequest{
 		Slot: bellatrixSlot + 1,
 	}
@@ -43,6 +43,7 @@ type Server struct {
 	AttestationCache       *cache.AttestationCache
 	ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
 	HeadFetcher            blockchain.HeadFetcher
+	HeadUpdater            blockchain.HeadUpdater
 	ForkFetcher            blockchain.ForkFetcher
 	FinalizationFetcher    blockchain.FinalizationFetcher
 	TimeFetcher            blockchain.TimeFetcher
@@ -50,6 +51,7 @@ type Server struct {
 	DepositFetcher         depositcache.DepositFetcher
 	ChainStartFetcher      powchain.ChainStartFetcher
 	Eth1InfoFetcher        powchain.ChainInfoFetcher
+	OptimisticModeFetcher  blockchain.OptimisticModeFetcher
 	SyncChecker            sync.Checker
 	StateNotifier          statefeed.Notifier
 	BlockNotifier          blockfeed.Notifier
@@ -137,7 +137,9 @@ func TestWaitForActivation_ValidatorOriginallyExists(t *testing.T) {
 	depositCache, err := depositcache.New()
 	require.NoError(t, err)

-	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 10 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+	root, err := depositTrie.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 10 /*blockNum*/, 0, root))
 	trie, err := v1.InitializeFromProtoUnsafe(beaconState)
 	require.NoError(t, err)
 	vs := &Server{
@@ -255,7 +255,7 @@ func (vs *Server) optimisticStatus(ctx context.Context) error {
 	if slots.ToEpoch(vs.TimeFetcher.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
 		return nil
 	}
-	optimistic, err := vs.HeadFetcher.IsOptimistic(ctx)
+	optimistic, err := vs.OptimisticModeFetcher.IsOptimistic(ctx)
 	if err != nil {
 		return status.Errorf(codes.Internal, "Could not determine if the node is a optimistic node: %v", err)
 	}
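optimisticStatus is the common gate used by validator RPCs: before the Bellatrix fork there is nothing to check, and afterwards an optimistic head makes the endpoint refuse to serve (the tests below expect codes.Unavailable). A condensed, self-contained sketch of that control flow, with plain functions standing in for the Prysm types:

package main

import (
	"context"
	"errors"
	"fmt"
)

var errOptimisticMode = errors.New("the node is currently optimistic and cannot serve validators")

// optimisticStatus sketches the gate: skip the check before the Bellatrix
// fork epoch, otherwise refuse to serve while the head is optimistic.
func optimisticStatus(ctx context.Context, currentEpoch, bellatrixEpoch uint64, isOptimistic func(context.Context) (bool, error)) error {
	if currentEpoch < bellatrixEpoch {
		return nil
	}
	optimistic, err := isOptimistic(ctx)
	if err != nil {
		return fmt.Errorf("could not determine optimistic status: %w", err)
	}
	if optimistic {
		return errOptimisticMode
	}
	return nil
}

func main() {
	err := optimisticStatus(context.Background(), 3, 2, func(context.Context) (bool, error) { return true, nil })
	fmt.Println(err) // the node is currently optimistic and cannot serve validators
}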
@@ -41,7 +41,9 @@ func TestValidatorStatus_DepositedEth1(t *testing.T) {
 	depositCache, err := depositcache.New()
 	require.NoError(t, err)

-	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+	root, err := depositTrie.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
 	height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
 	p := &mockPOW.POWChain{
 		TimesByHeight: map[int]uint64{
@@ -84,7 +86,9 @@ func TestValidatorStatus_Deposited(t *testing.T) {
 	depositCache, err := depositcache.New()
 	require.NoError(t, err)

-	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+	root, err := depositTrie.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
 	height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
 	p := &mockPOW.POWChain{
 		TimesByHeight: map[int]uint64{
@@ -134,7 +138,9 @@ func TestValidatorStatus_PartiallyDeposited(t *testing.T) {
 	depositCache, err := depositcache.New()
 	require.NoError(t, err)

-	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+	root, err := depositTrie.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
 	height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
 	p := &mockPOW.POWChain{
 		TimesByHeight: map[int]uint64{
@@ -202,7 +208,9 @@ func TestValidatorStatus_Pending(t *testing.T) {
 	depositCache, err := depositcache.New()
 	require.NoError(t, err)

-	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+	root, err := depositTrie.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))

 	height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
 	p := &mockPOW.POWChain{
@@ -247,7 +255,9 @@ func TestValidatorStatus_Active(t *testing.T) {
 	depositCache, err := depositcache.New()
 	require.NoError(t, err)

-	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+	root, err := depositTrie.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))

 	// Active because activation epoch <= current epoch < exit epoch.
 	activeEpoch := helpers.ActivationExitEpoch(0)
@@ -334,7 +344,9 @@ func TestValidatorStatus_Exiting(t *testing.T) {
 	depositCache, err := depositcache.New()
 	require.NoError(t, err)

-	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+	root, err := depositTrie.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
 	height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
 	p := &mockPOW.POWChain{
 		TimesByHeight: map[int]uint64{
@@ -391,7 +403,9 @@ func TestValidatorStatus_Slashing(t *testing.T) {
 	depositCache, err := depositcache.New()
 	require.NoError(t, err)

-	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+	root, err := depositTrie.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
 	height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
 	p := &mockPOW.POWChain{
 		TimesByHeight: map[int]uint64{
@@ -449,7 +463,9 @@ func TestValidatorStatus_Exited(t *testing.T) {
 	depositCache, err := depositcache.New()
 	require.NoError(t, err)

-	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+	root, err := depositTrie.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
 	height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
 	p := &mockPOW.POWChain{
 		TimesByHeight: map[int]uint64{
@@ -532,11 +548,15 @@ func TestActivationStatus_OK(t *testing.T) {
 	depositCache, err := depositcache.New()
 	require.NoError(t, err)

-	assert.NoError(t, depositCache.InsertDeposit(ctx, dep, 10 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+	root, err := depositTrie.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, depositCache.InsertDeposit(ctx, dep, 10 /*blockNum*/, 0, root))

 	dep = deposits[2]
 	assert.NoError(t, depositTrie.Insert(dep.Data.Signature, 15))
-	assert.NoError(t, depositCache.InsertDeposit(context.Background(), dep, 0, 1, depositTrie.HashTreeRoot()))
+	root, err = depositTrie.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, depositCache.InsertDeposit(context.Background(), dep, 0, 1, root))

 	vs := &Server{
 		Ctx: context.Background(),
@@ -583,7 +603,7 @@ func TestActivationStatus_OK(t *testing.T) {
 }

 func TestOptimisticStatus(t *testing.T) {
-	server := &Server{HeadFetcher: &mockChain.ChainService{}, TimeFetcher: &mockChain.ChainService{}}
+	server := &Server{OptimisticModeFetcher: &mockChain.ChainService{}, TimeFetcher: &mockChain.ChainService{}}
 	err := server.optimisticStatus(context.Background())
 	require.NoError(t, err)

@@ -592,14 +612,14 @@ func TestOptimisticStatus(t *testing.T) {
 	cfg.BellatrixForkEpoch = 2
 	params.OverrideBeaconConfig(cfg)

-	server = &Server{HeadFetcher: &mockChain.ChainService{Optimistic: true}, TimeFetcher: &mockChain.ChainService{}}
+	server = &Server{OptimisticModeFetcher: &mockChain.ChainService{Optimistic: true}, TimeFetcher: &mockChain.ChainService{}}
 	err = server.optimisticStatus(context.Background())
 	s, ok := status.FromError(err)
 	require.Equal(t, true, ok)
 	require.DeepEqual(t, codes.Unavailable, s.Code())
 	require.ErrorContains(t, errOptimisticMode.Error(), err)

-	server = &Server{HeadFetcher: &mockChain.ChainService{Optimistic: false}, TimeFetcher: &mockChain.ChainService{}}
+	server = &Server{OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false}, TimeFetcher: &mockChain.ChainService{}}
 	err = server.optimisticStatus(context.Background())
 	require.NoError(t, err)
 }
@@ -677,7 +697,9 @@ func TestValidatorStatus_CorrectActivationQueue(t *testing.T) {
 		deposit := &ethpb.Deposit{
 			Data: depData,
 		}
-		assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, int64(i), depositTrie.HashTreeRoot()))
+		root, err := depositTrie.HashTreeRoot()
+		require.NoError(t, err)
+		assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, int64(i), root))

 	}

@@ -758,10 +780,14 @@ func TestMultipleValidatorStatus_Pubkeys(t *testing.T) {
 	require.NoError(t, err)

 	dep := deposits[0]
-	assert.NoError(t, depositCache.InsertDeposit(ctx, dep, 10 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+	root, err := depositTrie.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, depositCache.InsertDeposit(ctx, dep, 10 /*blockNum*/, 0, root))
 	dep = deposits[2]
 	assert.NoError(t, depositTrie.Insert(dep.Data.Signature, 15))
-	assert.NoError(t, depositCache.InsertDeposit(context.Background(), dep, 0, 1, depositTrie.HashTreeRoot()))
+	root, err = depositTrie.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, depositCache.InsertDeposit(context.Background(), dep, 0, 1, root))

 	vs := &Server{
 		Ctx: context.Background(),
@@ -918,7 +944,9 @@ func TestValidatorStatus_Invalid(t *testing.T) {
 	depositCache, err := depositcache.New()
 	require.NoError(t, err)

-	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
+	root, err := depositTrie.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
 	height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
 	p := &mockPOW.POWChain{
 		TimesByHeight: map[int]uint64{
@@ -42,8 +42,9 @@ func TestGetSyncMessageBlockRoot_Optimistic(t *testing.T) {
 	params.OverrideBeaconConfig(cfg)

 	server := &Server{
-		HeadFetcher: &mock.ChainService{Optimistic: true},
-		TimeFetcher: &mock.ChainService{Genesis: time.Now()},
+		HeadFetcher:           &mock.ChainService{},
+		TimeFetcher:           &mock.ChainService{Genesis: time.Now()},
+		OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
 	}
 	_, err := server.GetSyncMessageBlockRoot(context.Background(), &emptypb.Empty{})
 	s, ok := status.FromError(err)
@@ -52,8 +53,9 @@ func TestGetSyncMessageBlockRoot_Optimistic(t *testing.T) {
 	require.ErrorContains(t, errOptimisticMode.Error(), err)

 	server = &Server{
-		HeadFetcher: &mock.ChainService{Optimistic: false},
-		TimeFetcher: &mock.ChainService{Genesis: time.Now()},
+		HeadFetcher:           &mock.ChainService{},
+		TimeFetcher:           &mock.ChainService{Genesis: time.Now()},
+		OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
 	}
 	_, err = server.GetSyncMessageBlockRoot(context.Background(), &emptypb.Empty{})
 	require.NoError(t, err)
@@ -78,6 +78,7 @@ type Config struct {
 	BeaconMonitoringPort  int
 	BeaconDB              db.HeadAccessDatabase
 	ChainInfoFetcher      blockchain.ChainInfoFetcher
+	HeadUpdater           blockchain.HeadUpdater
 	HeadFetcher           blockchain.HeadFetcher
 	CanonicalFetcher      blockchain.CanonicalFetcher
 	ForkFetcher           blockchain.ForkFetcher
@@ -110,6 +111,7 @@ type Config struct {
 	MaxMsgSize            int
 	ExecutionEngineCaller powchain.EngineCaller
 	ProposerIdsCache      *cache.ProposerPayloadIDsCache
+	OptimisticModeFetcher blockchain.OptimisticModeFetcher
 }

 // NewService instantiates a new RPC service instance that will
@@ -189,6 +191,7 @@ func (s *Service) Start() {
 		AttPool:               s.cfg.AttestationsPool,
 		ExitPool:              s.cfg.ExitPool,
 		HeadFetcher:           s.cfg.HeadFetcher,
+		HeadUpdater:           s.cfg.HeadUpdater,
 		ForkFetcher:           s.cfg.ForkFetcher,
 		FinalizationFetcher:   s.cfg.FinalizationFetcher,
 		TimeFetcher:           s.cfg.GenesisTimeFetcher,
@@ -196,6 +199,7 @@ func (s *Service) Start() {
 		DepositFetcher:        s.cfg.DepositFetcher,
 		ChainStartFetcher:     s.cfg.ChainStartFetcher,
 		Eth1InfoFetcher:       s.cfg.POWChainService,
+		OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
 		SyncChecker:           s.cfg.SyncService,
 		StateNotifier:         s.cfg.StateNotifier,
 		BlockNotifier:         s.cfg.BlockNotifier,
@@ -215,6 +219,7 @@ func (s *Service) Start() {
 	}
 	validatorServerV1 := &validator.Server{
 		HeadFetcher:      s.cfg.HeadFetcher,
+		HeadUpdater:      s.cfg.HeadUpdater,
 		TimeFetcher:      s.cfg.GenesisTimeFetcher,
 		SyncChecker:      s.cfg.SyncService,
 		AttestationsPool: s.cfg.AttestationsPool,
@@ -228,7 +233,8 @@ func (s *Service) Start() {
 			StateGenService: s.cfg.StateGen,
 			ReplayerBuilder: ch,
 		},
-		SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
+		OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
+		SyncCommitteePool:     s.cfg.SyncCommitteeObjectPool,
 	}

 	nodeServer := &nodev1alpha1.Server{
@@ -261,6 +267,7 @@ func (s *Service) Start() {
 		BeaconDB:            s.cfg.BeaconDB,
 		AttestationsPool:    s.cfg.AttestationsPool,
 		SlashingsPool:       s.cfg.SlashingsPool,
+		HeadUpdater:         s.cfg.HeadUpdater,
 		HeadFetcher:         s.cfg.HeadFetcher,
 		FinalizationFetcher: s.cfg.FinalizationFetcher,
 		CanonicalFetcher:    s.cfg.CanonicalFetcher,
@@ -297,6 +304,7 @@ func (s *Service) Start() {
 			StateGenService: s.cfg.StateGen,
 			ReplayerBuilder: ch,
 		},
+		OptimisticModeFetcher:   s.cfg.OptimisticModeFetcher,
 		HeadFetcher:             s.cfg.HeadFetcher,
 		VoluntaryExitsPool:      s.cfg.ExitPool,
 		V1Alpha1ValidatorServer: validatorServer,
@@ -335,6 +343,7 @@ func (s *Service) Start() {
 			StateGenService: s.cfg.StateGen,
 			ReplayerBuilder: ch,
 		},
+		OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
 	}
 	ethpbv1alpha1.RegisterDebugServer(s.grpcServer, debugServer)
 	ethpbservice.RegisterBeaconDebugServer(s.grpcServer, debugServerV1)
@@ -68,6 +68,7 @@ go_library(
         "//encoding/ssz:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//runtime/version:go_default_library",
+        "@com_github_ferranbt_fastssz//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
@@ -154,7 +154,8 @@ func (b *BeaconState) NumValidators() int {
 }

 // ReadFromEveryValidator reads values from every validator and applies it to the provided function.
-// Warning: This method is potentially unsafe, as it exposes the actual validator registry.
+//
+// WARNING: This method is potentially unsafe, as it exposes the actual validator registry.
 func (b *BeaconState) ReadFromEveryValidator(f func(idx int, val state.ReadOnlyValidator) error) error {
 	if b.validators == nil {
 		return errors.New("nil validators in state")
@@ -1,36 +1,18 @@
 package state_native

 import (
+	ssz "github.com/ferranbt/fastssz"
 	"github.com/pkg/errors"
-	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-	"github.com/prysmaticlabs/prysm/runtime/version"
 )

 var errAssertionFailed = errors.New("failed to convert interface to proto state")
-var errUnsupportedVersion = errors.New("unsupported beacon state version")

 func (b *BeaconState) MarshalSSZ() ([]byte, error) {
 	proto := b.ToProto()
-	switch b.Version() {
-	case version.Phase0:
-		s, ok := proto.(*ethpb.BeaconState)
-		if !ok {
-			return nil, errAssertionFailed
-		}
-		return s.MarshalSSZ()
-	case version.Altair:
-		s, ok := proto.(*ethpb.BeaconStateAltair)
-		if !ok {
-			return nil, errAssertionFailed
-		}
-		return s.MarshalSSZ()
-	case version.Bellatrix:
-		s, ok := proto.(*ethpb.BeaconStateBellatrix)
-		if !ok {
-			return nil, errAssertionFailed
-		}
-		return s.MarshalSSZ()
-	default:
-		return nil, errUnsupportedVersion
-	}
+	s, ok := proto.(ssz.Marshaler)
+	if !ok {
+		return nil, errAssertionFailed
+	}
+	return s.MarshalSSZ()
 }
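Note: this refactor replaces a per-fork switch with a single type assertion against fastssz's Marshaler interface; since every generated proto state type implements MarshalSSZ, new forks no longer need a new case. A minimal self-contained sketch of the same pattern (all names below are illustrative, not Prysm's):

package main

import (
	"errors"
	"fmt"
)

// marshaler mirrors the shape of the ssz.Marshaler interface from fastssz.
type marshaler interface {
	MarshalSSZ() ([]byte, error)
}

var errNotMarshaler = errors.New("value does not implement MarshalSSZ")

// marshalAny asserts the interface once instead of switching on each
// concrete version, so adding a new state type needs no new case here.
func marshalAny(v interface{}) ([]byte, error) {
	m, ok := v.(marshaler)
	if !ok {
		return nil, errNotMarshaler
	}
	return m.MarshalSSZ()
}

type phase0State struct{}

func (phase0State) MarshalSSZ() ([]byte, error) { return []byte{0x00}, nil }

func main() {
	b, err := marshalAny(phase0State{})
	fmt.Println(b, err) // [0] <nil>
}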
@@ -541,6 +541,7 @@ func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
 }

 // Initializes the Merkle layers for the beacon state if they are empty.
+//
 // WARNING: Caller must acquire the mutex before using.
 func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
 	if len(b.merkleLayers) > 0 {
@@ -565,6 +566,7 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
 }

 // Recomputes the Merkle layers for the dirty fields in the state.
+//
 // WARNING: Caller must acquire the mutex before using.
 func (b *BeaconState) recomputeDirtyFields(ctx context.Context) error {
 	for field := range b.dirtyFields {
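Note: both helpers rely on a caller-held lock rather than locking internally, which avoids double-locking when HashTreeRoot calls them in sequence. A standalone sketch of that convention (names are illustrative):

package main

import (
	"fmt"
	"sync"
)

type merkleState struct {
	mu     sync.Mutex
	layers [][][]byte
}

// Root is the exported entry point; it takes the lock exactly once and
// then calls helpers that assume the lock is already held.
func (m *merkleState) Root() int {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.initLayersLocked()
	return len(m.layers)
}

// initLayersLocked lazily builds the layers. The "Locked" suffix documents
// the same contract as the WARNING comments above: the caller holds mu.
func (m *merkleState) initLayersLocked() {
	if len(m.layers) > 0 {
		return
	}
	m.layers = make([][][]byte, 1)
}

func main() {
	m := &merkleState{}
	fmt.Println(m.Root()) // 1
}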
@@ -8,16 +8,16 @@ import (
 var ErrNotInCache = errors.New("state not found in cache")

 type CachedGetter interface {
-	ByRoot([32]byte) (state.BeaconState, error)
+	ByBlockRoot([32]byte) (state.BeaconState, error)
 }

 type CombinedCache struct {
 	getters []CachedGetter
 }

-func (c CombinedCache) ByRoot(root [32]byte) (state.BeaconState, error) {
+func (c CombinedCache) ByBlockRoot(r [32]byte) (state.BeaconState, error) {
 	for _, getter := range c.getters {
-		st, err := getter.ByRoot(root)
+		st, err := getter.ByBlockRoot(r)
 		if err == nil {
 			return st, nil
 		}
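Note: CombinedCache tries each underlying getter in order and treats any non-error result as a hit; presumably getters signal a miss with ErrNotInCache and the loop falls through. A standalone sketch of that fallthrough pattern:

package main

import (
	"errors"
	"fmt"
)

var errNotInCache = errors.New("state not found in cache")

type getter func(root [32]byte) (string, error)

// firstHit queries each getter in order and returns the first success;
// a getter signals a miss by returning errNotInCache, any other error
// is treated as fatal.
func firstHit(getters []getter, root [32]byte) (string, error) {
	for _, g := range getters {
		v, err := g(root)
		if err == nil {
			return v, nil
		}
		if !errors.Is(err, errNotInCache) {
			return "", err
		}
	}
	return "", errNotInCache
}

func main() {
	miss := func([32]byte) (string, error) { return "", errNotInCache }
	hit := func([32]byte) (string, error) { return "state@root", nil }
	v, err := firstHit([]getter{miss, hit}, [32]byte{'a'})
	fmt.Println(v, err) // state@root <nil>
}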
@@ -20,8 +20,8 @@ var (

 // slotRootInfo specifies the slot root info in the epoch boundary state cache.
 type slotRootInfo struct {
-	slot types.Slot
-	root [32]byte
+	slot      types.Slot
+	blockRoot [32]byte
 }

 // slotKeyFn takes the string representation of the slot to be used as key
@@ -66,9 +66,9 @@ func newBoundaryStateCache() *epochBoundaryState {
 	}
 }

-// ByRoot satisfies the CachedGetter interface
-func (e *epochBoundaryState) ByRoot(r [32]byte) (state.BeaconState, error) {
-	rsi, ok, err := e.getByRoot(r)
+// ByBlockRoot satisfies the CachedGetter interface
+func (e *epochBoundaryState) ByBlockRoot(r [32]byte) (state.BeaconState, error) {
+	rsi, ok, err := e.getByBlockRoot(r)
 	if err != nil {
 		return nil, err
 	}
@@ -79,14 +79,14 @@ func (e *epochBoundaryState) ByRoot(r [32]byte) (state.BeaconState, error) {
 }

 // get epoch boundary state by its block root. Returns copied state in state info object if exists. Otherwise returns nil.
-func (e *epochBoundaryState) getByRoot(r [32]byte) (*rootStateInfo, bool, error) {
+func (e *epochBoundaryState) getByBlockRoot(r [32]byte) (*rootStateInfo, bool, error) {
 	e.lock.RLock()
 	defer e.lock.RUnlock()

-	return e.getByRootLockFree(r)
+	return e.getByBlockRootLockFree(r)
 }

-func (e *epochBoundaryState) getByRootLockFree(r [32]byte) (*rootStateInfo, bool, error) {
+func (e *epochBoundaryState) getByBlockRootLockFree(r [32]byte) (*rootStateInfo, bool, error) {
 	obj, exists, err := e.rootStateCache.GetByKey(string(r[:]))
 	if err != nil {
 		return nil, false, err
@@ -122,24 +122,24 @@ func (e *epochBoundaryState) getBySlot(s types.Slot) (*rootStateInfo, bool, erro
 		return nil, false, errNotSlotRootInfo
 	}

-	return e.getByRootLockFree(info.root)
+	return e.getByBlockRootLockFree(info.blockRoot)
 }

 // put adds a state to the epoch boundary state cache. This method also trims the
 // least recently added state info if the cache size has reached the max cache
 // size limit.
-func (e *epochBoundaryState) put(r [32]byte, s state.BeaconState) error {
+func (e *epochBoundaryState) put(blockRoot [32]byte, s state.BeaconState) error {
 	e.lock.Lock()
 	defer e.lock.Unlock()

 	if err := e.slotRootCache.AddIfNotPresent(&slotRootInfo{
-		slot: s.Slot(),
-		root: r,
+		slot:      s.Slot(),
+		blockRoot: blockRoot,
 	}); err != nil {
 		return err
 	}
 	if err := e.rootStateCache.AddIfNotPresent(&rootStateInfo{
-		root: r,
+		root:  blockRoot,
 		state: s.Copy(),
 	}); err != nil {
 		return err
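Note: put maintains two caches in lockstep: slotRootCache maps a slot to the block root of the boundary state, and rootStateCache maps that block root to a copy of the state, which is why getBySlot resolves through getByBlockRootLockFree. A plain-map sketch of the two-index design (illustrative, without the trimming logic):

package main

import "fmt"

type boundaryCache struct {
	slotToRoot  map[uint64][32]byte
	rootToState map[[32]byte]string // a string stands in for a copied BeaconState
}

// put indexes the state under both its slot and its block root so it can
// be looked up either way, mirroring the two AddIfNotPresent calls above.
func (c *boundaryCache) put(blockRoot [32]byte, slot uint64, st string) {
	c.slotToRoot[slot] = blockRoot
	c.rootToState[blockRoot] = st
}

// bySlot resolves slot -> block root -> state, like getBySlot above.
func (c *boundaryCache) bySlot(slot uint64) (string, bool) {
	r, ok := c.slotToRoot[slot]
	if !ok {
		return "", false
	}
	st, ok := c.rootToState[r]
	return st, ok
}

func main() {
	c := &boundaryCache{slotToRoot: map[uint64][32]byte{}, rootToState: map[[32]byte]string{}}
	c.put([32]byte{'a'}, 32, "epoch-boundary-state")
	fmt.Println(c.bySlot(32)) // epoch-boundary-state true
}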
@@ -152,11 +152,11 @@ func (e *epochBoundaryState) put(r [32]byte, s state.BeaconState) error {
 }

 // delete the state from the epoch boundary state cache.
-func (e *epochBoundaryState) delete(r [32]byte) error {
+func (e *epochBoundaryState) delete(blockRoot [32]byte) error {
 	e.lock.Lock()
 	defer e.lock.Unlock()
 	return e.rootStateCache.Delete(&rootStateInfo{
-		root: r,
+		root: blockRoot,
 	})
 }
@@ -27,12 +27,12 @@ func TestEpochBoundaryStateCache_CanSaveAndDelete(t *testing.T) {
 	r := [32]byte{'a'}
 	require.NoError(t, e.put(r, s))

-	got, exists, err := e.getByRoot([32]byte{'b'})
+	got, exists, err := e.getByBlockRoot([32]byte{'b'})
 	require.NoError(t, err)
 	assert.Equal(t, false, exists, "Should not exist")
 	assert.Equal(t, (*rootStateInfo)(nil), got, "Should not exist")

-	got, exists, err = e.getByRoot([32]byte{'a'})
+	got, exists, err = e.getByBlockRoot([32]byte{'a'})
 	require.NoError(t, err)
 	assert.Equal(t, true, exists, "Should exist")
 	assert.DeepSSZEqual(t, s.InnerStateUnsafe(), got.state.InnerStateUnsafe(), "Should have the same state")
@@ -48,7 +48,7 @@ func TestEpochBoundaryStateCache_CanSaveAndDelete(t *testing.T) {
 	assert.DeepSSZEqual(t, s.InnerStateUnsafe(), got.state.InnerStateUnsafe(), "Should have the same state")

 	require.NoError(t, e.delete(r))
-	got, exists, err = e.getByRoot([32]byte{'b'})
+	got, exists, err = e.getByBlockRoot([32]byte{'b'})
 	require.NoError(t, err)
 	assert.Equal(t, false, exists, "Should not exist")
 	assert.Equal(t, (*rootStateInfo)(nil), got, "Should not exist")
@@ -28,24 +28,23 @@ func (s *State) HasState(ctx context.Context, blockRoot [32]byte) (bool, error)
 }

 // HasStateInCache returns true if the state exists in cache.
-func (s *State) HasStateInCache(ctx context.Context, blockRoot [32]byte) (bool, error) {
+func (s *State) HasStateInCache(_ context.Context, blockRoot [32]byte) (bool, error) {
 	if s.hotStateCache.has(blockRoot) {
 		return true, nil
 	}
-	_, has, err := s.epochBoundaryStateCache.getByRoot(blockRoot)
+	_, has, err := s.epochBoundaryStateCache.getByBlockRoot(blockRoot)
 	if err != nil {
 		return false, err
 	}
 	return has, nil
 }

-// StateByRootIfCachedNoCopy retrieves a state using the input block root only if the state is already in the cache
+// StateByRootIfCachedNoCopy retrieves a state using the input block root only if the state is already in the cache.
 func (s *State) StateByRootIfCachedNoCopy(blockRoot [32]byte) state.BeaconState {
 	if !s.hotStateCache.has(blockRoot) {
 		return nil
 	}
-	state := s.hotStateCache.getWithoutCopy(blockRoot)
-	return state
+	return s.hotStateCache.getWithoutCopy(blockRoot)
 }

 // StateByRoot retrieves the state using input block root.
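Note: the NoCopy variant returns the cached object itself rather than a copy; that is cheap, but the caller must treat the result as read-only, since mutations would corrupt the cached state. A small standalone sketch of the two access styles (illustrative types):

package main

import "fmt"

type beaconState struct{ slot uint64 }

// copyOf returns an independent value, safe for the caller to mutate.
func copyOf(s *beaconState) *beaconState { c := *s; return &c }

func main() {
	cache := map[[32]byte]*beaconState{{'a'}: {slot: 7}}

	// Copying getter: mutations do not leak back into the cache.
	got := copyOf(cache[[32]byte{'a'}])
	got.slot = 99

	// No-copy getter: cheaper, but the caller shares cache memory.
	shared := cache[[32]byte{'a'}]
	fmt.Println(got.slot, shared.slot) // 99 7
}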
@@ -53,7 +52,7 @@ func (s *State) StateByRoot(ctx context.Context, blockRoot [32]byte) (state.Beac
 	ctx, span := trace.StartSpan(ctx, "stateGen.StateByRoot")
 	defer span.End()

-	// Genesis case. If block root is zero hash, short circuit to use genesis cachedState stored in DB.
+	// Genesis case. If block root is zero hash, short circuit to use genesis state stored in DB.
 	if blockRoot == params.BeaconConfig().ZeroHash {
 		return s.beaconDB.GenesisState(ctx)
 	}
@@ -62,23 +61,25 @@ func (s *State) StateByRoot(ctx context.Context, blockRoot [32]byte) (state.Beac

 // StateByRootInitialSync retrieves the state from the DB for the initial syncing phase.
 // It assumes initial syncing using a block list rather than a block tree hence the returned
-// state is not copied.
-// It invalidates cache for parent root because pre state will get mutated.
-// Do not use this method for anything other than initial syncing purpose or block tree is applied.
+// state is not copied (block batches returned from initial sync are linear).
+// It invalidates cache for parent root because pre-state will get mutated.
+//
+// WARNING: Do not use this method for anything other than initial syncing purpose or block tree is applied.
 func (s *State) StateByRootInitialSync(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
 	// Genesis case. If block root is zero hash, short circuit to use genesis state stored in DB.
 	if blockRoot == params.BeaconConfig().ZeroHash {
 		return s.beaconDB.GenesisState(ctx)
 	}

-	// To invalidate cache for parent root because pre state will get mutated.
+	// To invalidate cache for parent root because pre-state will get mutated.
+	// It is a parent root because StateByRootInitialSync is always used to fetch the block's parent state.
 	defer s.hotStateCache.delete(blockRoot)

 	if s.hotStateCache.has(blockRoot) {
 		return s.hotStateCache.getWithoutCopy(blockRoot), nil
 	}

-	cachedInfo, ok, err := s.epochBoundaryStateCache.getByRoot(blockRoot)
+	cachedInfo, ok, err := s.epochBoundaryStateCache.getByBlockRoot(blockRoot)
 	if err != nil {
 		return nil, err
 	}
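Note: the deferred cache delete above is the subtle part. During initial sync the fetched state is always the parent's pre-state and will be mutated in place, so its cache entry must not outlive this call. A standalone sketch of the invalidate-on-read pattern:

package main

import "fmt"

type beaconState struct{ slot uint64 }

// getForMutation returns the cached value and evicts the entry, because the
// caller will mutate the state in place (cf. the deferred delete above).
func getForMutation(cache map[[32]byte]*beaconState, root [32]byte) *beaconState {
	st, ok := cache[root]
	if !ok {
		return nil
	}
	delete(cache, root)
	return st
}

func main() {
	root := [32]byte{'p'}
	cache := map[[32]byte]*beaconState{root: {slot: 10}}
	st := getForMutation(cache, root)
	st.slot++ // safe: no other caller can now observe the mutation via the cache
	fmt.Println(st.slot, len(cache)) // 11 0
}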
@@ -113,8 +114,7 @@ func (s *State) StateByRootInitialSync(ctx context.Context, blockRoot [32]byte)
 	return startState, nil
 }

-// This returns the state summary object of a given block root, it first checks the cache
-// then checks the DB. An error is returned if state summary object is nil.
+// This returns the state summary object of a given block root. It first checks the cache, then checks the DB.
 func (s *State) stateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {
 	var summary *ethpb.StateSummary
 	var err error
@@ -152,7 +152,7 @@ func (s *State) DeleteStateFromCaches(_ context.Context, blockRoot [32]byte) err
 	return s.epochBoundaryStateCache.delete(blockRoot)
 }

-// This loads a beacon state from either the cache or DB then replay blocks up the requested block root.
+// This loads a beacon state from either the cache or DB, then replays blocks up the slot of the requested block root.
 func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
 	ctx, span := trace.StartSpan(ctx, "stateGen.loadStateByRoot")
 	defer span.End()
@@ -163,8 +163,8 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.
 		return cachedState, nil
 	}

-	// Second, it checks if the state exits in epoch boundary state cache.
-	cachedInfo, ok, err := s.epochBoundaryStateCache.getByRoot(blockRoot)
+	// Second, it checks if the state exists in epoch boundary state cache.
+	cachedInfo, ok, err := s.epochBoundaryStateCache.getByBlockRoot(blockRoot)
 	if err != nil {
 		return nil, err
 	}
@@ -172,7 +172,7 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.
 		return cachedInfo.state, nil
 	}

-	// Short cut if the cachedState is already in the DB.
+	// Short circuit if the state is already in the DB.
 	if s.beaconDB.HasState(ctx, blockRoot) {
 		return s.beaconDB.State(ctx, blockRoot)
 	}
@@ -183,8 +183,8 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.
 	}
 	targetSlot := summary.Slot

-	// Since the requested state is not in caches, start replaying using the last available ancestor state which is
-	// retrieved using input block's parent root.
+	// Since the requested state is not in caches or DB, start replaying using the last
+	// available ancestor state which is retrieved using input block's root.
 	startState, err := s.LastAncestorState(ctx, blockRoot)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not get ancestor state")
@@ -193,7 +193,6 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.
 		return nil, errUnknownBoundaryState
 	}

-	// Return state early if we are retrieving it from our finalized state cache.
 	if startState.Slot() == targetSlot {
 		return startState, nil
 	}
@@ -208,23 +207,23 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.
 	return s.ReplayBlocks(ctx, startState, blks, targetSlot)
 }

-// This returns the highest available ancestor state of the input block root.
-// It recursively look up block's parent until a corresponding state of the block root
+// LastAncestorState returns the highest available ancestor state of the input block root.
+// It recursively looks up block's parent until a corresponding state of the block root
 // is found in the caches or DB.
 //
 // There's three ways to derive block parent state:
-// 1.) block parent state is the last finalized state
-// 2.) block parent state is the epoch boundary state and exists in epoch boundary cache.
-// 3.) block parent state is in DB.
-func (s *State) LastAncestorState(ctx context.Context, root [32]byte) (state.BeaconState, error) {
+// 1) block parent state is the last finalized state
+// 2) block parent state is the epoch boundary state and exists in epoch boundary cache
+// 3) block parent state is in DB
+func (s *State) LastAncestorState(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
 	ctx, span := trace.StartSpan(ctx, "stateGen.LastAncestorState")
 	defer span.End()

-	if s.isFinalizedRoot(root) && s.finalizedState() != nil {
+	if s.isFinalizedRoot(blockRoot) && s.finalizedState() != nil {
 		return s.finalizedState(), nil
 	}

-	b, err := s.beaconDB.Block(ctx, root)
+	b, err := s.beaconDB.Block(ctx, blockRoot)
 	if err != nil {
 		return nil, err
 	}
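Note: LastAncestorState is a backwards walk: starting from the requested block, follow ParentRoot links until one of the three sources listed above can produce a state, then the caller replays forward from there. A compact sketch of the walk, with the finalized state, boundary cache, and DB collapsed into a single lookup function (all names illustrative):

package main

import (
	"errors"
	"fmt"
)

type block struct {
	root, parent [32]byte
}

// lastAncestor walks parent links until lookup can produce a state,
// mirroring the loop in LastAncestorState above.
func lastAncestor(blocks map[[32]byte]block, lookup func([32]byte) (string, bool), root [32]byte) (string, error) {
	for {
		if st, ok := lookup(root); ok {
			return st, nil
		}
		b, ok := blocks[root]
		if !ok {
			return "", errors.New("unknown block")
		}
		root = b.parent
	}
}

func main() {
	a, b, c := [32]byte{'a'}, [32]byte{'b'}, [32]byte{'c'}
	blocks := map[[32]byte]block{
		c: {root: c, parent: b},
		b: {root: b, parent: a},
	}
	lookup := func(r [32]byte) (string, bool) {
		if r == a {
			return "state@a", true
		}
		return "", false
	}
	fmt.Println(lastAncestor(blocks, lookup, c)) // state@a <nil>
}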
@@ -237,13 +236,13 @@ func (s *State) LastAncestorState(ctx context.Context, root [32]byte) (state.Bea
 		return nil, ctx.Err()
 	}

-	// Is the state a genesis state.
+	// Is the state the genesis state.
 	parentRoot := bytesutil.ToBytes32(b.Block().ParentRoot())
 	if parentRoot == params.BeaconConfig().ZeroHash {
 		return s.beaconDB.GenesisState(ctx)
 	}

-	// return an error if slot hasn't been covered by checkpoint sync backfill
+	// Return an error if slot hasn't been covered by checkpoint sync.
 	ps := b.Block().Slot() - 1
 	if !s.slotAvailable(ps) {
 		return nil, errors.Wrapf(ErrNoDataForSlot, "slot %d not in db due to checkpoint sync", ps)
@@ -259,7 +258,7 @@ func (s *State) LastAncestorState(ctx context.Context, root [32]byte) (state.Bea
 	}

 	// Does the state exist in epoch boundary cache.
-	cachedInfo, ok, err := s.epochBoundaryStateCache.getByRoot(parentRoot)
+	cachedInfo, ok, err := s.epochBoundaryStateCache.getByBlockRoot(parentRoot)
 	if err != nil {
 		return nil, err
 	}
@@ -271,6 +270,7 @@ func (s *State) LastAncestorState(ctx context.Context, root [32]byte) (state.Bea
 		if s.beaconDB.HasState(ctx, parentRoot) {
 			return s.beaconDB.State(ctx, parentRoot)
 		}
+
 		b, err = s.beaconDB.Block(ctx, parentRoot)
 		if err != nil {
 			return nil, err
@@ -174,7 +174,7 @@ func TestDeleteStateFromCaches(t *testing.T) {
 	r := [32]byte{'A'}

 	require.Equal(t, false, service.hotStateCache.has(r))
-	_, has, err := service.epochBoundaryStateCache.getByRoot(r)
+	_, has, err := service.epochBoundaryStateCache.getByBlockRoot(r)
 	require.NoError(t, err)
 	require.Equal(t, false, has)
@@ -182,14 +182,14 @@ func TestDeleteStateFromCaches(t *testing.T) {
 	require.NoError(t, service.epochBoundaryStateCache.put(r, beaconState))

 	require.Equal(t, true, service.hotStateCache.has(r))
-	_, has, err = service.epochBoundaryStateCache.getByRoot(r)
+	_, has, err = service.epochBoundaryStateCache.getByBlockRoot(r)
 	require.NoError(t, err)
 	require.Equal(t, true, has)

 	require.NoError(t, service.DeleteStateFromCaches(ctx, r))

 	require.Equal(t, false, service.hotStateCache.has(r))
-	_, has, err = service.epochBoundaryStateCache.getByRoot(r)
+	_, has, err = service.epochBoundaryStateCache.getByBlockRoot(r)
 	require.NoError(t, err)
 	require.Equal(t, false, has)
 }
@@ -42,8 +42,8 @@ type CanonicalHistory struct {
 	cache CachedGetter
 }

-func (ch *CanonicalHistory) ReplayerForSlot(target types.Slot) Replayer {
-	return &stateReplayer{chainer: ch, method: forSlot, target: target}
+func (c *CanonicalHistory) ReplayerForSlot(target types.Slot) Replayer {
+	return &stateReplayer{chainer: c, method: forSlot, target: target}
 }

 func (c *CanonicalHistory) BlockForSlot(ctx context.Context, target types.Slot) ([32]byte, interfaces.SignedBeaconBlock, error) {
@@ -134,9 +134,9 @@ func (c *CanonicalHistory) chainForSlot(ctx context.Context, target types.Slot)
 	return s, descendants, nil
 }

-func (c *CanonicalHistory) getState(ctx context.Context, root [32]byte) (state.BeaconState, error) {
+func (c *CanonicalHistory) getState(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
 	if c.cache != nil {
-		st, err := c.cache.ByRoot(root)
+		st, err := c.cache.ByBlockRoot(blockRoot)
 		if err == nil {
 			return st, nil
 		}
@@ -144,7 +144,7 @@ func (c *CanonicalHistory) getState(ctx context.Context, root [32]byte) (state.B
 			return nil, errors.Wrap(err, "error reading from state cache during state replay")
 		}
 	}
-	return c.h.StateOrError(ctx, root)
+	return c.h.StateOrError(ctx, blockRoot)
 }

 // ancestorChain works backwards through the chain lineage, accumulating blocks and checking for a saved state.
@@ -24,7 +24,7 @@ var (
 	})
 )

-// hotStateCache is used to store the processed beacon state after finalized check point..
+// hotStateCache is used to store the processed beacon state after finalized check point.
 type hotStateCache struct {
 	cache *lru.Cache
 	lock  sync.RWMutex
@@ -39,10 +39,10 @@ func newHotStateCache() *hotStateCache {

 // Get returns a cached response via input block root, if any.
 // The response is copied by default.
-func (c *hotStateCache) get(root [32]byte) state.BeaconState {
+func (c *hotStateCache) get(blockRoot [32]byte) state.BeaconState {
 	c.lock.RLock()
 	defer c.lock.RUnlock()
-	item, exists := c.cache.Get(root)
+	item, exists := c.cache.Get(blockRoot)

 	if exists && item != nil {
 		hotStateCacheHit.Inc()
@@ -52,8 +52,8 @@ func (c *hotStateCache) get(root [32]byte) state.BeaconState {
 	return nil
 }

-func (c *hotStateCache) ByRoot(root [32]byte) (state.BeaconState, error) {
-	st := c.get(root)
+func (c *hotStateCache) ByBlockRoot(r [32]byte) (state.BeaconState, error) {
+	st := c.get(r)
 	if st == nil {
 		return nil, ErrNotInCache
 	}
@@ -61,10 +61,10 @@ func (c *hotStateCache) ByRoot(root [32]byte) (state.BeaconState, error) {
 }

 // GetWithoutCopy returns a non-copied cached response via input block root.
-func (c *hotStateCache) getWithoutCopy(root [32]byte) state.BeaconState {
+func (c *hotStateCache) getWithoutCopy(blockRoot [32]byte) state.BeaconState {
 	c.lock.RLock()
 	defer c.lock.RUnlock()
-	item, exists := c.cache.Get(root)
+	item, exists := c.cache.Get(blockRoot)
 	if exists && item != nil {
 		hotStateCacheHit.Inc()
 		return item.(state.BeaconState)
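Note: hotStateCache is an LRU keyed by block root and wrapped in an RWMutex, so reads take the shared lock and writes the exclusive one. A reduced standalone sketch of the same shape, assuming hashicorp's golang-lru as the LRU (the wrapper types and names below are illustrative):

package main

import (
	"fmt"
	"sync"

	lru "github.com/hashicorp/golang-lru"
)

type hotCache struct {
	cache *lru.Cache
	lock  sync.RWMutex
}

func newHotCache(size int) (*hotCache, error) {
	c, err := lru.New(size)
	if err != nil {
		return nil, err
	}
	return &hotCache{cache: c}, nil
}

// get takes the read lock; concurrent readers do not block each other.
func (h *hotCache) get(root [32]byte) (interface{}, bool) {
	h.lock.RLock()
	defer h.lock.RUnlock()
	return h.cache.Get(root)
}

// put takes the write lock, mirroring hotStateCache.put above.
func (h *hotCache) put(root [32]byte, st interface{}) {
	h.lock.Lock()
	defer h.lock.Unlock()
	h.cache.Add(root, st)
}

func main() {
	c, _ := newHotCache(16)
	c.put([32]byte{'a'}, "state")
	fmt.Println(c.get([32]byte{'a'})) // state true
}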
@@ -74,22 +74,22 @@ func (c *hotStateCache) getWithoutCopy(root [32]byte) state.BeaconState {
 }

 // put the response in the cache.
-func (c *hotStateCache) put(root [32]byte, state state.BeaconState) {
+func (c *hotStateCache) put(blockRoot [32]byte, state state.BeaconState) {
 	c.lock.Lock()
 	defer c.lock.Unlock()
-	c.cache.Add(root, state)
+	c.cache.Add(blockRoot, state)
 }

 // has returns true if the key exists in the cache.
-func (c *hotStateCache) has(root [32]byte) bool {
+func (c *hotStateCache) has(blockRoot [32]byte) bool {
 	c.lock.RLock()
 	defer c.lock.RUnlock()
-	return c.cache.Contains(root)
+	return c.cache.Contains(blockRoot)
 }

 // delete deletes the key exists in the cache.
-func (c *hotStateCache) delete(root [32]byte) bool {
+func (c *hotStateCache) delete(blockRoot [32]byte) bool {
 	c.lock.Lock()
 	defer c.lock.Unlock()
-	return c.cache.Remove(root)
+	return c.cache.Remove(blockRoot)
 }
@@ -13,7 +13,7 @@ import (

 // MigrateToCold advances the finalized info in between the cold and hot state sections.
 // It moves the recent finalized states from the hot section to the cold section and
-// only preserve the ones that's on archived point.
+// only preserves the ones that are on archived point.
 func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
 	ctx, span := trace.StartSpan(ctx, "stateGen.MigrateToCold")
 	defer span.End()
@@ -31,7 +31,7 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
 		return nil
 	}

-	// Start at previous finalized slot, stop at current finalized slot.
+	// Start at previous finalized slot, stop at current finalized slot (it will be handled in the next migration).
 	// If the slot is on archived point, save the state of that slot to the DB.
 	for slot := oldFSlot; slot < fSlot; slot++ {
 		if ctx.Err() != nil {
@@ -69,7 +69,7 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
 			return err
 		}
 		aRoot = missingRoot
-		// There's no need to generate the state if the state already exists on the DB.
+		// There's no need to generate the state if the state already exists in the DB.
 		// We can skip saving the state.
 		if !s.beaconDB.HasState(ctx, aRoot) {
 			aState, err = s.StateByRoot(ctx, missingRoot)
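Note: the migration loop only persists states whose slot lands on an archived point; everything in between is dropped and can be regenerated later by replay. Assuming the usual modulo definition of archived points (SlotsPerArchivedPoint in Prysm's config), a sketch of the filter:

package main

import "fmt"

// onArchivedPoint reports whether a slot's state should be kept during
// migration, assuming archived points are multiples of the archival interval.
func onArchivedPoint(slot, slotsPerArchivedPoint uint64) bool {
	return slot%slotsPerArchivedPoint == 0
}

func main() {
	for slot := uint64(2043); slot < 2052; slot++ {
		if onArchivedPoint(slot, 2048) {
			fmt.Println("save state at slot", slot) // slot 2048 only
		}
	}
}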
@@ -80,13 +80,14 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
 		}

 		if s.beaconDB.HasState(ctx, aRoot) {
-			// Remove hot state DB root to prevent it gets deleted later when we turn hot state save DB mode off.
+			// If you are migrating a state and its already part of the hot state cache saved to the db,
+			// you can just remove it from the hot state cache as it becomes redundant.
 			s.saveHotStateDB.lock.Lock()
-			roots := s.saveHotStateDB.savedStateRoots
+			roots := s.saveHotStateDB.blockRootsOfSavedStates
 			for i := 0; i < len(roots); i++ {
 				if aRoot == roots[i] {
-					s.saveHotStateDB.savedStateRoots = append(roots[:i], roots[i+1:]...)
-					// There shouldn't be duplicated roots in `savedStateRoots`.
+					s.saveHotStateDB.blockRootsOfSavedStates = append(roots[:i], roots[i+1:]...)
+					// There shouldn't be duplicated roots in `blockRootsOfSavedStates`.
 					// Break here is ok.
 					break
 				}
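Note: the bookkeeping above removes one root from the tracked slice with Go's append-based deletion idiom; since the code asserts there are no duplicates, breaking after the first match is safe. Standalone sketch:

package main

import "fmt"

// removeRoot deletes the first occurrence of target from roots using the
// append(roots[:i], roots[i+1:]...) idiom seen above. It assumes roots
// holds no duplicates, so it stops at the first match.
func removeRoot(roots [][32]byte, target [32]byte) [][32]byte {
	for i := 0; i < len(roots); i++ {
		if roots[i] == target {
			return append(roots[:i], roots[i+1:]...)
		}
	}
	return roots
}

func main() {
	roots := [][32]byte{{'a'}, {'b'}, {'c'}}
	roots = removeRoot(roots, [32]byte{'b'})
	fmt.Println(len(roots)) // 2
}

The idiom shifts elements in the original backing array, which is fine here because the caller holds the lock and only the returned slice is kept.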
@@ -107,7 +108,7 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
 	}

 	// Update finalized info in memory.
-	fInfo, ok, err := s.epochBoundaryStateCache.getByRoot(fRoot)
+	fInfo, ok, err := s.epochBoundaryStateCache.getByBlockRoot(fRoot)
 	if err != nil {
 		return err
 	}
Some files were not shown because too many files have changed in this diff.