Compare commits

...

15 Commits

Author SHA1 Message Date
Kasey Kirkham
a8b8871dce validate latest_block_header root == block root 2022-05-12 13:48:42 -05:00
Radosław Kapka
98622a052f Extract OptimisticSyncFetcher interface (#10654)
* Extract `OptimisticSyncFetcher` interface

* extract IsOptimistic

* fix tests

* more test fixes

* even more test fixes

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-12 17:23:45 +00:00
Potuz
61033ebea1 handle failure to update head (#10651)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-12 16:36:46 +00:00
Potuz
e808025b17 regression test off-by-one (#10675)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-12 15:49:37 +00:00
Radosław Kapka
7db0435ee0 Unify WARNING comments (#10678)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-12 15:25:44 +00:00
Nishant Das
1f086e4333 add more fuzz targets (#10682) 2022-05-12 10:47:29 -04:00
james-prysm
184e5be9de Fee recipient: checksum log (#10664)
* adding checksum check at validator client and beacon node

* adding validation and logs on validator client startup

* moving the log and validation

* fixing unit tests

* adding test for back checksum on validator client

* fixing bazel

* addressing comments

* fixing log display

* Update beacon-chain/node/config.go

* Apply suggestions from code review

* breaking up lines

* fixing unit test

* ugh another fix to unit test

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-11 19:36:57 +00:00
terencechain
e33850bf51 Don't return nil with new head (#10680) 2022-05-11 11:33:10 -07:00
Preston Van Loon
cc643ac4cc native-state: Simplify MarshalSSZ (#10677)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-11 14:25:11 +00:00
Nishant Das
abefe1e9d5 Add Sync Block Topic Fuzz Target (#10676)
* add new target

* remove

* Update beacon-chain/sync/BUILD.bazel
2022-05-11 13:18:12 +00:00
Nishant Das
b4e89fb28b Add in State Fuzzing (#10375)
* add it in

* build tags

* gaz

* remove dependency on fast-ssz for now

* copy and comment

* add in native state

* fix build

* no assert on invalid input, just return

* Add failing case

* fix it up

Co-authored-by: prestonvanloon <preston@prysmaticlabs.com>
2022-05-11 12:47:54 +00:00
terencechain
4ad1c4df01 Cache and use justified and finalized payload block hash (#10657)
* Cache and use justified and finalized payload block hash

* Fix tests

* Use real byte

* Fix conflicts

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-10 21:20:28 +00:00
terencechain
6a197b47d9 Call fcu on invalid payload (#10565)
* Starting

* remove finalized root

* Just call fcu

* Review feedbacks

* fix one test

* Fix conflicts

* Update execution_engine_test.go

* Add a test for invalid recursive call

* Add comprehensive recursive test

* dissallow override empty hash

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-10 20:02:00 +00:00
Radosław Kapka
d102421a25 Improve ReceiveBlock's comment (#10671)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-10 19:13:28 +00:00
Radosław Kapka
7b1490429c Add link to e2e docs in README (#10672) 2022-05-10 14:16:40 +00:00
81 changed files with 1468 additions and 429 deletions

View File

@@ -20,7 +20,7 @@ import (
)
// ChainInfoFetcher defines a common interface for methods in blockchain service which
// directly retrieves chain info related data.
// directly retrieve chain info related data.
type ChainInfoFetcher interface {
HeadFetcher
FinalizationFetcher
@@ -49,7 +49,7 @@ type GenesisFetcher interface {
}
// HeadFetcher defines a common interface for methods in blockchain service which
// directly retrieves head related data.
// directly retrieve head related data.
type HeadFetcher interface {
HeadSlot() types.Slot
HeadRoot(ctx context.Context) ([]byte, error)
@@ -61,8 +61,6 @@ type HeadFetcher interface {
HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool)
HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
ChainHeads() ([][32]byte, []types.Slot)
IsOptimistic(ctx context.Context) (bool, error)
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
HeadSyncCommitteeFetcher
HeadDomainFetcher
}
@@ -79,7 +77,7 @@ type CanonicalFetcher interface {
}
// FinalizationFetcher defines a common interface for methods in blockchain service which
// directly retrieves finalization and justification related data.
// directly retrieve finalization and justification related data.
type FinalizationFetcher interface {
FinalizedCheckpt() *ethpb.Checkpoint
CurrentJustifiedCheckpt() *ethpb.Checkpoint
@@ -87,6 +85,12 @@ type FinalizationFetcher interface {
VerifyFinalizedBlkDescendant(ctx context.Context, blockRoot [32]byte) error
}
// OptimisticModeFetcher retrieves information about optimistic status of the node.
type OptimisticModeFetcher interface {
IsOptimistic(ctx context.Context) (bool, error)
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
}
// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
cp := s.store.FinalizedCheckpt()
@@ -238,7 +242,7 @@ func (s *Service) GenesisTime() time.Time {
return s.genesisTime
}
// GenesisValidatorsRoot returns the genesis validator
// GenesisValidatorsRoot returns the genesis validators
// root of the chain.
func (s *Service) GenesisValidatorsRoot() [32]byte {
s.headLock.RLock()
@@ -305,7 +309,7 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.V
return v.PublicKey(), nil
}
// ForkChoicer returns the forkchoice interface
// ForkChoicer returns the forkchoice interface.
func (s *Service) ForkChoicer() forkchoice.ForkChoicer {
return s.cfg.ForkChoiceStore
}
@@ -321,7 +325,7 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
return s.IsOptimisticForRoot(ctx, s.head.root)
}
// IsOptimisticForRoot takes the root and slot as arguments instead of the current head
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
@@ -351,7 +355,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return false, nil
}
// checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.
// Checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(validatedCheckpoint.Root)))
if err != nil {
return false, err
@@ -369,7 +373,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return false, err
}
// historical non-canonical blocks here are returned as optimistic for safety.
// Historical non-canonical blocks here are returned as optimistic for safety.
return !isCanonical, nil
}
@@ -378,7 +382,7 @@ func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t
}
// ForkChoiceStore returns the fork choice store in the service
// ForkChoiceStore returns the fork choice store in the service.
func (s *Service) ForkChoiceStore() forkchoice.ForkChoicer {
return s.cfg.ForkChoiceStore
}

View File

@@ -51,7 +51,7 @@ func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
cp := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte("foo"), 32)}
c := setupBeaconChain(t, beaconDB)
c.store.SetFinalizedCheckpt(cp)
c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})
assert.Equal(t, cp.Epoch, c.FinalizedCheckpt().Epoch, "Unexpected finalized epoch")
}
@@ -62,7 +62,7 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
genesisRoot := [32]byte{'A'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, beaconDB)
c.store.SetFinalizedCheckpt(cp)
c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})
c.originBlockRoot = genesisRoot
assert.DeepEqual(t, c.originBlockRoot[:], c.FinalizedCheckpt().Root)
}
@@ -73,7 +73,7 @@ func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
c := setupBeaconChain(t, beaconDB)
assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
cp := &ethpb.Checkpoint{Epoch: 6, Root: bytesutil.PadTo([]byte("foo"), 32)}
c.store.SetJustifiedCheckpt(cp)
c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
assert.Equal(t, cp.Epoch, c.CurrentJustifiedCheckpt().Epoch, "Unexpected justified epoch")
}
@@ -83,7 +83,7 @@ func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
c := setupBeaconChain(t, beaconDB)
genesisRoot := [32]byte{'B'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c.store.SetJustifiedCheckpt(cp)
c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
c.originBlockRoot = genesisRoot
assert.DeepEqual(t, c.originBlockRoot[:], c.CurrentJustifiedCheckpt().Root)
}

View File

@@ -31,11 +31,9 @@ var (
// notifyForkchoiceUpdateArg is the argument for the forkchoice update notification `notifyForkchoiceUpdate`.
type notifyForkchoiceUpdateArg struct {
headState state.BeaconState
headRoot [32]byte
headBlock interfaces.BeaconBlock
finalizedRoot [32]byte
justifiedRoot [32]byte
headState state.BeaconState
headRoot [32]byte
headBlock interfaces.BeaconBlock
}
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
@@ -61,18 +59,12 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
}
finalizedHash, err := s.getPayloadHash(ctx, arg.finalizedRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block hash")
}
justifiedHash, err := s.getPayloadHash(ctx, arg.justifiedRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get justified block hash")
}
finalizedHash := s.store.FinalizedPayloadBlockHash()
justifiedHash := s.store.JustifiedPayloadBlockHash()
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash,
SafeBlockHash: justifiedHash,
FinalizedBlockHash: finalizedHash,
SafeBlockHash: justifiedHash[:],
FinalizedBlockHash: finalizedHash[:],
}
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
@@ -89,10 +81,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
}).Info("Called fork choice updated with optimistic block")
return payloadID, s.optimisticCandidateBlock(ctx, headBlk)
case powchain.ErrInvalidPayloadStatus:
newPayloadInvalidNodeCount.Inc()
headRoot := arg.headRoot
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, headRoot, bytesutil.ToBytes32(headBlk.ParentRoot()), bytesutil.ToBytes32(lastValidHash))
if err != nil {
@@ -101,12 +94,35 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return nil, err
}
r, err := s.updateHead(ctx, s.justifiedBalances.balances)
if err != nil {
return nil, err
}
b, err := s.getBlock(ctx, r)
if err != nil {
return nil, err
}
st, err := s.cfg.StateGen.StateByRoot(ctx, r)
if err != nil {
return nil, err
}
pid, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: st,
headRoot: r,
headBlock: b.Block(),
})
if err != nil {
return nil, err
}
log.WithFields(logrus.Fields{
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", headRoot),
"invalidCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return nil, ErrInvalidPayload
return pid, ErrInvalidPayload
default:
return nil, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
@@ -125,19 +141,19 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
// getPayloadHash returns the payload hash given the block root.
// if the block is before bellatrix fork epoch, it returns the zero hash.
func (s *Service) getPayloadHash(ctx context.Context, root [32]byte) ([]byte, error) {
finalizedBlock, err := s.getBlock(ctx, s.ensureRootNotZeros(root))
func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, error) {
blk, err := s.getBlock(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(root)))
if err != nil {
return nil, err
return [32]byte{}, err
}
if blocks.IsPreBellatrixVersion(finalizedBlock.Block().Version()) {
return params.BeaconConfig().ZeroHash[:], nil
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
return params.BeaconConfig().ZeroHash, nil
}
payload, err := finalizedBlock.Block().Body().ExecutionPayload()
payload, err := blk.Block().Body().ExecutionPayload()
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
return [32]byte{}, errors.Wrap(err, "could not get execution payload")
}
return payload.BlockHash, nil
return bytesutil.ToBytes32(payload.BlockHash), nil
}
// notifyForkchoiceUpdate signals execution engine on a new payload.

View File

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
@@ -174,12 +175,15 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: tt.newForkchoiceErr}
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, beaconDB.SaveState(ctx, st, tt.finalizedRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, tt.finalizedRoot))
fc := &ethpb.Checkpoint{Epoch: 1, Root: tt.finalizedRoot[:]}
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
arg := &notifyForkchoiceUpdateArg{
headState: st,
headRoot: tt.headRoot,
headBlock: tt.blk,
finalizedRoot: tt.finalizedRoot,
justifiedRoot: tt.justifiedRoot,
headState: st,
headRoot: tt.headRoot,
headBlock: tt.blk,
}
_, err := service.notifyForkchoiceUpdate(ctx, arg)
if tt.errString != "" {
@@ -191,6 +195,147 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
}
}
//
//
// A <- B <- C <- D
// \
// ---------- E <- F
// \
// ------ G
// D is the current head, attestations for F and G come late, both are invalid.
// We switch recursively to G then F and finally to D.
//
// We test:
// 1. forkchoice removes blocks F and G from the forkchoice implementation
// 2. forkchoice removes the weights of these blocks
// 3. the blockchain package calls fcu to obtain heads G -> F -> D.
func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
// Prepare blocks
ba := util.NewBeaconBlockBellatrix()
ba.Block.Body.ExecutionPayload.BlockNumber = 1
wba, err := wrapper.WrappedSignedBeaconBlock(ba)
require.NoError(t, err)
bra, err := wba.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wba))
bb := util.NewBeaconBlockBellatrix()
bb.Block.Body.ExecutionPayload.BlockNumber = 2
wbb, err := wrapper.WrappedSignedBeaconBlock(bb)
require.NoError(t, err)
brb, err := wbb.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbb))
bc := util.NewBeaconBlockBellatrix()
bc.Block.Body.ExecutionPayload.BlockNumber = 3
wbc, err := wrapper.WrappedSignedBeaconBlock(bc)
require.NoError(t, err)
brc, err := wbc.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbc))
bd := util.NewBeaconBlockBellatrix()
pd := [32]byte{'D'}
bd.Block.Body.ExecutionPayload.BlockHash = pd[:]
bd.Block.Body.ExecutionPayload.BlockNumber = 4
wbd, err := wrapper.WrappedSignedBeaconBlock(bd)
require.NoError(t, err)
brd, err := wbd.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbd))
be := util.NewBeaconBlockBellatrix()
pe := [32]byte{'E'}
be.Block.Body.ExecutionPayload.BlockHash = pe[:]
be.Block.Body.ExecutionPayload.BlockNumber = 5
wbe, err := wrapper.WrappedSignedBeaconBlock(be)
require.NoError(t, err)
bre, err := wbe.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbe))
bf := util.NewBeaconBlockBellatrix()
pf := [32]byte{'F'}
bf.Block.Body.ExecutionPayload.BlockHash = pf[:]
bf.Block.Body.ExecutionPayload.BlockNumber = 6
bf.Block.ParentRoot = bre[:]
wbf, err := wrapper.WrappedSignedBeaconBlock(bf)
require.NoError(t, err)
brf, err := wbf.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbf))
bg := util.NewBeaconBlockBellatrix()
bg.Block.Body.ExecutionPayload.BlockNumber = 7
pg := [32]byte{'G'}
bg.Block.Body.ExecutionPayload.BlockHash = pg[:]
bg.Block.ParentRoot = bre[:]
wbg, err := wrapper.WrappedSignedBeaconBlock(bg)
require.NoError(t, err)
brg, err := wbg.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbg))
// Insert blocks into forkchoice
fcs := doublylinkedtree.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
service.justifiedBalances.balances = []uint64{50, 100, 200}
require.NoError(t, err)
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 2, brb, bra, [32]byte{'B'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 3, brc, brb, [32]byte{'C'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 4, brd, brc, [32]byte{'D'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 5, bre, brb, [32]byte{'E'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 6, brf, bre, [32]byte{'F'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 7, brg, bre, [32]byte{'G'}, 0, 0))
// Insert Attestations to D, F and G so that they have higher weight than D
// Ensure G is head
fcs.ProcessAttestation(ctx, []uint64{0}, brd, 1)
fcs.ProcessAttestation(ctx, []uint64{1}, brf, 1)
fcs.ProcessAttestation(ctx, []uint64{2}, brg, 1)
headRoot, err := fcs.Head(ctx, 0, bra, []uint64{50, 100, 200}, 0)
require.NoError(t, err)
require.Equal(t, brg, headRoot)
// Prepare Engine Mock to return invalid unless head is D, LVH = E
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: powchain.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: pe[:], OverrideValidHash: [32]byte{'D'}}
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
fc := &ethpb.Checkpoint{Epoch: 0, Root: bra[:]}
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
a := &notifyForkchoiceUpdateArg{
headState: st,
headBlock: wbg.Block(),
headRoot: brg,
}
_, err = service.notifyForkchoiceUpdate(ctx, a)
require.ErrorIs(t, ErrInvalidPayload, err)
// Ensure Head is D
headRoot, err = fcs.Head(ctx, 0, bra, service.justifiedBalances.balances, 0)
require.NoError(t, err)
require.Equal(t, brd, headRoot)
// Ensure F and G were removed but their parent E wasn't
require.Equal(t, false, fcs.HasNode(brf))
require.Equal(t, false, fcs.HasNode(brg))
require.Equal(t, true, fcs.HasNode(bre))
}
func Test_NotifyNewPayload(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
@@ -537,11 +682,11 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
jRoot, err := tt.justified.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, tt.justified))
service.store.SetJustifiedCheckpt(
service.store.SetJustifiedCheckptAndPayloadHash(
&ethpb.Checkpoint{
Root: jRoot[:],
Epoch: slots.ToEpoch(tt.justified.Block().Slot()),
})
}, [32]byte{'a'})
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedParentBlock))
err = service.optimisticCandidateBlock(ctx, tt.blk)
@@ -803,7 +948,7 @@ func TestService_getPayloadHash(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
_, err = service.getPayloadHash(ctx, [32]byte{})
_, err = service.getPayloadHash(ctx, []byte{})
require.ErrorIs(t, errBlockNotFoundInCacheOrDB, err)
b := util.NewBeaconBlock()
@@ -813,20 +958,20 @@ func TestService_getPayloadHash(t *testing.T) {
require.NoError(t, err)
service.saveInitSyncBlock(r, wsb)
h, err := service.getPayloadHash(ctx, r)
h, err := service.getPayloadHash(ctx, r[:])
require.NoError(t, err)
require.DeepEqual(t, params.BeaconConfig().ZeroHash[:], h)
require.DeepEqual(t, params.BeaconConfig().ZeroHash, h)
bb := util.NewBeaconBlockBellatrix()
h = []byte{'a'}
bb.Block.Body.ExecutionPayload.BlockHash = h
h = [32]byte{'a'}
bb.Block.Body.ExecutionPayload.BlockHash = h[:]
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(bb)
require.NoError(t, err)
service.saveInitSyncBlock(r, wsb)
h, err = service.getPayloadHash(ctx, r)
h, err = service.getPayloadHash(ctx, r[:])
require.NoError(t, err)
require.DeepEqual(t, []byte{'a'}, h)
require.DeepEqual(t, [32]byte{'a'}, h)
}

View File

@@ -154,8 +154,8 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{'b'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
headRoot, err := service.updateHead(context.Background(), []uint64{})
require.NoError(t, err)
@@ -298,8 +298,8 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
Root: bellatrixBlkRoot[:],
Epoch: 1,
}
service.store.SetFinalizedCheckpt(fcp)
service.store.SetJustifiedCheckpt(fcp)
service.store.SetFinalizedCheckptAndPayloadHash(fcp, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fcp, [32]byte{'b'})
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)

View File

@@ -21,7 +21,7 @@ func testServiceOptsWithDB(t *testing.T) []Option {
}
}
// warning: only use these opts when you are certain there are no db calls
// WARNING: only use these opts when you are certain there are no db calls
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
// initialization requirements w/o the overhead of db init.
func testServiceOptsNoDB() []Option {

View File

@@ -64,7 +64,11 @@ func (s *Service) NewSlot(ctx context.Context, slot types.Slot) error {
return err
}
if bytes.Equal(r, f.Root) {
s.store.SetJustifiedCheckpt(bj)
h, err := s.getPayloadHash(ctx, bj.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(bj, h)
}
}
return nil

View File

@@ -324,9 +324,9 @@ func TestStore_SaveCheckpointState(t *testing.T) {
r := [32]byte{'g'}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
r = bytesutil.ToBytes32([]byte{'A'})
@@ -358,9 +358,9 @@ func TestStore_SaveCheckpointState(t *testing.T) {
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
@@ -500,7 +500,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
@@ -535,7 +535,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
@@ -564,7 +564,7 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r32[:], Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
@@ -591,7 +591,7 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r32[:], Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33

View File

@@ -123,7 +123,9 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
@@ -146,9 +148,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
// If slasher is configured, forward the attestations in the block via
// an event feed for processing.
if features.Get().EnableSlasher {
@@ -195,9 +194,19 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
newFinalized := postState.FinalizedCheckpointEpoch() > finalized.Epoch
if newFinalized {
s.store.SetPrevFinalizedCheckpt(finalized)
s.store.SetFinalizedCheckpt(postState.FinalizedCheckpoint())
cp := postState.FinalizedCheckpoint()
h, err := s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(cp, h)
s.store.SetPrevJustifiedCheckpt(justified)
s.store.SetJustifiedCheckpt(postState.CurrentJustifiedCheckpoint())
cp = postState.CurrentJustifiedCheckpoint()
h, err = s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(postState.CurrentJustifiedCheckpoint(), h)
}
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
@@ -413,6 +422,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
}
s.saveInitSyncBlock(blockRoots[i], b)
if err = s.handleBlockAfterBatchVerify(ctx, b, blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return nil, nil, err
}
}
for r, st := range boundaries {
@@ -426,14 +439,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return nil, nil, err
}
f := fCheckpoints[len(fCheckpoints)-1]
j := jCheckpoints[len(jCheckpoints)-1]
arg := &notifyForkchoiceUpdateArg{
headState: preState,
headRoot: lastBR,
headBlock: lastB.Block(),
finalizedRoot: bytesutil.ToBytes32(f.Root),
justifiedRoot: bytesutil.ToBytes32(j.Root),
headState: preState,
headRoot: lastBR,
headBlock: lastB.Block(),
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return nil, nil, err
@@ -484,7 +493,11 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interf
return err
}
s.store.SetPrevFinalizedCheckpt(finalized)
s.store.SetFinalizedCheckpt(fCheckpoint)
h, err := s.getPayloadHash(ctx, fCheckpoint.Root)
if err != nil {
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(fCheckpoint, h)
}
return nil
}

View File

@@ -208,7 +208,11 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
return errNilJustifiedInStore
}
s.store.SetPrevJustifiedCheckpt(justified)
s.store.SetJustifiedCheckpt(cpt)
h, err := s.getPayloadHash(ctx, cpt.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cpt, h)
}
return nil
@@ -227,7 +231,11 @@ func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpo
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp); err != nil {
return err
}
s.store.SetJustifiedCheckpt(cp)
h, err := s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cp, h)
return nil
}

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"math/big"
"strconv"
"sync"
"testing"
"time"
@@ -39,6 +40,7 @@ import (
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
prysmTime "github.com/prysmaticlabs/prysm/time"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestStore_OnBlock_ProtoArray(t *testing.T) {
@@ -129,9 +131,9 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: validGenesisRoot[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: roots[0]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
root, err := tt.blk.Block.HashTreeRoot()
@@ -232,9 +234,9 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: validGenesisRoot[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: roots[0]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
root, err := tt.blk.Block.HashTreeRoot()
@@ -289,7 +291,8 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
@@ -353,7 +356,8 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
@@ -415,7 +419,9 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.saveInitSyncBlock(gRoot, wsb)
st, keys := util.DeterministicGenesisState(t, 64)
@@ -484,7 +490,7 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})
update, err = service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
assert.Equal(t, true, update, "Should be able to update justified")
@@ -516,7 +522,7 @@ func TestShouldUpdateJustified_ReturnFalse_ProtoArray(t *testing.T) {
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})
update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
@@ -549,7 +555,7 @@ func TestShouldUpdateJustified_ReturnFalse_DoublyLinkedTree(t *testing.T) {
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})
update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
@@ -577,7 +583,7 @@ func TestCachedPreState_CanGetFromStateSummary_ProtoArray(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -614,7 +620,7 @@ func TestCachedPreState_CanGetFromStateSummary_DoublyLinkedTree(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -648,7 +654,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -656,7 +662,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
b := util.NewBeaconBlock()
b.Block.Slot = 1
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
wb, err := wrapper.WrappedBeaconBlock(b.Block)
require.NoError(t, err)
err = service.verifyBlkPreState(ctx, wb)
@@ -691,7 +697,7 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
r, err := signedBlock.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: []byte{'A'}}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}})
st, err := util.NewBeaconState()
require.NoError(t, err)
@@ -723,7 +729,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -768,7 +774,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -814,7 +820,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -863,7 +869,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -913,7 +919,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
// Set finalized epoch to 1.
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -974,7 +980,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
// Set finalized epoch to 1.
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -1350,7 +1356,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
for _, tt := range tests {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: tt.args.finalizedRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: tt.args.finalizedRoot[:]}, [32]byte{})
err = service.VerifyFinalizedBlkDescendant(ctx, tt.args.parentRoot)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
@@ -1378,7 +1384,7 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, beaconState, gRoot))
service.originBlockRoot = gRoot
currentCp := &ethpb.Checkpoint{Epoch: 1}
service.store.SetJustifiedCheckpt(currentCp)
service.store.SetJustifiedCheckptAndPayloadHash(currentCp, [32]byte{'a'})
newCp := &ethpb.Checkpoint{Epoch: 2, Root: gRoot[:]}
require.NoError(t, service.updateJustifiedInitSync(ctx, newCp))
@@ -1439,7 +1445,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
testState := gs.Copy()
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
@@ -1493,7 +1499,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
testState := gs.Copy()
for i := types.Slot(1); i < params.BeaconConfig().SlotsPerEpoch; i++ {
@@ -1524,7 +1530,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
assert.NoError(t, gs.SetEth1DepositIndex(8))
@@ -1563,7 +1569,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 7}))
assert.NoError(t, gs.SetEth1DepositIndex(6))
@@ -1887,3 +1893,82 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
require.NoError(t, err)
service.insertSlashingsToForkChoiceStore(ctx, wb.Block().Body().AttesterSlashings())
}
func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
blk1, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
wsb1, err := wrapper.WrappedSignedBeaconBlock(blk1)
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
wsb2, err := wrapper.WrappedSignedBeaconBlock(blk2)
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
wsb3, err := wrapper.WrappedSignedBeaconBlock(blk3)
require.NoError(t, err)
blk4, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 4)
require.NoError(t, err)
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
wsb4, err := wrapper.WrappedSignedBeaconBlock(blk4)
require.NoError(t, err)
logHook := logTest.NewGlobal()
for i := 0; i < 10; i++ {
var wg sync.WaitGroup
wg.Add(4)
go func() {
require.NoError(t, service.onBlock(ctx, wsb1, r1))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb2, r2))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb3, r3))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb4, r4))
wg.Done()
}()
wg.Wait()
require.LogsDoNotContain(t, logHook, "New head does not exist in DB. Do nothing")
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r1))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r2))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r3))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r4))
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'a'})
}
}

View File

@@ -176,7 +176,7 @@ func (s *Service) UpdateHead(ctx context.Context) error {
// This calls notify Forkchoice Update in the event that the head has changed
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
if s.headRoot() == newHeadRoot {
if newHeadRoot == [32]byte{} || s.headRoot() == newHeadRoot {
return
}
@@ -185,12 +185,6 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
return // We don't have the block, don't notify the engine and update head.
}
finalized := s.store.FinalizedCheckpt()
if finalized == nil {
log.WithError(errNilFinalizedInStore).Error("could not get finalized checkpoint")
return
}
newHeadBlock, err := s.getBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get new head block")
@@ -202,11 +196,9 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
return
}
arg := &notifyForkchoiceUpdateArg{
headState: headState,
headRoot: newHeadRoot,
headBlock: newHeadBlock.Block(),
finalizedRoot: bytesutil.ToBytes32(finalized.Root),
justifiedRoot: bytesutil.ToBytes32(s.store.JustifiedCheckpt().Root),
headState: headState,
headRoot: newHeadRoot,
headBlock: newHeadBlock.Block(),
}
_, err = s.notifyForkchoiceUpdate(s.ctx, arg)
if err != nil {

View File

@@ -137,14 +137,14 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
service.notifyEngineIfChangedHead(ctx, service.headRoot())
hookErr := "could not notify forkchoice update"
finalizedErr := "could not get finalized checkpoint"
require.LogsDoNotContain(t, hook, finalizedErr)
invalidStateErr := "Could not get state from db"
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
gb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
service.saveInitSyncBlock([32]byte{'a'}, gb)
service.notifyEngineIfChangedHead(ctx, [32]byte{'a'})
require.LogsContain(t, hook, finalizedErr)
require.LogsContain(t, hook, invalidStateErr)
hook.Reset()
service.head = &head{
@@ -169,9 +169,9 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.store.SetFinalizedCheckpt(finalized)
service.store.SetFinalizedCheckptAndPayloadHash(finalized, [32]byte{})
service.notifyEngineIfChangedHead(ctx, r1)
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
// Block in DB
@@ -191,14 +191,19 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.store.SetFinalizedCheckpt(finalized)
service.store.SetFinalizedCheckptAndPayloadHash(finalized, [32]byte{})
service.notifyEngineIfChangedHead(ctx, r1)
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2)
require.Equal(t, true, has)
require.Equal(t, types.ValidatorIndex(1), vId)
require.Equal(t, [8]byte{1}, payloadID)
// Test zero headRoot returns immediately.
headRoot := service.headRoot()
service.notifyEngineIfChangedHead(ctx, [32]byte{})
require.Equal(t, service.headRoot(), headRoot)
}
func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {

View File

@@ -30,9 +30,9 @@ type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashings *ethpb.AttesterSlashing)
}
// ReceiveBlock is a function that defines the the operations (minus pubsub)
// that are performed on blocks that is received from regular sync service. The operations consists of:
// 1. Validate block, apply state transition and update check points
// ReceiveBlock is a function that defines the operations (minus pubsub)
// that are performed on a received block. The operations consist of:
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error {
@@ -85,7 +85,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
defer span.End()
// Apply state transition on the incoming newly received block batches, one by one.
fCheckpoints, jCheckpoints, err := s.onBlockBatch(ctx, blocks, blkRoots)
_, _, err := s.onBlockBatch(ctx, blocks, blkRoots)
if err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)
@@ -94,10 +94,6 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
for i, b := range blocks {
blockCopy := b.Copy()
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,

View File

@@ -141,7 +141,8 @@ func TestService_ReceiveBlock(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
h := [32]byte{'a'}
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, h)
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
@@ -181,7 +182,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wg := sync.WaitGroup{}
@@ -262,7 +263,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
@@ -312,7 +313,7 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
@@ -323,7 +324,7 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
s.genesisTime = time.Now()
@@ -336,7 +337,7 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 10000000})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
s.genesisTime = time.Now()
require.NoError(t, s.checkSaveHotStateDB(context.Background()))

View File

@@ -501,7 +501,7 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -522,7 +522,7 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -595,7 +595,7 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
@@ -618,7 +618,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)

View File

@@ -37,7 +37,7 @@ func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
// the previously read value. This cache assumes we only want to cache one
// set of balances for a single root (the current justified root).
//
// warning: this is not thread-safe on its own, relies on get() for locking
// WARNING: this is not thread-safe on its own, relies on get() for locking
func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
stateBalanceCacheMiss.Inc()
justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot)

View File

@@ -23,6 +23,13 @@ func (s *Store) JustifiedCheckpt() *ethpb.Checkpoint {
return s.justifiedCheckpt
}
// JustifiedPayloadBlockHash returns the justified payload block hash reflecting justified check point.
func (s *Store) JustifiedPayloadBlockHash() [32]byte {
s.RLock()
defer s.RUnlock()
return s.justifiedPayloadBlockHash
}
// PrevFinalizedCheckpt returns the previous finalized checkpoint in the Store.
func (s *Store) PrevFinalizedCheckpt() *ethpb.Checkpoint {
s.RLock()
@@ -37,6 +44,13 @@ func (s *Store) FinalizedCheckpt() *ethpb.Checkpoint {
return s.finalizedCheckpt
}
// FinalizedPayloadBlockHash returns the finalized payload block hash reflecting finalized check point.
func (s *Store) FinalizedPayloadBlockHash() [32]byte {
s.RLock()
defer s.RUnlock()
return s.finalizedPayloadBlockHash
}
// SetPrevJustifiedCheckpt sets the previous justified checkpoint in the Store.
func (s *Store) SetPrevJustifiedCheckpt(cp *ethpb.Checkpoint) {
s.Lock()
@@ -51,18 +65,20 @@ func (s *Store) SetBestJustifiedCheckpt(cp *ethpb.Checkpoint) {
s.bestJustifiedCheckpt = cp
}
// SetJustifiedCheckpt sets the justified checkpoint in the Store.
func (s *Store) SetJustifiedCheckpt(cp *ethpb.Checkpoint) {
// SetJustifiedCheckptAndPayloadHash sets the justified checkpoint and blockhash in the Store.
func (s *Store) SetJustifiedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
s.Lock()
defer s.Unlock()
s.justifiedCheckpt = cp
s.justifiedPayloadBlockHash = h
}
// SetFinalizedCheckpt sets the finalized checkpoint in the Store.
func (s *Store) SetFinalizedCheckpt(cp *ethpb.Checkpoint) {
// SetFinalizedCheckptAndPayloadHash sets the finalized checkpoint and blockhash in the Store.
func (s *Store) SetFinalizedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
s.Lock()
defer s.Unlock()
s.finalizedCheckpt = cp
s.finalizedPayloadBlockHash = h
}
// SetPrevFinalizedCheckpt sets the previous finalized checkpoint in the Store.

View File

@@ -30,8 +30,10 @@ func Test_store_JustifiedCheckpt(t *testing.T) {
var cp *ethpb.Checkpoint
require.Equal(t, cp, s.JustifiedCheckpt())
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
s.SetJustifiedCheckpt(cp)
h := [32]byte{'b'}
s.SetJustifiedCheckptAndPayloadHash(cp, h)
require.Equal(t, cp, s.JustifiedCheckpt())
require.Equal(t, h, s.JustifiedPayloadBlockHash())
}
func Test_store_FinalizedCheckpt(t *testing.T) {
@@ -39,8 +41,10 @@ func Test_store_FinalizedCheckpt(t *testing.T) {
var cp *ethpb.Checkpoint
require.Equal(t, cp, s.FinalizedCheckpt())
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
s.SetFinalizedCheckpt(cp)
h := [32]byte{'b'}
s.SetFinalizedCheckptAndPayloadHash(cp, h)
require.Equal(t, cp, s.FinalizedCheckpt())
require.Equal(t, h, s.FinalizedPayloadBlockHash())
}
func Test_store_PrevFinalizedCheckpt(t *testing.T) {

View File

@@ -17,9 +17,11 @@ import (
// best_justified_checkpoint: Checkpoint
// proposerBoostRoot: Root
type Store struct {
justifiedCheckpt *ethpb.Checkpoint
finalizedCheckpt *ethpb.Checkpoint
bestJustifiedCheckpt *ethpb.Checkpoint
justifiedCheckpt *ethpb.Checkpoint
justifiedPayloadBlockHash [32]byte
finalizedCheckpt *ethpb.Checkpoint
finalizedPayloadBlockHash [32]byte
bestJustifiedCheckpt *ethpb.Checkpoint
sync.RWMutex
// These are not part of the consensus spec, but we do use them to return gRPC API requests.
// TODO(10094): Consider removing in v3.

View File

@@ -79,7 +79,7 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
store: &store.Store{},
wsVerifier: wv,
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: tt.finalizedEpoch})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: tt.finalizedEpoch}, [32]byte{})
err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), s.store.FinalizedCheckpt().Epoch)
if tt.wantErr == nil {
require.NoError(t, err)

View File

@@ -21,3 +21,5 @@ var ErrNotFoundBackfillBlockRoot = errors.Wrap(ErrNotFound, "BackfillBlockRoot")
// ErrNotFoundFeeRecipient is a not found error specifically for the fee recipient getter
var ErrNotFoundFeeRecipient = errors.Wrap(ErrNotFound, "fee recipient")
var errCheckpointBlockRootMismatch = errors.New("checkpoint block root does not match state.latest_block_header")

View File

@@ -55,13 +55,21 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
if err != nil {
return errors.Wrap(err, "could not compute HashTreeRoot of checkpoint block")
}
log.Infof("saving checkpoint block to db, w/ root=%#x", blockRoot)
headerRoot, err := state.LatestBlockHeader().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not compute HashTreeRoot of state.latest_block_header to validate checkpoint block root")
}
if headerRoot != blockRoot {
return errors.Wrapf(errCheckpointBlockRootMismatch, "htr(state.latest_block_header)=%#x, htr(block)=%#x", headerRoot, blockRoot)
}
log.Infof("saving checkpoint block to db, w/ slot=%d, root=%#x", blk.Slot(), blockRoot)
if err := s.SaveBlock(ctx, wblk); err != nil {
return errors.Wrap(err, "could not save checkpoint block")
}
// save state
log.Infof("calling SaveState w/ blockRoot=%x", blockRoot)
log.Infof("calling SaveState w/ blockRoot=%x, slot=%d, parent_root=%#x, block_root=%#x", blockRoot, state.Slot(), state.LatestBlockHeader().ParentRoot, state.LatestBlockHeader().BodyRoot)
if err = s.SaveState(ctx, state, blockRoot); err != nil {
return errors.Wrap(err, "could not save state")
}

View File

@@ -81,7 +81,7 @@ func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error)
if i != len(children)-1 {
children[i] = children[len(children)-1]
}
node.parent.children = children[:len(children)-2]
node.parent.children = children[:len(children)-1]
break
}
}

View File

@@ -203,3 +203,26 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
f.store.proposerBoostLock.RUnlock()
}
// This is a regression test (10565)
// ----- C
// /
// A <- B
// \
// ----------D
// D is invalid
func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'D'}, 1, 1))
_, err := f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'})
require.NoError(t, err)
require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))
}

View File

@@ -24,6 +24,7 @@ func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
// SetOptimisticToValid is called with the root of a block that was returned as
// VALID by the EL.
//
// WARNING: This method returns an error if the root is not found in forkchoice
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) error {
f.store.nodesLock.Lock()

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/params"
@@ -117,7 +118,18 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
if !common.IsHexAddress(ha) {
return fmt.Errorf("%s is not a valid fee recipient address", ha)
}
c.DefaultFeeRecipient = common.HexToAddress(ha)
mixedcaseAddress, err := common.NewMixedcaseAddressFromString(ha)
if err != nil {
return errors.Wrapf(err, "could not decode fee recipient %s", ha)
}
checksumAddress := common.HexToAddress(ha)
if !mixedcaseAddress.ValidChecksum() {
log.Warnf("Fee recipient %s is not a checksum Ethereum address. "+
"The checksummed address is %s and will be used as the fee recipient. "+
"We recommend using a mixed-case address (checksum) "+
"to prevent spelling mistakes in your fee recipient Ethereum address", ha, checksumAddress.Hex())
}
c.DefaultFeeRecipient = checksumAddress
params.OverrideBeaconConfig(c)
return nil
}

View File

@@ -87,6 +87,7 @@ func TestConfigureProofOfWork(t *testing.T) {
func TestConfigureExecutionSetting(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
app := cli.App{}
set := flag.NewFlagSet("test", 0)
@@ -102,11 +103,15 @@ func TestConfigureExecutionSetting(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
require.NoError(t, set.Set(flags.SuggestedFeeRecipient.Name, "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"))
assert.LogsContain(t, hook,
"is not a checksum Ethereum address",
)
require.NoError(t, set.Set(flags.SuggestedFeeRecipient.Name, "0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"))
cliCtx = cli.NewContext(&app, set, nil)
err = configureExecutionSetting(cliCtx)
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
assert.Equal(t, common.HexToAddress("0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"), params.BeaconConfig().DefaultFeeRecipient)
}
func TestConfigureNetwork(t *testing.T) {

View File

@@ -788,6 +788,7 @@ func (b *BeaconNode) registerRPCService() error {
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
OptimisticModeFetcher: chainService,
AttestationsPool: b.attestationPool,
ExitPool: b.exitPool,
SlashingsPool: b.slashingsPool,

View File

@@ -28,6 +28,7 @@ type EngineClient struct {
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
TerminalBlockHash []byte
TerminalBlockHashExists bool
OverrideValidHash [32]byte
}
// NewPayload --
@@ -37,8 +38,11 @@ func (e *EngineClient) NewPayload(_ context.Context, _ *pb.ExecutionPayload) ([]
// ForkchoiceUpdated --
func (e *EngineClient) ForkchoiceUpdated(
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
_ context.Context, fcs *pb.ForkchoiceState, _ *pb.PayloadAttributes,
) (*pb.PayloadIDBytes, []byte, error) {
if e.OverrideValidHash != [32]byte{} && bytesutil.ToBytes32(fcs.HeadBlockHash) == e.OverrideValidHash {
return e.PayloadIDBytes, e.ForkChoiceUpdatedResp, nil
}
return e.PayloadIDBytes, e.ForkChoiceUpdatedResp, e.ErrForkchoiceUpdated
}

View File

@@ -109,7 +109,7 @@ func (bs *Server) GetBlockHeader(ctx context.Context, req *ethpbv1.BlockRequest)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine if block root is canonical: %v", err)
}
isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, blkRoot)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
@@ -175,7 +175,7 @@ func (bs *Server) ListBlockHeaders(ctx context.Context, req *ethpbv1.BlockHeader
return nil, status.Errorf(codes.Internal, "Could not determine if block root is canonical: %v", err)
}
if !isOptimistic {
isOptimistic, err = bs.HeadFetcher.IsOptimisticForRoot(ctx, blkRoots[i])
isOptimistic, err = bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoots[i])
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
@@ -371,7 +371,7 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}
isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, root)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
@@ -543,7 +543,7 @@ func (bs *Server) GetBlockRoot(ctx context.Context, req *ethpbv1.BlockRequest) (
}
}
isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, bytesutil.ToBytes32(root))
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, bytesutil.ToBytes32(root))
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
@@ -616,7 +616,7 @@ func (bs *Server) ListBlockAttestations(ctx context.Context, req *ethpbv1.BlockR
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}
isOptimistic, err := bs.HeadFetcher.IsOptimisticForRoot(ctx, root)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}

View File

@@ -187,9 +187,10 @@ func TestServer_GetBlockHeader(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
tests := []struct {
@@ -287,9 +288,10 @@ func TestServer_GetBlockHeader(t *testing.T) {
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
header, err := bs.GetBlockHeader(ctx, &ethpbv1.BlockRequest{BlockId: []byte("head")})
require.NoError(t, err)
@@ -312,9 +314,10 @@ func TestServer_ListBlockHeaders(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
}
b2 := util.NewBeaconBlock()
@@ -416,9 +419,10 @@ func TestServer_ListBlockHeaders(t *testing.T) {
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
}
slot := types.Slot(30)
headers, err := bs.ListBlockHeaders(ctx, &ethpbv1.BlockHeadersRequest{
@@ -836,9 +840,10 @@ func TestServer_GetBlockV2(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
@@ -955,9 +960,10 @@ func TestServer_GetBlockV2(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
@@ -1074,9 +1080,10 @@ func TestServer_GetBlockV2(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
@@ -1194,9 +1201,10 @@ func TestServer_GetBlockV2(t *testing.T) {
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
blk, err := bs.GetBlockV2(ctx, &ethpbv2.BlockRequestV2{
@@ -1395,9 +1403,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
}
root, err := genBlk.Block.HashTreeRoot()
@@ -1485,9 +1494,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
}
blockRootResp, err := bs.GetBlockRoot(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("head"),
@@ -1513,9 +1523,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
@@ -1615,9 +1626,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
@@ -1717,9 +1729,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
genBlk, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
@@ -1820,9 +1833,10 @@ func TestServer_ListBlockAttestations(t *testing.T) {
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
resp, err := bs.ListBlockAttestations(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("head"),

View File

@@ -34,6 +34,7 @@ type Server struct {
StateGenService stategen.StateManager
StateFetcher statefetcher.Fetcher
HeadFetcher blockchain.HeadFetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
V1Alpha1ValidatorServer *v1alpha1validator.Server
SyncChecker sync.Checker
CanonicalHistory *stategen.CanonicalHistory

View File

@@ -75,7 +75,7 @@ func (bs *Server) GetStateRoot(ctx context.Context, req *ethpb.StateRequest) (*e
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -103,7 +103,7 @@ func (bs *Server) GetStateFork(ctx context.Context, req *ethpb.StateRequest) (*e
return nil, helpers.PrepareStateFetchGRPCError(err)
}
fork := st.Fork()
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -133,7 +133,7 @@ func (bs *Server) GetFinalityCheckpoints(ctx context.Context, req *ethpb.StateRe
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}

View File

@@ -80,13 +80,15 @@ func TestGetStateRoot(t *testing.T) {
require.NoError(t, err)
db := dbTest.SetupDB(t)
chainService := &chainMock.ChainService{}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconStateRoot: stateRoot[:],
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetStateRoot(context.Background(), &eth.StateRequest{
@@ -107,13 +109,15 @@ func TestGetStateRoot(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconStateRoot: stateRoot[:],
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetStateRoot(context.Background(), &eth.StateRequest{
StateId: make([]byte, 0),
@@ -138,12 +142,14 @@ func TestGetStateFork(t *testing.T) {
require.NoError(t, err)
db := dbTest.SetupDB(t)
chainService := &chainMock.ChainService{}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetStateFork(ctx, &eth.StateRequest{
@@ -167,12 +173,14 @@ func TestGetStateFork(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetStateFork(context.Background(), &eth.StateRequest{
StateId: make([]byte, 0),
@@ -204,12 +212,14 @@ func TestGetFinalityCheckpoints(t *testing.T) {
require.NoError(t, err)
db := dbTest.SetupDB(t)
chainService := &chainMock.ChainService{}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetFinalityCheckpoints(ctx, &eth.StateRequest{
@@ -235,12 +245,14 @@ func TestGetFinalityCheckpoints(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := server.GetFinalityCheckpoints(context.Background(), &eth.StateRequest{
StateId: make([]byte, 0),

View File

@@ -91,7 +91,7 @@ func (bs *Server) ListSyncCommittees(ctx context.Context, req *ethpbv2.StateSync
return nil, status.Errorf(codes.Internal, "Could not extract sync subcommittees: %v", err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}

View File

@@ -162,6 +162,7 @@ func TestListSyncCommittees(t *testing.T) {
require.NoError(t, err)
db := dbTest.SetupDB(t)
chainService := &mock.ChainService{}
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
@@ -169,8 +170,9 @@ func TestListSyncCommittees(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &mock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
req := &ethpbv2.StateSyncCommitteesRequest{StateId: stRoot[:]}
resp, err := s.ListSyncCommittees(ctx, req)
@@ -205,6 +207,7 @@ func TestListSyncCommittees(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &mock.ChainService{Optimistic: true}
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
@@ -212,8 +215,9 @@ func TestListSyncCommittees(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &mock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListSyncCommittees(ctx, req)
require.NoError(t, err)
@@ -261,6 +265,7 @@ func TestListSyncCommitteesFuture(t *testing.T) {
}))
db := dbTest.SetupDB(t)
chainService := &mock.ChainService{}
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
@@ -268,8 +273,9 @@ func TestListSyncCommitteesFuture(t *testing.T) {
StateFetcher: &futureSyncMockFetcher{
BeaconState: st,
},
HeadFetcher: &mock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
req := &ethpbv2.StateSyncCommitteesRequest{}
epoch := 2 * params.BeaconConfig().EpochsPerSyncCommitteePeriod

View File

@@ -57,7 +57,7 @@ func (bs *Server) GetValidator(ctx context.Context, req *ethpb.StateValidatorReq
return nil, status.Error(codes.NotFound, "Could not find validator")
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -80,7 +80,7 @@ func (bs *Server) ListValidators(ctx context.Context, req *ethpb.StateValidators
return nil, handleValContainerErr(err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -143,7 +143,7 @@ func (bs *Server) ListValidatorBalances(ctx context.Context, req *ethpb.Validato
}
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -202,7 +202,7 @@ func (bs *Server) ListCommittees(ctx context.Context, req *ethpb.StateCommittees
}
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}

View File

@@ -33,12 +33,14 @@ func TestGetValidator(t *testing.T) {
st, _ = util.DeterministicGenesisState(t, 8192)
t.Run("Head Get Validator by index", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.GetValidator(ctx, &ethpb.StateValidatorRequest{
@@ -50,12 +52,14 @@ func TestGetValidator(t *testing.T) {
})
t.Run("Head Get Validator by pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
pubKey := st.PubkeyAtIndex(types.ValidatorIndex(20))
@@ -93,12 +97,14 @@ func TestGetValidator(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.GetValidator(ctx, &ethpb.StateValidatorRequest{
StateId: []byte("head"),
@@ -117,12 +123,14 @@ func TestListValidators(t *testing.T) {
st, _ = util.DeterministicGenesisState(t, 8192)
t.Run("Head List All Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -136,12 +144,14 @@ func TestListValidators(t *testing.T) {
})
t.Run("Head List Validators by index", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
ids := [][]byte{[]byte("15"), []byte("26"), []byte("400")}
@@ -158,12 +168,14 @@ func TestListValidators(t *testing.T) {
})
t.Run("Head List Validators by pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
idNums := []types.ValidatorIndex{20, 66, 90, 100}
pubkey1 := st.PubkeyAtIndex(types.ValidatorIndex(20))
@@ -184,12 +196,14 @@ func TestListValidators(t *testing.T) {
})
t.Run("Head List Validators by both index and pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
idNums := []types.ValidatorIndex{20, 90, 170, 129}
@@ -212,12 +226,14 @@ func TestListValidators(t *testing.T) {
})
t.Run("Unknown public key is ignored", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
existingKey := st.PubkeyAtIndex(types.ValidatorIndex(1))
@@ -232,12 +248,14 @@ func TestListValidators(t *testing.T) {
})
t.Run("Unknown index is ignored", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
ids := [][]byte{[]byte("1"), []byte("99999")}
@@ -261,12 +279,14 @@ func TestListValidators(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
StateId: []byte("head"),
@@ -349,12 +369,14 @@ func TestListValidators_Status(t *testing.T) {
}
t.Run("Head List All ACTIVE Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &statefetcher.StateProvider{
ChainInfoFetcher: &chainMock.ChainService{State: st},
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -384,12 +406,14 @@ func TestListValidators_Status(t *testing.T) {
})
t.Run("Head List All ACTIVE_ONGOING Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &statefetcher.StateProvider{
ChainInfoFetcher: &chainMock.ChainService{State: st},
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -418,12 +442,14 @@ func TestListValidators_Status(t *testing.T) {
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*35))
t.Run("Head List All EXITED Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &statefetcher.StateProvider{
ChainInfoFetcher: &chainMock.ChainService{State: st},
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -451,12 +477,14 @@ func TestListValidators_Status(t *testing.T) {
})
t.Run("Head List All PENDING_INITIALIZED and EXITED_UNSLASHED Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &statefetcher.StateProvider{
ChainInfoFetcher: &chainMock.ChainService{State: st},
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -484,12 +512,14 @@ func TestListValidators_Status(t *testing.T) {
})
t.Run("Head List All PENDING and EXITED Validators", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &statefetcher.StateProvider{
ChainInfoFetcher: &chainMock.ChainService{State: st},
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListValidators(ctx, &ethpb.StateValidatorsRequest{
@@ -532,12 +562,14 @@ func TestListValidatorBalances(t *testing.T) {
require.NoError(t, st.SetBalances(balances))
t.Run("Head List Validators Balance by index", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
ids := [][]byte{[]byte("15"), []byte("26"), []byte("400")}
@@ -554,12 +586,14 @@ func TestListValidatorBalances(t *testing.T) {
})
t.Run("Head List Validators Balance by pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
idNums := []types.ValidatorIndex{20, 66, 90, 100}
pubkey1 := st.PubkeyAtIndex(types.ValidatorIndex(20))
@@ -579,12 +613,14 @@ func TestListValidatorBalances(t *testing.T) {
})
t.Run("Head List Validators Balance by both index and pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
idNums := []types.ValidatorIndex{20, 90, 170, 129}
@@ -613,12 +649,14 @@ func TestListValidatorBalances(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
ids := [][]byte{[]byte("15"), []byte("26"), []byte("400")}
@@ -640,12 +678,14 @@ func TestListCommittees(t *testing.T) {
epoch := slots.ToEpoch(st.Slot())
t.Run("Head All Committees", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListCommittees(ctx, &ethpb.StateCommitteesRequest{
@@ -660,12 +700,14 @@ func TestListCommittees(t *testing.T) {
})
t.Run("Head All Committees of Epoch 10", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
epoch := types.Epoch(10)
resp, err := s.ListCommittees(ctx, &ethpb.StateCommitteesRequest{
@@ -679,12 +721,14 @@ func TestListCommittees(t *testing.T) {
})
t.Run("Head All Committees of Slot 4", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
slot := types.Slot(4)
@@ -704,12 +748,14 @@ func TestListCommittees(t *testing.T) {
})
t.Run("Head All Committees of Index 1", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
index := types.CommitteeIndex(1)
@@ -729,12 +775,14 @@ func TestListCommittees(t *testing.T) {
})
t.Run("Head All Committees of Slot 2, Index 1", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
index := types.CommitteeIndex(1)
@@ -764,12 +812,14 @@ func TestListCommittees(t *testing.T) {
require.NoError(t, db.SaveBlock(ctx, wsb))
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
chainService := &chainMock.ChainService{Optimistic: true}
s := Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: st,
},
HeadFetcher: &chainMock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
BeaconDB: db,
}
resp, err := s.ListCommittees(ctx, &ethpb.StateCommitteesRequest{

View File

@@ -65,7 +65,7 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.StateReques
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, beaconSt, ds.HeadFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, beaconSt, ds.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -167,7 +167,7 @@ func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (
Data: make([]*ethpbv2.ForkChoiceHead, len(headRoots)),
}
for i := range headRoots {
isOptimistic, err := ds.HeadFetcher.IsOptimisticForRoot(ctx, headRoots[i])
isOptimistic, err := ds.OptimisticModeFetcher.IsOptimisticForRoot(ctx, headRoots[i])
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if head is optimistic: %v", err)
}

View File

@@ -44,8 +44,9 @@ func TestGetBeaconStateV2(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
HeadFetcher: &blockchainmock.ChainService{},
OptimisticModeFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
StateId: make([]byte, 0),
@@ -60,8 +61,9 @@ func TestGetBeaconStateV2(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
HeadFetcher: &blockchainmock.ChainService{},
OptimisticModeFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
StateId: make([]byte, 0),
@@ -76,8 +78,9 @@ func TestGetBeaconStateV2(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
HeadFetcher: &blockchainmock.ChainService{},
OptimisticModeFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
StateId: make([]byte, 0),
@@ -102,8 +105,9 @@ func TestGetBeaconStateV2(t *testing.T) {
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &blockchainmock.ChainService{Optimistic: true},
BeaconDB: db,
HeadFetcher: &blockchainmock.ChainService{},
OptimisticModeFetcher: &blockchainmock.ChainService{Optimistic: true},
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.StateRequestV2{
StateId: make([]byte, 0),
@@ -238,8 +242,10 @@ func TestListForkChoiceHeadsV2(t *testing.T) {
Root: bytesutil.ToBytes32(bytesutil.PadTo([]byte("bar"), 32)),
}}
chainService := &blockchainmock.ChainService{}
server := &Server{
HeadFetcher: &blockchainmock.ChainService{},
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
}
resp, err := server.ListForkChoiceHeadsV2(ctx, &emptypb.Empty{})
require.NoError(t, err)
@@ -257,8 +263,10 @@ func TestListForkChoiceHeadsV2(t *testing.T) {
}
t.Run("optimistic head", func(t *testing.T) {
chainService := &blockchainmock.ChainService{Optimistic: true}
server := &Server{
HeadFetcher: &blockchainmock.ChainService{Optimistic: true},
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
}
resp, err := server.ListForkChoiceHeadsV2(ctx, &emptypb.Empty{})
require.NoError(t, err)

View File

@@ -12,7 +12,8 @@ import (
// Server defines a server implementation of the gRPC Beacon Chain service,
// providing RPC endpoints to access data relevant to the Ethereum Beacon Chain.
type Server struct {
BeaconDB db.ReadOnlyDatabase
HeadFetcher blockchain.HeadFetcher
StateFetcher statefetcher.Fetcher
BeaconDB db.ReadOnlyDatabase
HeadFetcher blockchain.HeadFetcher
StateFetcher statefetcher.Fetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
}

View File

@@ -39,7 +39,7 @@ func ValidateSync(ctx context.Context, syncChecker sync.Checker, headFetcher blo
}
// IsOptimistic checks whether the latest block header of the passed in beacon state is the header of an optimistic block.
func IsOptimistic(ctx context.Context, st state.BeaconState, headFetcher blockchain.HeadFetcher) (bool, error) {
func IsOptimistic(ctx context.Context, st state.BeaconState, optimisticSyncFetcher blockchain.OptimisticModeFetcher) (bool, error) {
root, err := st.HashTreeRoot(ctx)
if err != nil {
return false, errors.Wrap(err, "could not get state root")
@@ -50,7 +50,7 @@ func IsOptimistic(ctx context.Context, st state.BeaconState, headFetcher blockch
if err != nil {
return false, errors.Wrap(err, "could not get header root")
}
isOptimistic, err := headFetcher.IsOptimisticForRoot(ctx, headRoot)
isOptimistic, err := optimisticSyncFetcher.IsOptimisticForRoot(ctx, headRoot)
if err != nil {
return false, errors.Wrap(err, "could not check if block is optimistic")
}

View File

@@ -58,14 +58,14 @@ func TestIsOptimistic(t *testing.T) {
require.NoError(t, err)
t.Run("optimistic", func(t *testing.T) {
mockHeadFetcher := &chainmock.ChainService{Optimistic: true}
o, err := IsOptimistic(ctx, st, mockHeadFetcher)
mockOptSyncFetcher := &chainmock.ChainService{Optimistic: true}
o, err := IsOptimistic(ctx, st, mockOptSyncFetcher)
require.NoError(t, err)
assert.Equal(t, true, o)
})
t.Run("not optimistic", func(t *testing.T) {
mockHeadFetcher := &chainmock.ChainService{Optimistic: false}
o, err := IsOptimistic(ctx, st, mockHeadFetcher)
mockOptSyncFetcher := &chainmock.ChainService{Optimistic: false}
o, err := IsOptimistic(ctx, st, mockOptSyncFetcher)
require.NoError(t, err)
assert.Equal(t, false, o)
})

View File

@@ -13,14 +13,15 @@ import (
// Server defines a server implementation of the gRPC Validator service,
// providing RPC endpoints intended for validator clients.
type Server struct {
HeadFetcher blockchain.HeadFetcher
HeadUpdater blockchain.HeadUpdater
TimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
AttestationsPool attestations.Pool
PeerManager p2p.PeerManager
Broadcaster p2p.Broadcaster
StateFetcher statefetcher.Fetcher
SyncCommitteePool synccommittee.Pool
V1Alpha1Server *v1alpha1validator.Server
HeadFetcher blockchain.HeadFetcher
HeadUpdater blockchain.HeadUpdater
TimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
AttestationsPool attestations.Pool
PeerManager p2p.PeerManager
Broadcaster p2p.Broadcaster
StateFetcher statefetcher.Fetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
SyncCommitteePool synccommittee.Pool
V1Alpha1Server *v1alpha1validator.Server
}

View File

@@ -58,7 +58,7 @@ func (vs *Server) GetAttesterDuties(ctx context.Context, req *ethpbv1.AttesterDu
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.HeadFetcher)
isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -142,7 +142,7 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.HeadFetcher)
isOptimistic, err := rpchelpers.IsOptimistic(ctx, s, vs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -258,7 +258,7 @@ func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncC
return nil, status.Errorf(codes.Internal, "Could not get duties: %v", err)
}
isOptimistic, err := rpchelpers.IsOptimistic(ctx, st, vs.HeadFetcher)
isOptimistic, err := rpchelpers.IsOptimistic(ctx, st, vs.OptimisticModeFetcher)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}

View File

@@ -82,9 +82,10 @@ func TestGetAttesterDuties(t *testing.T) {
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
OptimisticModeFetcher: chain,
}
t.Run("Single validator", func(t *testing.T) {
@@ -160,9 +161,10 @@ func TestGetAttesterDuties(t *testing.T) {
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: chain,
TimeFetcher: chain,
OptimisticModeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
req := &ethpbv1.AttesterDutiesRequest{
@@ -231,9 +233,10 @@ func TestGetAttesterDuties(t *testing.T) {
State: bs, Root: genesisRoot[:], Slot: &chainSlot, Optimistic: true,
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: chain,
TimeFetcher: chain,
OptimisticModeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
req := &ethpbv1.AttesterDutiesRequest{
Epoch: 0,
@@ -285,9 +288,10 @@ func TestGetProposerDuties(t *testing.T) {
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: chain,
TimeFetcher: chain,
OptimisticModeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
t.Run("Ok", func(t *testing.T) {
@@ -335,9 +339,10 @@ func TestGetProposerDuties(t *testing.T) {
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: chain,
TimeFetcher: chain,
OptimisticModeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
req := &ethpbv1.ProposerDutiesRequest{
@@ -386,9 +391,10 @@ func TestGetProposerDuties(t *testing.T) {
State: bs, Root: genesisRoot[:], Slot: &chainSlot, Optimistic: true,
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: chain,
TimeFetcher: chain,
OptimisticModeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
req := &ethpbv1.ProposerDutiesRequest{
Epoch: 0,
@@ -433,10 +439,11 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
mockChainService := &mockChain.ChainService{Genesis: genesisTime}
vs := &Server{
StateFetcher: &testutil.MockFetcher{BeaconState: st},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: mockChainService,
HeadFetcher: mockChainService,
StateFetcher: &testutil.MockFetcher{BeaconState: st},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
t.Run("Single validator", func(t *testing.T) {
@@ -571,10 +578,11 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
}
mockChainService := &mockChain.ChainService{Genesis: genesisTime, Slot: &newSyncPeriodStartSlot}
vs := &Server{
StateFetcher: &testutil.MockFetcher{BeaconState: stateFetchFn(newSyncPeriodStartSlot)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: mockChainService,
HeadFetcher: mockChainService,
StateFetcher: &testutil.MockFetcher{BeaconState: stateFetchFn(newSyncPeriodStartSlot)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
req := &ethpbv2.SyncCommitteeDutiesRequest{
@@ -606,10 +614,11 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
mockChainService := &mockChain.ChainService{Genesis: genesisTime, Optimistic: true}
vs := &Server{
StateFetcher: &testutil.MockFetcher{BeaconState: st},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: mockChainService,
HeadFetcher: mockChainService,
StateFetcher: &testutil.MockFetcher{BeaconState: st},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
req := &ethpbv2.SyncCommitteeDutiesRequest{
Epoch: 0,
@@ -624,9 +633,10 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
func TestGetSyncCommitteeDuties_SyncNotReady(t *testing.T) {
chainService := &mockChain.ChainService{}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err := vs.GetSyncCommitteeDuties(context.Background(), &ethpbv2.SyncCommitteeDutiesRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
@@ -1029,6 +1039,7 @@ func TestProduceBlockV2(t *testing.T) {
},
TimeFetcher: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
OptimisticModeFetcher: &mockChain.ChainService{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
@@ -1433,6 +1444,7 @@ func TestProduceBlindedBlock(t *testing.T) {
},
TimeFetcher: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
OptimisticModeFetcher: &mockChain.ChainService{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},

View File

@@ -178,9 +178,10 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
params.OverrideBeaconConfig(cfg)
as := &Server{
SyncChecker: &mockSync.Sync{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{Optimistic: true},
SyncChecker: &mockSync.Sync{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{},
OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
}
_, err := as.GetAttestationData(context.Background(), &ethpb.AttestationDataRequest{})
s, ok := status.FromError(err)
@@ -191,10 +192,11 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
as = &Server{
SyncChecker: &mockSync.Sync{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState},
AttestationCache: cache.NewAttestationCache(),
SyncChecker: &mockSync.Sync{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
AttestationCache: cache.NewAttestationCache(),
}
_, err = as.GetAttestationData(context.Background(), &ethpb.AttestationDataRequest{})
require.NoError(t, err)

View File

@@ -5,6 +5,7 @@ import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -130,7 +131,7 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx
if bytes.Equal(feeRecipient.Bytes(), burnAddr) {
logrus.WithFields(logrus.Fields{
"validatorIndex": vIdx,
"burnAddress": burnAddr,
"burnAddress": common.BytesToAddress(burnAddr).Hex(),
}).Error("Fee recipient not set. Using burn address")
}
default:

View File

@@ -2437,7 +2437,7 @@ func TestProposer_GetBeaconBlock_Optimistic(t *testing.T) {
bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
require.NoError(t, err)
proposerServer := &Server{HeadFetcher: &mock.ChainService{Optimistic: true}, TimeFetcher: &mock.ChainService{}}
proposerServer := &Server{OptimisticModeFetcher: &mock.ChainService{Optimistic: true}, TimeFetcher: &mock.ChainService{}}
req := &ethpb.BlockRequest{
Slot: bellatrixSlot + 1,
}

View File

@@ -51,6 +51,7 @@ type Server struct {
DepositFetcher depositcache.DepositFetcher
ChainStartFetcher powchain.ChainStartFetcher
Eth1InfoFetcher powchain.ChainInfoFetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
SyncChecker sync.Checker
StateNotifier statefeed.Notifier
BlockNotifier blockfeed.Notifier

View File

@@ -255,7 +255,7 @@ func (vs *Server) optimisticStatus(ctx context.Context) error {
if slots.ToEpoch(vs.TimeFetcher.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
return nil
}
optimistic, err := vs.HeadFetcher.IsOptimistic(ctx)
optimistic, err := vs.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
return status.Errorf(codes.Internal, "Could not determine if the node is a optimistic node: %v", err)
}

View File

@@ -603,7 +603,7 @@ func TestActivationStatus_OK(t *testing.T) {
}
func TestOptimisticStatus(t *testing.T) {
server := &Server{HeadFetcher: &mockChain.ChainService{}, TimeFetcher: &mockChain.ChainService{}}
server := &Server{OptimisticModeFetcher: &mockChain.ChainService{}, TimeFetcher: &mockChain.ChainService{}}
err := server.optimisticStatus(context.Background())
require.NoError(t, err)
@@ -612,14 +612,14 @@ func TestOptimisticStatus(t *testing.T) {
cfg.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(cfg)
server = &Server{HeadFetcher: &mockChain.ChainService{Optimistic: true}, TimeFetcher: &mockChain.ChainService{}}
server = &Server{OptimisticModeFetcher: &mockChain.ChainService{Optimistic: true}, TimeFetcher: &mockChain.ChainService{}}
err = server.optimisticStatus(context.Background())
s, ok := status.FromError(err)
require.Equal(t, true, ok)
require.DeepEqual(t, codes.Unavailable, s.Code())
require.ErrorContains(t, errOptimisticMode.Error(), err)
server = &Server{HeadFetcher: &mockChain.ChainService{Optimistic: false}, TimeFetcher: &mockChain.ChainService{}}
server = &Server{OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false}, TimeFetcher: &mockChain.ChainService{}}
err = server.optimisticStatus(context.Background())
require.NoError(t, err)
}

View File

@@ -42,8 +42,9 @@ func TestGetSyncMessageBlockRoot_Optimistic(t *testing.T) {
params.OverrideBeaconConfig(cfg)
server := &Server{
HeadFetcher: &mock.ChainService{Optimistic: true},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
}
_, err := server.GetSyncMessageBlockRoot(context.Background(), &emptypb.Empty{})
s, ok := status.FromError(err)
@@ -52,8 +53,9 @@ func TestGetSyncMessageBlockRoot_Optimistic(t *testing.T) {
require.ErrorContains(t, errOptimisticMode.Error(), err)
server = &Server{
HeadFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
}
_, err = server.GetSyncMessageBlockRoot(context.Background(), &emptypb.Empty{})
require.NoError(t, err)

View File

@@ -111,6 +111,7 @@ type Config struct {
MaxMsgSize int
ExecutionEngineCaller powchain.EngineCaller
ProposerIdsCache *cache.ProposerPayloadIDsCache
OptimisticModeFetcher blockchain.OptimisticModeFetcher
}
// NewService instantiates a new RPC service instance that will
@@ -198,6 +199,7 @@ func (s *Service) Start() {
DepositFetcher: s.cfg.DepositFetcher,
ChainStartFetcher: s.cfg.ChainStartFetcher,
Eth1InfoFetcher: s.cfg.POWChainService,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
SyncChecker: s.cfg.SyncService,
StateNotifier: s.cfg.StateNotifier,
BlockNotifier: s.cfg.BlockNotifier,
@@ -231,7 +233,8 @@ func (s *Service) Start() {
StateGenService: s.cfg.StateGen,
ReplayerBuilder: ch,
},
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
}
nodeServer := &nodev1alpha1.Server{
@@ -301,6 +304,7 @@ func (s *Service) Start() {
StateGenService: s.cfg.StateGen,
ReplayerBuilder: ch,
},
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
HeadFetcher: s.cfg.HeadFetcher,
VoluntaryExitsPool: s.cfg.ExitPool,
V1Alpha1ValidatorServer: validatorServer,
@@ -339,6 +343,7 @@ func (s *Service) Start() {
StateGenService: s.cfg.StateGen,
ReplayerBuilder: ch,
},
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
}
ethpbv1alpha1.RegisterDebugServer(s.grpcServer, debugServer)
ethpbservice.RegisterBeaconDebugServer(s.grpcServer, debugServerV1)

View File

@@ -68,6 +68,7 @@ go_library(
"//encoding/ssz:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ferranbt_fastssz//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@io_opencensus_go//trace:go_default_library",

View File

@@ -154,7 +154,8 @@ func (b *BeaconState) NumValidators() int {
}
// ReadFromEveryValidator reads values from every validator and applies it to the provided function.
// Warning: This method is potentially unsafe, as it exposes the actual validator registry.
//
// WARNING: This method is potentially unsafe, as it exposes the actual validator registry.
func (b *BeaconState) ReadFromEveryValidator(f func(idx int, val state.ReadOnlyValidator) error) error {
if b.validators == nil {
return errors.New("nil validators in state")

View File

@@ -1,36 +1,18 @@
package state_native
import (
ssz "github.com/ferranbt/fastssz"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
)
var errAssertionFailed = errors.New("failed to convert interface to proto state")
var errUnsupportedVersion = errors.New("unsupported beacon state version")
func (b *BeaconState) MarshalSSZ() ([]byte, error) {
proto := b.ToProto()
switch b.Version() {
case version.Phase0:
s, ok := proto.(*ethpb.BeaconState)
if !ok {
return nil, errAssertionFailed
}
return s.MarshalSSZ()
case version.Altair:
s, ok := proto.(*ethpb.BeaconStateAltair)
if !ok {
return nil, errAssertionFailed
}
return s.MarshalSSZ()
case version.Bellatrix:
s, ok := proto.(*ethpb.BeaconStateBellatrix)
if !ok {
return nil, errAssertionFailed
}
return s.MarshalSSZ()
default:
return nil, errUnsupportedVersion
s, ok := proto.(ssz.Marshaler)
if !ok {
return nil, errAssertionFailed
}
return s.MarshalSSZ()
}

View File

@@ -541,6 +541,7 @@ func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
}
// Initializes the Merkle layers for the beacon state if they are empty.
//
// WARNING: Caller must acquire the mutex before using.
func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
if len(b.merkleLayers) > 0 {
@@ -565,6 +566,7 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
}
// Recomputes the Merkle layers for the dirty fields in the state.
//
// WARNING: Caller must acquire the mutex before using.
func (b *BeaconState) recomputeDirtyFields(ctx context.Context) error {
for field := range b.dirtyFields {

View File

@@ -66,13 +66,16 @@ go_test(
"readonly_validator_test.go",
"references_test.go",
"setters_attestation_test.go",
"state_fuzz_test.go",
"state_test.go",
"state_trie_test.go",
"types_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/testing:go_default_library",
"//beacon-chain/state/types:go_default_library",
@@ -80,6 +83,7 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/interop:go_default_library",

View File

@@ -174,7 +174,8 @@ func (b *BeaconState) NumValidators() int {
}
// ReadFromEveryValidator reads values from every validator and applies it to the provided function.
// Warning: This method is potentially unsafe, as it exposes the actual validator registry.
//
// WARNING: This method is potentially unsafe, as it exposes the actual validator registry.
func (b *BeaconState) ReadFromEveryValidator(f func(idx int, val state.ReadOnlyValidator) error) error {
if !b.hasInnerState() {
return ErrNilInnerState

View File

@@ -0,0 +1,88 @@
//go:build go1.18
// +build go1.18
package v1_test
import (
"context"
"testing"
coreState "github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
native "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/crypto/rand"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/util"
)
// FuzzV1StateHashTreeRoot differentially fuzzes the v1 (phase0) beacon state
// implementation against the state-native implementation: it mutates a valid
// genesis state's SSZ encoding, replays a bounded number of slot transitions on
// both representations, and asserts that their hash tree roots and SSZ
// re-encodings agree.
func FuzzV1StateHashTreeRoot(f *testing.F) {
	// Seed input: the SSZ encoding of a deterministic 100-validator genesis
	// state, plus a deterministic pool of random bytes to derive mutations from.
	gState, _ := util.DeterministicGenesisState(f, 100)
	output, err := gState.MarshalSSZ()
	assert.NoError(f, err)
	randPool := make([]byte, 100)
	_, err = rand.NewDeterministicGenerator().Read(randPool)
	assert.NoError(f, err)
	f.Add(randPool, uint64(10))
	f.Fuzz(func(t *testing.T, diffBuffer []byte, slotsToTransition uint64) {
		// Apply the fuzzer-provided diff to a fresh copy of the valid encoding.
		// Each 9-byte chunk is an (index, mask) pair: 8 big-endian bytes select
		// a byte offset (taken mod len(diffBuffer)) and the 9th byte is XORed
		// into that position. Incomplete trailing chunks end the iteration.
		stateSSZ := bytesutil.SafeCopyBytes(output)
		for i := 0; i < len(diffBuffer); i += 9 {
			if i+8 >= len(diffBuffer) {
				return
			}
			num := bytesutil.BytesToUint64BigEndian(diffBuffer[i : i+8])
			num %= uint64(len(diffBuffer))
			// Perform a XOR on the byte of the selected index.
			stateSSZ[num] ^= diffBuffer[i+8]
		}
		// Mutated encodings that no longer decode are uninteresting; skip them.
		pbState := &ethpb.BeaconState{}
		err := pbState.UnmarshalSSZ(stateSSZ)
		if err != nil {
			return
		}
		nativeState, err := native.InitializeFromProtoPhase0(pbState)
		assert.NoError(t, err)
		// Bound the number of slot transitions so each fuzz iteration stays fast.
		slotsToTransition %= 100
		stateObj, err := v1.InitializeFromProtoUnsafe(pbState)
		assert.NoError(t, err)
		// Advance both representations slot by slot. Copy() is invoked each
		// iteration — presumably to exercise copy/reference-sharing code paths
		// during transitions; the copies themselves are discarded (TODO confirm).
		for stateObj.Slot() < types.Slot(slotsToTransition) {
			stateObj, err = coreState.ProcessSlots(context.Background(), stateObj, stateObj.Slot()+1)
			assert.NoError(t, err)
			stateObj.Copy()
			nativeState, err = coreState.ProcessSlots(context.Background(), nativeState, nativeState.Slot()+1)
			assert.NoError(t, err)
			nativeState.Copy()
		}
		assert.NoError(t, err)
		// Perform a cold HTR calculation by initializing a new state.
		innerState, ok := stateObj.InnerStateUnsafe().(*ethpb.BeaconState)
		assert.Equal(t, true, ok, "inner state is a not a beacon state proto")
		newState, err := v1.InitializeFromProtoUnsafe(innerState)
		assert.NoError(t, err)
		// The warm v1 state, the cold v1 state, and the native state must agree
		// on error behavior and, on success, on the resulting hash tree root.
		newRt, newErr := newState.HashTreeRoot(context.Background())
		rt, err := stateObj.HashTreeRoot(context.Background())
		nativeRt, nativeErr := nativeState.HashTreeRoot(context.Background())
		assert.Equal(t, newErr != nil, err != nil)
		assert.Equal(t, newErr != nil, nativeErr != nil)
		if err == nil {
			assert.Equal(t, rt, newRt)
			assert.Equal(t, rt, nativeRt)
		}
		// The SSZ re-encodings must likewise be byte-identical across all three.
		newSSZ, newErr := newState.MarshalSSZ()
		stateObjSSZ, err := stateObj.MarshalSSZ()
		nativeSSZ, nativeErr := nativeState.MarshalSSZ()
		assert.Equal(t, newErr != nil, err != nil)
		assert.Equal(t, newErr != nil, nativeErr != nil)
		if err == nil {
			assert.DeepEqual(t, newSSZ, stateObjSSZ)
			assert.DeepEqual(t, newSSZ, nativeSSZ)
		}
	})
}

View File

@@ -213,6 +213,7 @@ func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
}
// Initializes the Merkle layers for the beacon state if they are empty.
//
// WARNING: Caller must acquire the mutex before using.
func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
if len(b.merkleLayers) > 0 {
@@ -229,6 +230,7 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
}
// Recomputes the Merkle layers for the dirty fields in the state.
//
// WARNING: Caller must acquire the mutex before using.
func (b *BeaconState) recomputeDirtyFields(ctx context.Context) error {
for field := range b.dirtyFields {

View File

@@ -66,11 +66,15 @@ go_test(
"proofs_test.go",
"references_test.go",
"setters_test.go",
"state_fuzz_test.go",
"state_trie_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/testing:go_default_library",
"//beacon-chain/state/types:go_default_library",
@@ -79,6 +83,7 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -175,7 +175,8 @@ func (b *BeaconState) NumValidators() int {
}
// ReadFromEveryValidator reads values from every validator and applies it to the provided function.
// Warning: This method is potentially unsafe, as it exposes the actual validator registry.
//
// WARNING: This method is potentially unsafe, as it exposes the actual validator registry.
func (b *BeaconState) ReadFromEveryValidator(f func(idx int, val state.ReadOnlyValidator) error) error {
if !b.hasInnerState() {
return ErrNilInnerState

View File

@@ -0,0 +1,90 @@
//go:build go1.18
// +build go1.18
package v2_test
import (
"context"
"testing"
coreState "github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
native "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native"
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/crypto/rand"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/util"
)
// FuzzV2StateHashTreeRoot differentially fuzzes the v2 (Altair) beacon state
// implementation against the state-native implementation: it mutates a valid
// Altair genesis state's SSZ encoding, replays a bounded number of slot
// transitions on both representations, and asserts that their hash tree roots
// and SSZ re-encodings agree.
func FuzzV2StateHashTreeRoot(f *testing.F) {
	// Seed input: the SSZ encoding of a deterministic 100-validator Altair
	// genesis state, plus a deterministic pool of random bytes for mutations.
	gState, _ := util.DeterministicGenesisStateAltair(f, 100)
	output, err := gState.MarshalSSZ()
	assert.NoError(f, err)
	randPool := make([]byte, 100)
	_, err = rand.NewDeterministicGenerator().Read(randPool)
	assert.NoError(f, err)
	f.Add(randPool, uint64(10))
	f.Fuzz(func(t *testing.T, diffBuffer []byte, slotsToTransition uint64) {
		// Apply the fuzzer-provided diff to a fresh copy of the valid encoding.
		// Each 9-byte chunk is an (index, mask) pair: 8 big-endian bytes select
		// a byte offset (taken mod len(diffBuffer)) and the 9th byte is XORed
		// into that position. Incomplete trailing chunks end the iteration.
		stateSSZ := bytesutil.SafeCopyBytes(output)
		for i := 0; i < len(diffBuffer); i += 9 {
			if i+8 >= len(diffBuffer) {
				return
			}
			num := bytesutil.BytesToUint64BigEndian(diffBuffer[i : i+8])
			num %= uint64(len(diffBuffer))
			// Perform a XOR on the byte of the selected index.
			stateSSZ[num] ^= diffBuffer[i+8]
		}
		// Mutated encodings that no longer decode are uninteresting; skip them.
		pbState := &ethpb.BeaconStateAltair{}
		err := pbState.UnmarshalSSZ(stateSSZ)
		if err != nil {
			return
		}
		// Unlike the v1 fuzzer, a native-init failure here skips the input
		// rather than failing the test (TODO confirm this asymmetry is intended).
		nativeState, err := native.InitializeFromProtoAltair(pbState)
		if err != nil {
			return
		}
		// Bound the number of slot transitions so each fuzz iteration stays fast.
		slotsToTransition %= 100
		stateObj, err := v2.InitializeFromProtoUnsafe(pbState)
		assert.NoError(t, err)
		// Advance both representations slot by slot. Copy() is invoked each
		// iteration — presumably to exercise copy/reference-sharing code paths
		// during transitions; the copies themselves are discarded (TODO confirm).
		for stateObj.Slot() < types.Slot(slotsToTransition) {
			stateObj, err = coreState.ProcessSlots(context.Background(), stateObj, stateObj.Slot()+1)
			assert.NoError(t, err)
			stateObj.Copy()
			nativeState, err = coreState.ProcessSlots(context.Background(), nativeState, nativeState.Slot()+1)
			assert.NoError(t, err)
			nativeState.Copy()
		}
		assert.NoError(t, err)
		// Perform a cold HTR calculation by initializing a new state.
		innerState, ok := stateObj.InnerStateUnsafe().(*ethpb.BeaconStateAltair)
		assert.Equal(t, true, ok, "inner state is a not a beacon state altair proto")
		newState, err := v2.InitializeFromProtoUnsafe(innerState)
		assert.NoError(t, err)
		// The warm v2 state, the cold v2 state, and the native state must agree
		// on error behavior and, on success, on the resulting hash tree root.
		newRt, newErr := newState.HashTreeRoot(context.Background())
		rt, err := stateObj.HashTreeRoot(context.Background())
		nativeRt, nativeErr := nativeState.HashTreeRoot(context.Background())
		assert.Equal(t, newErr != nil, err != nil)
		assert.Equal(t, newErr != nil, nativeErr != nil)
		if err == nil {
			assert.Equal(t, rt, newRt)
			assert.Equal(t, rt, nativeRt)
		}
		// The SSZ re-encodings must likewise be byte-identical across all three.
		newSSZ, newErr := newState.MarshalSSZ()
		stateObjSSZ, err := stateObj.MarshalSSZ()
		nativeSSZ, nativeErr := nativeState.MarshalSSZ()
		assert.Equal(t, newErr != nil, err != nil)
		assert.Equal(t, newErr != nil, nativeErr != nil)
		if err == nil {
			assert.DeepEqual(t, newSSZ, stateObjSSZ)
			assert.DeepEqual(t, newSSZ, nativeSSZ)
		}
	})
}

View File

@@ -218,6 +218,7 @@ func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
}
// Initializes the Merkle layers for the beacon state if they are empty.
//
// WARNING: Caller must acquire the mutex before using.
func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
if len(b.merkleLayers) > 0 {
@@ -234,6 +235,7 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
}
// Recomputes the Merkle layers for the dirty fields in the state.
//
// WARNING: Caller must acquire the mutex before using.
func (b *BeaconState) recomputeDirtyFields(ctx context.Context) error {
for field := range b.dirtyFields {

View File

@@ -0,0 +1,3 @@
go test fuzz v1
[]byte("")
uint64(117)

View File

@@ -68,11 +68,14 @@ go_test(
"proofs_test.go",
"references_test.go",
"setters_test.go",
"state_fuzz_test.go",
"state_trie_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/testing:go_default_library",
"//beacon-chain/state/types:go_default_library",
@@ -81,6 +84,7 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -175,7 +175,8 @@ func (b *BeaconState) NumValidators() int {
}
// ReadFromEveryValidator reads values from every validator and applies it to the provided function.
// Warning: This method is potentially unsafe, as it exposes the actual validator registry.
//
// WARNING: This method is potentially unsafe, as it exposes the actual validator registry.
func (b *BeaconState) ReadFromEveryValidator(f func(idx int, val state.ReadOnlyValidator) error) error {
if !b.hasInnerState() {
return ErrNilInnerState

View File

@@ -0,0 +1,90 @@
//go:build go1.18
// +build go1.18
package v3_test
import (
"context"
"testing"
coreState "github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
native "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native"
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/crypto/rand"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/util"
)
// FuzzV3StateHashTreeRoot mutates a valid Bellatrix genesis state's SSZ
// encoding with fuzz-provided byte flips, advances the resulting state a
// bounded number of slots, and checks that the v3 state, a freshly
// re-initialized v3 state, and the state-native implementation all agree on
// HashTreeRoot and MarshalSSZ results (including whether each errors).
func FuzzV3StateHashTreeRoot(f *testing.F) {
	gState, _ := util.DeterministicGenesisStateBellatrix(f, 100)
	output, err := gState.MarshalSSZ()
	assert.NoError(f, err)
	randPool := make([]byte, 100)
	_, err = rand.NewDeterministicGenerator().Read(randPool)
	assert.NoError(f, err)
	f.Add(randPool, uint64(10))
	f.Fuzz(func(t *testing.T, diffBuffer []byte, slotsToTransition uint64) {
		stateSSZ := bytesutil.SafeCopyBytes(output)
		// Each 9-byte chunk of the fuzz input selects an index (8 bytes,
		// big endian) and a mask byte which is XOR-ed into the encoding.
		for i := 0; i < len(diffBuffer); i += 9 {
			if i+8 >= len(diffBuffer) {
				return
			}
			num := bytesutil.BytesToUint64BigEndian(diffBuffer[i : i+8])
			// Bound the index by the state size, not the diff buffer size:
			// a diff buffer longer than the encoded state would otherwise
			// yield an out-of-range index and panic the harness, and shorter
			// buffers would only ever mutate the state's leading bytes.
			num %= uint64(len(stateSSZ))
			// Perform a XOR on the byte of the selected index.
			stateSSZ[num] ^= diffBuffer[i+8]
		}
		pbState := &ethpb.BeaconStateBellatrix{}
		err := pbState.UnmarshalSSZ(stateSSZ)
		if err != nil {
			// Mutation produced an invalid encoding; not an interesting input.
			return
		}
		nativeState, err := native.InitializeFromProtoBellatrix(pbState)
		if err != nil {
			return
		}
		slotsToTransition %= 100
		stateObj, err := v3.InitializeFromProtoUnsafe(pbState)
		assert.NoError(t, err)
		// Advance both representations in lockstep, one slot at a time.
		for stateObj.Slot() < types.Slot(slotsToTransition) {
			stateObj, err = coreState.ProcessSlots(context.Background(), stateObj, stateObj.Slot()+1)
			assert.NoError(t, err)
			stateObj.Copy()
			nativeState, err = coreState.ProcessSlots(context.Background(), nativeState, nativeState.Slot()+1)
			assert.NoError(t, err)
			nativeState.Copy()
		}
		assert.NoError(t, err)
		// Perform a cold HTR calculation by initializing a new state.
		innerState, ok := stateObj.InnerStateUnsafe().(*ethpb.BeaconStateBellatrix)
		assert.Equal(t, true, ok, "inner state is not a beacon state bellatrix proto")
		newState, err := v3.InitializeFromProtoUnsafe(innerState)
		assert.NoError(t, err)
		newRt, newErr := newState.HashTreeRoot(context.Background())
		rt, err := stateObj.HashTreeRoot(context.Background())
		nativeRt, nativeErr := nativeState.HashTreeRoot(context.Background())
		// All three implementations must agree on success/failure and,
		// on success, on the resulting root.
		assert.Equal(t, newErr != nil, err != nil)
		assert.Equal(t, newErr != nil, nativeErr != nil)
		if err == nil {
			assert.Equal(t, rt, newRt)
			assert.Equal(t, rt, nativeRt)
		}
		newSSZ, newErr := newState.MarshalSSZ()
		stateObjSSZ, err := stateObj.MarshalSSZ()
		nativeSSZ, nativeErr := nativeState.MarshalSSZ()
		assert.Equal(t, newErr != nil, err != nil)
		assert.Equal(t, newErr != nil, nativeErr != nil)
		if err == nil {
			assert.DeepEqual(t, newSSZ, stateObjSSZ)
			assert.DeepEqual(t, newSSZ, nativeSSZ)
		}
	})
}

View File

@@ -218,6 +218,7 @@ func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
}
// Initializes the Merkle layers for the beacon state if they are empty.
//
// WARNING: Caller must acquire the mutex before using.
func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
if len(b.merkleLayers) > 0 {
@@ -234,6 +235,7 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
}
// Recomputes the Merkle layers for the dirty fields in the state.
//
// WARNING: Caller must acquire the mutex before using.
func (b *BeaconState) recomputeDirtyFields(_ context.Context) error {
for field := range b.dirtyFields {

View File

@@ -149,6 +149,7 @@ go_test(
"subscriber_beacon_blocks_test.go",
"subscriber_test.go",
"subscription_topic_handler_test.go",
"sync_fuzz_test.go",
"sync_test.go",
"utils_test.go",
"validate_aggregate_proof_test.go",

View File

@@ -95,6 +95,7 @@ type blockchainService interface {
blockchain.TimeFetcher
blockchain.GenesisFetcher
blockchain.CanonicalFetcher
blockchain.OptimisticModeFetcher
blockchain.SlashingReceiver
}

View File

@@ -0,0 +1,281 @@
//go:build go1.18
// +build go1.18
package sync
import (
"bytes"
"context"
"reflect"
"testing"
"time"
"github.com/libp2p/go-libp2p-core/peer"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pb "github.com/libp2p/go-libp2p-pubsub/pb"
gcache "github.com/patrickmn/go-cache"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
// FuzzValidateBeaconBlockPubSub_Phase0 seeds the fuzzer with a validly signed
// phase0 beacon block gossip message, then feeds arbitrary peer IDs, senders,
// payloads, and topics through validateBeaconBlockPubSub, checking that the
// validator tolerates malformed gossip input without panicking. The validation
// verdict itself is intentionally ignored.
func FuzzValidateBeaconBlockPubSub_Phase0(f *testing.F) {
	beaconDB := dbtest.SetupDB(f)
	fuzzP2P := p2ptest.NewFuzzTestP2P()
	ctx := context.Background()

	genesisState, secretKeys := util.DeterministicGenesisState(f, 100)

	// Persist a parent block and its state so the seeded child block has a
	// resolvable ancestry in the database.
	parent := util.NewBeaconBlock()
	wrappedParent, err := wrapper.WrappedSignedBeaconBlock(parent)
	require.NoError(f, err)
	require.NoError(f, beaconDB.SaveBlock(ctx, wrappedParent))
	parentRoot, err := parent.Block.HashTreeRoot()
	require.NoError(f, err)
	require.NoError(f, beaconDB.SaveState(ctx, genesisState, parentRoot))
	require.NoError(f, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: parentRoot[:]}))

	// Build a child block at slot 1 signed by the expected proposer.
	stateCopy := genesisState.Copy()
	require.NoError(f, stateCopy.SetSlot(1))
	proposer, err := helpers.BeaconProposerIndex(ctx, stateCopy)
	require.NoError(f, err)
	seedBlock := util.NewBeaconBlock()
	seedBlock.Block.ParentRoot = parentRoot[:]
	seedBlock.Block.Slot = 1
	seedBlock.Block.ProposerIndex = proposer
	seedBlock.Signature, err = signing.ComputeDomainAndSign(genesisState, 0, seedBlock.Block, params.BeaconConfig().DomainBeaconProposer, secretKeys[proposer])
	require.NoError(f, err)

	gen := stategen.New(beaconDB)
	chainSvc := &mock.ChainService{
		Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0),
		State:   genesisState,
		FinalizedCheckPoint: &ethpb.Checkpoint{
			Epoch: 0,
			Root:  make([]byte, 32),
		},
		DB: beaconDB,
	}
	svc := &Service{
		cfg: &config{
			beaconDB:      beaconDB,
			p2p:           fuzzP2P,
			initialSync:   &mockSync.Sync{IsSyncing: false},
			chain:         chainSvc,
			blockNotifier: chainSvc.BlockNotifier(),
			stateGen:      gen,
		},
		seenBlockCache:      lruwrpr.New(10),
		badBlockCache:       lruwrpr.New(10),
		slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
		seenPendingBlocks:   make(map[[32]byte]bool),
	}

	// Encode the seed block exactly as it would appear on the wire and
	// register it (with its digest-qualified topic) as the seed corpus entry.
	encBuf := new(bytes.Buffer)
	_, err = fuzzP2P.Encoding().EncodeGossip(encBuf, seedBlock)
	require.NoError(f, err)
	gossipTopic := p2p.GossipTypeMapping[reflect.TypeOf(seedBlock)]
	forkDigest, err := svc.currentForkDigest()
	assert.NoError(f, err)
	gossipTopic = svc.addDigestToTopic(gossipTopic, forkDigest)
	f.Add("junk", []byte("junk"), encBuf.Bytes(), []byte(gossipTopic))

	f.Fuzz(func(t *testing.T, pid string, sender, payload, rawTopic []byte) {
		svc.cfg.p2p = p2ptest.NewFuzzTestP2P()
		svc.rateLimiter = newRateLimiter(svc.cfg.p2p)
		// Rewind genesis far into the past so fuzzed slots are not rejected
		// outright as coming from the future.
		mockChain := &mock.ChainService{
			Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot*10000000), 0),
			State:   genesisState,
			FinalizedCheckPoint: &ethpb.Checkpoint{
				Epoch: 0,
				Root:  make([]byte, 32),
			},
			DB: beaconDB,
		}
		svc.cfg.chain = mockChain
		svc.cfg.blockNotifier = mockChain.BlockNotifier()
		topicStr := string(rawTopic)
		pubsubMsg := &pubsub.Message{
			Message: &pb.Message{
				From:  sender,
				Data:  payload,
				Topic: &topicStr,
			},
		}
		// Only panics/crashes matter here; discard the validation result.
		_, err := svc.validateBeaconBlockPubSub(ctx, peer.ID(pid), pubsubMsg)
		_ = err
	})
}
// FuzzValidateBeaconBlockPubSub_Altair seeds the fuzzer with a validly signed
// Altair beacon block gossip message, then feeds arbitrary peer IDs, senders,
// payloads, and topics through validateBeaconBlockPubSub, checking that the
// validator tolerates malformed gossip input without panicking.
func FuzzValidateBeaconBlockPubSub_Altair(f *testing.F) {
	db := dbtest.SetupDB(f)
	p := p2ptest.NewFuzzTestP2P()
	ctx := context.Background()
	beaconState, privKeys := util.DeterministicGenesisStateAltair(f, 100)
	// Persist a parent block and its state so the seeded child block has a
	// resolvable ancestry in the database.
	parentBlock := util.NewBeaconBlockAltair()
	wsb, err := wrapper.WrappedSignedBeaconBlock(parentBlock)
	require.NoError(f, err)
	require.NoError(f, db.SaveBlock(ctx, wsb))
	bRoot, err := parentBlock.Block.HashTreeRoot()
	require.NoError(f, err)
	require.NoError(f, db.SaveState(ctx, beaconState, bRoot))
	require.NoError(f, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
	copied := beaconState.Copy()
	require.NoError(f, copied.SetSlot(1))
	proposerIdx, err := helpers.BeaconProposerIndex(ctx, copied)
	require.NoError(f, err)
	// Seed with an Altair block to match the Altair parent and state above;
	// a phase0 block (as the phase0 fuzz target uses) would map to the wrong
	// gossip type for this fork and weaken the seed corpus.
	msg := util.NewBeaconBlockAltair()
	msg.Block.ParentRoot = bRoot[:]
	msg.Block.Slot = 1
	msg.Block.ProposerIndex = proposerIdx
	msg.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, msg.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
	require.NoError(f, err)
	stateGen := stategen.New(db)
	chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0),
		State: beaconState,
		FinalizedCheckPoint: &ethpb.Checkpoint{
			Epoch: 0,
			Root:  make([]byte, 32),
		},
		DB: db,
	}
	r := &Service{
		cfg: &config{
			beaconDB:      db,
			p2p:           p,
			initialSync:   &mockSync.Sync{IsSyncing: false},
			chain:         chainService,
			blockNotifier: chainService.BlockNotifier(),
			stateGen:      stateGen,
		},
		seenBlockCache:      lruwrpr.New(10),
		badBlockCache:       lruwrpr.New(10),
		slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
		seenPendingBlocks:   make(map[[32]byte]bool),
	}
	// Encode the seed block as it would appear on the wire and register it
	// (with its digest-qualified topic) as the seed corpus entry.
	buf := new(bytes.Buffer)
	_, err = p.Encoding().EncodeGossip(buf, msg)
	require.NoError(f, err)
	topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
	digest, err := r.currentForkDigest()
	assert.NoError(f, err)
	topic = r.addDigestToTopic(topic, digest)
	f.Add("junk", []byte("junk"), buf.Bytes(), []byte(topic))
	f.Fuzz(func(t *testing.T, pid string, from, data, topic []byte) {
		r.cfg.p2p = p2ptest.NewFuzzTestP2P()
		r.rateLimiter = newRateLimiter(r.cfg.p2p)
		// Rewind genesis far into the past so fuzzed slots are not rejected
		// outright as coming from the future.
		cService := &mock.ChainService{
			Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot*10000000), 0),
			State:   beaconState,
			FinalizedCheckPoint: &ethpb.Checkpoint{
				Epoch: 0,
				Root:  make([]byte, 32),
			},
			DB: db,
		}
		r.cfg.chain = cService
		r.cfg.blockNotifier = cService.BlockNotifier()
		strTop := string(topic)
		msg := &pubsub.Message{
			Message: &pb.Message{
				From:  from,
				Data:  data,
				Topic: &strTop,
			},
		}
		// Only panics/crashes matter here; discard the validation result.
		_, err := r.validateBeaconBlockPubSub(ctx, peer.ID(pid), msg)
		_ = err
	})
}
// FuzzValidateBeaconBlockPubSub_Bellatrix seeds the fuzzer with a validly
// signed Bellatrix beacon block gossip message, then feeds arbitrary peer IDs,
// senders, payloads, and topics through validateBeaconBlockPubSub, checking
// that the validator tolerates malformed gossip input without panicking.
func FuzzValidateBeaconBlockPubSub_Bellatrix(f *testing.F) {
	db := dbtest.SetupDB(f)
	p := p2ptest.NewFuzzTestP2P()
	ctx := context.Background()
	beaconState, privKeys := util.DeterministicGenesisStateBellatrix(f, 100)
	// Persist a parent block and its state so the seeded child block has a
	// resolvable ancestry in the database.
	parentBlock := util.NewBeaconBlockBellatrix()
	wsb, err := wrapper.WrappedSignedBeaconBlock(parentBlock)
	require.NoError(f, err)
	require.NoError(f, db.SaveBlock(ctx, wsb))
	bRoot, err := parentBlock.Block.HashTreeRoot()
	require.NoError(f, err)
	require.NoError(f, db.SaveState(ctx, beaconState, bRoot))
	require.NoError(f, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
	copied := beaconState.Copy()
	require.NoError(f, copied.SetSlot(1))
	proposerIdx, err := helpers.BeaconProposerIndex(ctx, copied)
	require.NoError(f, err)
	// Seed with a Bellatrix block to match the Bellatrix parent and state
	// above; a phase0 block (as the phase0 fuzz target uses) would map to the
	// wrong gossip type for this fork and weaken the seed corpus.
	msg := util.NewBeaconBlockBellatrix()
	msg.Block.ParentRoot = bRoot[:]
	msg.Block.Slot = 1
	msg.Block.ProposerIndex = proposerIdx
	msg.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, msg.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
	require.NoError(f, err)
	stateGen := stategen.New(db)
	chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0),
		State: beaconState,
		FinalizedCheckPoint: &ethpb.Checkpoint{
			Epoch: 0,
			Root:  make([]byte, 32),
		},
		DB: db,
	}
	r := &Service{
		cfg: &config{
			beaconDB:      db,
			p2p:           p,
			initialSync:   &mockSync.Sync{IsSyncing: false},
			chain:         chainService,
			blockNotifier: chainService.BlockNotifier(),
			stateGen:      stateGen,
		},
		seenBlockCache:      lruwrpr.New(10),
		badBlockCache:       lruwrpr.New(10),
		slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
		seenPendingBlocks:   make(map[[32]byte]bool),
	}
	// Encode the seed block as it would appear on the wire and register it
	// (with its digest-qualified topic) as the seed corpus entry.
	buf := new(bytes.Buffer)
	_, err = p.Encoding().EncodeGossip(buf, msg)
	require.NoError(f, err)
	topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
	digest, err := r.currentForkDigest()
	assert.NoError(f, err)
	topic = r.addDigestToTopic(topic, digest)
	f.Add("junk", []byte("junk"), buf.Bytes(), []byte(topic))
	f.Fuzz(func(t *testing.T, pid string, from, data, topic []byte) {
		r.cfg.p2p = p2ptest.NewFuzzTestP2P()
		r.rateLimiter = newRateLimiter(r.cfg.p2p)
		// Rewind genesis far into the past so fuzzed slots are not rejected
		// outright as coming from the future.
		cService := &mock.ChainService{
			Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot*10000000), 0),
			State:   beaconState,
			FinalizedCheckPoint: &ethpb.Checkpoint{
				Epoch: 0,
				Root:  make([]byte, 32),
			},
			DB: db,
		}
		r.cfg.chain = cService
		r.cfg.blockNotifier = cService.BlockNotifier()
		strTop := string(topic)
		msg := &pubsub.Message{
			Message: &pb.Message{
				From:  from,
				Data:  data,
				Topic: &strTop,
			},
		}
		// Only panics/crashes matter here; discard the validation result.
		_, err := r.validateBeaconBlockPubSub(ctx, peer.ID(pid), msg)
		_ = err
	})
}

View File

@@ -22,7 +22,8 @@ func UseE2EMainnetConfig() {
}
// E2ETestConfig retrieves the configurations made specifically for E2E testing.
// Warning: This config is only for testing, it is not meant for use outside of E2E.
//
// WARNING: This config is only for testing, it is not meant for use outside of E2E.
func E2ETestConfig() *BeaconChainConfig {
e2eConfig := MinimalSpecConfig()

View File

@@ -1,27 +1,8 @@
# End-to-end Testing Package
This is the main project folder of the end-to-end testing suite for Prysm. This performs a full end-to-end test for Prysm, including spinning up an ETH1 dev chain, sending deposits to the deposit contract, and making sure the beacon node and its validators are running and performing properly for a few epochs.
It also performs a test on a syncing node, and supports featureflags to allow easy E2E testing of experimental features.
It also performs a test on a syncing node, and supports feature flags to allow easy E2E testing of experimental features.
## How it works
Through the `end2EndConfig` struct, you can declare several options such as how many epochs the test should run for, and what `BeaconConfig` the test should use. You can also declare how many beacon nodes and validator clients are run; the E2E will automatically divide the validators evenly among the beacon nodes.
In order to "evaluate" the state of the beacon chain while the E2E is running, there are `Evaluators` that use the beacon chain node API to determine if the network is performing as it should. This can evaluate for conditions like validator activation, finalization, validator participation and more.
Evaluators have 3 parts: a name for its test name, a `policy` which declares in which epoch(s) the evaluator should run, and the `evaluation`, which uses the beacon chain API to determine if the beacon chain passes certain conditions like finality.
## Current end-to-end tests
* Minimal Config - 2 beacon nodes, 256 validators, running for 8 epochs
* Minimal Config Slashing Test - 2 beacon nodes, 256 validators, tests attester and proposer slashing
## Instructions
Note: Java 11 or greater is required to run web3signer.
If you wish to run all the minimal spec E2E tests, you can run them through bazel with:
```
bazel test //testing/endtoend:go_default_test --test_output=streamed
```
Please see our docs page, https://docs.prylabs.network/docs/devtools/end-to-end, to read more about the feature.

View File

@@ -11,6 +11,7 @@ go_test(
"//config/fieldparams:go_default_library",
"//config/validator/service:go_default_library",
"//encoding/bytesutil:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//validator/accounts:go_default_library",
"//validator/accounts/wallet:go_default_library",

View File

@@ -539,8 +539,19 @@ func feeRecipientConfig(cliCtx *cli.Context) (*validatorServiceConfig.FeeRecipie
if !common.IsHexAddress(option.FeeRecipient) {
return nil, errors.New("fee recipient is not a valid eth1 address")
}
mixedcaseAddress, err := common.NewMixedcaseAddressFromString(option.FeeRecipient)
if err != nil {
return nil, errors.Wrapf(err, "could not decode fee recipient %s", option.FeeRecipient)
}
checksumAddress := common.BytesToAddress(feebytes)
if !mixedcaseAddress.ValidChecksum() {
log.Warnf("Fee recipient %s is not a checksum Ethereum address. "+
"The checksummed address is %s and will be used as the fee recipient. "+
"We recommend using a mixed-case address (checksum) "+
"to prevent spelling mistakes in your fee recipient Ethereum address", option.FeeRecipient, checksumAddress.Hex())
}
frConfig.ProposeConfig[bytesutil.ToBytes48(decodedKey)] = &validatorServiceConfig.FeeRecipientOptions{
FeeRecipient: common.BytesToAddress(feebytes),
FeeRecipient: checksumAddress,
}
}
}

View File

@@ -16,6 +16,7 @@ import (
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
validator_service_config "github.com/prysmaticlabs/prysm/config/validator/service"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/validator/accounts"
"github.com/prysmaticlabs/prysm/validator/accounts/wallet"
@@ -312,6 +313,8 @@ func TestUnmarshalFromURL(t *testing.T) {
}
func TestFeeRecipientConfig(t *testing.T) {
hook := logTest.NewGlobal()
type feeRecipientFlag struct {
dir string
url string
@@ -326,12 +329,13 @@ func TestFeeRecipientConfig(t *testing.T) {
want func() *validator_service_config.FeeRecipientConfig
urlResponse string
wantErr string
wantLog string
}{
{
name: "Happy Path Config file File",
name: "Happy Path Config file File, bad checksum",
args: args{
feeRecipientFlagValues: &feeRecipientFlag{
dir: "./testdata/good-prepare-beacon-proposer-config.json",
dir: "./testdata/good-prepare-beacon-proposer-config-badchecksum.json",
url: "",
defaultfee: "",
},
@@ -342,15 +346,16 @@ func TestFeeRecipientConfig(t *testing.T) {
return &validator_service_config.FeeRecipientConfig{
ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*validator_service_config.FeeRecipientOptions{
bytesutil.ToBytes48(key1): {
FeeRecipient: common.HexToAddress("0x50155530FCE8a85ec7055A5F8b2bE214B3DaeFd3"),
FeeRecipient: common.HexToAddress("0xae967917c465db8578ca9024c205720b1a3651A9"),
},
},
DefaultConfig: &validator_service_config.FeeRecipientOptions{
FeeRecipient: common.HexToAddress("0x6e35733c5af9B61374A128e6F85f553aF09ff89A"),
FeeRecipient: common.HexToAddress("0xae967917c465db8578ca9024c205720b1a3651A9"),
},
}
},
wantErr: "",
wantLog: "is not a checksum Ethereum address",
},
{
name: "Happy Path Config file File multiple fee recipients",
@@ -506,6 +511,11 @@ func TestFeeRecipientConfig(t *testing.T) {
require.ErrorContains(t, tt.wantErr, err)
return
}
if tt.wantLog != "" {
assert.LogsContain(t, hook,
tt.wantLog,
)
}
w := tt.want()
require.DeepEqual(t, w, got)
})

View File

@@ -0,0 +1,10 @@
{
"proposer_config": {
"0xa057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7a": {
"fee_recipient": "0xae967917c465db8578ca9024c205720b1a3651A9"
}
},
"default_config": {
"fee_recipient": "0xae967917c465db8578ca9024c205720b1a3651A9"
}
}