Compare commits

...

20 Commits

Author SHA1 Message Date
Raul Jordan
34c49e9fe8 Merge branch 'develop' into dynamic-config-unload 2022-05-12 13:06:38 -04:00
Raul Jordan
38f99a4ff1 attempt 2022-05-12 13:06:30 -04:00
Potuz
61033ebea1 handle failure to update head (#10651)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-12 16:36:46 +00:00
Potuz
e808025b17 regression test off-by-one (#10675)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-12 15:49:37 +00:00
Radosław Kapka
7db0435ee0 Unify WARNING comments (#10678)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-12 15:25:44 +00:00
Nishant Das
1f086e4333 add more fuzz targets (#10682) 2022-05-12 10:47:29 -04:00
james-prysm
184e5be9de Fee recipient: checksum log (#10664)
* adding checksum check at validator client and beacon node

* adding validation and logs on validator client startup

* moving the log and validation

* fixing unit tests

* adding test for bad checksum on validator client

* fixing bazel

* addressing comments

* fixing log display

* Update beacon-chain/node/config.go

* Apply suggestions from code review

* breaking up lines

* fixing unit test

* ugh another fix to unit test

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-11 19:36:57 +00:00
terencechain
e33850bf51 Don't return nil with new head (#10680) 2022-05-11 11:33:10 -07:00
Preston Van Loon
cc643ac4cc native-state: Simplify MarshalSSZ (#10677)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-11 14:25:11 +00:00
Nishant Das
abefe1e9d5 Add Sync Block Topic Fuzz Target (#10676)
* add new target

* remove

* Update beacon-chain/sync/BUILD.bazel
2022-05-11 13:18:12 +00:00
Nishant Das
b4e89fb28b Add in State Fuzzing (#10375)
* add it in

* build tags

* gaz

* remove dependency on fast-ssz for now

* copy and comment

* add in native state

* fix build

* no assert on invalid input, just return

* Add failing case

* fix it up

Co-authored-by: prestonvanloon <preston@prysmaticlabs.com>
2022-05-11 12:47:54 +00:00
terencechain
4ad1c4df01 Cache and use justified and finalized payload block hash (#10657)
* Cache and use justified and finalized payload block hash

* Fix tests

* Use real byte

* Fix conflicts

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-10 21:20:28 +00:00
terencechain
6a197b47d9 Call fcu on invalid payload (#10565)
* Starting

* remove finalized root

* Just call fcu

* Review feedbacks

* fix one test

* Fix conflicts

* Update execution_engine_test.go

* Add a test for invalid recursive call

* Add comprehensive recursive test

* disallow override empty hash

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-10 20:02:00 +00:00
Radosław Kapka
d102421a25 Improve ReceiveBlock's comment (#10671)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-10 19:13:28 +00:00
Radosław Kapka
7b1490429c Add link to e2e docs in README (#10672) 2022-05-10 14:16:40 +00:00
terencechain
bbdf19cfd0 Process atts and update head before proposing (#10653)
* Process atts and update head

* Fix ctx

* New test and old tests

* Update validator_test.go

* Update validator_test.go

* Update service.go

* Rename to UpdateHead

* Update receive_attestation.go

* Update receive_attestation.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-09 23:12:50 +00:00
Radosław Kapka
5d94030b4f Cleanup of stategen package (#10607)
* powchain and stategen

* revert powchain changes

* rename field to blockRootsOfSavedStates

* rename params to blockRoot

* review feedback

* fix loop

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-09 22:25:47 +00:00
terencechain
16273a2040 Add engine timeout values (#10645)
* Add timeout values

* Update engine_client.go

* Update engine_client.go

* Update beacon-chain/powchain/engine_client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update beacon-chain/powchain/engine_client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update beacon-chain/powchain/engine_client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update engine_client.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2022-05-09 20:02:20 +00:00
Preston Van Loon
97663548a1 fuzz: Add fuzz tests for sparse merkle trie (#10662)
* Add fuzz tests for sparse merkle trie and change HTR signature to return an error

* fix capitalization of error message
2022-05-09 16:51:48 +00:00
Radosław Kapka
7d9d8454b1 Improvements to powchain package (#10652)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-05-09 13:57:34 +00:00
112 changed files with 1901 additions and 643 deletions

View File

@@ -31,6 +31,12 @@ type ChainInfoFetcher interface {
HeadDomainFetcher
}
// HeadUpdater defines a common interface for methods in the blockchain service
// which allow updating the head info.
type HeadUpdater interface {
UpdateHead(context.Context) error
}
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
type TimeFetcher interface {
GenesisTime() time.Time
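
The new HeadUpdater interface lets other packages depend on head updates without importing the whole blockchain service. A minimal sketch of such a consumer, assuming the example names (proposerServer, prepareToPropose), which are not part of this diff:

package example

import "context"

// HeadUpdater mirrors the interface added in the hunk above.
type HeadUpdater interface {
	UpdateHead(context.Context) error
}

// proposerServer is a hypothetical consumer that wants a fresh head
// before acting; it depends only on the narrow interface.
type proposerServer struct {
	headUpdater HeadUpdater
}

func (p *proposerServer) prepareToPropose(ctx context.Context) error {
	// Fold pending fork-choice attestations into head first.
	return p.headUpdater.UpdateHead(ctx)
}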

View File

@@ -51,7 +51,7 @@ func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
cp := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte("foo"), 32)}
c := setupBeaconChain(t, beaconDB)
c.store.SetFinalizedCheckpt(cp)
c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})
assert.Equal(t, cp.Epoch, c.FinalizedCheckpt().Epoch, "Unexpected finalized epoch")
}
@@ -62,7 +62,7 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
genesisRoot := [32]byte{'A'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c := setupBeaconChain(t, beaconDB)
c.store.SetFinalizedCheckpt(cp)
c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})
c.originBlockRoot = genesisRoot
assert.DeepEqual(t, c.originBlockRoot[:], c.FinalizedCheckpt().Root)
}
@@ -73,7 +73,7 @@ func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
c := setupBeaconChain(t, beaconDB)
assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
cp := &ethpb.Checkpoint{Epoch: 6, Root: bytesutil.PadTo([]byte("foo"), 32)}
c.store.SetJustifiedCheckpt(cp)
c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
assert.Equal(t, cp.Epoch, c.CurrentJustifiedCheckpt().Epoch, "Unexpected justified epoch")
}
@@ -83,7 +83,7 @@ func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
c := setupBeaconChain(t, beaconDB)
genesisRoot := [32]byte{'B'}
cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
c.store.SetJustifiedCheckpt(cp)
c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
c.originBlockRoot = genesisRoot
assert.DeepEqual(t, c.originBlockRoot[:], c.CurrentJustifiedCheckpt().Root)
}
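
These tests now record an execution payload block hash alongside each checkpoint. A minimal sketch of the store shape this implies; only the method names appear in this diff, while the field and lock layout below are assumptions:

package store

import (
	"sync"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

type Store struct {
	sync.RWMutex
	finalizedCheckpt          *ethpb.Checkpoint
	finalizedPayloadBlockHash [32]byte
}

// SetFinalizedCheckptAndPayloadHash records the finalized checkpoint and
// its payload block hash in one critical section, so readers never see
// one without the other.
func (s *Store) SetFinalizedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
	s.Lock()
	defer s.Unlock()
	s.finalizedCheckpt = cp
	s.finalizedPayloadBlockHash = h
}

// FinalizedPayloadBlockHash returns the cached hash, sparing callers a
// block lookup in the DB (see notifyForkchoiceUpdate below).
func (s *Store) FinalizedPayloadBlockHash() [32]byte {
	s.RLock()
	defer s.RUnlock()
	return s.finalizedPayloadBlockHash
}

The justified pair (SetJustifiedCheckptAndPayloadHash, JustifiedPayloadBlockHash) would follow the same shape.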

View File

@@ -31,11 +31,9 @@ var (
// notifyForkchoiceUpdateArg is the argument for the forkchoice update notification `notifyForkchoiceUpdate`.
type notifyForkchoiceUpdateArg struct {
headState state.BeaconState
headRoot [32]byte
headBlock interfaces.BeaconBlock
finalizedRoot [32]byte
justifiedRoot [32]byte
headState state.BeaconState
headRoot [32]byte
headBlock interfaces.BeaconBlock
}
// notifyForkchoiceUpdate signals the execution engine of fork choice updates. The execution engine should:
@@ -61,18 +59,12 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
}
finalizedHash, err := s.getPayloadHash(ctx, arg.finalizedRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block hash")
}
justifiedHash, err := s.getPayloadHash(ctx, arg.justifiedRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get justified block hash")
}
finalizedHash := s.store.FinalizedPayloadBlockHash()
justifiedHash := s.store.JustifiedPayloadBlockHash()
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash,
SafeBlockHash: justifiedHash,
FinalizedBlockHash: finalizedHash,
SafeBlockHash: justifiedHash[:],
FinalizedBlockHash: finalizedHash[:],
}
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
@@ -89,10 +81,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
}).Info("Called fork choice updated with optimistic block")
return payloadID, s.optimisticCandidateBlock(ctx, headBlk)
case powchain.ErrInvalidPayloadStatus:
newPayloadInvalidNodeCount.Inc()
headRoot := arg.headRoot
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, headRoot, bytesutil.ToBytes32(headBlk.ParentRoot()), bytesutil.ToBytes32(lastValidHash))
if err != nil {
@@ -101,12 +94,35 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return nil, err
}
r, err := s.updateHead(ctx, s.justifiedBalances.balances)
if err != nil {
return nil, err
}
b, err := s.getBlock(ctx, r)
if err != nil {
return nil, err
}
st, err := s.cfg.StateGen.StateByRoot(ctx, r)
if err != nil {
return nil, err
}
pid, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: st,
headRoot: r,
headBlock: b.Block(),
})
if err != nil {
return nil, err
}
log.WithFields(logrus.Fields{
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", headRoot),
"invalidCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return nil, ErrInvalidPayload
return pid, ErrInvalidPayload
default:
return nil, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
@@ -125,19 +141,19 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
// getPayloadHash returns the payload hash given the block root.
// If the block is before the Bellatrix fork epoch, it returns the zero hash.
func (s *Service) getPayloadHash(ctx context.Context, root [32]byte) ([]byte, error) {
finalizedBlock, err := s.getBlock(ctx, s.ensureRootNotZeros(root))
func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, error) {
blk, err := s.getBlock(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(root)))
if err != nil {
return nil, err
return [32]byte{}, err
}
if blocks.IsPreBellatrixVersion(finalizedBlock.Block().Version()) {
return params.BeaconConfig().ZeroHash[:], nil
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
return params.BeaconConfig().ZeroHash, nil
}
payload, err := finalizedBlock.Block().Body().ExecutionPayload()
payload, err := blk.Block().Body().ExecutionPayload()
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
return [32]byte{}, errors.Wrap(err, "could not get execution payload")
}
return payload.BlockHash, nil
return bytesutil.ToBytes32(payload.BlockHash), nil
}
// notifyNewPayload signals the execution engine of a new payload.

View File

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
@@ -174,12 +175,15 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: tt.newForkchoiceErr}
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, beaconDB.SaveState(ctx, st, tt.finalizedRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, tt.finalizedRoot))
fc := &ethpb.Checkpoint{Epoch: 1, Root: tt.finalizedRoot[:]}
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
arg := &notifyForkchoiceUpdateArg{
headState: st,
headRoot: tt.headRoot,
headBlock: tt.blk,
finalizedRoot: tt.finalizedRoot,
justifiedRoot: tt.justifiedRoot,
headState: st,
headRoot: tt.headRoot,
headBlock: tt.blk,
}
_, err := service.notifyForkchoiceUpdate(ctx, arg)
if tt.errString != "" {
@@ -191,6 +195,147 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
}
}
//
//  A <- B <- C <- D
//       \
//         ---------- E <- F
//                     \
//                       ------ G
//
// D is the current head, attestations for F and G come late, both are invalid.
// We switch recursively to G, then F, and finally to D.
//
// We test:
// 1. forkchoice removes blocks F and G from the forkchoice implementation
// 2. forkchoice removes the weights of these blocks
// 3. the blockchain package calls fcu to obtain heads G -> F -> D.
func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
// Prepare blocks
ba := util.NewBeaconBlockBellatrix()
ba.Block.Body.ExecutionPayload.BlockNumber = 1
wba, err := wrapper.WrappedSignedBeaconBlock(ba)
require.NoError(t, err)
bra, err := wba.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wba))
bb := util.NewBeaconBlockBellatrix()
bb.Block.Body.ExecutionPayload.BlockNumber = 2
wbb, err := wrapper.WrappedSignedBeaconBlock(bb)
require.NoError(t, err)
brb, err := wbb.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbb))
bc := util.NewBeaconBlockBellatrix()
bc.Block.Body.ExecutionPayload.BlockNumber = 3
wbc, err := wrapper.WrappedSignedBeaconBlock(bc)
require.NoError(t, err)
brc, err := wbc.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbc))
bd := util.NewBeaconBlockBellatrix()
pd := [32]byte{'D'}
bd.Block.Body.ExecutionPayload.BlockHash = pd[:]
bd.Block.Body.ExecutionPayload.BlockNumber = 4
wbd, err := wrapper.WrappedSignedBeaconBlock(bd)
require.NoError(t, err)
brd, err := wbd.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbd))
be := util.NewBeaconBlockBellatrix()
pe := [32]byte{'E'}
be.Block.Body.ExecutionPayload.BlockHash = pe[:]
be.Block.Body.ExecutionPayload.BlockNumber = 5
wbe, err := wrapper.WrappedSignedBeaconBlock(be)
require.NoError(t, err)
bre, err := wbe.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbe))
bf := util.NewBeaconBlockBellatrix()
pf := [32]byte{'F'}
bf.Block.Body.ExecutionPayload.BlockHash = pf[:]
bf.Block.Body.ExecutionPayload.BlockNumber = 6
bf.Block.ParentRoot = bre[:]
wbf, err := wrapper.WrappedSignedBeaconBlock(bf)
require.NoError(t, err)
brf, err := wbf.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbf))
bg := util.NewBeaconBlockBellatrix()
bg.Block.Body.ExecutionPayload.BlockNumber = 7
pg := [32]byte{'G'}
bg.Block.Body.ExecutionPayload.BlockHash = pg[:]
bg.Block.ParentRoot = bre[:]
wbg, err := wrapper.WrappedSignedBeaconBlock(bg)
require.NoError(t, err)
brg, err := wbg.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbg))
// Insert blocks into forkchoice
fcs := doublylinkedtree.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
service.justifiedBalances.balances = []uint64{50, 100, 200}
require.NoError(t, err)
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 2, brb, bra, [32]byte{'B'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 3, brc, brb, [32]byte{'C'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 4, brd, brc, [32]byte{'D'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 5, bre, brb, [32]byte{'E'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 6, brf, bre, [32]byte{'F'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 7, brg, bre, [32]byte{'G'}, 0, 0))
// Insert attestations to D, F and G so that F and G have higher weight than D
// Ensure G is head
fcs.ProcessAttestation(ctx, []uint64{0}, brd, 1)
fcs.ProcessAttestation(ctx, []uint64{1}, brf, 1)
fcs.ProcessAttestation(ctx, []uint64{2}, brg, 1)
headRoot, err := fcs.Head(ctx, 0, bra, []uint64{50, 100, 200}, 0)
require.NoError(t, err)
require.Equal(t, brg, headRoot)
// Prepare Engine Mock to return invalid unless head is D, LVH = E
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: powchain.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: pe[:], OverrideValidHash: [32]byte{'D'}}
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
fc := &ethpb.Checkpoint{Epoch: 0, Root: bra[:]}
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
a := &notifyForkchoiceUpdateArg{
headState: st,
headBlock: wbg.Block(),
headRoot: brg,
}
_, err = service.notifyForkchoiceUpdate(ctx, a)
require.ErrorIs(t, ErrInvalidPayload, err)
// Ensure Head is D
headRoot, err = fcs.Head(ctx, 0, bra, service.justifiedBalances.balances, 0)
require.NoError(t, err)
require.Equal(t, brd, headRoot)
// Ensure F and G were removed but their parent E wasn't
require.Equal(t, false, fcs.HasNode(brf))
require.Equal(t, false, fcs.HasNode(brg))
require.Equal(t, true, fcs.HasNode(bre))
}
func Test_NotifyNewPayload(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
@@ -537,11 +682,11 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
jRoot, err := tt.justified.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, tt.justified))
service.store.SetJustifiedCheckpt(
service.store.SetJustifiedCheckptAndPayloadHash(
&ethpb.Checkpoint{
Root: jRoot[:],
Epoch: slots.ToEpoch(tt.justified.Block().Slot()),
})
}, [32]byte{'a'})
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedParentBlock))
err = service.optimisticCandidateBlock(ctx, tt.blk)
@@ -803,7 +948,7 @@ func TestService_getPayloadHash(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
_, err = service.getPayloadHash(ctx, [32]byte{})
_, err = service.getPayloadHash(ctx, []byte{})
require.ErrorIs(t, errBlockNotFoundInCacheOrDB, err)
b := util.NewBeaconBlock()
@@ -813,20 +958,20 @@ func TestService_getPayloadHash(t *testing.T) {
require.NoError(t, err)
service.saveInitSyncBlock(r, wsb)
h, err := service.getPayloadHash(ctx, r)
h, err := service.getPayloadHash(ctx, r[:])
require.NoError(t, err)
require.DeepEqual(t, params.BeaconConfig().ZeroHash[:], h)
require.DeepEqual(t, params.BeaconConfig().ZeroHash, h)
bb := util.NewBeaconBlockBellatrix()
h = []byte{'a'}
bb.Block.Body.ExecutionPayload.BlockHash = h
h = [32]byte{'a'}
bb.Block.Body.ExecutionPayload.BlockHash = h[:]
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(bb)
require.NoError(t, err)
service.saveInitSyncBlock(r, wsb)
h, err = service.getPayloadHash(ctx, r)
h, err = service.getPayloadHash(ctx, r[:])
require.NoError(t, err)
require.DeepEqual(t, []byte{'a'}, h)
require.DeepEqual(t, [32]byte{'a'}, h)
}

View File

@@ -154,8 +154,8 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{'b'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
headRoot, err := service.updateHead(context.Background(), []uint64{})
require.NoError(t, err)
@@ -298,8 +298,8 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
Root: bellatrixBlkRoot[:],
Epoch: 1,
}
service.store.SetFinalizedCheckpt(fcp)
service.store.SetJustifiedCheckpt(fcp)
service.store.SetFinalizedCheckptAndPayloadHash(fcp, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fcp, [32]byte{'b'})
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)

View File

@@ -21,7 +21,7 @@ func testServiceOptsWithDB(t *testing.T) []Option {
}
}
// warning: only use these opts when you are certain there are no db calls
// WARNING: only use these opts when you are certain there are no db calls
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
// initialization requirements w/o the overhead of db init.
func testServiceOptsNoDB() []Option {

View File

@@ -64,7 +64,11 @@ func (s *Service) NewSlot(ctx context.Context, slot types.Slot) error {
return err
}
if bytes.Equal(r, f.Root) {
s.store.SetJustifiedCheckpt(bj)
h, err := s.getPayloadHash(ctx, bj.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(bj, h)
}
}
return nil

View File

@@ -324,9 +324,9 @@ func TestStore_SaveCheckpointState(t *testing.T) {
r := [32]byte{'g'}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
r = bytesutil.ToBytes32([]byte{'A'})
@@ -358,9 +358,9 @@ func TestStore_SaveCheckpointState(t *testing.T) {
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
@@ -500,7 +500,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
@@ -535,7 +535,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
@@ -564,7 +564,7 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r32[:], Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
@@ -591,7 +591,7 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r32[:], Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33

View File

@@ -123,7 +123,9 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
@@ -146,9 +148,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
// If slasher is configured, forward the attestations in the block via
// an event feed for processing.
if features.Get().EnableSlasher {
@@ -195,9 +194,19 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
newFinalized := postState.FinalizedCheckpointEpoch() > finalized.Epoch
if newFinalized {
s.store.SetPrevFinalizedCheckpt(finalized)
s.store.SetFinalizedCheckpt(postState.FinalizedCheckpoint())
cp := postState.FinalizedCheckpoint()
h, err := s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(cp, h)
s.store.SetPrevJustifiedCheckpt(justified)
s.store.SetJustifiedCheckpt(postState.CurrentJustifiedCheckpoint())
cp = postState.CurrentJustifiedCheckpoint()
h, err = s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(postState.CurrentJustifiedCheckpoint(), h)
}
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
@@ -413,6 +422,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
}
s.saveInitSyncBlock(blockRoots[i], b)
if err = s.handleBlockAfterBatchVerify(ctx, b, blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return nil, nil, err
}
}
for r, st := range boundaries {
@@ -426,14 +439,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return nil, nil, err
}
f := fCheckpoints[len(fCheckpoints)-1]
j := jCheckpoints[len(jCheckpoints)-1]
arg := &notifyForkchoiceUpdateArg{
headState: preState,
headRoot: lastBR,
headBlock: lastB.Block(),
finalizedRoot: bytesutil.ToBytes32(f.Root),
justifiedRoot: bytesutil.ToBytes32(j.Root),
headState: preState,
headRoot: lastBR,
headBlock: lastB.Block(),
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return nil, nil, err
@@ -484,7 +493,11 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interf
return err
}
s.store.SetPrevFinalizedCheckpt(finalized)
s.store.SetFinalizedCheckpt(fCheckpoint)
h, err := s.getPayloadHash(ctx, fCheckpoint.Root)
if err != nil {
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(fCheckpoint, h)
}
return nil
}

View File

@@ -208,7 +208,11 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
return errNilJustifiedInStore
}
s.store.SetPrevJustifiedCheckpt(justified)
s.store.SetJustifiedCheckpt(cpt)
h, err := s.getPayloadHash(ctx, cpt.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cpt, h)
}
return nil
@@ -227,7 +231,11 @@ func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpo
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp); err != nil {
return err
}
s.store.SetJustifiedCheckpt(cp)
h, err := s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cp, h)
return nil
}

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"math/big"
"strconv"
"sync"
"testing"
"time"
@@ -39,6 +40,7 @@ import (
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
prysmTime "github.com/prysmaticlabs/prysm/time"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestStore_OnBlock_ProtoArray(t *testing.T) {
@@ -129,9 +131,9 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: validGenesisRoot[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: roots[0]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
root, err := tt.blk.Block.HashTreeRoot()
@@ -232,9 +234,9 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: validGenesisRoot[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: roots[0]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
root, err := tt.blk.Block.HashTreeRoot()
@@ -289,7 +291,8 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
@@ -353,7 +356,8 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
@@ -415,7 +419,9 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.saveInitSyncBlock(gRoot, wsb)
st, keys := util.DeterministicGenesisState(t, 64)
@@ -484,7 +490,7 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})
update, err = service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
assert.Equal(t, true, update, "Should be able to update justified")
@@ -516,7 +522,7 @@ func TestShouldUpdateJustified_ReturnFalse_ProtoArray(t *testing.T) {
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})
update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
@@ -549,7 +555,7 @@ func TestShouldUpdateJustified_ReturnFalse_DoublyLinkedTree(t *testing.T) {
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})
update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
@@ -577,7 +583,7 @@ func TestCachedPreState_CanGetFromStateSummary_ProtoArray(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -614,7 +620,7 @@ func TestCachedPreState_CanGetFromStateSummary_DoublyLinkedTree(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -648,7 +654,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -656,7 +662,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
b := util.NewBeaconBlock()
b.Block.Slot = 1
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
wb, err := wrapper.WrappedBeaconBlock(b.Block)
require.NoError(t, err)
err = service.verifyBlkPreState(ctx, wb)
@@ -691,7 +697,7 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
r, err := signedBlock.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: []byte{'A'}}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}})
st, err := util.NewBeaconState()
require.NoError(t, err)
@@ -723,7 +729,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -768,7 +774,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -814,7 +820,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -863,7 +869,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -913,7 +919,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
// Set finalized epoch to 1.
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -974,7 +980,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
// Set finalized epoch to 1.
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -1350,7 +1356,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
for _, tt := range tests {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: tt.args.finalizedRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: tt.args.finalizedRoot[:]}, [32]byte{})
err = service.VerifyFinalizedBlkDescendant(ctx, tt.args.parentRoot)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
@@ -1378,7 +1384,7 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, beaconState, gRoot))
service.originBlockRoot = gRoot
currentCp := &ethpb.Checkpoint{Epoch: 1}
service.store.SetJustifiedCheckpt(currentCp)
service.store.SetJustifiedCheckptAndPayloadHash(currentCp, [32]byte{'a'})
newCp := &ethpb.Checkpoint{Epoch: 2, Root: gRoot[:]}
require.NoError(t, service.updateJustifiedInitSync(ctx, newCp))
@@ -1439,7 +1445,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
testState := gs.Copy()
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
@@ -1493,7 +1499,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
testState := gs.Copy()
for i := types.Slot(1); i < params.BeaconConfig().SlotsPerEpoch; i++ {
@@ -1524,7 +1530,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
assert.NoError(t, gs.SetEth1DepositIndex(8))
@@ -1563,7 +1569,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 7}))
assert.NoError(t, gs.SetEth1DepositIndex(6))
@@ -1887,3 +1893,82 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
require.NoError(t, err)
service.insertSlashingsToForkChoiceStore(ctx, wb.Block().Body().AttesterSlashings())
}
func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
blk1, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
wsb1, err := wrapper.WrappedSignedBeaconBlock(blk1)
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
wsb2, err := wrapper.WrappedSignedBeaconBlock(blk2)
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
wsb3, err := wrapper.WrappedSignedBeaconBlock(blk3)
require.NoError(t, err)
blk4, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 4)
require.NoError(t, err)
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
wsb4, err := wrapper.WrappedSignedBeaconBlock(blk4)
require.NoError(t, err)
logHook := logTest.NewGlobal()
for i := 0; i < 10; i++ {
var wg sync.WaitGroup
wg.Add(4)
go func() {
require.NoError(t, service.onBlock(ctx, wsb1, r1))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb2, r2))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb3, r3))
wg.Done()
}()
go func() {
require.NoError(t, service.onBlock(ctx, wsb4, r4))
wg.Done()
}()
wg.Wait()
require.LogsDoNotContain(t, logHook, "New head does not exist in DB. Do nothing")
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r1))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r2))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r3))
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r4))
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'a'})
}
}

View File

@@ -128,42 +128,55 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
return
}
// Continue when there's no fork choice attestation, there's nothing to process and update head.
// This covers the condition when the node is still initial syncing to the head of the chain.
if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
continue
if err := s.UpdateHead(s.ctx); err != nil {
log.WithError(err).Error("Could not process attestations and update head")
return
}
s.processAttestations(s.ctx)
justified := s.store.JustifiedCheckpt()
if justified == nil {
log.WithError(errNilJustifiedInStore).Error("Could not get justified checkpoint")
continue
}
balances, err := s.justifiedBalances.get(s.ctx, bytesutil.ToBytes32(justified.Root))
if err != nil {
log.WithError(err).Errorf("Unable to get justified balances for root %v", justified.Root)
continue
}
newHeadRoot, err := s.updateHead(s.ctx, balances)
if err != nil {
log.WithError(err).Warn("Resolving fork due to new attestation")
}
if s.headRoot() != newHeadRoot {
log.WithFields(logrus.Fields{
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
}).Debug("Head changed due to attestations")
}
s.notifyEngineIfChangedHead(s.ctx, newHeadRoot)
}
}
}()
}
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
// It requires no external inputs.
func (s *Service) UpdateHead(ctx context.Context) error {
// Return early when there are no fork choice attestations; there's nothing to process to update head.
// This covers the condition when the node is still initial syncing to the head of the chain.
if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
return nil
}
// Only one process can process attestations and update head at a time.
s.processAttestationsLock.Lock()
defer s.processAttestationsLock.Unlock()
s.processAttestations(ctx)
justified := s.store.JustifiedCheckpt()
if justified == nil {
return errNilJustifiedInStore
}
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
if err != nil {
return err
}
newHeadRoot, err := s.updateHead(ctx, balances)
if err != nil {
log.WithError(err).Warn("Resolving fork due to new attestation")
}
if s.headRoot() != newHeadRoot {
log.WithFields(logrus.Fields{
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
}).Debug("Head changed due to attestations")
}
s.notifyEngineIfChangedHead(ctx, newHeadRoot)
return nil
}
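
Per "Process atts and update head before proposing" (#10653) in the commit list above, the proposer path is the intended external caller of UpdateHead. A hedged sketch of that call site; everything except UpdateHead's signature is illustrative:

package example

import (
	"context"

	"github.com/sirupsen/logrus"
)

// HeadUpdater matches the interface added earlier in this diff.
type HeadUpdater interface {
	UpdateHead(context.Context) error
}

// updateHeadBeforeProposal folds pooled fork-choice attestations into head
// so a proposal builds on the latest view. A failed update is logged
// rather than aborting the proposal.
func updateHeadBeforeProposal(ctx context.Context, hu HeadUpdater) {
	if err := hu.UpdateHead(ctx); err != nil {
		logrus.WithError(err).Error("Could not process attestations and update head")
	}
}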
// This calls notifyForkchoiceUpdate in the event that the head has changed
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
if s.headRoot() == newHeadRoot {
if newHeadRoot == [32]byte{} || s.headRoot() == newHeadRoot {
return
}
@@ -172,12 +185,6 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
return // We don't have the block, don't notify the engine and update head.
}
finalized := s.store.FinalizedCheckpt()
if finalized == nil {
log.WithError(errNilFinalizedInStore).Error("could not get finalized checkpoint")
return
}
newHeadBlock, err := s.getBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get new head block")
@@ -189,11 +196,9 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
return
}
arg := &notifyForkchoiceUpdateArg{
headState: headState,
headRoot: newHeadRoot,
headBlock: newHeadBlock.Block(),
finalizedRoot: bytesutil.ToBytes32(finalized.Root),
justifiedRoot: bytesutil.ToBytes32(s.store.JustifiedCheckpt().Root),
headState: headState,
headRoot: newHeadRoot,
headBlock: newHeadBlock.Block(),
}
_, err = s.notifyForkchoiceUpdate(s.ctx, arg)
if err != nil {

View File

@@ -137,14 +137,14 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
service.notifyEngineIfChangedHead(ctx, service.headRoot())
hookErr := "could not notify forkchoice update"
finalizedErr := "could not get finalized checkpoint"
require.LogsDoNotContain(t, hook, finalizedErr)
invalidStateErr := "Could not get state from db"
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
gb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
service.saveInitSyncBlock([32]byte{'a'}, gb)
service.notifyEngineIfChangedHead(ctx, [32]byte{'a'})
require.LogsContain(t, hook, finalizedErr)
require.LogsContain(t, hook, invalidStateErr)
hook.Reset()
service.head = &head{
@@ -169,9 +169,9 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.store.SetFinalizedCheckpt(finalized)
service.store.SetFinalizedCheckptAndPayloadHash(finalized, [32]byte{})
service.notifyEngineIfChangedHead(ctx, r1)
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
// Block in DB
@@ -191,12 +191,51 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.store.SetFinalizedCheckpt(finalized)
service.store.SetFinalizedCheckptAndPayloadHash(finalized, [32]byte{})
service.notifyEngineIfChangedHead(ctx, r1)
require.LogsDoNotContain(t, hook, finalizedErr)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2)
require.Equal(t, true, has)
require.Equal(t, types.ValidatorIndex(1), vId)
require.Equal(t, [8]byte{1}, payloadID)
// Test zero headRoot returns immediately.
headRoot := service.headRoot()
service.notifyEngineIfChangedHead(ctx, [32]byte{})
require.Equal(t, service.headRoot(), headRoot)
}
func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
opts = append(opts, WithAttestationPool(attestations.NewPool()), WithStateNotifier(&mockBeaconNode{}))
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: tRoot[:]}))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
b := util.NewBeaconBlock()
wb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wb))
service.head.root = r // Old head
require.Equal(t, 1, len(service.cfg.AttPool.ForkchoiceAttestations()))
require.NoError(t, err, service.UpdateHead(ctx))
require.Equal(t, tRoot, service.head.root) // Validate head is the new one
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations())) // Validate att pool is empty
}

View File

@@ -30,9 +30,9 @@ type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashings *ethpb.AttesterSlashing)
}
// ReceiveBlock is a function that defines the the operations (minus pubsub)
// that are performed on blocks that is received from regular sync service. The operations consists of:
// 1. Validate block, apply state transition and update check points
// ReceiveBlock is a function that defines the operations (minus pubsub)
// that are performed on a received block. The operations consist of:
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error {
@@ -85,7 +85,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
defer span.End()
// Apply state transition on the incoming newly received block batches, one by one.
fCheckpoints, jCheckpoints, err := s.onBlockBatch(ctx, blocks, blkRoots)
_, _, err := s.onBlockBatch(ctx, blocks, blkRoots)
if err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)
@@ -94,10 +94,6 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
for i, b := range blocks {
blockCopy := b.Copy()
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,

View File

@@ -141,7 +141,8 @@ func TestService_ReceiveBlock(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
h := [32]byte{'a'}
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, h)
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
@@ -181,7 +182,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wg := sync.WaitGroup{}
@@ -262,7 +263,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
@@ -312,7 +313,7 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
@@ -323,7 +324,7 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
s.genesisTime = time.Now()
@@ -336,7 +337,7 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 10000000})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
s.genesisTime = time.Now()
require.NoError(t, s.checkSaveHotStateDB(context.Background()))

View File

@@ -50,21 +50,22 @@ const headSyncMinEpochsAfterCheckpoint = 128
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot types.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
justifiedBalances *stateBalanceCache
wsVerifier *WeakSubjectivityVerifier
store *store.Store
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot types.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
justifiedBalances *stateBalanceCache
wsVerifier *WeakSubjectivityVerifier
store *store.Store
processAttestationsLock sync.Mutex
}
// config options for the service.
@@ -143,6 +144,7 @@ func (s *Service) Stop() error {
defer s.cancel()
if s.cfg.StateGen != nil && s.head != nil && s.head.state != nil {
// Save the last finalized state so that starting up in the following run will be much faster.
if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, s.head.state.FinalizedCheckpoint().Root); err != nil {
return err
}

View File
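The hunk above adds a processAttestationsLock mutex to the Service struct, which suggests attestation processing and head updates are now serialized across callers. A minimal runnable sketch of that pattern, with hypothetical stand-in names rather than the actual Prysm method bodies:

package main

import (
	"fmt"
	"sync"
)

// service is a stripped-down stand-in for the blockchain Service above;
// only the mutex relevant to this sketch is kept.
type service struct {
	processAttestationsLock sync.Mutex
}

// updateHead illustrates why the struct gained processAttestationsLock:
// concurrent callers (e.g. a per-slot routine and a proposer requesting a
// head refresh) must not drain the attestation pool at the same time.
func (s *service) updateHead() {
	s.processAttestationsLock.Lock()
	defer s.processAttestationsLock.Unlock()
	fmt.Println("processing pooled attestations, then recomputing head")
}

func main() {
	s := &service{}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); s.updateHead() }()
	}
	wg.Wait()
}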

@@ -221,7 +221,8 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
require.NoError(t, err)
trie, _, err := util.DepositTrieFromDeposits(deposits)
require.NoError(t, err)
hashTreeRoot := trie.HashTreeRoot()
hashTreeRoot, err := trie.HashTreeRoot()
require.NoError(t, err)
genState, err := transition.EmptyGenesisState()
require.NoError(t, err)
err = genState.SetEth1Data(&ethpb.Eth1Data{
@@ -500,7 +501,7 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -521,7 +522,7 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
b := util.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -594,7 +595,7 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
@@ -617,7 +618,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)

View File

@@ -37,7 +37,7 @@ func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
// the previously read value. This cache assumes we only want to cache one
// set of balances for a single root (the current justified root).
//
// warning: this is not thread-safe on its own, relies on get() for locking
// WARNING: this is not thread-safe on its own, relies on get() for locking
func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
stateBalanceCacheMiss.Inc()
justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot)

View File

@@ -23,6 +23,13 @@ func (s *Store) JustifiedCheckpt() *ethpb.Checkpoint {
return s.justifiedCheckpt
}
// JustifiedPayloadBlockHash returns the justified payload block hash reflecting justified check point.
func (s *Store) JustifiedPayloadBlockHash() [32]byte {
s.RLock()
defer s.RUnlock()
return s.justifiedPayloadBlockHash
}
// PrevFinalizedCheckpt returns the previous finalized checkpoint in the Store.
func (s *Store) PrevFinalizedCheckpt() *ethpb.Checkpoint {
s.RLock()
@@ -37,6 +44,13 @@ func (s *Store) FinalizedCheckpt() *ethpb.Checkpoint {
return s.finalizedCheckpt
}
// FinalizedPayloadBlockHash returns the finalized payload block hash reflecting finalized check point.
func (s *Store) FinalizedPayloadBlockHash() [32]byte {
s.RLock()
defer s.RUnlock()
return s.finalizedPayloadBlockHash
}
// SetPrevJustifiedCheckpt sets the previous justified checkpoint in the Store.
func (s *Store) SetPrevJustifiedCheckpt(cp *ethpb.Checkpoint) {
s.Lock()
@@ -51,18 +65,20 @@ func (s *Store) SetBestJustifiedCheckpt(cp *ethpb.Checkpoint) {
s.bestJustifiedCheckpt = cp
}
// SetJustifiedCheckpt sets the justified checkpoint in the Store.
func (s *Store) SetJustifiedCheckpt(cp *ethpb.Checkpoint) {
// SetJustifiedCheckptAndPayloadHash sets the justified checkpoint and blockhash in the Store.
func (s *Store) SetJustifiedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
s.Lock()
defer s.Unlock()
s.justifiedCheckpt = cp
s.justifiedPayloadBlockHash = h
}
// SetFinalizedCheckpt sets the finalized checkpoint in the Store.
func (s *Store) SetFinalizedCheckpt(cp *ethpb.Checkpoint) {
// SetFinalizedCheckptAndPayloadHash sets the finalized checkpoint and blockhash in the Store.
func (s *Store) SetFinalizedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
s.Lock()
defer s.Unlock()
s.finalizedCheckpt = cp
s.finalizedPayloadBlockHash = h
}
// SetPrevFinalizedCheckpt sets the previous finalized checkpoint in the Store.

View File
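The setters above now take the checkpoint and its payload block hash together. A self-contained sketch of the design choice (stand-in types, not the Prysm ones): writing both fields under one lock means a reader can never observe a checkpoint paired with a stale hash.

package main

import (
	"fmt"
	"sync"
)

// checkpoint is a stand-in for ethpb.Checkpoint.
type checkpoint struct {
	Epoch uint64
	Root  []byte
}

// store mirrors the shape of the Store above: checkpoint and payload block
// hash are updated atomically under the same RWMutex.
type store struct {
	sync.RWMutex
	finalizedCheckpt          *checkpoint
	finalizedPayloadBlockHash [32]byte
}

func (s *store) SetFinalizedCheckptAndPayloadHash(cp *checkpoint, h [32]byte) {
	s.Lock()
	defer s.Unlock()
	s.finalizedCheckpt = cp
	s.finalizedPayloadBlockHash = h
}

func (s *store) FinalizedPayloadBlockHash() [32]byte {
	s.RLock()
	defer s.RUnlock()
	return s.finalizedPayloadBlockHash
}

func main() {
	s := &store{}
	s.SetFinalizedCheckptAndPayloadHash(&checkpoint{Epoch: 1, Root: []byte{'a'}}, [32]byte{'b'})
	fmt.Printf("finalized payload hash: %x\n", s.FinalizedPayloadBlockHash())
}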

@@ -30,8 +30,10 @@ func Test_store_JustifiedCheckpt(t *testing.T) {
var cp *ethpb.Checkpoint
require.Equal(t, cp, s.JustifiedCheckpt())
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
s.SetJustifiedCheckpt(cp)
h := [32]byte{'b'}
s.SetJustifiedCheckptAndPayloadHash(cp, h)
require.Equal(t, cp, s.JustifiedCheckpt())
require.Equal(t, h, s.JustifiedPayloadBlockHash())
}
func Test_store_FinalizedCheckpt(t *testing.T) {
@@ -39,8 +41,10 @@ func Test_store_FinalizedCheckpt(t *testing.T) {
var cp *ethpb.Checkpoint
require.Equal(t, cp, s.FinalizedCheckpt())
cp = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
s.SetFinalizedCheckpt(cp)
h := [32]byte{'b'}
s.SetFinalizedCheckptAndPayloadHash(cp, h)
require.Equal(t, cp, s.FinalizedCheckpt())
require.Equal(t, h, s.FinalizedPayloadBlockHash())
}
func Test_store_PrevFinalizedCheckpt(t *testing.T) {

View File

@@ -17,9 +17,11 @@ import (
// best_justified_checkpoint: Checkpoint
// proposerBoostRoot: Root
type Store struct {
justifiedCheckpt *ethpb.Checkpoint
finalizedCheckpt *ethpb.Checkpoint
bestJustifiedCheckpt *ethpb.Checkpoint
justifiedCheckpt *ethpb.Checkpoint
justifiedPayloadBlockHash [32]byte
finalizedCheckpt *ethpb.Checkpoint
finalizedPayloadBlockHash [32]byte
bestJustifiedCheckpt *ethpb.Checkpoint
sync.RWMutex
// These are not part of the consensus spec, but we do use them to return gRPC API requests.
// TODO(10094): Consider removing in v3.

View File

@@ -451,6 +451,8 @@ func (s *ChainService) IsOptimisticForRoot(_ context.Context, _ [32]byte) (bool,
return s.Optimistic, nil
}
// UpdateHead mocks the same method in the chain service.
func (s *ChainService) UpdateHead(_ context.Context) error { return nil }
// ReceiveAttesterSlashing mocks the same method in the chain service.
func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {
}
func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {}

View File

@@ -79,7 +79,7 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
store: &store.Store{},
wsVerifier: wv,
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: tt.finalizedEpoch})
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: tt.finalizedEpoch}, [32]byte{})
err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), s.store.FinalizedCheckpt().Epoch)
if tt.wantErr == nil {
require.NoError(t, err)

View File

@@ -430,7 +430,11 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
}
trie, err := trie.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate deposit trie")
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
rootA, err := trie.HashTreeRoot()
require.NoError(t, err)
rootB, err := cachedDeposits.Deposits.HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, rootA, rootB)
}
func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
@@ -488,7 +492,11 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
}
trie, err := trie.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate deposit trie")
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
rootA, err := trie.HashTreeRoot()
require.NoError(t, err)
rootB, err := cachedDeposits.Deposits.HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, rootA, rootB)
}
func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) {

View File

@@ -143,7 +143,8 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
},
}
balances := []uint64{0, 50}
root := depositTrie.HashTreeRoot()
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
beaconState, err := stateAltair.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: registry,
Balances: balances,
@@ -202,7 +203,8 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
dep[0].Data.Signature = make([]byte, 96)
trie, _, err := util.DepositTrieFromDeposits(dep)
require.NoError(t, err)
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositRoot: root[:],
DepositCount: 1,

View File

@@ -173,7 +173,8 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
},
}
balances := []uint64{0, 50}
root := depositTrie.HashTreeRoot()
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: registry,
Balances: balances,
@@ -233,7 +234,8 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
dep[0].Data.Signature = make([]byte, 96)
trie, _, err := util.DepositTrieFromDeposits(dep)
require.NoError(t, err)
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositRoot: root[:],
DepositCount: 1,
@@ -289,7 +291,9 @@ func TestPreGenesisDeposits_SkipInvalidDeposit(t *testing.T) {
require.NoError(t, err)
dep[i].Proof = proof
}
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositRoot: root[:],
DepositCount: 1,
@@ -376,7 +380,9 @@ func TestProcessDeposit_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
},
}
balances := []uint64{0, 50}
root := depositTrie.HashTreeRoot()
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: registry,
Balances: balances,

View File

@@ -41,7 +41,10 @@ func UpdateGenesisEth1Data(state state.BeaconState, deposits []*ethpb.Deposit, e
}
}
depositRoot := t.HashTreeRoot()
depositRoot, err := t.HashTreeRoot()
if err != nil {
return nil, err
}
eth1Data.DepositRoot = depositRoot[:]
err = state.SetEth1Data(eth1Data)
if err != nil {

View File
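Many hunks in this change migrate call sites from root := trie.HashTreeRoot() to the two-value form. A minimal sketch of the call-site pattern, with a hypothetical hasher type standing in for the sparse Merkle trie:

package main

import (
	"errors"
	"fmt"
)

// hasher stands in for the deposit trie: HashTreeRoot now returns an error
// instead of silently yielding a zero root on failure.
type hasher struct{ empty bool }

func (h hasher) HashTreeRoot() ([32]byte, error) {
	if h.empty {
		return [32]byte{}, errors.New("no leaves inserted")
	}
	return [32]byte{1}, nil
}

// depositRoot shows the migrated call site: the error is propagated
// rather than ignored.
func depositRoot(h hasher) ([]byte, error) {
	root, err := h.HashTreeRoot()
	if err != nil {
		return nil, err
	}
	return root[:], nil
}

func main() {
	r, err := depositRoot(hasher{})
	fmt.Println(r, err)
}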

@@ -81,7 +81,7 @@ func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error)
if i != len(children)-1 {
children[i] = children[len(children)-1]
}
node.parent.children = children[:len(children)-2]
node.parent.children = children[:len(children)-1]
break
}
}

View File
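The off-by-one fixed above (len(children)-2 vs len(children)-1) is the classic swap-with-last slice removal. A standalone sketch of why the truncation must drop exactly one element; truncating by two silently discards a sibling, which is what the regression test below asserts against:

package main

import "fmt"

// removeAt deletes the element at index i by swapping it with the last
// element and truncating by one. Truncating by two — the bug fixed above —
// would drop an extra child.
func removeAt(s [][32]byte, i int) [][32]byte {
	if i != len(s)-1 {
		s[i] = s[len(s)-1]
	}
	return s[:len(s)-1]
}

func main() {
	children := [][32]byte{{'b'}, {'c'}, {'d'}}
	children = removeAt(children, 2) // remove invalid child 'd'
	fmt.Println(len(children))       // 2: 'b' and 'c' survive
}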

@@ -203,3 +203,26 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
f.store.proposerBoostLock.RUnlock()
}
// This is a regression test (10565)
//     ----- C
//    /
//   A <- B
//    \
//     ---------- D
// D is invalid
func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'D'}, 1, 1))
_, err := f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'})
require.NoError(t, err)
require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))
}

View File

@@ -24,6 +24,7 @@ func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
// SetOptimisticToValid is called with the root of a block that was returned as
// VALID by the EL.
//
// WARNING: This method returns an error if the root is not found in forkchoice
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) error {
f.store.nodesLock.Lock()

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/params"
@@ -117,7 +118,18 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
if !common.IsHexAddress(ha) {
return fmt.Errorf("%s is not a valid fee recipient address", ha)
}
c.DefaultFeeRecipient = common.HexToAddress(ha)
mixedcaseAddress, err := common.NewMixedcaseAddressFromString(ha)
if err != nil {
return errors.Wrapf(err, "could not decode fee recipient %s", ha)
}
checksumAddress := common.HexToAddress(ha)
if !mixedcaseAddress.ValidChecksum() {
log.Warnf("Fee recipient %s is not a checksum Ethereum address. "+
"The checksummed address is %s and will be used as the fee recipient. "+
"We recommend using a mixed-case address (checksum) "+
"to prevent spelling mistakes in your fee recipient Ethereum address", ha, checksumAddress.Hex())
}
c.DefaultFeeRecipient = checksumAddress
params.OverrideBeaconConfig(c)
return nil
}

View File
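A minimal runnable sketch of the same checksum validation using the go-ethereum helpers this hunk imports (common.IsHexAddress, common.NewMixedcaseAddressFromString, MixedcaseAddress.ValidChecksum); the warning text here is illustrative, not the exact Prysm log line:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// checkFeeRecipient mirrors the validation added above: accept any valid
// hex address, but warn when its casing does not match the EIP-55 checksum,
// and use the checksummed form going forward.
func checkFeeRecipient(ha string) (common.Address, error) {
	if !common.IsHexAddress(ha) {
		return common.Address{}, fmt.Errorf("%s is not a valid fee recipient address", ha)
	}
	mixed, err := common.NewMixedcaseAddressFromString(ha)
	if err != nil {
		return common.Address{}, err
	}
	addr := common.HexToAddress(ha)
	if !mixed.ValidChecksum() {
		fmt.Printf("warning: %s is not a checksum Ethereum address; using %s\n", ha, addr.Hex())
	}
	return addr, nil
}

func main() {
	// All-lowercase input triggers the warning; the checksummed form is returned.
	addr, err := checkFeeRecipient("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
	fmt.Println(addr.Hex(), err)
}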

@@ -87,6 +87,7 @@ func TestConfigureProofOfWork(t *testing.T) {
func TestConfigureExecutionSetting(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
app := cli.App{}
set := flag.NewFlagSet("test", 0)
@@ -102,11 +103,15 @@ func TestConfigureExecutionSetting(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
require.NoError(t, set.Set(flags.SuggestedFeeRecipient.Name, "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"))
assert.LogsContain(t, hook,
"is not a checksum Ethereum address",
)
require.NoError(t, set.Set(flags.SuggestedFeeRecipient.Name, "0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"))
cliCtx = cli.NewContext(&app, set, nil)
err = configureExecutionSetting(cliCtx)
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
assert.Equal(t, common.HexToAddress("0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"), params.BeaconConfig().DefaultFeeRecipient)
}
func TestConfigureNetwork(t *testing.T) {

View File

@@ -779,6 +779,7 @@ func (b *BeaconNode) registerRPCService() error {
PeerManager: p2pService,
MetadataProvider: p2pService,
ChainInfoFetcher: chainService,
HeadUpdater: chainService,
HeadFetcher: chainService,
CanonicalFetcher: chainService,
ForkFetcher: chainService,

View File

@@ -22,7 +22,7 @@ const repeatedSearches = 2 * searchThreshold
// BlockExists returns true if the block exists, its height and any possible error encountered.
func (s *Service) BlockExists(ctx context.Context, hash common.Hash) (bool, *big.Int, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockExists")
ctx, span := trace.StartSpan(ctx, "powchain.BlockExists")
defer span.End()
if exists, hdrInfo, err := s.headerCache.HeaderInfoByHash(hash); exists || err != nil {
@@ -45,24 +45,9 @@ func (s *Service) BlockExists(ctx context.Context, hash common.Hash) (bool, *big
return true, new(big.Int).Set(header.Number), nil
}
// BlockExistsWithCache returns true if the block exists in cache, its height and any possible error encountered.
func (s *Service) BlockExistsWithCache(ctx context.Context, hash common.Hash) (bool, *big.Int, error) {
_, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockExistsWithCache")
defer span.End()
if exists, hdrInfo, err := s.headerCache.HeaderInfoByHash(hash); exists || err != nil {
if err != nil {
return false, nil, err
}
span.AddAttributes(trace.BoolAttribute("blockCacheHit", true))
return true, hdrInfo.Number, nil
}
span.AddAttributes(trace.BoolAttribute("blockCacheHit", false))
return false, nil, nil
}
// BlockHashByHeight returns the block hash of the block at the given height.
func (s *Service) BlockHashByHeight(ctx context.Context, height *big.Int) (common.Hash, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockHashByHeight")
ctx, span := trace.StartSpan(ctx, "powchain.BlockHashByHeight")
defer span.End()
if exists, hInfo, err := s.headerCache.HeaderInfoByHeight(height); exists || err != nil {
@@ -90,9 +75,9 @@ func (s *Service) BlockHashByHeight(ctx context.Context, height *big.Int) (commo
return header.Hash(), nil
}
// BlockTimeByHeight fetches an eth1.0 block timestamp by its height.
// BlockTimeByHeight fetches an eth1 block timestamp by its height.
func (s *Service) BlockTimeByHeight(ctx context.Context, height *big.Int) (uint64, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockTimeByHeight")
ctx, span := trace.StartSpan(ctx, "powchain.BlockTimeByHeight")
defer span.End()
if s.eth1DataFetcher == nil {
err := errors.New("nil eth1DataFetcher")
@@ -111,7 +96,7 @@ func (s *Service) BlockTimeByHeight(ctx context.Context, height *big.Int) (uint6
// This is an optimized version with the worst case being O(2*repeatedSearches) number of calls
// while in best case search for the block is performed in O(1).
func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.HeaderInfo, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockByTimestamp")
ctx, span := trace.StartSpan(ctx, "powchain.BlockByTimestamp")
defer span.End()
s.latestEth1DataLock.RLock()
@@ -122,15 +107,14 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
if time > latestBlkTime {
return nil, errors.Errorf("provided time is later than the current eth1 head. %d > %d", time, latestBlkTime)
}
// Initialize a pointer to eth1 chain's history to start our search
// from.
// Initialize a pointer to eth1 chain's history to start our search from.
cursorNum := big.NewInt(0).SetUint64(latestBlkHeight)
cursorTime := latestBlkTime
numOfBlocks := uint64(0)
estimatedBlk := cursorNum.Uint64()
maxTimeBuffer := searchThreshold * params.BeaconConfig().SecondsPerETH1Block
// Terminate if we cant find an acceptable block after
// Terminate if we can't find an acceptable block after
// repeated searches.
for i := 0; i < repeatedSearches; i++ {
if ctx.Err() != nil {
@@ -157,12 +141,12 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
// time - buffer <= head.time <= time + buffer
break
}
hinfo, err := s.retrieveHeaderInfo(ctx, estimatedBlk)
hInfo, err := s.retrieveHeaderInfo(ctx, estimatedBlk)
if err != nil {
return nil, err
}
cursorNum = hinfo.Number
cursorTime = hinfo.Time
cursorNum = hInfo.Number
cursorTime = hInfo.Time
}
// Exit early if we get the desired block.
@@ -170,15 +154,15 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
return s.retrieveHeaderInfo(ctx, cursorNum.Uint64())
}
if cursorTime > time {
return s.findLessTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
return s.findMaxTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
}
return s.findMoreTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
return s.findMinTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
}
// Performs a search to find a target eth1 block which is earlier than or equal to the
// target time. This method is used when head.time > targetTime
func (s *Service) findLessTargetEth1Block(ctx context.Context, startBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := startBlk; ; bn = big.NewInt(0).Sub(bn, big.NewInt(1)) {
func (s *Service) findMaxTargetEth1Block(ctx context.Context, upperBoundBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := upperBoundBlk; ; bn = big.NewInt(0).Sub(bn, big.NewInt(1)) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -194,8 +178,8 @@ func (s *Service) findLessTargetEth1Block(ctx context.Context, startBlk *big.Int
// Performs a search to find a target eth1 block which is just earlier than or equal to the
// target time. This method is used when head.time < targetTime
func (s *Service) findMoreTargetEth1Block(ctx context.Context, startBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := startBlk; ; bn = big.NewInt(0).Add(bn, big.NewInt(1)) {
func (s *Service) findMinTargetEth1Block(ctx context.Context, lowerBoundBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
for bn := lowerBoundBlk; ; bn = big.NewInt(0).Add(bn, big.NewInt(1)) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -203,8 +187,7 @@ func (s *Service) findMoreTargetEth1Block(ctx context.Context, startBlk *big.Int
if err != nil {
return nil, err
}
// Return the last block before we hit the threshold
// time.
// Return the last block before we hit the threshold time.
if info.Time > targetTime {
return s.retrieveHeaderInfo(ctx, info.Number.Uint64()-1)
}

View File
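The renamed search helpers above walk in opposite directions from an estimated block: findMaxTargetEth1Block descends from an upper bound when the cursor overshot the target time, findMinTargetEth1Block ascends from a lower bound when it undershot. A simplified sketch over an in-memory chain (stand-in types, fixed 14-second block times as an assumption):

package main

import "fmt"

// header is a stand-in for the eth1 header info used above.
type header struct {
	number uint64
	time   uint64
}

// findMaxTarget walks backwards from an upper bound to the latest block
// whose timestamp is <= targetTime (used when head.time > targetTime).
func findMaxTarget(chain []header, upperBound, targetTime uint64) header {
	for bn := upperBound; ; bn-- {
		if chain[bn].time <= targetTime {
			return chain[bn]
		}
	}
}

// findMinTarget walks forwards from a lower bound and returns the last
// block before the threshold time is crossed (used when head.time < targetTime).
func findMinTarget(chain []header, lowerBound, targetTime uint64) header {
	for bn := lowerBound; ; bn++ {
		if chain[bn].time > targetTime {
			return chain[bn-1]
		}
	}
}

func main() {
	// One block every 14 seconds.
	chain := make([]header, 100)
	for i := range chain {
		chain[i] = header{number: uint64(i), time: uint64(i) * 14}
	}
	fmt.Println(findMaxTarget(chain, 90, 700).number) // 50
	fmt.Println(findMinTarget(chain, 10, 700).number) // 50
}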

@@ -61,7 +61,7 @@ func TestLatestMainchainInfo_OK(t *testing.T) {
require.NoError(t, err)
tickerChan := make(chan time.Time)
web3Service.headTicker = &time.Ticker{C: tickerChan}
web3Service.eth1HeadTicker = &time.Ticker{C: tickerChan}
tickerChan <- time.Now()
web3Service.cancel()
exitRoutine <- true
@@ -207,51 +207,6 @@ func TestBlockExists_UsesCachedBlockInfo(t *testing.T) {
require.Equal(t, 0, height.Cmp(header.Number))
}
func TestBlockExistsWithCache_UsesCachedHeaderInfo(t *testing.T) {
beaconDB := dbutil.SetupDB(t)
server, endpoint, err := mockPOW.SetupRPCServer()
require.NoError(t, err)
t.Cleanup(func() {
server.Stop()
})
web3Service, err := NewService(context.Background(),
WithHttpEndpoints([]string{endpoint}),
WithDatabase(beaconDB),
)
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
header := &gethTypes.Header{
Number: big.NewInt(0),
}
err = web3Service.headerCache.AddHeader(header)
require.NoError(t, err)
exists, height, err := web3Service.BlockExistsWithCache(context.Background(), header.Hash())
require.NoError(t, err, "Could not get block hash with given height")
require.Equal(t, true, exists)
require.Equal(t, 0, height.Cmp(header.Number))
}
func TestBlockExistsWithCache_HeaderNotCached(t *testing.T) {
beaconDB := dbutil.SetupDB(t)
server, endpoint, err := mockPOW.SetupRPCServer()
require.NoError(t, err)
t.Cleanup(func() {
server.Stop()
})
web3Service, err := NewService(context.Background(),
WithHttpEndpoints([]string{endpoint}),
WithDatabase(beaconDB),
)
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
exists, height, err := web3Service.BlockExistsWithCache(context.Background(), common.BytesToHash([]byte("hash")))
require.NoError(t, err, "Could not get block hash with given height")
require.Equal(t, false, exists)
require.Equal(t, (*big.Int)(nil), height)
}
func TestService_BlockNumberByTimestamp(t *testing.T) {
beaconDB := dbutil.SetupDB(t)
testAcc, err := mock.Setup()
@@ -313,11 +268,11 @@ func TestService_BlockNumberByTimestampLessTargetTime(t *testing.T) {
defer cancel()
// Provide an unattainable target time
_, err = web3Service.findLessTargetEth1Block(ctx, hd.Number, hd.Time/2)
_, err = web3Service.findMaxTargetEth1Block(ctx, hd.Number, hd.Time/2)
require.ErrorContains(t, context.DeadlineExceeded.Error(), err)
// Provide an attainable target time
blk, err := web3Service.findLessTargetEth1Block(context.Background(), hd.Number, hd.Time-5)
blk, err := web3Service.findMaxTargetEth1Block(context.Background(), hd.Number, hd.Time-5)
require.NoError(t, err)
require.NotEqual(t, hd.Number.Uint64(), blk.Number.Uint64(), "retrieved block is not less than the head")
}
@@ -351,11 +306,11 @@ func TestService_BlockNumberByTimestampMoreTargetTime(t *testing.T) {
defer cancel()
// Provide an unattainable target time with respect to head
_, err = web3Service.findMoreTargetEth1Block(ctx, big.NewInt(0).Div(hd.Number, big.NewInt(2)), hd.Time)
_, err = web3Service.findMinTargetEth1Block(ctx, big.NewInt(0).Div(hd.Number, big.NewInt(2)), hd.Time)
require.ErrorContains(t, context.DeadlineExceeded.Error(), err)
// Provide an attainable target time with respect to head
blk, err := web3Service.findMoreTargetEth1Block(context.Background(), big.NewInt(0).Sub(hd.Number, big.NewInt(5)), hd.Time)
blk, err := web3Service.findMinTargetEth1Block(context.Background(), big.NewInt(0).Sub(hd.Number, big.NewInt(5)), hd.Time)
require.NoError(t, err)
require.Equal(t, hd.Number.Uint64(), blk.Number.Uint64(), "retrieved block is not equal to the head")
}

View File

@@ -138,7 +138,8 @@ func TestProcessDeposit_InvalidPublicKey(t *testing.T) {
deposits[0].Proof, err = trie.MerkleProof(0)
require.NoError(t, err)
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositCount: 1,
@@ -178,7 +179,8 @@ func TestProcessDeposit_InvalidSignature(t *testing.T) {
trie, err := trie.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err)
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositCount: 1,
@@ -213,7 +215,8 @@ func TestProcessDeposit_UnableToVerify(t *testing.T) {
trie, _, err := util.DepositTrieFromDeposits(deposits)
require.NoError(t, err)
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositCount: 1,
DepositRoot: root[:],
@@ -265,7 +268,8 @@ func TestProcessDeposit_IncompleteDeposit(t *testing.T) {
trie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err)
root := trie.HashTreeRoot()
root, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data := &ethpb.Eth1Data{
DepositCount: 1,
DepositRoot: root[:],
@@ -281,7 +285,8 @@ func TestProcessDeposit_IncompleteDeposit(t *testing.T) {
for i := 0; i < int(factor-1); i++ {
assert.NoError(t, trie.Insert(dataRoot[:], i))
trieRoot := trie.HashTreeRoot()
trieRoot, err := trie.HashTreeRoot()
require.NoError(t, err)
eth1Data.DepositRoot = trieRoot[:]
eth1Data.DepositCount = uint64(i + 1)

View File

@@ -33,6 +33,10 @@ const (
ExecutionBlockByHashMethod = "eth_getBlockByHash"
// ExecutionBlockByNumberMethod request string for JSON-RPC.
ExecutionBlockByNumberMethod = "eth_getBlockByNumber"
// Defines the seconds to wait before timing out engine endpoints with block execution semantics (newPayload, forkchoiceUpdated).
payloadAndForkchoiceUpdatedTimeout = 8 * time.Second
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
defaultEngineTimeout = time.Second
)
// ForkchoiceUpdatedResponse is the response kind received by the
@@ -65,7 +69,9 @@ func (s *Service) NewPayload(ctx context.Context, payload *pb.ExecutionPayload)
defer func() {
newPayloadLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
d := time.Now().Add(payloadAndForkchoiceUpdatedTimeout)
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
result := &pb.PayloadStatus{}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethod, payload)
if err != nil {
@@ -99,6 +105,9 @@ func (s *Service) ForkchoiceUpdated(
forkchoiceUpdatedLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
d := time.Now().Add(payloadAndForkchoiceUpdatedTimeout)
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
result := &ForkchoiceUpdatedResponse{}
err := s.rpcClient.CallContext(ctx, result, ForkchoiceUpdatedMethod, state, attrs)
if err != nil {
@@ -132,6 +141,9 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte) (*pb.Execut
getPayloadLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
d := time.Now().Add(defaultEngineTimeout)
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
result := &pb.ExecutionPayload{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethod, pb.PayloadIDBytes(payloadId))
return result, handleRPCError(err)
@@ -147,10 +159,14 @@ func (s *Service) ExchangeTransitionConfiguration(
// We set terminal block number to 0 as the parameter is not set on the consensus layer.
zeroBigNum := big.NewInt(0)
cfg.TerminalBlockNumber = zeroBigNum.Bytes()
d := time.Now().Add(defaultEngineTimeout)
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
result := &pb.TransitionConfiguration{}
if err := s.rpcClient.CallContext(ctx, result, ExchangeTransitionConfigurationMethod, cfg); err != nil {
return handleRPCError(err)
}
// We surface an error to the user if local configuration settings mismatch
// according to the response from the execution node.
cfgTerminalHash := params.BeaconConfig().TerminalBlockHash[:]

View File
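Each engine endpoint above now derives a bounded context before calling the execution client. A small sketch of the pattern; slowCall is a hypothetical stand-in for rpcClient.CallContext, and the 50ms timeout is only to keep the demo fast (the diff uses 8s for newPayload/forkchoiceUpdated and 1s otherwise):

package main

import (
	"context"
	"fmt"
	"time"
)

// callWithDeadline bounds an RPC call so a stalled execution client
// cannot hold up block processing indefinitely.
func callWithDeadline(ctx context.Context, timeout time.Duration, slowCall func(context.Context) error) error {
	d := time.Now().Add(timeout)
	ctx, cancel := context.WithDeadline(ctx, d)
	defer cancel()
	return slowCall(ctx)
}

func main() {
	err := callWithDeadline(context.Background(), 50*time.Millisecond, func(ctx context.Context) error {
		select {
		case <-time.After(time.Second): // pretend the engine hangs
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	})
	fmt.Println(err) // context deadline exceeded
}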

@@ -19,8 +19,6 @@ var (
ErrUnknownPayload = errors.New("payload does not exist or is not available")
// ErrUnknownPayloadStatus when the payload status is unknown.
ErrUnknownPayloadStatus = errors.New("unknown payload status")
// ErrUnsupportedScheme for unsupported URL schemes.
ErrUnsupportedScheme = errors.New("unsupported url scheme, only http(s) and ipc are supported")
// ErrConfigMismatch when the execution node's terminal total difficulty or
// terminal block hash received via the API mismatches Prysm's configuration value.
ErrConfigMismatch = errors.New("execution client configuration mismatch")

View File

@@ -36,7 +36,7 @@ var (
const eth1DataSavingInterval = 1000
const maxTolerableDifference = 50
const defaultEth1HeaderReqLimit = uint64(1000)
const depositlogRequestLimit = 10000
const depositLogRequestLimit = 10000
const additiveFactorMultiplier = 0.10
const multiplicativeDecreaseDivisor = 2
@@ -57,7 +57,7 @@ func (s *Service) Eth2GenesisPowchainInfo() (uint64, *big.Int) {
return s.chainStartData.GenesisTime, big.NewInt(int64(s.chainStartData.GenesisBlock))
}
// ProcessETH1Block processes the logs from the provided eth1Block.
// ProcessETH1Block processes logs from the provided eth1 block.
func (s *Service) ProcessETH1Block(ctx context.Context, blkNum *big.Int) error {
query := ethereum.FilterQuery{
Addresses: []common.Address{
@@ -80,7 +80,7 @@ func (s *Service) ProcessETH1Block(ctx context.Context, blkNum *big.Int) error {
}
}
if !s.chainStartData.Chainstarted {
if err := s.checkBlockNumberForChainStart(ctx, blkNum); err != nil {
if err := s.processChainStartFromBlockNum(ctx, blkNum); err != nil {
return err
}
}
@@ -88,7 +88,7 @@ func (s *Service) ProcessETH1Block(ctx context.Context, blkNum *big.Int) error {
}
// ProcessLog is the main method which handles the processing of all
// logs from the deposit contract on the ETH1.0 chain.
// logs from the deposit contract on the eth1 chain.
func (s *Service) ProcessLog(ctx context.Context, depositLog gethTypes.Log) error {
s.processingLock.RLock()
defer s.processingLock.RUnlock()
@@ -107,7 +107,7 @@ func (s *Service) ProcessLog(ctx context.Context, depositLog gethTypes.Log) erro
}
// ProcessDepositLog processes the log which had been received from
// the ETH1.0 chain by trying to ascertain which participant deposited
// the eth1 chain by trying to ascertain which participant deposited
// in the contract.
func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Log) error {
pubkey, withdrawalCredentials, amount, signature, merkleTreeIndex, err := contracts.UnpackDepositLogData(depositLog.Data)
@@ -140,7 +140,7 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
depositHash, err := depositData.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "Unable to determine hashed value of deposit")
return errors.Wrap(err, "unable to determine hashed value of deposit")
}
// Defensive check to validate incoming index.
@@ -158,20 +158,27 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
if !s.chainStartData.Chainstarted {
proof, err := s.depositTrie.MerkleProof(int(index))
if err != nil {
return errors.Wrap(err, "Unable to generate merkle proof for deposit")
return errors.Wrap(err, "unable to generate merkle proof for deposit")
}
deposit.Proof = proof
}
// We always store all historical deposits in the DB.
err = s.cfg.depositCache.InsertDeposit(ctx, deposit, depositLog.BlockNumber, index, s.depositTrie.HashTreeRoot())
root, err := s.depositTrie.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "unable to determine root of deposit trie")
}
err = s.cfg.depositCache.InsertDeposit(ctx, deposit, depositLog.BlockNumber, index, root)
if err != nil {
return errors.Wrap(err, "unable to insert deposit into cache")
}
validData := true
if !s.chainStartData.Chainstarted {
s.chainStartData.ChainstartDeposits = append(s.chainStartData.ChainstartDeposits, deposit)
root := s.depositTrie.HashTreeRoot()
root, err := s.depositTrie.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "unable to determine root of deposit trie")
}
eth1Data := &ethpb.Eth1Data{
DepositRoot: root[:],
DepositCount: uint64(len(s.chainStartData.ChainstartDeposits)),
@@ -181,7 +188,11 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
validData = false
}
} else {
s.cfg.depositCache.InsertPendingDeposit(ctx, deposit, depositLog.BlockNumber, index, s.depositTrie.HashTreeRoot())
root, err := s.depositTrie.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "unable to determine root of deposit trie")
}
s.cfg.depositCache.InsertPendingDeposit(ctx, deposit, depositLog.BlockNumber, index, root)
}
if validData {
log.WithFields(logrus.Fields{
@@ -215,7 +226,7 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
}
// ProcessChainStart processes the log which had been received from
// the ETH1.0 chain by trying to determine when to start the beacon chain.
// the eth1 chain by trying to determine when to start the beacon chain.
func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte, blockNumber *big.Int) {
s.chainStartData.Chainstarted = true
s.chainStartData.GenesisBlock = blockNumber.Uint64()
@@ -225,12 +236,16 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
for i := range s.chainStartData.ChainstartDeposits {
proof, err := s.depositTrie.MerkleProof(i)
if err != nil {
log.Errorf("Unable to generate deposit proof %v", err)
log.Errorf("unable to generate deposit proof %v", err)
}
s.chainStartData.ChainstartDeposits[i].Proof = proof
}
root := s.depositTrie.HashTreeRoot()
root, err := s.depositTrie.HashTreeRoot()
if err != nil { // This should never happen.
log.WithError(err).Error("unable to determine root of deposit trie, aborting chain start")
return
}
s.chainStartData.Eth1Data = &ethpb.Eth1Data{
DepositCount: uint64(len(s.chainStartData.ChainstartDeposits)),
DepositRoot: root[:],
@@ -247,15 +262,15 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
},
})
if err := s.savePowchainData(s.ctx); err != nil {
// continue on, if the save fails as this will get re-saved
// continue on if the save fails as this will get re-saved
// in the next interval.
log.Error(err)
}
}
// createGenesisTime adds in the genesis delay to the eth1 block time
// on which it was triggered.
func createGenesisTime(timeStamp uint64) uint64 {
// adds in the genesis delay to the eth1 block time
// on which it was triggered.
return timeStamp + params.BeaconConfig().GenesisDelay
}
@@ -292,7 +307,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
}
return nil
}
latestFollowHeight, err := s.followBlockHeight(ctx)
latestFollowHeight, err := s.followedBlockHeight(ctx)
if err != nil {
return err
}
@@ -318,7 +333,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
remainingLogs := logCount - uint64(s.lastReceivedMerkleIndex+1)
// only change the end block if the remaining logs are below the required log limit.
// reset our query and end block in this case.
withinLimit := remainingLogs < depositlogRequestLimit
withinLimit := remainingLogs < depositLogRequestLimit
aboveFollowHeight := end >= latestFollowHeight
if withinLimit && aboveFollowHeight {
query.ToBlock = big.NewInt(0).SetUint64(latestFollowHeight)
@@ -413,9 +428,9 @@ func (s *Service) processPastLogs(ctx context.Context) error {
// logs from the period last polled to now.
func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
// We request the nth block behind the current head, in order to have
// stabilized logs when we retrieve it from the 1.0 chain.
// stabilized logs when we retrieve it from the eth1 chain.
requestedBlock, err := s.followBlockHeight(ctx)
requestedBlock, err := s.followedBlockHeight(ctx)
if err != nil {
return err
}
@@ -457,18 +472,17 @@ func (s *Service) retrieveBlockHashAndTime(ctx context.Context, blkNum *big.Int)
return bHash, timeStamp, nil
}
// checkBlockNumberForChainStart checks the given block number for if chainstart has occurred.
func (s *Service) checkBlockNumberForChainStart(ctx context.Context, blkNum *big.Int) error {
func (s *Service) processChainStartFromBlockNum(ctx context.Context, blkNum *big.Int) error {
bHash, timeStamp, err := s.retrieveBlockHashAndTime(ctx, blkNum)
if err != nil {
return err
}
s.checkForChainstart(ctx, bHash, blkNum, timeStamp)
s.processChainStartIfReady(ctx, bHash, blkNum, timeStamp)
return nil
}
func (s *Service) checkHeaderForChainstart(ctx context.Context, header *gethTypes.Header) {
s.checkForChainstart(ctx, header.Hash(), header.Number, header.Time)
func (s *Service) processChainStartFromHeader(ctx context.Context, header *gethTypes.Header) {
s.processChainStartIfReady(ctx, header.Hash(), header.Number, header.Time)
}
func (s *Service) checkHeaderRange(ctx context.Context, start, end uint64, headersMap map[uint64]*gethTypes.Header,
@@ -484,7 +498,7 @@ func (s *Service) checkHeaderRange(ctx context.Context, start, end uint64, heade
i--
continue
}
s.checkHeaderForChainstart(ctx, h)
s.processChainStartFromHeader(ctx, h)
}
}
return nil
@@ -504,7 +518,7 @@ func (s *Service) currentCountAndTime(ctx context.Context, blockTime uint64) (ui
return valCount, createGenesisTime(blockTime)
}
func (s *Service) checkForChainstart(ctx context.Context, blockHash [32]byte, blockNumber *big.Int, blockTime uint64) {
func (s *Service) processChainStartIfReady(ctx context.Context, blockHash [32]byte, blockNumber *big.Int, blockTime uint64) {
valCount, genesisTime := s.currentCountAndTime(ctx, blockTime)
if valCount == 0 {
return
@@ -516,7 +530,7 @@ func (s *Service) checkForChainstart(ctx context.Context, blockHash [32]byte, bl
}
}
// save all powchain related metadata to disk.
// savePowchainData saves all powchain related metadata to disk.
func (s *Service) savePowchainData(ctx context.Context) error {
var pbState *ethpb.BeaconState
var err error

View File

@@ -589,7 +589,7 @@ func TestCheckForChainstart_NoValidator(t *testing.T) {
require.NoError(t, err, "Unable to set up simulated backend")
beaconDB := testDB.SetupDB(t)
s := newPowchainService(t, testAcc, beaconDB)
s.checkForChainstart(context.Background(), [32]byte{}, nil, 0)
s.processChainStartIfReady(context.Background(), [32]byte{}, nil, 0)
require.LogsDoNotContain(t, hook, "Could not determine active validator count from pre genesis state")
}

View File

@@ -13,7 +13,6 @@ import (
"sync"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -98,7 +97,6 @@ type POWBlockFetcher interface {
BlockByTimestamp(ctx context.Context, time uint64) (*types.HeaderInfo, error)
BlockHashByHeight(ctx context.Context, height *big.Int) (common.Hash, error)
BlockExists(ctx context.Context, hash common.Hash) (bool, *big.Int, error)
BlockExistsWithCache(ctx context.Context, hash common.Hash) (bool, *big.Int, error)
}
// Chain defines a standard interface for the powchain service in Prysm.
@@ -114,7 +112,6 @@ type RPCDataFetcher interface {
Close()
HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error)
HeaderByHash(ctx context.Context, hash common.Hash) (*gethTypes.Header, error)
SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error)
}
// RPCClient defines the rpc methods required to interact with the eth1 node.
@@ -139,10 +136,10 @@ type config struct {
}
// Service fetches important information about the canonical
// Ethereum ETH1.0 chain via a web3 endpoint using an ethclient. The Random
// Beacon Chain requires synchronization with the ETH1.0 chain's current
// blockhash, block number, and access to logs within the
// Validator Registration Contract on the ETH1.0 chain to kick off the beacon
// eth1 chain via a web3 endpoint using an ethclient.
// The beacon chain requires synchronization with the eth1 chain's current
// block hash, block number, and access to logs within the
// Validator Registration Contract on the eth1 chain to kick off the beacon
// chain's validator registration process.
type Service struct {
connectedETH1 bool
@@ -152,7 +149,7 @@ type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
headTicker *time.Ticker
eth1HeadTicker *time.Ticker
httpLogger bind.ContractFilterer
eth1DataFetcher RPCDataFetcher
rpcClient RPCClient
@@ -173,11 +170,11 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
if err != nil {
cancel()
return nil, errors.Wrap(err, "could not setup deposit trie")
return nil, errors.Wrap(err, "could not set up deposit trie")
}
genState, err := transition.EmptyGenesisState()
if err != nil {
return nil, errors.Wrap(err, "could not setup genesis state")
return nil, errors.Wrap(err, "could not set up genesis state")
}
s := &Service{
@@ -201,7 +198,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
},
lastReceivedMerkleIndex: -1,
preGenesisState: genState,
headTicker: time.NewTicker(time.Duration(params.BeaconConfig().SecondsPerETH1Block) * time.Second),
eth1HeadTicker: time.NewTicker(time.Duration(params.BeaconConfig().SecondsPerETH1Block) * time.Second),
}
for _, opt := range opts {
@@ -225,7 +222,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
return s, nil
}
// Start a web3 service's main event loop.
// Start the powchain service's main event loop.
func (s *Service) Start() {
if err := s.setupExecutionClientConnections(s.ctx, s.cfg.currHttpEndpoint); err != nil {
log.WithError(err).Error("Could not connect to execution endpoint")
@@ -372,7 +369,7 @@ func (s *Service) ETH1ConnectionErrors() []error {
// refers to the latest eth1 block which follows the condition: eth1_timestamp +
// SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= current_unix_time
func (s *Service) followBlockHeight(_ context.Context) (uint64, error) {
func (s *Service) followedBlockHeight(_ context.Context) (uint64, error) {
latestValidBlock := uint64(0)
if s.latestEth1Data.BlockHeight > params.BeaconConfig().Eth1FollowDistance {
latestValidBlock = s.latestEth1Data.BlockHeight - params.BeaconConfig().Eth1FollowDistance
@@ -386,8 +383,7 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
}
s.cfg.depositCache.InsertDepositContainers(ctx, ctrs)
if !s.chainStartData.Chainstarted {
// do not add to pending cache
// if no genesis state exists.
// Do not add to pending cache if no genesis state exists.
validDepositsCount.Add(float64(s.preGenesisState.Eth1DepositIndex()))
return nil
}
@@ -395,7 +391,7 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
if err != nil {
return err
}
// Default to all deposits post-genesis deposits in
// Default to all post-genesis deposits in
// the event we cannot find a finalized state.
currIndex := genesisState.Eth1DepositIndex()
chkPt, err := s.cfg.beaconDB.FinalizedCheckpoint(ctx)
@@ -411,17 +407,17 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
// Set deposit index to the one in the current archived state.
currIndex = fState.Eth1DepositIndex()
// when a node pauses for some time and starts again, the deposits to finalize
// accumulates. we finalize them here before we are ready to receive a block.
// When a node pauses for some time and starts again, the deposits to finalize
// accumulate. We finalize them here before we are ready to receive a block.
// Otherwise, the first few blocks will be slower to compute as we will
// hold the lock and be busy finalizing the deposits.
// The deposit index in the state is always the index of the next deposit
// to be included(rather than the last one to be processed). This was most likely
// to be included (rather than the last one to be processed). This was most likely
// done as the state cannot represent signed integers.
actualIndex := int64(currIndex) - 1 // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
s.cfg.depositCache.InsertFinalizedDeposits(ctx, actualIndex)
// Deposit proofs are only used during state transition and can be safely removed to save space.
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.depositCache.PruneProofs(ctx, actualIndex); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
@@ -498,8 +494,7 @@ func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*gethTypes
return headers, nil
}
// safelyHandleHeader will recover and log any panic that occurs from the
// block
// safelyHandlePanic will recover and log any panic that occurs from the block
func safelyHandlePanic() {
if r := recover(); r != nil {
log.WithFields(logrus.Fields{
@@ -522,7 +517,7 @@ func (s *Service) handleETH1FollowDistance() {
log.Warn("Execution client is not syncing")
}
if !s.chainStartData.Chainstarted {
if err := s.checkBlockNumberForChainStart(ctx, big.NewInt(int64(s.latestEth1Data.LastRequestedBlock))); err != nil {
if err := s.processChainStartFromBlockNum(ctx, big.NewInt(int64(s.latestEth1Data.LastRequestedBlock))); err != nil {
s.runError = err
log.Error(err)
return
@@ -530,9 +525,8 @@ func (s *Service) handleETH1FollowDistance() {
}
// If the last requested block has not changed,
// we do not request batched logs as this means there are no new
// logs for the powchain service to process. Also is a potential
// failure condition as would mean we have not respected the protocol
// threshold.
// logs for the powchain service to process. Also it is a potential
// failure condition as it would mean we have not respected the protocol threshold.
if s.latestEth1Data.LastRequestedBlock == s.latestEth1Data.BlockHeight {
log.Error("Beacon node is not respecting the follow distance")
return
@@ -595,7 +589,7 @@ func (s *Service) initPOWService() {
if s.chainStartData.Chainstarted && s.chainStartData.GenesisBlock == 0 {
genHash := common.BytesToHash(s.chainStartData.Eth1Data.BlockHash)
genBlock := s.chainStartData.GenesisBlock
// In the event our provided chainstart data references a non-existent blockhash
// In the event our provided chainstart data references a non-existent block hash,
// we assume the genesis block to be 0.
if genHash != [32]byte{} {
genHeader, err := s.eth1DataFetcher.HeaderByHash(ctx, genHash)
@@ -618,7 +612,7 @@ func (s *Service) initPOWService() {
}
}
// run subscribes to all the services for the ETH1.0 chain.
// run subscribes to all the services for the eth1 chain.
func (s *Service) run(done <-chan struct{}) {
s.runError = nil
@@ -636,7 +630,7 @@ func (s *Service) run(done <-chan struct{}) {
s.updateConnectedETH1(false)
log.Debug("Context closed, exiting goroutine")
return
case <-s.headTicker.C:
case <-s.eth1HeadTicker.C:
head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
if err != nil {
s.pollConnectionStatus(s.ctx)
@@ -692,10 +686,10 @@ func (s *Service) logTillChainStart(ctx context.Context) {
}
// cacheHeadersForEth1DataVote makes sure that voting for eth1data after startup utilizes cached headers
// instead of making multiple RPC requests to the ETH1 endpoint.
// instead of making multiple RPC requests to the eth1 endpoint.
func (s *Service) cacheHeadersForEth1DataVote(ctx context.Context) error {
// Find the end block to request from.
end, err := s.followBlockHeight(ctx)
end, err := s.followedBlockHeight(ctx)
if err != nil {
return err
}
@@ -739,12 +733,12 @@ func (s *Service) cacheBlockHeaders(start, end uint64) error {
return nil
}
// determines the earliest voting block from which to start caching all our previous headers from.
// Determines the earliest voting block from which to start caching all our previous headers.
func (s *Service) determineEarliestVotingBlock(ctx context.Context, followBlock uint64) (uint64, error) {
genesisTime := s.chainStartData.GenesisTime
currSlot := slots.CurrentSlot(genesisTime)
// In the event genesis has not occurred yet, we just request go back follow_distance blocks.
// In the event genesis has not occurred yet, we just request to go back follow_distance blocks.
if genesisTime == 0 || currSlot == 0 {
earliestBlk := uint64(0)
if followBlock > params.BeaconConfig().Eth1FollowDistance {
@@ -791,7 +785,7 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
return nil
}
// validates that all deposit containers are valid and have their relevant indices
// Validates that all deposit containers are valid and have their relevant indices
// in order.
func validateDepositContainers(ctrs []*ethpb.DepositContainer) bool {
ctrLen := len(ctrs)
@@ -814,7 +808,7 @@ func validateDepositContainers(ctrs []*ethpb.DepositContainer) bool {
return true
}
// validates the current powchain data saved and makes sure that any
// Validates the current powchain data is saved and makes sure that any
// embedded genesis state is correctly accounted for.
func (s *Service) ensureValidPowchainData(ctx context.Context) error {
genState, err := s.cfg.beaconDB.GenesisState(ctx)

View File
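followedBlockHeight, renamed above, trails the eth1 head by ETH1_FOLLOW_DISTANCE blocks and clamps at zero near genesis. A self-contained sketch of the computation; the 2048 constant is an assumption matching the mainnet Eth1FollowDistance:

package main

import "fmt"

const eth1FollowDistance = 2048 // assumed mainnet value

// followedBlockHeight mirrors the logic above: latest head height minus
// the follow distance, clamped at zero when the chain is still young.
func followedBlockHeight(latestBlockHeight uint64) uint64 {
	latestValidBlock := uint64(0)
	if latestBlockHeight > eth1FollowDistance {
		latestValidBlock = latestBlockHeight - eth1FollowDistance
	}
	return latestValidBlock
}

func main() {
	fmt.Println(followedBlockHeight(1000))    // 0: still within the follow distance
	fmt.Println(followedBlockHeight(1000000)) // 997952
}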

@@ -267,7 +267,7 @@ func TestFollowBlock_OK(t *testing.T) {
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
h, err := web3Service.followBlockHeight(context.Background())
h, err := web3Service.followedBlockHeight(context.Background())
require.NoError(t, err)
assert.Equal(t, baseHeight, h, "Unexpected block height")
numToForward := uint64(2)
@@ -280,7 +280,7 @@ func TestFollowBlock_OK(t *testing.T) {
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
h, err = web3Service.followBlockHeight(context.Background())
h, err = web3Service.followedBlockHeight(context.Background())
require.NoError(t, err)
assert.Equal(t, expectedHeight, h, "Unexpected block height")
}

View File

@@ -28,6 +28,7 @@ type EngineClient struct {
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
TerminalBlockHash []byte
TerminalBlockHashExists bool
OverrideValidHash [32]byte
}
// NewPayload --
@@ -37,8 +38,11 @@ func (e *EngineClient) NewPayload(_ context.Context, _ *pb.ExecutionPayload) ([]
// ForkchoiceUpdated --
func (e *EngineClient) ForkchoiceUpdated(
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
_ context.Context, fcs *pb.ForkchoiceState, _ *pb.PayloadAttributes,
) (*pb.PayloadIDBytes, []byte, error) {
if e.OverrideValidHash != [32]byte{} && bytesutil.ToBytes32(fcs.HeadBlockHash) == e.OverrideValidHash {
return e.PayloadIDBytes, e.ForkChoiceUpdatedResp, nil
}
return e.PayloadIDBytes, e.ForkChoiceUpdatedResp, e.ErrForkchoiceUpdated
}

View File
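The OverrideValidHash field added to the mock lets a test force ForkchoiceUpdated to succeed for exactly one head hash while every other call keeps failing, which is what the recursive-invalidation tests need. A cut-down, self-contained version of that behavior (not the actual Prysm mock):

package main

import (
	"bytes"
	"errors"
	"fmt"
)

// engineClient condenses the mock above: ForkchoiceUpdated returns the
// configured error unless the head hash matches OverrideValidHash.
type engineClient struct {
	ErrForkchoiceUpdated error
	OverrideValidHash    [32]byte
}

func (e *engineClient) ForkchoiceUpdated(headBlockHash []byte) error {
	if e.OverrideValidHash != [32]byte{} && bytes.Equal(headBlockHash, e.OverrideValidHash[:]) {
		return nil
	}
	return e.ErrForkchoiceUpdated
}

func main() {
	e := &engineClient{
		ErrForkchoiceUpdated: errors.New("INVALID"),
		OverrideValidHash:    [32]byte{'a'},
	}
	valid := [32]byte{'a'}
	fmt.Println(e.ForkchoiceUpdated(valid[:]))    // <nil>
	fmt.Println(e.ForkchoiceUpdated([]byte{'b'})) // INVALID
}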

@@ -71,8 +71,3 @@ func (_ *FaultyMockPOWChain) ClearPreGenesisData() {
func (_ *FaultyMockPOWChain) IsConnectedToETH1() bool {
return true
}
// BlockExistsWithCache --
func (f *FaultyMockPOWChain) BlockExistsWithCache(ctx context.Context, hash common.Hash) (bool, *big.Int, error) {
return f.BlockExists(ctx, hash)
}

View File

@@ -179,11 +179,6 @@ func (m *POWChain) InsertBlock(height int, time uint64, hash []byte) *POWChain {
return m
}
// BlockExistsWithCache --
func (m *POWChain) BlockExistsWithCache(ctx context.Context, hash common.Hash) (bool, *big.Int, error) {
return m.BlockExists(ctx, hash)
}
func SetupRPCServer() (*rpc.Server, string, error) {
srv := rpc.NewServer()
if err := srv.RegisterName("eth", &testETHRPC{}); err != nil {

View File

@@ -37,4 +37,5 @@ type Server struct {
V1Alpha1ValidatorServer *v1alpha1validator.Server
SyncChecker sync.Checker
CanonicalHistory *stategen.CanonicalHistory
HeadUpdater blockchain.HeadUpdater
}

View File

@@ -14,6 +14,7 @@ import (
// providing RPC endpoints intended for validator clients.
type Server struct {
HeadFetcher blockchain.HeadFetcher
HeadUpdater blockchain.HeadUpdater
TimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
AttestationsPool attestations.Pool

View File

@@ -670,6 +670,7 @@ func TestProduceBlock(t *testing.T) {
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
@@ -774,6 +775,7 @@ func TestProduceBlockV2(t *testing.T) {
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
@@ -879,6 +881,7 @@ func TestProduceBlockV2(t *testing.T) {
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
@@ -1028,6 +1031,7 @@ func TestProduceBlockV2(t *testing.T) {
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
@@ -1174,6 +1178,7 @@ func TestProduceBlindedBlock(t *testing.T) {
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
@@ -1280,6 +1285,7 @@ func TestProduceBlindedBlock(t *testing.T) {
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
@@ -1429,6 +1435,7 @@ func TestProduceBlindedBlock(t *testing.T) {
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},

View File

@@ -46,4 +46,5 @@ type Server struct {
StateGen stategen.StateManager
SyncChecker sync.Checker
ReplayerBuilder stategen.ReplayerBuilder
HeadUpdater blockchain.HeadUpdater
}

View File

@@ -197,10 +197,16 @@ func (vs *Server) rebuildDepositTrie(ctx context.Context, canonicalEth1Data *eth
// validate that the provided deposit trie matches up with the canonical eth1 data.

func validateDepositTrie(trie *trie.SparseMerkleTrie, canonicalEth1Data *ethpb.Eth1Data) (bool, error) {
if trie == nil || canonicalEth1Data == nil {
return false, errors.New("nil trie or eth1data provided")
}
if trie.NumOfItems() != int(canonicalEth1Data.DepositCount) {
return false, errors.Errorf("wanted the canonical count of %d but received %d", canonicalEth1Data.DepositCount, trie.NumOfItems())
}
rt := trie.HashTreeRoot()
rt, err := trie.HashTreeRoot()
if err != nil {
return false, err
}
if !bytes.Equal(rt[:], canonicalEth1Data.DepositRoot) {
return false, errors.Errorf("wanted the canonical deposit root of %#x but received %#x", canonicalEth1Data.DepositRoot, rt)
}
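This is the API change that ripples through the rest of this diff: the sparse Merkle trie's HashTreeRoot now returns an error alongside the root, so every call site grows an error check. The pattern in isolation (variable names are illustrative):

rt, err := depositTrie.HashTreeRoot() // previously returned a bare [32]byte
if err != nil {
	return err
}
// rt is then used exactly as before, e.g. rt[:] for byte-slice comparisons.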

View File

@@ -5,6 +5,7 @@ import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -130,7 +131,7 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx
if bytes.Equal(feeRecipient.Bytes(), burnAddr) {
logrus.WithFields(logrus.Fields{
"validatorIndex": vIdx,
"burnAddress": burnAddr,
"burnAddress": common.BytesToAddress(burnAddr).Hex(),
}).Error("Fee recipient not set. Using burn address")
}
default:

View File

@@ -82,6 +82,10 @@ func (vs *Server) buildPhase0BlockData(ctx context.Context, req *ethpb.BlockRequ
return nil, fmt.Errorf("syncing to latest head, not ready to respond")
}
if err := vs.HeadUpdater.UpdateHead(ctx); err != nil {
log.WithError(err).Error("Could not process attestations and update head")
}
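// The HeadUpdater dependency threaded through the RPC servers in this diff
// is assumed to be a one-method interface, roughly:
//
//	type HeadUpdater interface {
//		UpdateHead(ctx context.Context) error
//	}
//
// The error is only logged so that a failed head update degrades to
// proposing on a slightly stale head rather than failing the block request.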
// Retrieve the parent block as the current head of the canonical chain.
parentRoot, err := vs.HeadFetcher.HeadRoot(ctx)
if err != nil {

View File

@@ -72,6 +72,7 @@ func TestProposer_GetBlock_OK(t *testing.T) {
HeadFetcher: &mock.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mock.ChainService{},
HeadUpdater: &mock.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
@@ -158,6 +159,7 @@ func TestProposer_GetBlock_AddsUnaggregatedAtts(t *testing.T) {
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
HeadUpdater: &mock.ChainService{},
MockEth1Votes: true,
SlashingsPool: slashings.NewPool(),
AttPool: attestations.NewPool(),
@@ -503,10 +505,14 @@ func TestProposer_PendingDeposits_OutsideEth1FollowWindow(t *testing.T) {
require.NoError(t, err, "Unable to determine hashed value of deposit")
assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root))
}
for _, dp := range recentDeposits {
depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot())
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root)
}
blk := util.NewBeaconBlock()
@@ -638,10 +644,14 @@ func TestProposer_PendingDeposits_FollowsCorrectEth1Block(t *testing.T) {
require.NoError(t, err, "Unable to determine hashed value of deposit")
assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root))
}
for _, dp := range recentDeposits {
depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot())
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root)
}
bs := &Server{
@@ -737,10 +747,14 @@ func TestProposer_PendingDeposits_CantReturnBelowStateEth1DepositIndex(t *testin
require.NoError(t, err, "Unable to determine hashed value of deposit")
assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root))
}
for _, dp := range recentDeposits {
depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot())
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root)
}
bs := &Server{
@@ -833,10 +847,14 @@ func TestProposer_PendingDeposits_CantReturnMoreThanMax(t *testing.T) {
require.NoError(t, err, "Unable to determine hashed value of deposit")
assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, height.Uint64(), dp.Index, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, height.Uint64(), dp.Index, root))
}
for _, dp := range recentDeposits {
depositCache.InsertPendingDeposit(ctx, dp.Deposit, height.Uint64(), dp.Index, depositTrie.HashTreeRoot())
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
depositCache.InsertPendingDeposit(ctx, dp.Deposit, height.Uint64(), dp.Index, root)
}
bs := &Server{
@@ -927,10 +945,14 @@ func TestProposer_PendingDeposits_CantReturnMoreThanDepositCount(t *testing.T) {
require.NoError(t, err, "Unable to determine hashed value of deposit")
assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root))
}
for _, dp := range recentDeposits {
depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot())
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root)
}
bs := &Server{
@@ -1036,10 +1058,14 @@ func TestProposer_DepositTrie_UtilizesCachedFinalizedDeposits(t *testing.T) {
require.NoError(t, err, "Unable to determine hashed value of deposit")
assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root))
}
for _, dp := range recentDeposits {
depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot())
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root)
}
bs := &Server{
@@ -1055,8 +1081,10 @@ func TestProposer_DepositTrie_UtilizesCachedFinalizedDeposits(t *testing.T) {
trie, err := bs.depositTrie(ctx, &ethpb.Eth1Data{}, big.NewInt(int64(params.BeaconConfig().Eth1FollowDistance)))
require.NoError(t, err)
actualRoot := trie.HashTreeRoot()
expectedRoot := depositTrie.HashTreeRoot()
actualRoot, err := trie.HashTreeRoot()
require.NoError(t, err)
expectedRoot, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, expectedRoot, actualRoot, "Incorrect deposit trie root")
}
@@ -1146,10 +1174,14 @@ func TestProposer_DepositTrie_RebuildTrie(t *testing.T) {
require.NoError(t, err, "Unable to determine hashed value of deposit")
assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root))
}
for _, dp := range recentDeposits {
depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, depositTrie.HashTreeRoot())
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
depositCache.InsertPendingDeposit(ctx, dp.Deposit, dp.Eth1BlockHeight, dp.Index, root)
}
d := depositCache.AllDepositContainers(ctx)
origDeposit, ok := proto.Clone(d[0].Deposit).(*ethpb.Deposit)
@@ -1177,8 +1209,10 @@ func TestProposer_DepositTrie_RebuildTrie(t *testing.T) {
trie, err := bs.depositTrie(ctx, &ethpb.Eth1Data{}, big.NewInt(int64(params.BeaconConfig().Eth1FollowDistance)))
require.NoError(t, err)
expectedRoot := depositTrie.HashTreeRoot()
actualRoot := trie.HashTreeRoot()
expectedRoot, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
actualRoot, err := trie.HashTreeRoot()
require.NoError(t, err)
assert.Equal(t, expectedRoot, actualRoot, "Incorrect deposit trie root")
}
@@ -1230,7 +1264,8 @@ func TestProposer_ValidateDepositTrie(t *testing.T) {
assert.NoError(t, trie.Insert([]byte{'a'}, 0))
assert.NoError(t, trie.Insert([]byte{'b'}, 1))
assert.NoError(t, trie.Insert([]byte{'c'}, 2))
rt := trie.HashTreeRoot()
rt, err := trie.HashTreeRoot()
require.NoError(t, err)
return &ethpb.Eth1Data{DepositRoot: rt[:], DepositCount: 3, BlockHash: []byte{}}
},
trieCreator: func() *trie.SparseMerkleTrie {
@@ -1274,7 +1309,9 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
require.NoError(t, err)
depositCache, err := depositcache.New()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(context.Background(), dc.Deposit, dc.Eth1BlockHeight, dc.Index, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(context.Background(), dc.Deposit, dc.Eth1BlockHeight, dc.Index, root))
t.Run("choose highest count", func(t *testing.T) {
t.Skip()
@@ -1976,10 +2013,14 @@ func TestProposer_Deposits_ReturnsEmptyList_IfLatestEth1DataEqGenesisEth1Block(t
require.NoError(t, err, "Unable to determine hashed value of deposit")
assert.NoError(t, depositTrie.Insert(depositHash[:], int(dp.Index)))
assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root))
}
for _, dp := range recentDeposits {
depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, depositTrie.HashTreeRoot())
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
depositCache.InsertPendingDeposit(ctx, dp.Deposit, uint64(dp.Index), dp.Index, root)
}
bs := &Server{
@@ -2066,6 +2107,7 @@ func TestProposer_GetBeaconBlock_PreForkEpoch(t *testing.T) {
HeadFetcher: &mock.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mock.ChainService{},
HeadUpdater: &mock.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
@@ -2178,6 +2220,7 @@ func TestProposer_GetBeaconBlock_PostForkEpoch(t *testing.T) {
HeadFetcher: &mock.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mock.ChainService{},
HeadUpdater: &mock.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
@@ -2331,6 +2374,7 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mock.ChainService{},
HeadUpdater: &mock.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: c,

View File

@@ -43,6 +43,7 @@ type Server struct {
AttestationCache *cache.AttestationCache
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
HeadFetcher blockchain.HeadFetcher
HeadUpdater blockchain.HeadUpdater
ForkFetcher blockchain.ForkFetcher
FinalizationFetcher blockchain.FinalizationFetcher
TimeFetcher blockchain.TimeFetcher

View File

@@ -137,7 +137,9 @@ func TestWaitForActivation_ValidatorOriginallyExists(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 10 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 10 /*blockNum*/, 0, root))
trie, err := v1.InitializeFromProtoUnsafe(beaconState)
require.NoError(t, err)
vs := &Server{

View File

@@ -41,7 +41,9 @@ func TestValidatorStatus_DepositedEth1(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{
@@ -84,7 +86,9 @@ func TestValidatorStatus_Deposited(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{
@@ -134,7 +138,9 @@ func TestValidatorStatus_PartiallyDeposited(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{
@@ -202,7 +208,9 @@ func TestValidatorStatus_Pending(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
@@ -247,7 +255,9 @@ func TestValidatorStatus_Active(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
// Active because activation epoch <= current epoch < exit epoch.
activeEpoch := helpers.ActivationExitEpoch(0)
@@ -334,7 +344,9 @@ func TestValidatorStatus_Exiting(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{
@@ -391,7 +403,9 @@ func TestValidatorStatus_Slashing(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{
@@ -449,7 +463,9 @@ func TestValidatorStatus_Exited(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{
@@ -532,11 +548,15 @@ func TestActivationStatus_OK(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, dep, 10 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, dep, 10 /*blockNum*/, 0, root))
dep = deposits[2]
assert.NoError(t, depositTrie.Insert(dep.Data.Signature, 15))
assert.NoError(t, depositCache.InsertDeposit(context.Background(), dep, 0, 1, depositTrie.HashTreeRoot()))
root, err = depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(context.Background(), dep, 0, 1, root))
vs := &Server{
Ctx: context.Background(),
@@ -677,7 +697,9 @@ func TestValidatorStatus_CorrectActivationQueue(t *testing.T) {
deposit := &ethpb.Deposit{
Data: depData,
}
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, int64(i), depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, int64(i), root))
}
@@ -758,10 +780,14 @@ func TestMultipleValidatorStatus_Pubkeys(t *testing.T) {
require.NoError(t, err)
dep := deposits[0]
assert.NoError(t, depositCache.InsertDeposit(ctx, dep, 10 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, dep, 10 /*blockNum*/, 0, root))
dep = deposits[2]
assert.NoError(t, depositTrie.Insert(dep.Data.Signature, 15))
assert.NoError(t, depositCache.InsertDeposit(context.Background(), dep, 0, 1, depositTrie.HashTreeRoot()))
root, err = depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(context.Background(), dep, 0, 1, root))
vs := &Server{
Ctx: context.Background(),
@@ -918,7 +944,9 @@ func TestValidatorStatus_Invalid(t *testing.T) {
depositCache, err := depositcache.New()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.HashTreeRoot()))
root, err := depositTrie.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, root))
height := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{

View File

@@ -78,6 +78,7 @@ type Config struct {
BeaconMonitoringPort int
BeaconDB db.HeadAccessDatabase
ChainInfoFetcher blockchain.ChainInfoFetcher
HeadUpdater blockchain.HeadUpdater
HeadFetcher blockchain.HeadFetcher
CanonicalFetcher blockchain.CanonicalFetcher
ForkFetcher blockchain.ForkFetcher
@@ -189,6 +190,7 @@ func (s *Service) Start() {
AttPool: s.cfg.AttestationsPool,
ExitPool: s.cfg.ExitPool,
HeadFetcher: s.cfg.HeadFetcher,
HeadUpdater: s.cfg.HeadUpdater,
ForkFetcher: s.cfg.ForkFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
@@ -215,6 +217,7 @@ func (s *Service) Start() {
}
validatorServerV1 := &validator.Server{
HeadFetcher: s.cfg.HeadFetcher,
HeadUpdater: s.cfg.HeadUpdater,
TimeFetcher: s.cfg.GenesisTimeFetcher,
SyncChecker: s.cfg.SyncService,
AttestationsPool: s.cfg.AttestationsPool,
@@ -261,6 +264,7 @@ func (s *Service) Start() {
BeaconDB: s.cfg.BeaconDB,
AttestationsPool: s.cfg.AttestationsPool,
SlashingsPool: s.cfg.SlashingsPool,
HeadUpdater: s.cfg.HeadUpdater,
HeadFetcher: s.cfg.HeadFetcher,
FinalizationFetcher: s.cfg.FinalizationFetcher,
CanonicalFetcher: s.cfg.CanonicalFetcher,

View File

@@ -68,6 +68,7 @@ go_library(
"//encoding/ssz:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ferranbt_fastssz//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@io_opencensus_go//trace:go_default_library",

View File

@@ -154,7 +154,8 @@ func (b *BeaconState) NumValidators() int {
}
// ReadFromEveryValidator reads values from every validator and applies it to the provided function.
// Warning: This method is potentially unsafe, as it exposes the actual validator registry.
//
// WARNING: This method is potentially unsafe, as it exposes the actual validator registry.
func (b *BeaconState) ReadFromEveryValidator(f func(idx int, val state.ReadOnlyValidator) error) error {
if b.validators == nil {
return errors.New("nil validators in state")

View File

@@ -1,36 +1,18 @@
package state_native
import (
ssz "github.com/ferranbt/fastssz"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
)
var errAssertionFailed = errors.New("failed to convert interface to proto state")
var errUnsupportedVersion = errors.New("unsupported beacon state version")
func (b *BeaconState) MarshalSSZ() ([]byte, error) {
proto := b.ToProto()
switch b.Version() {
case version.Phase0:
s, ok := proto.(*ethpb.BeaconState)
if !ok {
return nil, errAssertionFailed
}
return s.MarshalSSZ()
case version.Altair:
s, ok := proto.(*ethpb.BeaconStateAltair)
if !ok {
return nil, errAssertionFailed
}
return s.MarshalSSZ()
case version.Bellatrix:
s, ok := proto.(*ethpb.BeaconStateBellatrix)
if !ok {
return nil, errAssertionFailed
}
return s.MarshalSSZ()
default:
return nil, errUnsupportedVersion
s, ok := proto.(ssz.Marshaler)
if !ok {
return nil, errAssertionFailed
}
return s.MarshalSSZ()
}
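The simplification works because every generated ethpb.BeaconState* proto already satisfies fastssz's Marshaler interface; a sketch of that interface as vendored here (method set assumed from the library):

type Marshaler interface {
	MarshalSSZTo(dst []byte) ([]byte, error)
	MarshalSSZ() ([]byte, error)
	SizeSSZ() int
}

A single interface assertion therefore replaces the per-version type switch, and future state versions are supported without touching this function.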

View File

@@ -541,6 +541,7 @@ func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
}
// Initializes the Merkle layers for the beacon state if they are empty.
//
// WARNING: Caller must acquire the mutex before using.
func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
if len(b.merkleLayers) > 0 {
@@ -565,6 +566,7 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
}
// Recomputes the Merkle layers for the dirty fields in the state.
//
// WARNING: Caller must acquire the mutex before using.
func (b *BeaconState) recomputeDirtyFields(ctx context.Context) error {
for field := range b.dirtyFields {

View File

@@ -8,16 +8,16 @@ import (
var ErrNotInCache = errors.New("state not found in cache")
type CachedGetter interface {
ByRoot([32]byte) (state.BeaconState, error)
ByBlockRoot([32]byte) (state.BeaconState, error)
}
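Both the hot state cache and the epoch boundary cache implement this renamed interface, which lets a CombinedCache consult them in order. A hedged, in-package sketch of the fall-through behavior (the getter order is an assumption):

cache := CombinedCache{getters: []CachedGetter{hotCache, boundaryCache}}
st, err := cache.ByBlockRoot(r) // first getter that returns a nil error wins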
type CombinedCache struct {
getters []CachedGetter
}
func (c CombinedCache) ByRoot(root [32]byte) (state.BeaconState, error) {
func (c CombinedCache) ByBlockRoot(r [32]byte) (state.BeaconState, error) {
for _, getter := range c.getters {
st, err := getter.ByRoot(root)
st, err := getter.ByBlockRoot(r)
if err == nil {
return st, nil
}

View File

@@ -20,8 +20,8 @@ var (
// slotRootInfo specifies the slot root info in the epoch boundary state cache.
type slotRootInfo struct {
slot types.Slot
root [32]byte
slot types.Slot
blockRoot [32]byte
}
// slotKeyFn takes the string representation of the slot to be used as key
@@ -66,9 +66,9 @@ func newBoundaryStateCache() *epochBoundaryState {
}
}
// ByRoot satisfies the CachedGetter interface
func (e *epochBoundaryState) ByRoot(r [32]byte) (state.BeaconState, error) {
rsi, ok, err := e.getByRoot(r)
// ByBlockRoot satisfies the CachedGetter interface
func (e *epochBoundaryState) ByBlockRoot(r [32]byte) (state.BeaconState, error) {
rsi, ok, err := e.getByBlockRoot(r)
if err != nil {
return nil, err
}
@@ -79,14 +79,14 @@ func (e *epochBoundaryState) ByRoot(r [32]byte) (state.BeaconState, error) {
}
// get epoch boundary state by its block root. Returns copied state in state info object if exists. Otherwise returns nil.
func (e *epochBoundaryState) getByRoot(r [32]byte) (*rootStateInfo, bool, error) {
func (e *epochBoundaryState) getByBlockRoot(r [32]byte) (*rootStateInfo, bool, error) {
e.lock.RLock()
defer e.lock.RUnlock()
return e.getByRootLockFree(r)
return e.getByBlockRootLockFree(r)
}
func (e *epochBoundaryState) getByRootLockFree(r [32]byte) (*rootStateInfo, bool, error) {
func (e *epochBoundaryState) getByBlockRootLockFree(r [32]byte) (*rootStateInfo, bool, error) {
obj, exists, err := e.rootStateCache.GetByKey(string(r[:]))
if err != nil {
return nil, false, err
@@ -122,24 +122,24 @@ func (e *epochBoundaryState) getBySlot(s types.Slot) (*rootStateInfo, bool, erro
return nil, false, errNotSlotRootInfo
}
return e.getByRootLockFree(info.root)
return e.getByBlockRootLockFree(info.blockRoot)
}
// put adds a state to the epoch boundary state cache. This method also trims the
// least recently added state info if the cache size has reached the max cache
// size limit.
func (e *epochBoundaryState) put(r [32]byte, s state.BeaconState) error {
func (e *epochBoundaryState) put(blockRoot [32]byte, s state.BeaconState) error {
e.lock.Lock()
defer e.lock.Unlock()
if err := e.slotRootCache.AddIfNotPresent(&slotRootInfo{
slot: s.Slot(),
root: r,
slot: s.Slot(),
blockRoot: blockRoot,
}); err != nil {
return err
}
if err := e.rootStateCache.AddIfNotPresent(&rootStateInfo{
root: r,
root: blockRoot,
state: s.Copy(),
}); err != nil {
return err
@@ -152,11 +152,11 @@ func (e *epochBoundaryState) put(r [32]byte, s state.BeaconState) error {
}
// delete the state from the epoch boundary state cache.
func (e *epochBoundaryState) delete(r [32]byte) error {
func (e *epochBoundaryState) delete(blockRoot [32]byte) error {
e.lock.Lock()
defer e.lock.Unlock()
return e.rootStateCache.Delete(&rootStateInfo{
root: r,
root: blockRoot,
})
}

View File

@@ -27,12 +27,12 @@ func TestEpochBoundaryStateCache_CanSaveAndDelete(t *testing.T) {
r := [32]byte{'a'}
require.NoError(t, e.put(r, s))
got, exists, err := e.getByRoot([32]byte{'b'})
got, exists, err := e.getByBlockRoot([32]byte{'b'})
require.NoError(t, err)
assert.Equal(t, false, exists, "Should not exist")
assert.Equal(t, (*rootStateInfo)(nil), got, "Should not exist")
got, exists, err = e.getByRoot([32]byte{'a'})
got, exists, err = e.getByBlockRoot([32]byte{'a'})
require.NoError(t, err)
assert.Equal(t, true, exists, "Should exist")
assert.DeepSSZEqual(t, s.InnerStateUnsafe(), got.state.InnerStateUnsafe(), "Should have the same state")
@@ -48,7 +48,7 @@ func TestEpochBoundaryStateCache_CanSaveAndDelete(t *testing.T) {
assert.DeepSSZEqual(t, s.InnerStateUnsafe(), got.state.InnerStateUnsafe(), "Should have the same state")
require.NoError(t, e.delete(r))
got, exists, err = e.getByRoot([32]byte{'b'})
got, exists, err = e.getByBlockRoot([32]byte{'b'})
require.NoError(t, err)
assert.Equal(t, false, exists, "Should not exist")
assert.Equal(t, (*rootStateInfo)(nil), got, "Should not exist")

View File

@@ -28,24 +28,23 @@ func (s *State) HasState(ctx context.Context, blockRoot [32]byte) (bool, error)
}
// HasStateInCache returns true if the state exists in cache.
func (s *State) HasStateInCache(ctx context.Context, blockRoot [32]byte) (bool, error) {
func (s *State) HasStateInCache(_ context.Context, blockRoot [32]byte) (bool, error) {
if s.hotStateCache.has(blockRoot) {
return true, nil
}
_, has, err := s.epochBoundaryStateCache.getByRoot(blockRoot)
_, has, err := s.epochBoundaryStateCache.getByBlockRoot(blockRoot)
if err != nil {
return false, err
}
return has, nil
}
// StateByRootIfCachedNoCopy retrieves a state using the input block root only if the state is already in the cache
// StateByRootIfCachedNoCopy retrieves a state using the input block root only if the state is already in the cache.
func (s *State) StateByRootIfCachedNoCopy(blockRoot [32]byte) state.BeaconState {
if !s.hotStateCache.has(blockRoot) {
return nil
}
state := s.hotStateCache.getWithoutCopy(blockRoot)
return state
return s.hotStateCache.getWithoutCopy(blockRoot)
}
// StateByRoot retrieves the state using input block root.
@@ -53,7 +52,7 @@ func (s *State) StateByRoot(ctx context.Context, blockRoot [32]byte) (state.Beac
ctx, span := trace.StartSpan(ctx, "stateGen.StateByRoot")
defer span.End()
// Genesis case. If block root is zero hash, short circuit to use genesis cachedState stored in DB.
// Genesis case. If block root is zero hash, short circuit to use genesis state stored in DB.
if blockRoot == params.BeaconConfig().ZeroHash {
return s.beaconDB.GenesisState(ctx)
}
@@ -62,23 +61,25 @@ func (s *State) StateByRoot(ctx context.Context, blockRoot [32]byte) (state.Beac
// StateByRootInitialSync retrieves the state from the DB for the initial syncing phase.
// It assumes initial syncing using a block list rather than a block tree hence the returned
// state is not copied.
// It invalidates cache for parent root because pre state will get mutated.
// Do not use this method for anything other than initial syncing purpose or block tree is applied.
// state is not copied (block batches returned from initial sync are linear).
// It invalidates cache for parent root because pre-state will get mutated.
//
// WARNING: Do not use this method for anything other than initial syncing, or once a block tree is applied.
func (s *State) StateByRootInitialSync(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
// Genesis case. If block root is zero hash, short circuit to use genesis state stored in DB.
if blockRoot == params.BeaconConfig().ZeroHash {
return s.beaconDB.GenesisState(ctx)
}
// To invalidate cache for parent root because pre state will get mutated.
// Invalidate the cache for the parent root because the pre-state will get mutated.
// It is a parent root because StateByRootInitialSync is always used to fetch the block's parent state.
defer s.hotStateCache.delete(blockRoot)
if s.hotStateCache.has(blockRoot) {
return s.hotStateCache.getWithoutCopy(blockRoot), nil
}
cachedInfo, ok, err := s.epochBoundaryStateCache.getByRoot(blockRoot)
cachedInfo, ok, err := s.epochBoundaryStateCache.getByBlockRoot(blockRoot)
if err != nil {
return nil, err
}
@@ -113,8 +114,7 @@ func (s *State) StateByRootInitialSync(ctx context.Context, blockRoot [32]byte)
return startState, nil
}
// This returns the state summary object of a given block root, it first checks the cache
// then checks the DB. An error is returned if state summary object is nil.
// This returns the state summary object of a given block root. It first checks the cache, then checks the DB.
func (s *State) stateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {
var summary *ethpb.StateSummary
var err error
@@ -152,7 +152,7 @@ func (s *State) DeleteStateFromCaches(_ context.Context, blockRoot [32]byte) err
return s.epochBoundaryStateCache.delete(blockRoot)
}
// This loads a beacon state from either the cache or DB then replay blocks up the requested block root.
// This loads a beacon state from either the cache or DB, then replays blocks up to the slot of the requested block root.
func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.loadStateByRoot")
defer span.End()
@@ -163,8 +163,8 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.
return cachedState, nil
}
// Second, it checks if the state exits in epoch boundary state cache.
cachedInfo, ok, err := s.epochBoundaryStateCache.getByRoot(blockRoot)
// Second, it checks if the state exists in epoch boundary state cache.
cachedInfo, ok, err := s.epochBoundaryStateCache.getByBlockRoot(blockRoot)
if err != nil {
return nil, err
}
@@ -172,7 +172,7 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.
return cachedInfo.state, nil
}
// Short cut if the cachedState is already in the DB.
// Short circuit if the state is already in the DB.
if s.beaconDB.HasState(ctx, blockRoot) {
return s.beaconDB.State(ctx, blockRoot)
}
@@ -183,8 +183,8 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.
}
targetSlot := summary.Slot
// Since the requested state is not in caches, start replaying using the last available ancestor state which is
// retrieved using input block's parent root.
// Since the requested state is not in caches or DB, start replaying using the last
// available ancestor state which is retrieved using input block's root.
startState, err := s.LastAncestorState(ctx, blockRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get ancestor state")
@@ -193,7 +193,6 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.
return nil, errUnknownBoundaryState
}
// Return state early if we are retrieving it from our finalized state cache.
if startState.Slot() == targetSlot {
return startState, nil
}
@@ -208,23 +207,23 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (state.
return s.ReplayBlocks(ctx, startState, blks, targetSlot)
}
// This returns the highest available ancestor state of the input block root.
// It recursively look up block's parent until a corresponding state of the block root
// LastAncestorState returns the highest available ancestor state of the input block root.
// It recursively looks up block's parent until a corresponding state of the block root
// is found in the caches or DB.
//
// There are three ways to derive block parent state:
// 1.) block parent state is the last finalized state
// 2.) block parent state is the epoch boundary state and exists in epoch boundary cache.
// 3.) block parent state is in DB.
func (s *State) LastAncestorState(ctx context.Context, root [32]byte) (state.BeaconState, error) {
// 1) block parent state is the last finalized state
// 2) block parent state is the epoch boundary state and exists in epoch boundary cache
// 3) block parent state is in DB
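// For example (a hedged sketch): resolving the state for block C in
// A(finalized) <- B(epoch boundary) <- C loads C, takes its parent root B,
// finds B's state via case 2 in the epoch boundary cache, and returns it,
// so the caller only has to replay C instead of the whole chain from A.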
func (s *State) LastAncestorState(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.LastAncestorState")
defer span.End()
if s.isFinalizedRoot(root) && s.finalizedState() != nil {
if s.isFinalizedRoot(blockRoot) && s.finalizedState() != nil {
return s.finalizedState(), nil
}
b, err := s.beaconDB.Block(ctx, root)
b, err := s.beaconDB.Block(ctx, blockRoot)
if err != nil {
return nil, err
}
@@ -237,13 +236,13 @@ func (s *State) LastAncestorState(ctx context.Context, root [32]byte) (state.Bea
return nil, ctx.Err()
}
// Is the state a genesis state.
// Is the state the genesis state.
parentRoot := bytesutil.ToBytes32(b.Block().ParentRoot())
if parentRoot == params.BeaconConfig().ZeroHash {
return s.beaconDB.GenesisState(ctx)
}
// return an error if slot hasn't been covered by checkpoint sync backfill
// Return an error if slot hasn't been covered by checkpoint sync.
ps := b.Block().Slot() - 1
if !s.slotAvailable(ps) {
return nil, errors.Wrapf(ErrNoDataForSlot, "slot %d not in db due to checkpoint sync", ps)
@@ -259,7 +258,7 @@ func (s *State) LastAncestorState(ctx context.Context, root [32]byte) (state.Bea
}
// Does the state exist in epoch boundary cache.
cachedInfo, ok, err := s.epochBoundaryStateCache.getByRoot(parentRoot)
cachedInfo, ok, err := s.epochBoundaryStateCache.getByBlockRoot(parentRoot)
if err != nil {
return nil, err
}
@@ -271,6 +270,7 @@ func (s *State) LastAncestorState(ctx context.Context, root [32]byte) (state.Bea
if s.beaconDB.HasState(ctx, parentRoot) {
return s.beaconDB.State(ctx, parentRoot)
}
b, err = s.beaconDB.Block(ctx, parentRoot)
if err != nil {
return nil, err

View File

@@ -174,7 +174,7 @@ func TestDeleteStateFromCaches(t *testing.T) {
r := [32]byte{'A'}
require.Equal(t, false, service.hotStateCache.has(r))
_, has, err := service.epochBoundaryStateCache.getByRoot(r)
_, has, err := service.epochBoundaryStateCache.getByBlockRoot(r)
require.NoError(t, err)
require.Equal(t, false, has)
@@ -182,14 +182,14 @@ func TestDeleteStateFromCaches(t *testing.T) {
require.NoError(t, service.epochBoundaryStateCache.put(r, beaconState))
require.Equal(t, true, service.hotStateCache.has(r))
_, has, err = service.epochBoundaryStateCache.getByRoot(r)
_, has, err = service.epochBoundaryStateCache.getByBlockRoot(r)
require.NoError(t, err)
require.Equal(t, true, has)
require.NoError(t, service.DeleteStateFromCaches(ctx, r))
require.Equal(t, false, service.hotStateCache.has(r))
_, has, err = service.epochBoundaryStateCache.getByRoot(r)
_, has, err = service.epochBoundaryStateCache.getByBlockRoot(r)
require.NoError(t, err)
require.Equal(t, false, has)
}

View File

@@ -42,8 +42,8 @@ type CanonicalHistory struct {
cache CachedGetter
}
func (ch *CanonicalHistory) ReplayerForSlot(target types.Slot) Replayer {
return &stateReplayer{chainer: ch, method: forSlot, target: target}
func (c *CanonicalHistory) ReplayerForSlot(target types.Slot) Replayer {
return &stateReplayer{chainer: c, method: forSlot, target: target}
}
func (c *CanonicalHistory) BlockForSlot(ctx context.Context, target types.Slot) ([32]byte, interfaces.SignedBeaconBlock, error) {
@@ -134,9 +134,9 @@ func (c *CanonicalHistory) chainForSlot(ctx context.Context, target types.Slot)
return s, descendants, nil
}
func (c *CanonicalHistory) getState(ctx context.Context, root [32]byte) (state.BeaconState, error) {
func (c *CanonicalHistory) getState(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
if c.cache != nil {
st, err := c.cache.ByRoot(root)
st, err := c.cache.ByBlockRoot(blockRoot)
if err == nil {
return st, nil
}
@@ -144,7 +144,7 @@ func (c *CanonicalHistory) getState(ctx context.Context, root [32]byte) (state.B
return nil, errors.Wrap(err, "error reading from state cache during state replay")
}
}
return c.h.StateOrError(ctx, root)
return c.h.StateOrError(ctx, blockRoot)
}
// ancestorChain works backwards through the chain lineage, accumulating blocks and checking for a saved state.

View File

@@ -24,7 +24,7 @@ var (
})
)
// hotStateCache is used to store the processed beacon state after finalized check point..
// hotStateCache is used to store the processed beacon state after finalized check point.
type hotStateCache struct {
cache *lru.Cache
lock sync.RWMutex
@@ -39,10 +39,10 @@ func newHotStateCache() *hotStateCache {
// Get returns a cached response via input block root, if any.
// The response is copied by default.
func (c *hotStateCache) get(root [32]byte) state.BeaconState {
func (c *hotStateCache) get(blockRoot [32]byte) state.BeaconState {
c.lock.RLock()
defer c.lock.RUnlock()
item, exists := c.cache.Get(root)
item, exists := c.cache.Get(blockRoot)
if exists && item != nil {
hotStateCacheHit.Inc()
@@ -52,8 +52,8 @@ func (c *hotStateCache) get(root [32]byte) state.BeaconState {
return nil
}
func (c *hotStateCache) ByRoot(root [32]byte) (state.BeaconState, error) {
st := c.get(root)
func (c *hotStateCache) ByBlockRoot(r [32]byte) (state.BeaconState, error) {
st := c.get(r)
if st == nil {
return nil, ErrNotInCache
}
@@ -61,10 +61,10 @@ func (c *hotStateCache) ByRoot(root [32]byte) (state.BeaconState, error) {
}
// GetWithoutCopy returns a non-copied cached response via input block root.
func (c *hotStateCache) getWithoutCopy(root [32]byte) state.BeaconState {
func (c *hotStateCache) getWithoutCopy(blockRoot [32]byte) state.BeaconState {
c.lock.RLock()
defer c.lock.RUnlock()
item, exists := c.cache.Get(root)
item, exists := c.cache.Get(blockRoot)
if exists && item != nil {
hotStateCacheHit.Inc()
return item.(state.BeaconState)
@@ -74,22 +74,22 @@ func (c *hotStateCache) getWithoutCopy(root [32]byte) state.BeaconState {
}
// put the response in the cache.
func (c *hotStateCache) put(root [32]byte, state state.BeaconState) {
func (c *hotStateCache) put(blockRoot [32]byte, state state.BeaconState) {
c.lock.Lock()
defer c.lock.Unlock()
c.cache.Add(root, state)
c.cache.Add(blockRoot, state)
}
// has returns true if the key exists in the cache.
func (c *hotStateCache) has(root [32]byte) bool {
func (c *hotStateCache) has(blockRoot [32]byte) bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.cache.Contains(root)
return c.cache.Contains(blockRoot)
}
// delete deletes the key if it exists in the cache.
func (c *hotStateCache) delete(root [32]byte) bool {
func (c *hotStateCache) delete(blockRoot [32]byte) bool {
c.lock.Lock()
defer c.lock.Unlock()
return c.cache.Remove(root)
return c.cache.Remove(blockRoot)
}
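Taken together, the renamed methods make the block-root keying explicit. A minimal in-package usage sketch:

c := newHotStateCache()
c.put(blockRoot, st)
if c.has(blockRoot) {
	copied := c.get(blockRoot)            // copy-on-read, safe to mutate
	shared := c.getWithoutCopy(blockRoot) // no copy; callers must not mutate
	_, _ = copied, shared
}
c.delete(blockRoot)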

View File

@@ -13,7 +13,7 @@ import (
// MigrateToCold advances the finalized info in between the cold and hot state sections.
// It moves the recent finalized states from the hot section to the cold section and
// only preserve the ones that's on archived point.
// only preserves the ones that are on an archived point.
func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "stateGen.MigrateToCold")
defer span.End()
@@ -31,7 +31,7 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
return nil
}
// Start at previous finalized slot, stop at current finalized slot.
// Start at previous finalized slot, stop at current finalized slot (it will be handled in the next migration).
// If the slot is on an archived point, save the state of that slot to the DB.
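// (Assumed config knob: a slot is on an archived point when
// slot%params.BeaconConfig().SlotsPerArchivedPoint == 0, e.g. every 2048th
// slot with the default config, so only a sparse subset of hot states is
// persisted to the cold section.)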
for slot := oldFSlot; slot < fSlot; slot++ {
if ctx.Err() != nil {
@@ -69,7 +69,7 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
return err
}
aRoot = missingRoot
// There's no need to generate the state if the state already exists on the DB.
// There's no need to generate the state if the state already exists in the DB.
// We can skip saving the state.
if !s.beaconDB.HasState(ctx, aRoot) {
aState, err = s.StateByRoot(ctx, missingRoot)
@@ -80,13 +80,14 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
}
if s.beaconDB.HasState(ctx, aRoot) {
// Remove hot state DB root to prevent it gets deleted later when we turn hot state save DB mode off.
// If you are migrating a state and it's already part of the hot state cache saved to the DB,
// you can just remove it from the hot state cache as it becomes redundant.
s.saveHotStateDB.lock.Lock()
roots := s.saveHotStateDB.savedStateRoots
roots := s.saveHotStateDB.blockRootsOfSavedStates
for i := 0; i < len(roots); i++ {
if aRoot == roots[i] {
s.saveHotStateDB.savedStateRoots = append(roots[:i], roots[i+1:]...)
// There shouldn't be duplicated roots in `savedStateRoots`.
s.saveHotStateDB.blockRootsOfSavedStates = append(roots[:i], roots[i+1:]...)
// There shouldn't be duplicated roots in `blockRootsOfSavedStates`.
// Break here is ok.
break
}
@@ -107,7 +108,7 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
}
// Update finalized info in memory.
fInfo, ok, err := s.epochBoundaryStateCache.getByRoot(fRoot)
fInfo, ok, err := s.epochBoundaryStateCache.getByBlockRoot(fRoot)
if err != nil {
return err
}

View File

@@ -142,8 +142,8 @@ func TestMigrateToCold_StateExistsInDB(t *testing.T) {
require.NoError(t, service.epochBoundaryStateCache.put(fRoot, beaconState))
require.NoError(t, service.beaconDB.SaveState(ctx, beaconState, fRoot))
service.saveHotStateDB.savedStateRoots = [][32]byte{{1}, {2}, {3}, {4}, fRoot}
service.saveHotStateDB.blockRootsOfSavedStates = [][32]byte{{1}, {2}, {3}, {4}, fRoot}
require.NoError(t, service.MigrateToCold(ctx, fRoot))
assert.DeepEqual(t, [][32]byte{{1}, {2}, {3}, {4}}, service.saveHotStateDB.savedStateRoots)
assert.DeepEqual(t, [][32]byte{{1}, {2}, {3}, {4}}, service.saveHotStateDB.blockRootsOfSavedStates)
assert.LogsDoNotContain(t, hook, "Saved state in DB")
}

View File

@@ -271,7 +271,7 @@ type mockCachedGetter struct {
cache map[[32]byte]state.BeaconState
}
func (m mockCachedGetter) ByRoot(root [32]byte) (state.BeaconState, error) {
func (m mockCachedGetter) ByBlockRoot(root [32]byte) (state.BeaconState, error) {
st, ok := m.cache[root]
if !ok {
return nil, ErrNotInCache

View File

@@ -23,6 +23,8 @@ import (
)
// ReplayBlocks replays the input blocks on the input state until the target slot is reached.
//
// WARNING: Blocks passed to the function must be in decreasing slot order.
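// A hedged illustration: with a pre-state at slot 31 and targetSlot 34, the
// caller passes the blocks as {b34, b33, b32}; replay is assumed to walk the
// slice from the tail, applying b32, then b33, then b34.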
func (_ *State) ReplayBlocks(
ctx context.Context,
state state.BeaconState,
@@ -59,7 +61,7 @@ func (_ *State) ReplayBlocks(
}
}
// If there is skip slots at the end.
// If there are skip slots at the end.
if targetSlot > state.Slot() {
state, err = ReplayProcessSlots(ctx, state, targetSlot)
if err != nil {
@@ -134,6 +136,7 @@ func (s *State) LoadBlocks(ctx context.Context, startSlot, endSlot types.Slot, e
// executeStateTransitionStateGen applies state transition on input historical state and block for state gen usages.
// There's no signature verification involved given state gen only works with stored block and state in DB.
// If the objects are already stored in the DB, one can omit redundant signature checks and ssz hashing calculations.
//
// WARNING: This method should not be used on an unverified new block.
func executeStateTransitionStateGen(
ctx context.Context,
@@ -146,12 +149,12 @@ func executeStateTransitionStateGen(
if err := helpers.BeaconBlockIsNil(signed); err != nil {
return nil, err
}
ctx, span := trace.StartSpan(ctx, "stategen.ExecuteStateTransitionStateGen")
ctx, span := trace.StartSpan(ctx, "stategen.executeStateTransitionStateGen")
defer span.End()
var err error
// Execute per slots transition.
// Given this is for state gen, a node uses the version process slots without skip slots cache.
// Given this is for state gen, a node uses the version of process slots without skip slots cache.
state, err = ReplayProcessSlots(ctx, state, signed.Block().Slot())
if err != nil {
return nil, errors.Wrap(err, "could not process slot")
@@ -169,6 +172,7 @@ func executeStateTransitionStateGen(
// ReplayProcessSlots processes old slots for state gen usages.
// There's no skip slot cache involved given state gen only works with blocks and states already stored in the DB.
//
// WARNING: This method should not be used for a future slot.
func ReplayProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stategen.ReplayProcessSlots")
@@ -178,7 +182,7 @@ func ReplayProcessSlots(ctx context.Context, state state.BeaconState, slot types
}
if state.Slot() > slot {
err := fmt.Errorf("expected state.slot %d < slot %d", state.Slot(), slot)
err := fmt.Errorf("expected state.slot %d <= slot %d", state.Slot(), slot)
return nil, err
}
@@ -207,7 +211,7 @@ func ReplayProcessSlots(ctx context.Context, state state.BeaconState, slot types
return nil, errors.Wrap(err, "could not process epoch")
}
default:
return nil, errors.New("beacon state should have a version")
return nil, fmt.Errorf("unsupported beacon state version: %s", version.String(state.Version()))
}
}
if err := state.SetSlot(state.Slot() + 1); err != nil {

View File

@@ -35,7 +35,7 @@ type StateManager interface {
StateByRootIfCachedNoCopy(blockRoot [32]byte) state.BeaconState
StateByRootInitialSync(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
RecoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error)
SaveState(ctx context.Context, root [32]byte, st state.BeaconState) error
SaveState(ctx context.Context, blockRoot [32]byte, st state.BeaconState) error
ForceCheckpoint(ctx context.Context, root []byte) error
EnableSaveHotStateToDB(_ context.Context)
DisableSaveHotStateToDB(ctx context.Context) error
@@ -57,10 +57,10 @@ type State struct {
// how often does the node save hot states to db? what are
// the saved hot states in db?... etc
type saveHotStateDbConfig struct {
enabled bool
lock sync.Mutex
duration types.Slot
savedStateRoots [][32]byte
enabled bool
lock sync.Mutex
duration types.Slot
blockRootsOfSavedStates [][32]byte
}
// This tracks the finalized point. It's also the point where slot and the block root of
@@ -100,7 +100,7 @@ func New(beaconDB db.NoHeadAccessDatabase, opts ...StateGenOption) *State {
return s
}
// Resume resumes a new state management object from previously saved finalized check point in DB.
// Resume resumes a new state management object from the previously saved finalized checkpoint in the DB.
func (s *State) Resume(ctx context.Context, fState state.BeaconState) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.Resume")
defer span.End()

View File

@@ -14,22 +14,24 @@ import (
)
// SaveState saves the state in the cache and/or DB.
func (s *State) SaveState(ctx context.Context, root [32]byte, st state.BeaconState) error {
func (s *State) SaveState(ctx context.Context, blockRoot [32]byte, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "stateGen.SaveState")
defer span.End()
return s.saveStateByRoot(ctx, root, st)
return s.saveStateByRoot(ctx, blockRoot, st)
}
// ForceCheckpoint initiates a cold state save of the given state. This method does not update the
// ForceCheckpoint initiates a cold state save of the given block root's state. This method does not update the
// "last archived state" but simply saves the specified state from the root argument into the DB.
func (s *State) ForceCheckpoint(ctx context.Context, root []byte) error {
//
// The name "Checkpoint" isn't referring to checkpoint in the sense of our consensus type, but checkpoint for our historical states.
func (s *State) ForceCheckpoint(ctx context.Context, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "stateGen.ForceCheckpoint")
defer span.End()
root32 := bytesutil.ToBytes32(root)
// Before the first finalized check point, the finalized root is zero hash.
// Return early if there hasn't been a finalized check point.
root32 := bytesutil.ToBytes32(blockRoot)
// Before the first finalized checkpoint, the finalized root is zero hash.
// Return early if there hasn't been a finalized checkpoint.
if root32 == params.BeaconConfig().ZeroHash {
return nil
}
@@ -58,11 +60,11 @@ func (s *State) saveStateByRoot(ctx context.Context, blockRoot [32]byte, st stat
s.saveHotStateDB.lock.Unlock()
return err
}
s.saveHotStateDB.savedStateRoots = append(s.saveHotStateDB.savedStateRoots, blockRoot)
s.saveHotStateDB.blockRootsOfSavedStates = append(s.saveHotStateDB.blockRootsOfSavedStates, blockRoot)
log.WithFields(logrus.Fields{
"slot": st.Slot(),
"totalHotStateSavedInDB": len(s.saveHotStateDB.savedStateRoots),
"totalHotStateSavedInDB": len(s.saveHotStateDB.blockRootsOfSavedStates),
}).Info("Saving hot state to DB")
}
s.saveHotStateDB.lock.Unlock()
@@ -72,14 +74,14 @@ func (s *State) saveStateByRoot(ctx context.Context, blockRoot [32]byte, st stat
return nil
}
// Only on an epoch boundary slot, saves epoch boundary state in epoch boundary root state cache.
// Only on an epoch boundary slot, save epoch boundary state in epoch boundary root state cache.
if slots.IsEpochStart(st.Slot()) {
if err := s.epochBoundaryStateCache.put(blockRoot, st); err != nil {
return err
}
}
// On an intermediate slots, save state summary.
// On an intermediate slot, save state summary.
if err := s.beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: st.Slot(),
Root: blockRoot[:],
@@ -121,15 +123,15 @@ func (s *State) DisableSaveHotStateToDB(ctx context.Context) error {
log.WithFields(logrus.Fields{
"enabled": s.saveHotStateDB.enabled,
"deletedHotStates": len(s.saveHotStateDB.savedStateRoots),
"deletedHotStates": len(s.saveHotStateDB.blockRootsOfSavedStates),
}).Warn("Exiting mode to save hot states in DB")
// Delete previously saved states in the DB as we are turning this mode off.
s.saveHotStateDB.enabled = false
if err := s.beaconDB.DeleteStates(ctx, s.saveHotStateDB.savedStateRoots); err != nil {
if err := s.beaconDB.DeleteStates(ctx, s.saveHotStateDB.blockRootsOfSavedStates); err != nil {
return err
}
s.saveHotStateDB.savedStateRoots = nil
s.saveHotStateDB.blockRootsOfSavedStates = nil
return nil
}
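The rename clarifies that the slice tracks block roots, not state roots. A hedged lifecycle sketch of the mode as a whole:

sg.EnableSaveHotStateToDB(ctx) // e.g. while the node is far behind head
// ...each hot state saved to the DB appends its block root to
// blockRootsOfSavedStates...
if err := sg.DisableSaveHotStateToDB(ctx); err != nil {
	return err // deletes the saved states and clears the slice
}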

View File

@@ -27,7 +27,7 @@ func TestSaveState_HotStateCanBeSaved(t *testing.T) {
require.NoError(t, service.SaveState(ctx, r, beaconState))
// Should save both state and state summary.
_, ok, err := service.epochBoundaryStateCache.getByRoot(r)
_, ok, err := service.epochBoundaryStateCache.getByBlockRoot(r)
require.NoError(t, err)
assert.Equal(t, true, ok, "Should have saved the state")
assert.Equal(t, true, service.beaconDB.HasStateSummary(ctx, r), "Should have saved the state summary")
@@ -105,7 +105,7 @@ func TestSaveState_CanSaveOnEpochBoundary(t *testing.T) {
require.NoError(t, service.saveStateByRoot(ctx, r, beaconState))
// Should save both state and state summary.
_, ok, err := service.epochBoundaryStateCache.getByRoot(r)
_, ok, err := service.epochBoundaryStateCache.getByBlockRoot(r)
require.NoError(t, err)
require.Equal(t, true, ok, "Did not save epoch boundary state")
assert.Equal(t, true, service.beaconDB.HasStateSummary(ctx, r), "Should have saved the state summary")
@@ -190,11 +190,11 @@ func TestEnableSaveHotStateToDB_Disabled(t *testing.T) {
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.saveHotStateDB.savedStateRoots = [][32]byte{r}
service.saveHotStateDB.blockRootsOfSavedStates = [][32]byte{r}
require.NoError(t, service.DisableSaveHotStateToDB(ctx))
require.LogsContain(t, hook, "Exiting mode to save hot states in DB")
require.Equal(t, false, service.saveHotStateDB.enabled)
require.Equal(t, 0, len(service.saveHotStateDB.savedStateRoots))
require.Equal(t, 0, len(service.saveHotStateDB.blockRootsOfSavedStates))
}
func TestEnableSaveHotStateToDB_AlreadyDisabled(t *testing.T) {

View File

@@ -66,13 +66,16 @@ go_test(
"readonly_validator_test.go",
"references_test.go",
"setters_attestation_test.go",
"state_fuzz_test.go",
"state_test.go",
"state_trie_test.go",
"types_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/testing:go_default_library",
"//beacon-chain/state/types:go_default_library",
@@ -80,6 +83,7 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/interop:go_default_library",

View File

@@ -174,7 +174,8 @@ func (b *BeaconState) NumValidators() int {
}
// ReadFromEveryValidator reads values from every validator and applies it to the provided function.
// Warning: This method is potentially unsafe, as it exposes the actual validator registry.
//
// WARNING: This method is potentially unsafe, as it exposes the actual validator registry.
func (b *BeaconState) ReadFromEveryValidator(f func(idx int, val state.ReadOnlyValidator) error) error {
if !b.hasInnerState() {
return ErrNilInnerState

View File

@@ -0,0 +1,88 @@
//go:build go1.18
// +build go1.18
package v1_test
import (
"context"
"testing"
coreState "github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
native "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/crypto/rand"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/util"
)
func FuzzV1StateHashTreeRoot(f *testing.F) {
gState, _ := util.DeterministicGenesisState(f, 100)
output, err := gState.MarshalSSZ()
assert.NoError(f, err)
randPool := make([]byte, 100)
_, err = rand.NewDeterministicGenerator().Read(randPool)
assert.NoError(f, err)
f.Add(randPool, uint64(10))
f.Fuzz(func(t *testing.T, diffBuffer []byte, slotsToTransition uint64) {
stateSSZ := bytesutil.SafeCopyBytes(output)
for i := 0; i < len(diffBuffer); i += 9 {
if i+8 >= len(diffBuffer) {
return
}
num := bytesutil.BytesToUint64BigEndian(diffBuffer[i : i+8])
num %= uint64(len(stateSSZ)) // reduce modulo the buffer being indexed, not the diff buffer
// Perform an XOR on the byte at the selected index.
stateSSZ[num] ^= diffBuffer[i+8]
}
pbState := &ethpb.BeaconState{}
err := pbState.UnmarshalSSZ(stateSSZ)
if err != nil {
return
}
nativeState, err := native.InitializeFromProtoPhase0(pbState)
assert.NoError(t, err)
slotsToTransition %= 100
stateObj, err := v1.InitializeFromProtoUnsafe(pbState)
assert.NoError(t, err)
for stateObj.Slot() < types.Slot(slotsToTransition) {
stateObj, err = coreState.ProcessSlots(context.Background(), stateObj, stateObj.Slot()+1)
assert.NoError(t, err)
stateObj.Copy()
nativeState, err = coreState.ProcessSlots(context.Background(), nativeState, nativeState.Slot()+1)
assert.NoError(t, err)
nativeState.Copy()
}
assert.NoError(t, err)
// Perform a cold HTR calculation by initializing a new state.
innerState, ok := stateObj.InnerStateUnsafe().(*ethpb.BeaconState)
assert.Equal(t, true, ok, "inner state is a not a beacon state proto")
newState, err := v1.InitializeFromProtoUnsafe(innerState)
assert.NoError(t, err)
newRt, newErr := newState.HashTreeRoot(context.Background())
rt, err := stateObj.HashTreeRoot(context.Background())
nativeRt, nativeErr := nativeState.HashTreeRoot(context.Background())
assert.Equal(t, newErr != nil, err != nil)
assert.Equal(t, newErr != nil, nativeErr != nil)
if err == nil {
assert.Equal(t, rt, newRt)
assert.Equal(t, rt, nativeRt)
}
newSSZ, newErr := newState.MarshalSSZ()
stateObjSSZ, err := stateObj.MarshalSSZ()
nativeSSZ, nativeErr := nativeState.MarshalSSZ()
assert.Equal(t, newErr != nil, err != nil)
assert.Equal(t, newErr != nil, nativeErr != nil)
if err == nil {
assert.DeepEqual(t, newSSZ, stateObjSSZ)
assert.DeepEqual(t, newSSZ, nativeSSZ)
}
})
}
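The mutation scheme above is shared by the v2 (Altair) and v3 (Bellatrix) targets later in this diff: the fuzzer-controlled diffBuffer is read in 9-byte records, where the first eight bytes pick an offset into the serialized state and the ninth is XORed into it. A minimal standalone sketch of that scheme, using encoding/binary in place of Prysm's bytesutil:

package sketch

import "encoding/binary"

// mutateSSZ applies one 9-byte record per iteration: bytes [i, i+8) select an
// offset (big endian, reduced modulo len(stateSSZ)), and byte i+8 is XORed
// into the byte at that offset.
func mutateSSZ(stateSSZ, diffBuffer []byte) {
	if len(stateSSZ) == 0 {
		return
	}
	for i := 0; i+8 < len(diffBuffer); i += 9 {
		idx := binary.BigEndian.Uint64(diffBuffer[i:i+8]) % uint64(len(stateSSZ))
		stateSSZ[idx] ^= diffBuffer[i+8]
	}
}

Mutated buffers that fail UnmarshalSSZ are skipped, so only structurally valid states reach the slot-transition and hash-tree-root comparisons.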

View File

@@ -213,6 +213,7 @@ func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
}
// Initializes the Merkle layers for the beacon state if they are empty.
//
// WARNING: Caller must acquire the mutex before using.
func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
if len(b.merkleLayers) > 0 {
@@ -229,6 +230,7 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
}
// Recomputes the Merkle layers for the dirty fields in the state.
//
// WARNING: Caller must acquire the mutex before using.
func (b *BeaconState) recomputeDirtyFields(ctx context.Context) error {
for field := range b.dirtyFields {

View File

@@ -66,11 +66,15 @@ go_test(
"proofs_test.go",
"references_test.go",
"setters_test.go",
"state_fuzz_test.go",
"state_trie_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/testing:go_default_library",
"//beacon-chain/state/types:go_default_library",
@@ -79,6 +83,7 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -175,7 +175,8 @@ func (b *BeaconState) NumValidators() int {
}
// ReadFromEveryValidator reads values from every validator and applies it to the provided function.
- // Warning: This method is potentially unsafe, as it exposes the actual validator registry.
+ //
+ // WARNING: This method is potentially unsafe, as it exposes the actual validator registry.
func (b *BeaconState) ReadFromEveryValidator(f func(idx int, val state.ReadOnlyValidator) error) error {
if !b.hasInnerState() {
return ErrNilInnerState

View File

@@ -0,0 +1,90 @@
//go:build go1.18
// +build go1.18
package v2_test
import (
"context"
"testing"
coreState "github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
native "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native"
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/crypto/rand"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/util"
)
func FuzzV2StateHashTreeRoot(f *testing.F) {
gState, _ := util.DeterministicGenesisStateAltair(f, 100)
output, err := gState.MarshalSSZ()
assert.NoError(f, err)
randPool := make([]byte, 100)
_, err = rand.NewDeterministicGenerator().Read(randPool)
assert.NoError(f, err)
f.Add(randPool, uint64(10))
f.Fuzz(func(t *testing.T, diffBuffer []byte, slotsToTransition uint64) {
stateSSZ := bytesutil.SafeCopyBytes(output)
for i := 0; i < len(diffBuffer); i += 9 {
if i+8 >= len(diffBuffer) {
return
}
num := bytesutil.BytesToUint64BigEndian(diffBuffer[i : i+8])
num %= uint64(len(stateSSZ)) // reduce modulo the buffer being indexed, not the diff buffer
// Perform an XOR on the byte at the selected index.
stateSSZ[num] ^= diffBuffer[i+8]
}
pbState := &ethpb.BeaconStateAltair{}
err := pbState.UnmarshalSSZ(stateSSZ)
if err != nil {
return
}
nativeState, err := native.InitializeFromProtoAltair(pbState)
if err != nil {
return
}
slotsToTransition %= 100
stateObj, err := v2.InitializeFromProtoUnsafe(pbState)
assert.NoError(t, err)
for stateObj.Slot() < types.Slot(slotsToTransition) {
stateObj, err = coreState.ProcessSlots(context.Background(), stateObj, stateObj.Slot()+1)
assert.NoError(t, err)
stateObj.Copy()
nativeState, err = coreState.ProcessSlots(context.Background(), nativeState, nativeState.Slot()+1)
assert.NoError(t, err)
nativeState.Copy()
}
assert.NoError(t, err)
// Perform a cold HTR calculation by initializing a new state.
innerState, ok := stateObj.InnerStateUnsafe().(*ethpb.BeaconStateAltair)
assert.Equal(t, true, ok, "inner state is a not a beacon state altair proto")
newState, err := v2.InitializeFromProtoUnsafe(innerState)
assert.NoError(t, err)
newRt, newErr := newState.HashTreeRoot(context.Background())
rt, err := stateObj.HashTreeRoot(context.Background())
nativeRt, nativeErr := nativeState.HashTreeRoot(context.Background())
assert.Equal(t, newErr != nil, err != nil)
assert.Equal(t, newErr != nil, nativeErr != nil)
if err == nil {
assert.Equal(t, rt, newRt)
assert.Equal(t, rt, nativeRt)
}
newSSZ, newErr := newState.MarshalSSZ()
stateObjSSZ, err := stateObj.MarshalSSZ()
nativeSSZ, nativeErr := nativeState.MarshalSSZ()
assert.Equal(t, newErr != nil, err != nil)
assert.Equal(t, newErr != nil, nativeErr != nil)
if err == nil {
assert.DeepEqual(t, newSSZ, stateObjSSZ)
assert.DeepEqual(t, newSSZ, nativeSSZ)
}
})
}

View File

@@ -218,6 +218,7 @@ func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
}
// Initializes the Merkle layers for the beacon state if they are empty.
//
// WARNING: Caller must acquire the mutex before using.
func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
if len(b.merkleLayers) > 0 {
@@ -234,6 +235,7 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
}
// Recomputes the Merkle layers for the dirty fields in the state.
//
// WARNING: Caller must acquire the mutex before using.
func (b *BeaconState) recomputeDirtyFields(ctx context.Context) error {
for field := range b.dirtyFields {

View File

@@ -0,0 +1,3 @@
go test fuzz v1
[]byte("")
uint64(117)
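The file above is a seed corpus entry in Go's native fuzzing format: after the go test fuzz v1 header, each line holds one Go literal, matching the f.Fuzz callback parameters in order. For the state targets earlier in this diff it is equivalent to:

f.Add([]byte(""), uint64(117)) // diffBuffer, slotsToTransition

i.e. a seed that applies no mutation and exercises a 17-slot transition (117 % 100).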

View File

@@ -68,11 +68,14 @@ go_test(
"proofs_test.go",
"references_test.go",
"setters_test.go",
"state_fuzz_test.go",
"state_trie_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/testing:go_default_library",
"//beacon-chain/state/types:go_default_library",
@@ -81,6 +84,7 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -175,7 +175,8 @@ func (b *BeaconState) NumValidators() int {
}
// ReadFromEveryValidator reads values from every validator and applies it to the provided function.
- // Warning: This method is potentially unsafe, as it exposes the actual validator registry.
+ //
+ // WARNING: This method is potentially unsafe, as it exposes the actual validator registry.
func (b *BeaconState) ReadFromEveryValidator(f func(idx int, val state.ReadOnlyValidator) error) error {
if !b.hasInnerState() {
return ErrNilInnerState

View File

@@ -0,0 +1,90 @@
//go:build go1.18
// +build go1.18
package v3_test
import (
"context"
"testing"
coreState "github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
native "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native"
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/crypto/rand"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/util"
)
func FuzzV3StateHashTreeRoot(f *testing.F) {
gState, _ := util.DeterministicGenesisStateBellatrix(f, 100)
output, err := gState.MarshalSSZ()
assert.NoError(f, err)
randPool := make([]byte, 100)
_, err = rand.NewDeterministicGenerator().Read(randPool)
assert.NoError(f, err)
f.Add(randPool, uint64(10))
f.Fuzz(func(t *testing.T, diffBuffer []byte, slotsToTransition uint64) {
stateSSZ := bytesutil.SafeCopyBytes(output)
for i := 0; i < len(diffBuffer); i += 9 {
if i+8 >= len(diffBuffer) {
return
}
num := bytesutil.BytesToUint64BigEndian(diffBuffer[i : i+8])
num %= uint64(len(stateSSZ)) // reduce modulo the buffer being indexed, not the diff buffer
// Perform an XOR on the byte at the selected index.
stateSSZ[num] ^= diffBuffer[i+8]
}
pbState := &ethpb.BeaconStateBellatrix{}
err := pbState.UnmarshalSSZ(stateSSZ)
if err != nil {
return
}
nativeState, err := native.InitializeFromProtoBellatrix(pbState)
if err != nil {
return
}
slotsToTransition %= 100
stateObj, err := v3.InitializeFromProtoUnsafe(pbState)
assert.NoError(t, err)
for stateObj.Slot() < types.Slot(slotsToTransition) {
stateObj, err = coreState.ProcessSlots(context.Background(), stateObj, stateObj.Slot()+1)
assert.NoError(t, err)
stateObj.Copy()
nativeState, err = coreState.ProcessSlots(context.Background(), nativeState, nativeState.Slot()+1)
assert.NoError(t, err)
nativeState.Copy()
}
assert.NoError(t, err)
// Perform a cold HTR calculation by initializing a new state.
innerState, ok := stateObj.InnerStateUnsafe().(*ethpb.BeaconStateBellatrix)
assert.Equal(t, true, ok, "inner state is a not a beacon state bellatrix proto")
newState, err := v3.InitializeFromProtoUnsafe(innerState)
assert.NoError(t, err)
newRt, newErr := newState.HashTreeRoot(context.Background())
rt, err := stateObj.HashTreeRoot(context.Background())
nativeRt, nativeErr := nativeState.HashTreeRoot(context.Background())
assert.Equal(t, newErr != nil, err != nil)
assert.Equal(t, newErr != nil, nativeErr != nil)
if err == nil {
assert.Equal(t, rt, newRt)
assert.Equal(t, rt, nativeRt)
}
newSSZ, newErr := newState.MarshalSSZ()
stateObjSSZ, err := stateObj.MarshalSSZ()
nativeSSZ, nativeErr := nativeState.MarshalSSZ()
assert.Equal(t, newErr != nil, err != nil)
assert.Equal(t, newErr != nil, nativeErr != nil)
if err == nil {
assert.DeepEqual(t, newSSZ, stateObjSSZ)
assert.DeepEqual(t, newSSZ, nativeSSZ)
}
})
}

View File

@@ -218,6 +218,7 @@ func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
}
// Initializes the Merkle layers for the beacon state if they are empty.
//
// WARNING: Caller must acquire the mutex before using.
func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
if len(b.merkleLayers) > 0 {
@@ -234,6 +235,7 @@ func (b *BeaconState) initializeMerkleLayers(ctx context.Context) error {
}
// Recomputes the Merkle layers for the dirty fields in the state.
//
// WARNING: Caller must acquire the mutex before using.
func (b *BeaconState) recomputeDirtyFields(_ context.Context) error {
for field := range b.dirtyFields {

View File

@@ -149,6 +149,7 @@ go_test(
"subscriber_beacon_blocks_test.go",
"subscriber_test.go",
"subscription_topic_handler_test.go",
"sync_fuzz_test.go",
"sync_test.go",
"utils_test.go",
"validate_aggregate_proof_test.go",

View File

@@ -0,0 +1,281 @@
//go:build go1.18
// +build go1.18
package sync
import (
"bytes"
"context"
"reflect"
"testing"
"time"
"github.com/libp2p/go-libp2p-core/peer"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pb "github.com/libp2p/go-libp2p-pubsub/pb"
gcache "github.com/patrickmn/go-cache"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func FuzzValidateBeaconBlockPubSub_Phase0(f *testing.F) {
db := dbtest.SetupDB(f)
p := p2ptest.NewFuzzTestP2P()
ctx := context.Background()
beaconState, privKeys := util.DeterministicGenesisState(f, 100)
parentBlock := util.NewBeaconBlock()
wsb, err := wrapper.WrappedSignedBeaconBlock(parentBlock)
require.NoError(f, err)
require.NoError(f, db.SaveBlock(ctx, wsb))
bRoot, err := parentBlock.Block.HashTreeRoot()
require.NoError(f, err)
require.NoError(f, db.SaveState(ctx, beaconState, bRoot))
require.NoError(f, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
copied := beaconState.Copy()
require.NoError(f, copied.SetSlot(1))
proposerIdx, err := helpers.BeaconProposerIndex(ctx, copied)
require.NoError(f, err)
msg := util.NewBeaconBlock()
msg.Block.ParentRoot = bRoot[:]
msg.Block.Slot = 1
msg.Block.ProposerIndex = proposerIdx
msg.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, msg.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
require.NoError(f, err)
stateGen := stategen.New(db)
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0),
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
},
DB: db,
}
r := &Service{
cfg: &config{
beaconDB: db,
p2p: p,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
blockNotifier: chainService.BlockNotifier(),
stateGen: stateGen,
},
seenBlockCache: lruwrpr.New(10),
badBlockCache: lruwrpr.New(10),
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
seenPendingBlocks: make(map[[32]byte]bool),
}
buf := new(bytes.Buffer)
_, err = p.Encoding().EncodeGossip(buf, msg)
require.NoError(f, err)
topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
digest, err := r.currentForkDigest()
assert.NoError(f, err)
topic = r.addDigestToTopic(topic, digest)
f.Add("junk", []byte("junk"), buf.Bytes(), []byte(topic))
f.Fuzz(func(t *testing.T, pid string, from, data, topic []byte) {
r.cfg.p2p = p2ptest.NewFuzzTestP2P()
r.rateLimiter = newRateLimiter(r.cfg.p2p)
cService := &mock.ChainService{
Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot*10000000), 0),
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
},
DB: db,
}
r.cfg.chain = cService
r.cfg.blockNotifier = cService.BlockNotifier()
strTop := string(topic)
msg := &pubsub.Message{
Message: &pb.Message{
From: from,
Data: data,
Topic: &strTop,
},
}
_, err := r.validateBeaconBlockPubSub(ctx, peer.ID(pid), msg)
_ = err
})
}
func FuzzValidateBeaconBlockPubSub_Altair(f *testing.F) {
db := dbtest.SetupDB(f)
p := p2ptest.NewFuzzTestP2P()
ctx := context.Background()
beaconState, privKeys := util.DeterministicGenesisStateAltair(f, 100)
parentBlock := util.NewBeaconBlockAltair()
wsb, err := wrapper.WrappedSignedBeaconBlock(parentBlock)
require.NoError(f, err)
require.NoError(f, db.SaveBlock(ctx, wsb))
bRoot, err := parentBlock.Block.HashTreeRoot()
require.NoError(f, err)
require.NoError(f, db.SaveState(ctx, beaconState, bRoot))
require.NoError(f, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
copied := beaconState.Copy()
require.NoError(f, copied.SetSlot(1))
proposerIdx, err := helpers.BeaconProposerIndex(ctx, copied)
require.NoError(f, err)
msg := util.NewBeaconBlockAltair() // match the Altair state and topic digest
msg.Block.ParentRoot = bRoot[:]
msg.Block.Slot = 1
msg.Block.ProposerIndex = proposerIdx
msg.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, msg.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
require.NoError(f, err)
stateGen := stategen.New(db)
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0),
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
},
DB: db,
}
r := &Service{
cfg: &config{
beaconDB: db,
p2p: p,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
blockNotifier: chainService.BlockNotifier(),
stateGen: stateGen,
},
seenBlockCache: lruwrpr.New(10),
badBlockCache: lruwrpr.New(10),
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
seenPendingBlocks: make(map[[32]byte]bool),
}
buf := new(bytes.Buffer)
_, err = p.Encoding().EncodeGossip(buf, msg)
require.NoError(f, err)
topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
digest, err := r.currentForkDigest()
assert.NoError(f, err)
topic = r.addDigestToTopic(topic, digest)
f.Add("junk", []byte("junk"), buf.Bytes(), []byte(topic))
f.Fuzz(func(t *testing.T, pid string, from, data, topic []byte) {
r.cfg.p2p = p2ptest.NewFuzzTestP2P()
r.rateLimiter = newRateLimiter(r.cfg.p2p)
cService := &mock.ChainService{
Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot*10000000), 0),
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
},
DB: db,
}
r.cfg.chain = cService
r.cfg.blockNotifier = cService.BlockNotifier()
strTop := string(topic)
msg := &pubsub.Message{
Message: &pb.Message{
From: from,
Data: data,
Topic: &strTop,
},
}
_, err := r.validateBeaconBlockPubSub(ctx, peer.ID(pid), msg)
_ = err
})
}
func FuzzValidateBeaconBlockPubSub_Bellatrix(f *testing.F) {
db := dbtest.SetupDB(f)
p := p2ptest.NewFuzzTestP2P()
ctx := context.Background()
beaconState, privKeys := util.DeterministicGenesisStateBellatrix(f, 100)
parentBlock := util.NewBeaconBlockBellatrix()
wsb, err := wrapper.WrappedSignedBeaconBlock(parentBlock)
require.NoError(f, err)
require.NoError(f, db.SaveBlock(ctx, wsb))
bRoot, err := parentBlock.Block.HashTreeRoot()
require.NoError(f, err)
require.NoError(f, db.SaveState(ctx, beaconState, bRoot))
require.NoError(f, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
copied := beaconState.Copy()
require.NoError(f, copied.SetSlot(1))
proposerIdx, err := helpers.BeaconProposerIndex(ctx, copied)
require.NoError(f, err)
msg := util.NewBeaconBlockBellatrix() // match the Bellatrix state and topic digest
msg.Block.ParentRoot = bRoot[:]
msg.Block.Slot = 1
msg.Block.ProposerIndex = proposerIdx
msg.Signature, err = signing.ComputeDomainAndSign(beaconState, 0, msg.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
require.NoError(f, err)
stateGen := stategen.New(db)
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0),
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
},
DB: db,
}
r := &Service{
cfg: &config{
beaconDB: db,
p2p: p,
initialSync: &mockSync.Sync{IsSyncing: false},
chain: chainService,
blockNotifier: chainService.BlockNotifier(),
stateGen: stateGen,
},
seenBlockCache: lruwrpr.New(10),
badBlockCache: lruwrpr.New(10),
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
seenPendingBlocks: make(map[[32]byte]bool),
}
buf := new(bytes.Buffer)
_, err = p.Encoding().EncodeGossip(buf, msg)
require.NoError(f, err)
topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
digest, err := r.currentForkDigest()
assert.NoError(f, err)
topic = r.addDigestToTopic(topic, digest)
f.Add("junk", []byte("junk"), buf.Bytes(), []byte(topic))
f.Fuzz(func(t *testing.T, pid string, from, data, topic []byte) {
r.cfg.p2p = p2ptest.NewFuzzTestP2P()
r.rateLimiter = newRateLimiter(r.cfg.p2p)
cService := &mock.ChainService{
Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot*10000000), 0),
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
},
DB: db,
}
r.cfg.chain = cService
r.cfg.blockNotifier = cService.BlockNotifier()
strTop := string(topic)
msg := &pubsub.Message{
Message: &pb.Message{
From: from,
Data: data,
Topic: &strTop,
},
}
_, err := r.validateBeaconBlockPubSub(ctx, peer.ID(pid), msg)
_ = err
})
}
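All three targets share the same per-iteration flow, condensed below with explanatory comments (a sketch; the surrounding setup is as in the functions above):

f.Fuzz(func(t *testing.T, pid string, from, data, topic []byte) {
	r.cfg.p2p = p2ptest.NewFuzzTestP2P()      // fresh fuzzing transport per run
	r.rateLimiter = newRateLimiter(r.cfg.p2p) // reset rate-limiter state
	strTop := string(topic)
	msg := &pubsub.Message{Message: &pb.Message{From: from, Data: data, Topic: &strTop}}
	_, err := r.validateBeaconBlockPubSub(ctx, peer.ID(pid), msg)
	_ = err // any verdict is acceptable; the target only asserts that validation does not panic
})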

View File

@@ -25,6 +25,7 @@ go_library(
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//params:go_default_library",
"@com_github_mohae_deepcopy//:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -4,6 +4,7 @@ import (
"math"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
)
@@ -75,7 +76,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
FarFutureSlot: math.MaxUint64,
BaseRewardsPerEpoch: 4,
DepositContractTreeDepth: 32,
- GenesisDelay: 604800, // 1 week.
+ GenesisDelay: 300, // 5 minutes.
// Misc constant.
TargetCommitteeSize: 128,
@@ -84,8 +85,8 @@ var mainnetBeaconConfig = &BeaconChainConfig{
MinPerEpochChurnLimit: 4,
ChurnLimitQuotient: 1 << 16,
ShuffleRoundCount: 90,
- MinGenesisActiveValidatorCount: 16384,
- MinGenesisTime: 1606824000, // Dec 1, 2020, 12pm UTC.
+ MinGenesisActiveValidatorCount: 95000,
+ MinGenesisTime: 1647007200, // Mar 11, 2022, 2pm UTC.
TargetAggregatorsPerCommittee: 16,
HysteresisQuotient: 4,
HysteresisDownwardMultiplier: 1,
@@ -113,7 +114,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
MinValidatorWithdrawabilityDelay: 256,
ShardCommitteePeriod: 256,
MinEpochsToInactivityPenalty: 4,
- Eth1FollowDistance: 2048,
+ Eth1FollowDistance: 16,
SafeSlotsToUpdateJustified: 8,
// Fork choice algorithm constants.
@@ -121,9 +122,9 @@ var mainnetBeaconConfig = &BeaconChainConfig{
IntervalsPerSlot: 3,
// Ethereum PoW parameters.
- DepositChainID: 1, // Chain ID of eth1 mainnet.
- DepositNetworkID: 1, // Network ID of eth1 mainnet.
- DepositContractAddress: "0x00000000219ab540356cBB839Cbe05303d7705Fa",
+ DepositChainID: 1337802, // Chain ID of the Kiln testnet.
+ DepositNetworkID: 1337802, // Network ID of the Kiln testnet.
+ DepositContractAddress: "0x4242424242424242424242424242424242424242",
// Validator params.
RandomSubnetsPerValidator: 1 << 0,
@@ -183,7 +184,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
MaxPeersToSync: 15,
SlotsPerArchivedPoint: 2048,
GenesisCountdownInterval: time.Minute,
- ConfigName: MainnetName,
+ ConfigName: "kiln",
PresetBase: "mainnet",
BeaconStateFieldCount: 21,
BeaconStateAltairFieldCount: 24,
@@ -199,11 +200,11 @@ var mainnetBeaconConfig = &BeaconChainConfig{
// Fork related values.
GenesisEpoch: genesisForkEpoch,
- GenesisForkVersion: []byte{0, 0, 0, 0},
- AltairForkVersion: []byte{1, 0, 0, 0},
- AltairForkEpoch: mainnetAltairForkEpoch,
- BellatrixForkVersion: []byte{2, 0, 0, 0},
- BellatrixForkEpoch: mainnetBellatrixForkEpoch,
+ GenesisForkVersion: hexutil.MustDecode("0x70000069"),
+ AltairForkVersion: hexutil.MustDecode("0x70000070"),
+ AltairForkEpoch: 50,
+ BellatrixForkVersion: hexutil.MustDecode("0x70000071"),
+ BellatrixForkEpoch: 150,
ShardingForkVersion: []byte{3, 0, 0, 0},
ShardingForkEpoch: math.MaxUint64,
@@ -245,5 +246,5 @@ var mainnetBeaconConfig = &BeaconChainConfig{
// Bellatrix
TerminalBlockHashActivationEpoch: 18446744073709551615,
TerminalBlockHash: [32]byte{},
TerminalTotalDifficulty: "115792089237316195423570985008687907853269984665640564039457584007913129638912",
TerminalTotalDifficulty: "20000000000000",
}
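These edits overwrite mainnetBeaconConfig in place with Kiln testnet values. A less invasive pattern, sketched here with the clone-and-override helpers the params package exposes (assumed from the package API, not shown in this diff), keeps the mainnet table intact:

cfg := params.MainnetConfig().Copy()
cfg.ConfigName = "kiln"
cfg.GenesisDelay = 300
cfg.MinGenesisActiveValidatorCount = 95000
cfg.DepositChainID = 1337802   // Kiln chain ID
cfg.DepositNetworkID = 1337802 // Kiln network ID
cfg.TerminalTotalDifficulty = "20000000000000"
params.OverrideBeaconConfig(cfg)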

View File

@@ -22,7 +22,8 @@ func UseE2EMainnetConfig() {
}
// E2ETestConfig retrieves the configurations made specifically for E2E testing.
- // Warning: This config is only for testing, it is not meant for use outside of E2E.
+ //
+ // WARNING: This config is only for testing, it is not meant for use outside of E2E.
func E2ETestConfig() *BeaconChainConfig {
e2eConfig := MinimalSpecConfig()

View File

@@ -19,7 +19,11 @@ go_library(
go_test(
name = "go_default_test",
size = "small",
srcs = ["sparse_merkle_test.go"],
srcs = [
"sparse_merkle_test.go",
"sparse_merkle_trie_fuzz_test.go",
],
data = glob(["testdata/**"]),
deps = [
":go_default_library",
"//config/fieldparams:go_default_library",
@@ -31,5 +35,6 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//accounts/abi/bind:go_default_library",
"@com_github_golang_protobuf//proto:go_default_library",
],
)

View File

@@ -81,7 +81,11 @@ func (m *SparseMerkleTrie) Items() [][]byte {
// HashTreeRoot of the Merkle trie as defined in the deposit contract.
// Spec Definition:
// sha256(concat(node, self.to_little_endian_64(self.deposit_count), slice(zero_bytes32, start=0, len=24)))
- func (m *SparseMerkleTrie) HashTreeRoot() [32]byte {
+ func (m *SparseMerkleTrie) HashTreeRoot() ([32]byte, error) {
+ if len(m.branches) == 0 || len(m.branches[len(m.branches)-1]) == 0 {
+ return [32]byte{}, errors.New("invalid branches provided to compute root")
+ }
enc := [32]byte{}
depositCount := uint64(len(m.originalItems))
if len(m.originalItems) == 1 && bytes.Equal(m.originalItems[0], ZeroHashes[0][:]) {
@@ -89,7 +93,7 @@ func (m *SparseMerkleTrie) HashTreeRoot() [32]byte {
depositCount = 0
}
binary.LittleEndian.PutUint64(enc[:], depositCount)
- return hash.Hash(append(m.branches[len(m.branches)-1][0], enc[:]...))
+ return hash.Hash(append(m.branches[len(m.branches)-1][0], enc[:]...)), nil
}
// Insert an item into the trie.
@@ -97,6 +101,12 @@ func (m *SparseMerkleTrie) Insert(item []byte, index int) error {
if index < 0 {
return fmt.Errorf("negative index provided: %d", index)
}
if len(m.branches) == 0 {
return errors.New("invalid trie: no branches")
}
if m.depth > uint(len(m.branches)) {
return errors.New("invalid trie: depth is greater than number of branches")
}
for index >= len(m.branches[0]) {
m.branches[0] = append(m.branches[0], ZeroHashes[0][:])
}
@@ -143,11 +153,17 @@ func (m *SparseMerkleTrie) MerkleProof(index int) ([][]byte, error) {
if index < 0 {
return nil, fmt.Errorf("merkle index is negative: %d", index)
}
- merkleIndex := uint(index)
if len(m.branches) == 0 {
return nil, errors.New("invalid trie: no branches")
}
leaves := m.branches[0]
if index >= len(leaves) {
return nil, fmt.Errorf("merkle index out of range in trie, max range: %d, received: %d", len(leaves), index)
}
if m.depth > uint(len(m.branches)) {
return nil, errors.New("invalid trie: depth is greater than number of branches")
}
+ merkleIndex := uint(index)
proof := make([][]byte, m.depth+1)
for i := uint(0); i < m.depth; i++ {
subIndex := (merkleIndex / (1 << i)) ^ 1
@@ -185,6 +201,9 @@ func VerifyMerkleProofWithDepth(root, item []byte, merkleIndex uint64, proof [][
if uint64(len(proof)) != depth+1 {
return false
}
if depth >= 64 {
return false // PowerOf2 would overflow.
}
node := bytesutil.ToBytes32(item)
for i := uint64(0); i <= depth; i++ {
if (merkleIndex / math.PowerOf2(i) % 2) != 0 {
@@ -200,15 +219,10 @@ func VerifyMerkleProofWithDepth(root, item []byte, merkleIndex uint64, proof [][
// VerifyMerkleProof given a trie root, a leaf, the generalized merkle index
// of the leaf in the trie, and the proof itself.
func VerifyMerkleProof(root, item []byte, merkleIndex uint64, proof [][]byte) bool {
- node := bytesutil.ToBytes32(item)
- for i := 0; i < len(proof); i++ {
- if (merkleIndex / math.PowerOf2(uint64(i)) % 2) != 0 {
- node = hash.Hash(append(proof[i], node[:]...))
- } else {
- node = hash.Hash(append(node[:], proof[i]...))
- }
+ if len(proof) == 0 {
+ return false
+ }
}
- return bytes.Equal(root, node[:])
+ return VerifyMerkleProofWithDepth(root, item, merkleIndex, proof, uint64(len(proof)-1))
}
// Copy performs a deep copy of the trie.
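With this change the wrapper infers the depth from the proof itself, since MerkleProof always returns depth+1 elements. A usage sketch with the names from the tests that follow:

proof, err := m.MerkleProof(0) // depth+1 elements
// handle err
root, err := m.HashTreeRoot() // now also returns an error for malformed tries
// handle err
ok := trie.VerifyMerkleProof(root[:], items[0], 0, proof) // depth inferred as uint64(len(proof)-1)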

View File

@@ -79,7 +79,9 @@ func TestMerkleTrieRoot_EmptyTrie(t *testing.T) {
depRoot, err := testAccount.Contract.GetDepositRoot(&bind.CallOpts{})
require.NoError(t, err)
- require.DeepEqual(t, depRoot, trie.HashTreeRoot())
+ root, err := trie.HashTreeRoot()
+ require.NoError(t, err)
+ require.DeepEqual(t, depRoot, root)
}
func TestGenerateTrieFromItems_NoItemsProvided(t *testing.T) {
@@ -104,7 +106,8 @@ func TestMerkleTrie_VerifyMerkleProofWithDepth(t *testing.T) {
proof, err := m.MerkleProof(0)
require.NoError(t, err)
require.Equal(t, int(params.BeaconConfig().DepositContractTreeDepth)+1, len(proof))
- root := m.HashTreeRoot()
+ root, err := m.HashTreeRoot()
+ require.NoError(t, err)
if ok := trie.VerifyMerkleProofWithDepth(root[:], items[0], 0, proof, params.BeaconConfig().DepositContractTreeDepth); !ok {
t.Error("First Merkle proof did not verify")
}
@@ -130,7 +133,8 @@ func TestMerkleTrie_VerifyMerkleProof(t *testing.T) {
proof, err := m.MerkleProof(0)
require.NoError(t, err)
require.Equal(t, int(params.BeaconConfig().DepositContractTreeDepth)+1, len(proof))
- root := m.HashTreeRoot()
+ root, err := m.HashTreeRoot()
+ require.NoError(t, err)
if ok := trie.VerifyMerkleProof(root[:], items[0], 0, proof); !ok {
t.Error("First Merkle proof did not verify")
}
@@ -170,14 +174,16 @@ func TestMerkleTrie_VerifyMerkleProof_TrieUpdated(t *testing.T) {
require.NoError(t, err)
proof, err := m.MerkleProof(0)
require.NoError(t, err)
- root := m.HashTreeRoot()
+ root, err := m.HashTreeRoot()
+ require.NoError(t, err)
require.Equal(t, true, trie.VerifyMerkleProofWithDepth(root[:], items[0], 0, proof, depth))
// Now we update the trie.
assert.NoError(t, m.Insert([]byte{5}, 3))
proof, err = m.MerkleProof(3)
require.NoError(t, err)
- root = m.HashTreeRoot()
+ root, err = m.HashTreeRoot()
+ require.NoError(t, err)
if ok := trie.VerifyMerkleProofWithDepth(root[:], []byte{5}, 3, proof, depth); !ok {
t.Error("Second Merkle proof did not verify")
}
@@ -200,10 +206,13 @@ func TestRoundtripProto_OK(t *testing.T) {
require.NoError(t, err)
protoTrie := m.ToProto()
- depositRoot := m.HashTreeRoot()
+ depositRoot, err := m.HashTreeRoot()
+ require.NoError(t, err)
newTrie := trie.CreateTrieFromProto(protoTrie)
- require.DeepEqual(t, depositRoot, newTrie.HashTreeRoot())
+ root, err := newTrie.HashTreeRoot()
+ require.NoError(t, err)
+ require.DeepEqual(t, depositRoot, root)
}
@@ -221,8 +230,11 @@ func TestCopy_OK(t *testing.T) {
if copiedTrie == source {
t.Errorf("Original trie returned.")
}
- copyHash := copiedTrie.HashTreeRoot()
- require.DeepEqual(t, copyHash, copiedTrie.HashTreeRoot())
+ a, err := copiedTrie.HashTreeRoot()
+ require.NoError(t, err)
+ b, err := source.HashTreeRoot()
+ require.NoError(t, err)
+ require.DeepEqual(t, a, b)
}
func BenchmarkGenerateTrieFromItems(b *testing.B) {
@@ -296,7 +308,8 @@ func BenchmarkVerifyMerkleProofWithDepth(b *testing.B) {
proof, err := m.MerkleProof(2)
require.NoError(b, err)
- root := m.HashTreeRoot()
+ root, err := m.HashTreeRoot()
+ require.NoError(b, err)
b.StartTimer()
for i := 0; i < b.N; i++ {
if ok := trie.VerifyMerkleProofWithDepth(root[:], items[2], 2, proof, params.BeaconConfig().DepositContractTreeDepth); !ok {

View File

@@ -0,0 +1,148 @@
package trie_test
import (
"testing"
"github.com/golang/protobuf/proto"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/trie"
"github.com/prysmaticlabs/prysm/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func FuzzSparseMerkleTrie_HashTreeRoot(f *testing.F) {
h := hash.Hash([]byte("hi"))
pb := &ethpb.SparseMerkleTrie{
Layers: []*ethpb.TrieLayer{
{
Layer: [][]byte{h[:]},
},
{
Layer: [][]byte{h[:]},
},
{
Layer: [][]byte{},
},
},
Depth: 4,
}
b, err := proto.Marshal(pb)
require.NoError(f, err)
f.Add(b)
f.Fuzz(func(t *testing.T, b []byte) {
pb := &ethpb.SparseMerkleTrie{}
if err := proto.Unmarshal(b, pb); err != nil {
return
}
_, err := trie.CreateTrieFromProto(pb).HashTreeRoot()
if err != nil {
return
}
})
}
func FuzzSparseMerkleTrie_MerkleProof(f *testing.F) {
h := hash.Hash([]byte("hi"))
pb := &ethpb.SparseMerkleTrie{
Layers: []*ethpb.TrieLayer{
{
Layer: [][]byte{h[:]},
},
{
Layer: [][]byte{h[:]},
},
{
Layer: [][]byte{},
},
},
Depth: 4,
}
b, err := proto.Marshal(pb)
require.NoError(f, err)
f.Add(b, 0)
f.Fuzz(func(t *testing.T, b []byte, i int) {
pb := &ethpb.SparseMerkleTrie{}
if err := proto.Unmarshal(b, pb); err != nil {
return
}
_, err := trie.CreateTrieFromProto(pb).MerkleProof(i)
if err != nil {
return
}
})
}
func FuzzSparseMerkleTrie_Insert(f *testing.F) {
h := hash.Hash([]byte("hi"))
pb := &ethpb.SparseMerkleTrie{
Layers: []*ethpb.TrieLayer{
{
Layer: [][]byte{h[:]},
},
{
Layer: [][]byte{h[:]},
},
{
Layer: [][]byte{},
},
},
Depth: 4,
}
b, err := proto.Marshal(pb)
require.NoError(f, err)
f.Add(b, []byte{}, 0)
f.Fuzz(func(t *testing.T, b, item []byte, i int) {
pb := &ethpb.SparseMerkleTrie{}
if err := proto.Unmarshal(b, pb); err != nil {
return
}
if err := trie.CreateTrieFromProto(pb).Insert(item, i); err != nil {
return
}
})
}
func FuzzSparseMerkleTrie_VerifyMerkleProofWithDepth(f *testing.F) {
splitProofs := func(proofRaw []byte) [][]byte {
var proofs [][]byte
for i := 0; i < len(proofRaw); i += 32 {
end := i + 32
if end > len(proofRaw) {
end = len(proofRaw) // keep the full final chunk; trimming the last byte would corrupt an otherwise valid proof
}
proofs = append(proofs, proofRaw[i:end])
}
return proofs
}
items := [][]byte{
[]byte("A"),
[]byte("B"),
[]byte("C"),
[]byte("D"),
[]byte("E"),
[]byte("F"),
[]byte("G"),
[]byte("H"),
}
m, err := trie.GenerateTrieFromItems(items, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(f, err)
proof, err := m.MerkleProof(0)
require.NoError(f, err)
require.Equal(f, int(params.BeaconConfig().DepositContractTreeDepth)+1, len(proof))
root, err := m.HashTreeRoot()
require.NoError(f, err)
var proofRaw []byte
for _, p := range proof {
proofRaw = append(proofRaw, p...)
}
f.Add(root[:], items[0], uint64(0), proofRaw, params.BeaconConfig().DepositContractTreeDepth)
f.Fuzz(func(t *testing.T, root, item []byte, merkleIndex uint64, proofRaw []byte, depth uint64) {
trie.VerifyMerkleProofWithDepth(root, item, merkleIndex, splitProofs(proofRaw), depth)
})
}
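These targets pair with the defensive checks added to the trie earlier in this diff. For example, a proto trie with no layers previously panicked inside HashTreeRoot; a minimal reproduction sketch:

empty := &ethpb.SparseMerkleTrie{Layers: nil, Depth: 4}
_, err := trie.CreateTrieFromProto(empty).HashTreeRoot()
// Before the "invalid branches" guard this evaluated m.branches[len(m.branches)-1]
// with zero branches and panicked; now err is non-nil and the fuzz iteration returns.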

View File

@@ -0,0 +1,4 @@
go test fuzz v1
[]byte("")
[]byte("0")
int(41)

View File

@@ -0,0 +1,3 @@
go test fuzz v1
[]byte("")
int(63)

Some files were not shown because too many files have changed in this diff.