Compare commits

16 Commits

Author SHA1 Message Date
Taranpreet26311  efcfbbf027  Test change e2e skip true  2022-05-24 23:56:45 +05:30
Taranpreet26311  bbfa665f4c  fixing  2022-05-24 23:48:54 +05:30
Taranpreet26311  d3f934908d  Add e2e skip change  2022-05-24 23:44:08 +05:30
Kasey Kirkham  f44b0fbf84  Merge branch 'develop' into sync-from-finalized  2022-05-24 11:25:58 -05:00
Kasey Kirkham  db492b5281  lint  2022-05-24 09:37:46 -05:00
Kasey Kirkham  e15b95cc09  include conn index in log for debugging  2022-05-24 09:25:54 -05:00
Kasey Kirkham  29ffa6a9e5  Merge branch 'develop' into sync-from-finalized  2022-05-24 09:15:19 -05:00
Kasey Kirkham  fabbd0e4e1  wip: pushing for CI  2022-05-24 08:04:07 -05:00
Kasey Kirkham  5843ce8eca  add TestCheckpointSync option  2022-05-23 20:30:12 -05:00
Kasey Kirkham  aec3fac787  functional opts for the minimal e2e  2022-05-23 15:00:18 -05:00
kasey  0bfa880013  Merge branch 'develop' into sync-from-finalized  2022-05-23 12:41:47 -05:00
Kasey Kirkham  e3ec2d3ec1  Merge branch 'develop' into sync-from-finalized  2022-05-20 17:35:37 -05:00
Kasey Kirkham  aa06aa24ef  gofmt  2022-05-20 17:35:07 -05:00
Kasey Kirkham  a8d45f03c5  happy path test for sync-from-finalized  2022-05-20 15:12:17 -05:00
Kasey Kirkham  6b600d086b  Merge branch 'develop' into sync-from-finalized  2022-05-20 13:58:58 -05:00
Kasey Kirkham  1ca93b9e48  checkpoint sync use finalized state+block instead of finding the block at the beginning of the weak subjectivity epoch.  2022-05-20 08:07:01 -05:00
145 changed files with 1147 additions and 4412 deletions

View File

@@ -44,3 +44,6 @@ build --flaky_test_attempts=5
# Better caching
build:nostamp --nostamp
build:nostamp --workspace_status_command=./hack/workspace_status_ci.sh
# E2E test config, to add this --config=e2e
test:e2e --test_keep_going=true

View File

@@ -89,10 +89,10 @@ jobs:
# Use blst tag to allow go and bazel builds for blst.
run: go build -v ./...
# fuzz leverage go tag based stubs at compile time.
# Building and testing with these tags should be checked and enforced at pre-submit.
- name: Test for fuzzing
run: go test -tags=fuzz,develop ./... -test.run=^Fuzz
# fuzz and blst_disabled leverage go tag based stubs at compile time.
# Building with these tags should be checked and enforced at pre-submit.
- name: Build for fuzzing
run: go build -tags=fuzz,blst_disabled ./...
# Tests run via Bazel for now...
# - name: Test
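
For context on the "go tag based stubs" mentioned in the workflow comments above, here is a minimal, hypothetical sketch (package and function names invented, not taken from the Prysm tree) of a file gated on the blst_disabled tag; the real implementation would live in a sibling file carrying the opposite constraint (//go:build !blst_disabled):

//go:build blst_disabled

// Package blsstub is a hypothetical example of a tag-gated stub: when the
// build runs with -tags=fuzz,blst_disabled (as in the step above), this file
// is compiled instead of the real BLS bindings.
package blsstub

// Sign is a no-op stand-in so fuzz builds do not link the native BLS library.
func Sign(msg []byte) []byte { return nil }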

View File

@@ -231,7 +231,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "9c93f87378aaa6d6fe1c67b396eac2aacc9594af2a83f028cb99c95dea5b81df",
sha256 = "57120e8e2d5aaec956fc6a25ddc58fae2477f5b3ac7789174cf5ac1106dcc151",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -247,7 +247,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "52f2c52415228cee8a4de5a09abff785f439a77dfef8f03e834e4e16857673c1",
sha256 = "aa46676b26c173274ec8ea8756ae3072474b73ef7ccc7414d4026884810d8de2",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -263,7 +263,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "022dcc0d6de7dd27b337a0d1b945077eaf5ee47000700395a693fc25e12f96df",
sha256 = "d7dba93110cf35d9575ce21af6b7c3989f4aba621a9749bc090bca216e0345f7",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
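
The three WORKSPACE hunks above only bump the sha256 pins for the consensus-spec-tests archives to match a new release. As a hedged aside, a pin like this can be recomputed from a downloaded archive with a few lines of Go (the file path below is a placeholder):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	// Placeholder path; point this at the downloaded general/minimal/mainnet tarball.
	f, err := os.Open("general.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		log.Fatal(err)
	}
	// Hex digest in the form the sha256 attribute above expects.
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}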

View File

@@ -27,8 +27,6 @@ var (
errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
errNotDescendantOfFinalized = invalidBlock{errors.New("not descendant of finalized checkpoint")}
)
// An invalid block is the block that fails state transition based on the core protocol rules.

View File

@@ -9,7 +9,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -108,16 +107,16 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,
// This saves head info to the local service cache, it also saves the
// new head root to the DB.
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.SignedBeaconBlock, headState state.BeaconState) error {
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock interfaces.SignedBeaconBlock, headState state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
defer span.End()
// Do nothing if head hasn't changed.
oldHeadroot, err := s.HeadRoot(ctx)
r, err := s.HeadRoot(ctx)
if err != nil {
return err
}
if newHeadRoot == bytesutil.ToBytes32(oldHeadroot) {
if headRoot == bytesutil.ToBytes32(r) {
return nil
}
if err := wrapper.BeaconBlockIsNil(headBlock); err != nil {
@@ -129,19 +128,17 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
// If the head state is not available, just return nil.
// There's nothing to cache
if !s.cfg.BeaconDB.HasStateSummary(ctx, newHeadRoot) {
if !s.cfg.BeaconDB.HasStateSummary(ctx, headRoot) {
return nil
}
// A chain re-org occurred, so we fire an event notifying the rest of the services.
s.headLock.RLock()
oldHeadRoot := s.headRoot()
oldStateRoot := s.headBlock().Block().StateRoot()
s.headLock.RUnlock()
headSlot := s.HeadSlot()
newHeadSlot := headBlock.Block().Slot()
oldHeadRoot := s.headRoot()
oldStateRoot := s.headBlock().Block().StateRoot()
newStateRoot := headBlock.Block().StateRoot()
if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != bytesutil.ToBytes32(oldHeadroot) {
if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
log.WithFields(logrus.Fields{
"newSlot": fmt.Sprintf("%d", newHeadSlot),
"oldSlot": fmt.Sprintf("%d", headSlot),
@@ -157,7 +154,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
Slot: newHeadSlot,
Depth: absoluteSlotDifference,
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: newHeadRoot[:],
NewHeadBlock: headRoot[:],
OldHeadState: oldStateRoot,
NewHeadState: newStateRoot,
Epoch: slots.ToEpoch(newHeadSlot),
@@ -165,24 +162,25 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
},
})
if err := s.saveOrphanedAtts(ctx, bytesutil.ToBytes32(oldHeadroot), newHeadRoot); err != nil {
if err := s.saveOrphanedAtts(ctx, bytesutil.ToBytes32(r)); err != nil {
return err
}
reorgCount.Inc()
}
// Cache the new head info.
s.setHead(newHeadRoot, headBlock, headState)
s.setHead(headRoot, headBlock, headState)
// Save the new head root to DB.
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, newHeadRoot); err != nil {
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
return errors.Wrap(err, "could not save head root in DB")
}
// Forward an event capturing a new chain head over a common event feed
// done in a goroutine to avoid blocking the critical runtime main routine.
go func() {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot, newHeadRoot[:]); err != nil {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot, headRoot[:]); err != nil {
log.WithError(err).Error("Could not notify event feed of new chain head")
}
}()
@@ -355,48 +353,35 @@ func (s *Service) notifyNewHeadEvent(
return nil
}
// This saves the attestations between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
// It also filters out the attestations that is one epoch older as a defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
commonAncestorRoot, err := s.ForkChoicer().CommonAncestorRoot(ctx, newHeadRoot, orphanedRoot)
switch {
// Exit early if there's no common ancestor as there would be nothing to save.
case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):
return nil
case err != nil:
// This saves the attestations inside the beacon block with respect to root `orphanedRoot` back into the
// attestation pool. It also filters out the attestations that is one epoch older as a
// defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte) error {
orphanedBlk, err := s.getBlock(ctx, orphanedRoot)
if err != nil {
return err
}
for orphanedRoot != commonAncestorRoot {
if ctx.Err() != nil {
return ctx.Err()
}
orphanedBlk, err := s.getBlock(ctx, orphanedRoot)
if err != nil {
return err
}
// If the block is an epoch older, break out of the loop since we can't include atts anyway.
// This prevents stuck within this for loop longer than necessary.
if orphanedBlk.Block().Slot()+params.BeaconConfig().SlotsPerEpoch <= s.CurrentSlot() {
break
}
for _, a := range orphanedBlk.Block().Body().Attestations() {
// if the attestation is one epoch older, it wouldn't been useful to save it.
if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
continue
}
if helpers.IsAggregated(a) {
if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
return err
}
} else {
if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
return err
}
}
saveOrphanedAttCount.Inc()
}
orphanedRoot = bytesutil.ToBytes32(orphanedBlk.Block().ParentRoot())
if orphanedBlk == nil || orphanedBlk.IsNil() {
return errors.New("orphaned block can't be nil")
}
for _, a := range orphanedBlk.Block().Body().Attestations() {
// Is the attestation one epoch older.
if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
continue
}
if helpers.IsAggregated(a) {
if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
return err
}
} else {
if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
return err
}
}
saveOrphanedAttCount.Inc()
}
return nil
}
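
One behavioral detail worth noting in the reworked saveOrphanedAtts above: only attestations from within the last epoch are returned to the pool. A minimal hedged sketch of that cutoff (the helper name is invented here; the params and types packages are the ones imported elsewhere in this diff):

package blockchain

import (
	"github.com/prysmaticlabs/prysm/config/params"
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
)

// attStillUseful mirrors the age check above: an attestation whose slot is
// more than one epoch behind the current slot is skipped rather than re-pooled.
func attStillUseful(attSlot, currentSlot types.Slot) bool {
	return attSlot+params.BeaconConfig().SlotsPerEpoch >= currentSlot
}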

View File

@@ -3,7 +3,6 @@ package blockchain
import (
"bytes"
"context"
"sort"
"testing"
"time"
@@ -11,11 +10,9 @@ import (
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -52,8 +49,6 @@ func TestSaveHead_Different(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(
ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, 0, 0))
service.head = &head{
slot: 0,
root: oldRoot,
@@ -69,8 +64,6 @@ func TestSaveHead_Different(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(
ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, 0, 0))
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
@@ -100,8 +93,6 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(
ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, 0, 0))
service.head = &head{
slot: 0,
root: oldRoot,
@@ -119,8 +110,6 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(
ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, 0, 0))
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
@@ -240,355 +229,50 @@ func Test_notifyNewHeadEvent(t *testing.T) {
})
}
func TestSaveOrphanedAtts_NoCommonAncestor(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2 -- 3
// -4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertOptimisticBlock(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, b))
}
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
func TestSaveOrphanedAtts(t *testing.T) {
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.genesisTime = time.Now()
// Chain setup
// 0 -- 1 -- 2 -- 3
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
rG, err := blkG.Block.HashTreeRoot()
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.NoError(t, service.saveOrphanedAtts(ctx, r))
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertOptimisticBlock(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, b))
}
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.AggregatedAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].Data.Slot > atts[j].Data.Slot
})
require.DeepEqual(t, wantAtts, atts)
require.Equal(t, len(b.Block.Body.Attestations), service.cfg.AttPool.AggregatedAttestationCount())
savedAtts := service.cfg.AttPool.AggregatedAttestations()
atts := b.Block.Body.Attestations
require.DeepSSZEqual(t, atts, savedAtts)
}
func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
genesis, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
rG, err := blkG.Block.HashTreeRoot()
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.NoError(t, service.saveOrphanedAtts(ctx, r))
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertOptimisticBlock(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, b))
}
require.NoError(t, service.saveOrphanedAtts(ctx, r2, r4))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
func TestSaveOrphanedAtts_NoCommonAncestor_DoublyLinkedTrie(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableForkChoiceDoublyLinkedTree: true,
})
defer resetCfg()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2 -- 3
// -4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertOptimisticBlock(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, b))
}
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableForkChoiceDoublyLinkedTree: true,
})
defer resetCfg()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2 -- 3
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertOptimisticBlock(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, b))
}
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.AggregatedAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].Data.Slot > atts[j].Data.Slot
})
require.DeepEqual(t, wantAtts, atts)
}
func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableForkChoiceDoublyLinkedTree: true,
})
defer resetCfg()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
// Chain setup
// 0 -- 1 -- 2
// \-4
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertOptimisticBlock(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, b))
}
require.NoError(t, service.saveOrphanedAtts(ctx, r2, r4))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
savedAtts := service.cfg.AttPool.AggregatedAttestations()
atts := b.Block.Body.Attestations
require.DeepNotSSZEqual(t, atts, savedAtts)
}
func TestUpdateHead_noSavedChanges(t *testing.T) {

View File

@@ -60,23 +60,27 @@ func logBlockSyncStatus(block interfaces.BeaconBlock, blockRoot [32]byte, justif
return err
}
level := log.Logger.GetLevel()
log = log.WithField("slot", block.Slot())
if level >= logrus.DebugLevel {
log = log.WithField("slotInEpoch", block.Slot()%params.BeaconConfig().SlotsPerEpoch)
log = log.WithField("justifiedEpoch", justified.Epoch)
log = log.WithField("justifiedRoot", fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]))
log = log.WithField("parentRoot", fmt.Sprintf("0x%s...", hex.EncodeToString(block.ParentRoot())[:8]))
log = log.WithField("version", version.String(block.Version()))
log = log.WithField("sinceSlotStartTime", prysmTime.Now().Sub(startTime))
log = log.WithField("chainServiceProcessedTime", prysmTime.Now().Sub(receivedTime))
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(block.ParentRoot())[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime),
}).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}).Info("Synced new block")
}
log.WithFields(logrus.Fields{
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
}).Info("Synced new block")
return nil
}
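
The logging change above gates the verbose field set on the logger's level. A tiny hedged sketch of that gate in isolation (function name invented):

package blockchain

import "github.com/sirupsen/logrus"

// verboseSyncFields reports whether the extra debug-only fields above would be
// attached, i.e. whether the logger is at debug level or more verbose.
func verboseSyncFields(l *logrus.Logger) bool {
	return l.GetLevel() >= logrus.DebugLevel
}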

View File

@@ -118,7 +118,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}
isValidPayload, err := s.notifyNewPayload(ctx, postStateVersion, postStateHeader, signed)
if err != nil {
return fmt.Errorf("could not verify new payload: %v", err)
return errors.Wrap(err, "could not verify new payload")
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preStateVersion, preStateHeader, signed); err != nil {
@@ -335,33 +335,33 @@ func getStateVersionAndPayload(st state.BeaconState) (int, *ethpb.ExecutionPaylo
}
func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeaconBlock,
blockRoots [][32]byte) error {
blockRoots [][32]byte) ([]*ethpb.Checkpoint, []*ethpb.Checkpoint, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()
if len(blks) == 0 || len(blockRoots) == 0 {
return errors.New("no blocks provided")
return nil, nil, errors.New("no blocks provided")
}
if len(blks) != len(blockRoots) {
return errWrongBlockCount
return nil, nil, errWrongBlockCount
}
if err := wrapper.BeaconBlockIsNil(blks[0]); err != nil {
return invalidBlock{err}
return nil, nil, invalidBlock{err}
}
b := blks[0].Block()
// Retrieve incoming block's pre state.
if err := s.verifyBlkPreState(ctx, b); err != nil {
return err
return nil, nil, err
}
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot()))
if err != nil {
return err
return nil, nil, err
}
if preState == nil || preState.IsNil() {
return fmt.Errorf("nil pre state for slot %d", b.Slot())
return nil, nil, fmt.Errorf("nil pre state for slot %d", b.Slot())
}
jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
@@ -382,7 +382,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
for i, b := range blks {
v, h, err := getStateVersionAndPayload(preState)
if err != nil {
return err
return nil, nil, err
}
preVersionAndHeaders[i] = &versionAndHeader{
version: v,
@@ -391,7 +391,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
if err != nil {
return invalidBlock{err}
return nil, nil, invalidBlock{err}
}
// Save potential boundary states.
if slots.IsEpochStart(preState.Slot()) {
@@ -402,7 +402,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
v, h, err = getStateVersionAndPayload(preState)
if err != nil {
return err
return nil, nil, err
}
postVersionAndHeaders[i] = &versionAndHeader{
version: v,
@@ -412,10 +412,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
verify, err := sigSet.Verify()
if err != nil {
return invalidBlock{err}
return nil, nil, invalidBlock{err}
}
if !verify {
return errors.New("batch block signature verification failed")
return nil, nil, errors.New("batch block signature verification failed")
}
// blocks have been verified, add them to forkchoice and call the engine
@@ -424,40 +424,40 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
postVersionAndHeaders[i].version,
postVersionAndHeaders[i].header, b)
if err != nil {
return err
return nil, nil, err
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
preVersionAndHeaders[i].header, b); err != nil {
return err
return nil, nil, err
}
}
if err := s.insertBlockToForkChoiceStore(ctx, b.Block(), blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
return err
return nil, nil, err
}
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoots[i]); err != nil {
return errors.Wrap(err, "could not set optimistic block to valid")
return nil, nil, errors.Wrap(err, "could not set optimistic block to valid")
}
}
s.saveInitSyncBlock(blockRoots[i], b)
if err = s.handleBlockAfterBatchVerify(ctx, b, blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
return nil, nil, err
}
}
for r, st := range boundaries {
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return err
return nil, nil, err
}
}
// Also saves the last post state which to be used as pre state for the next batch.
lastB := blks[len(blks)-1]
lastBR := blockRoots[len(blockRoots)-1]
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return err
return nil, nil, err
}
arg := &notifyForkchoiceUpdateArg{
headState: preState,
@@ -465,9 +465,12 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
headBlock: lastB.Block(),
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
return nil, nil, err
}
return s.saveHeadNoDB(ctx, lastB, lastBR, preState)
if err := s.saveHeadNoDB(ctx, lastB, lastBR, preState); err != nil {
return nil, nil, err
}
return fCheckpoints, jCheckpoints, nil
}
// handles a block after the block's batch has been verified, where we can save blocks
@@ -518,12 +521,6 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interf
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(fCheckpoint, h)
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fCheckpoint); err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.Prune(ctx, bytesutil.ToBytes32(fCheckpoint.Root)); err != nil {
return errors.Wrap(err, "could not prune proto array fork choice nodes")
}
}
return nil
}
@@ -540,20 +537,16 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
return err
}
// Update caches for the next epoch at epoch boundary slot - 1.
if err := helpers.UpdateCommitteeCache(ctx, copied, coreTime.CurrentEpoch(copied)); err != nil {
if err := helpers.UpdateCommitteeCache(copied, coreTime.CurrentEpoch(copied)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, copied); err != nil {
return err
}
} else if postState.Slot() >= s.nextEpochBoundarySlot {
s.headLock.RLock()
st := s.head.state
s.headLock.RUnlock()
if err := reportEpochMetrics(ctx, postState, st); err != nil {
if err := reportEpochMetrics(ctx, postState, s.head.state); err != nil {
return err
}
var err error
s.nextEpochBoundarySlot, err = slots.EpochStart(coreTime.NextEpoch(postState))
if err != nil {
@@ -562,7 +555,7 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
// Update caches at epoch boundary slot.
// The following updates have short cut to return nil cheaply if fulfilled during boundary slot - 1.
if err := helpers.UpdateCommitteeCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
if err := helpers.UpdateCommitteeCache(postState, coreTime.CurrentEpoch(postState)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, postState); err != nil {
@@ -607,7 +600,7 @@ func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk interfac
}
// Feed in block to fork choice store.
payloadHash, err := blocks.GetBlockPayloadHash(blk)
payloadHash, err := getBlockPayloadHash(blk)
if err != nil {
return err
}
@@ -628,6 +621,18 @@ func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashing
}
}
func getBlockPayloadHash(blk interfaces.BeaconBlock) ([32]byte, error) {
payloadHash := [32]byte{}
if blocks.IsPreBellatrixVersion(blk.Version()) {
return payloadHash, nil
}
payload, err := blk.Body().ExecutionPayload()
if err != nil {
return payloadHash, err
}
return bytesutil.ToBytes32(payload.BlockHash), nil
}
// This saves post state info to DB or cache. This also saves post state info to fork choice store.
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock, st state.BeaconState) error {
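
Since onBlockBatch now returns two checkpoint slices alongside the error, here is a hedged caller-side sketch of the new contract (helper and type names are invented, not part of this change set): each index holds the finalized and justified checkpoints computed for the block at the same position in the batch, as the tests later in this diff also assert.

package blockchain

import (
	"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

// batchResult is a hypothetical pairing used only to illustrate the new
// onBlockBatch return values.
type batchResult struct {
	block     interfaces.SignedBeaconBlock
	finalized *ethpb.Checkpoint
	justified *ethpb.Checkpoint
}

// zipBatchCheckpoints pairs each block with the checkpoints returned for it.
// Both slices are expected to have the same length as blks, matching how
// onBlockBatch allocates them above.
func zipBatchCheckpoints(blks []interfaces.SignedBeaconBlock, fCps, jCps []*ethpb.Checkpoint) []batchResult {
	out := make([]batchResult, 0, len(blks))
	for i := range blks {
		out = append(out, batchResult{block: blks[i], finalized: fCps[i], justified: jCps[i]})
	}
	return out
}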

View File

@@ -7,7 +7,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
@@ -242,7 +241,8 @@ func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpo
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cp, h)
return s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(cp)
return nil
}
func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
@@ -346,8 +346,11 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot types.Slot)
// This is useful for block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfaces.BeaconBlock,
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
pendingNodes := make([]interfaces.BeaconBlock, 0)
pendingRoots := make([][32]byte, 0)
parentRoot := bytesutil.ToBytes32(blk.ParentRoot())
slot := blk.Slot()
// Fork choice only matters from last finalized slot.
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
@@ -357,31 +360,39 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
if err != nil {
return err
}
pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: blk,
JustifiedEpoch: jCheckpoint.Epoch, FinalizedEpoch: fCheckpoint.Epoch})
higherThanFinalized := slot > fSlot
// As long as parent node is not in fork choice store, and parent node is in DB.
root := bytesutil.ToBytes32(blk.ParentRoot())
for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
b, err := s.getBlock(ctx, root)
for !s.cfg.ForkChoiceStore.HasNode(parentRoot) && s.cfg.BeaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
b, err := s.getBlock(ctx, parentRoot)
if err != nil {
return err
}
if b.Block().Slot() <= fSlot {
break
pendingNodes = append(pendingNodes, b.Block())
copiedRoot := parentRoot
pendingRoots = append(pendingRoots, copiedRoot)
parentRoot = bytesutil.ToBytes32(b.Block().ParentRoot())
slot = b.Block().Slot()
higherThanFinalized = slot > fSlot
}
// Insert parent nodes to fork choice store in reverse order.
// Lower slots should be at the end of the list.
for i := len(pendingNodes) - 1; i >= 0; i-- {
b := pendingNodes[i]
r := pendingRoots[i]
payloadHash, err := getBlockPayloadHash(blk)
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
b.Slot(), r, bytesutil.ToBytes32(b.ParentRoot()), payloadHash,
jCheckpoint.Epoch,
fCheckpoint.Epoch); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
}
root = bytesutil.ToBytes32(b.Block().ParentRoot())
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
JustifiedEpoch: jCheckpoint.Epoch,
FinalizedEpoch: fCheckpoint.Epoch}
pendingNodes = append(pendingNodes, args)
}
if len(pendingNodes) == 1 {
return nil
}
if root != s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root)) {
return errNotDescendantOfFinalized
}
return s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes)
return nil
}
// inserts finalized deposits into our finalized deposit trie.
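
For readers skimming the reworked fillInForkChoiceMissingBlocks above: missing ancestors are collected while the parent is absent from fork choice, present in the DB, and the walk has not yet dropped to the finalized slot, and they are then inserted individually in reverse order (lowest slots first). A minimal hedged sketch of that loop guard (helper name invented; the types package is the one imported elsewhere in this diff):

package blockchain

import types "github.com/prysmaticlabs/prysm/consensus-types/primitives"

// keepWalkingParents restates the loop guard above: continue collecting
// ancestors while the parent is missing from fork choice, available in the DB,
// and the most recently visited block is still above the finalized slot.
func keepWalkingParents(parentInForkChoice, parentInDB bool, lastVisitedSlot, finalizedSlot types.Slot) bool {
	return !parentInForkChoice && parentInDB && lastVisitedSlot > finalizedSlot
}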

View File

@@ -305,7 +305,7 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
var blks []interfaces.SignedBeaconBlock
var blkRoots [][32]byte
var firstState state.BeaconState
for i := 1; i < 97; i++ {
for i := 1; i < 10; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
@@ -331,15 +331,10 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
err = service.onBlockBatch(ctx, blks, blkRoots[1:])
_, _, err = service.onBlockBatch(ctx, blks, blkRoots[1:])
require.ErrorIs(t, errWrongBlockCount, err)
err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
jcp, err := service.store.JustifiedCheckpt()
require.NoError(t, err)
jroot := bytesutil.ToBytes32(jcp.Root)
require.Equal(t, blkRoots[63], jroot)
require.Equal(t, types.Epoch(2), service.cfg.ForkChoiceStore.JustifiedEpoch())
}
func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
@@ -375,7 +370,7 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
var blks []interfaces.SignedBeaconBlock
var blkRoots [][32]byte
var firstState state.BeaconState
for i := 1; i < 97; i++ {
for i := 1; i < 10; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
@@ -401,15 +396,10 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
err = service.onBlockBatch(ctx, blks, blkRoots[1:])
_, _, err = service.onBlockBatch(ctx, blks, blkRoots[1:])
require.ErrorIs(t, errWrongBlockCount, err)
err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
jcp, err := service.store.JustifiedCheckpt()
require.NoError(t, err)
jroot := bytesutil.ToBytes32(jcp.Root)
require.Equal(t, blkRoots[63], jroot)
require.Equal(t, types.Epoch(2), service.cfg.ForkChoiceStore.JustifiedEpoch())
}
func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
@@ -462,8 +452,10 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
cp1, cp2, err := service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
require.Equal(t, blkCount-1, len(cp1))
require.Equal(t, blkCount-1, len(cp2))
}
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
@@ -740,6 +732,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -762,14 +755,12 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{})
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 4 nodes from the block tree 1. B3 - B4 - B6 - B8
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[3])), "Didn't save node")
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
@@ -786,6 +777,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -809,14 +801,12 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{})
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[3])), "Didn't save node")
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
@@ -833,6 +823,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -856,15 +847,14 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{})
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 4 nodes from the block tree 1. B3 - B4 - B6 - B8
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
// Ensure all roots and their respective blocks exist.
wantedRoots := [][]byte{roots[3], roots[4], roots[6], roots[8]}
wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]}
for i, rt := range wantedRoots {
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
@@ -882,6 +872,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -905,15 +896,14 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{})
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B3 - B4 - B6 - B8
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
// Ensure all roots and their respective blocks exist.
wantedRoots := [][]byte{roots[3], roots[4], roots[6], roots[8]}
wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]}
for i, rt := range wantedRoots {
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
@@ -931,73 +921,8 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
assert.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
// Define a tree branch, slot 63 <- 64 <- 65 <- 66
b63 := util.NewBeaconBlock()
b63.Block.Slot = 63
wsb, err = wrapper.WrappedSignedBeaconBlock(b63)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r63, err := b63.Block.HashTreeRoot()
require.NoError(t, err)
b64 := util.NewBeaconBlock()
b64.Block.Slot = 64
b64.Block.ParentRoot = r63[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(b64)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r64, err := b64.Block.HashTreeRoot()
require.NoError(t, err)
b65 := util.NewBeaconBlock()
b65.Block.Slot = 65
b65.Block.ParentRoot = r64[:]
r65, err := b65.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(b65)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
b66 := util.NewBeaconBlock()
b66.Block.Slot = 66
b66.Block.ParentRoot = r65[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(b66)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
beaconState, _ := util.DeterministicGenesisState(t, 32)
// Set finalized epoch to 2.
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 2, Root: r64[:]}, [32]byte{})
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// We should have saved 1 node: block 65
assert.Equal(t, 1, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r65), "Didn't save node")
}
func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
// Set finalized epoch to 1.
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -1030,32 +955,23 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
b65 := util.NewBeaconBlock()
b65.Block.Slot = 65
b65.Block.ParentRoot = r64[:]
r65, err := b65.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(b65)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
b66 := util.NewBeaconBlock()
b66.Block.Slot = 66
b66.Block.ParentRoot = r65[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(b66)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
beaconState, _ := util.DeterministicGenesisState(t, 32)
// Set finalized epoch to 1.
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 2, Root: r64[:]}, [32]byte{})
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// There should be 1 node: block 65
assert.Equal(t, 1, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r65), "Didn't save node")
// There should be 2 nodes, block 65 and block 64.
assert.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
// Block with slot 63 should be in fork choice because it's less than finalized epoch 1.
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r63), "Didn't save node")
}
func TestFillForkChoiceMissingBlocks_FinalizedSibling_DoublyLinkedTree(t *testing.T) {
func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -1066,39 +982,61 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling_DoublyLinkedTree(t *testin
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
// Set finalized epoch to 1.
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
// Define a tree branch, slot 63 <- 64 <- 65
b63 := util.NewBeaconBlock()
b63.Block.Slot = 63
wsb, err = wrapper.WrappedSignedBeaconBlock(b63)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r63, err := b63.Block.HashTreeRoot()
require.NoError(t, err)
b64 := util.NewBeaconBlock()
b64.Block.Slot = 64
b64.Block.ParentRoot = r63[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(b64)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
r64, err := b64.Block.HashTreeRoot()
require.NoError(t, err)
b65 := util.NewBeaconBlock()
b65.Block.Slot = 65
b65.Block.ParentRoot = r64[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(b65)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
beaconState, _ := util.DeterministicGenesisState(t, 32)
blk := util.NewBeaconBlock()
blk.Block.Slot = 9
blk.Block.ParentRoot = roots[8]
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[1]}, [32]byte{})
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.ErrorIs(t, errNotDescendantOfFinalized, err)
require.NoError(t, err)
// There should be 2 nodes, block 65 and block 64.
assert.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
// Block with slot 63 should be in fork choice because it's less than finalized epoch 1.
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r63), "Didn't save node")
}
// blockTree1 constructs the following tree:
// /- B1
// B0 /- B5 - B7
// \- B3 - B4 - B6 - B8
// (B1, and B3 are all from the same slots)
func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byte, error) {
genesisRoot = bytesutil.PadTo(genesisRoot, 32)
b0 := util.NewBeaconBlock()

View File

@@ -164,26 +164,21 @@ func (s *Service) UpdateHead(ctx context.Context) error {
if err != nil {
log.WithError(err).Warn("Resolving fork due to new attestation")
}
s.headLock.RLock()
if s.headRoot() != newHeadRoot {
log.WithFields(logrus.Fields{
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
}).Debug("Head changed due to attestations")
}
s.headLock.RUnlock()
s.notifyEngineIfChangedHead(ctx, newHeadRoot)
return nil
}
// notifyEngineIfChangedHead notifies the execution engine via a forkchoice update in the event that the head has changed.
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
s.headLock.RLock()
if newHeadRoot == [32]byte{} || s.headRoot() == newHeadRoot {
s.headLock.RUnlock()
return
}
s.headLock.RUnlock()
if !s.hasBlockInInitSyncOrDB(ctx, newHeadRoot) {
log.Debug("New head does not exist in DB. Do nothing")

View File

@@ -233,8 +233,6 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wb))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(
ctx, wb.Block().Slot(), r, bytesutil.ToBytes32(wb.Block().ParentRoot()), [32]byte{}, 0, 0))
service.head.root = r // Old head
require.Equal(t, 1, len(service.cfg.AttPool.ForkchoiceAttestations()))
require.NoError(t, err, service.UpdateHead(ctx))

View File

@@ -89,7 +89,8 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
defer span.End()
// Apply state transition on the incoming newly received block batches, one by one.
if err := s.onBlockBatch(ctx, blocks, blkRoots); err != nil {
_, _, err := s.onBlockBatch(ctx, blocks, blkRoots)
if err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)
return err

View File

@@ -15,7 +15,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -145,18 +144,13 @@ func (s *Service) Start() {
func (s *Service) Stop() error {
defer s.cancel()
// lock before accessing s.head, s.head.state, s.head.state.FinalizedCheckpoint().Root
s.headLock.RLock()
if s.cfg.StateGen != nil && s.head != nil && s.head.state != nil {
r := s.head.state.FinalizedCheckpoint().Root
s.headLock.RUnlock()
// Save the last finalized state so that starting up in the following run will be much faster.
if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, r); err != nil {
if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, s.head.state.FinalizedCheckpoint().Root); err != nil {
return err
}
} else {
s.headLock.RUnlock()
}
// Save initial sync cached blocks to the DB before stop.
return s.cfg.BeaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks())
}
@@ -218,7 +212,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint block")
}
payloadHash, err := blocks.GetBlockPayloadHash(fb.Block())
payloadHash, err := getBlockPayloadHash(fb.Block())
if err != nil {
return errors.Wrap(err, "could not get execution payload hash")
}
@@ -237,9 +231,8 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errors.Wrap(err, "could not set finalized block as validated")
}
}
s.headLock.RLock()
h := s.headBlock().Block()
s.headLock.RUnlock()
if h.Slot() > fSlot {
log.WithFields(logrus.Fields{
"startSlot": fSlot,
@@ -452,7 +445,7 @@ func (s *Service) initializeBeaconChain(
s.cfg.ChainStartFetcher.ClearPreGenesisData()
// Update committee shuffled indices for genesis epoch.
if err := helpers.UpdateCommitteeCache(ctx, genesisState, 0); err != nil {
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
return nil, err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
@@ -485,7 +478,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
genesisCheckpoint := genesisState.FinalizedCheckpoint()
s.store = store.New(genesisCheckpoint, genesisCheckpoint)
payloadHash, err := blocks.GetBlockPayloadHash(genesisBlk.Block())
payloadHash, err := getBlockPayloadHash(genesisBlk.Block())
if err != nil {
return err
}

View File

@@ -1,22 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"option.go",
"service.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/builder",
visibility = ["//visibility:public"],
deps = [
"//api/client/builder:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//consensus-types/primitives:go_default_library",
"//network:go_default_library",
"//network/authorization:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],
)

View File

@@ -1,7 +0,0 @@
package builder
import "github.com/pkg/errors"
var (
ErrNotRunning = errors.New("builder is not running")
)

View File

@@ -1,30 +0,0 @@
package builder
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
submitBlindedBlockLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "submit_blinded_block_latency_milliseconds",
Help: "Captures RPC latency for submitting blinded block in milliseconds",
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
},
)
getHeaderLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "get_header_latency_milliseconds",
Help: "Captures RPC latency for get header in milliseconds",
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
},
)
registerValidatorLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "register_validator_latency_milliseconds",
Help: "Captures RPC latency for register validator in milliseconds",
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
},
)
)

View File

@@ -1,36 +0,0 @@
package builder
import (
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/network"
"github.com/prysmaticlabs/prysm/network/authorization"
"github.com/urfave/cli/v2"
)
type Option func(s *Service) error
// FlagOptions for builder service flag configurations.
func FlagOptions(c *cli.Context) ([]Option, error) {
endpoint := c.String(flags.MevRelayEndpoint.Name)
opts := []Option{
WithBuilderEndpoints(endpoint),
}
return opts, nil
}
// WithBuilderEndpoints sets the endpoint for the beacon chain builder service.
func WithBuilderEndpoints(endpoint string) Option {
return func(s *Service) error {
s.cfg.builderEndpoint = covertEndPoint(endpoint)
return nil
}
}
func covertEndPoint(ep string) network.Endpoint {
return network.Endpoint{
Url: ep,
Auth: network.AuthorizationData{ // Auth is not used for builder.
Method: authorization.None,
Value: "",
}}
}

View File

@@ -1,58 +0,0 @@
package builder
import (
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/network"
"github.com/prysmaticlabs/prysm/network/authorization"
"github.com/urfave/cli/v2"
)
type Option func(s *Service) error
// FlagOptions for builder service flag configurations.
func FlagOptions(c *cli.Context) ([]Option, error) {
endpoints := parseBuilderEndpoints(c)
opts := []Option{
WithBuilderEndpoints(endpoints),
}
return opts, nil
}
func WithBuilderEndpoints(endpoints []string) Option {
return func(s *Service) error {
stringEndpoints := dedupEndpoints(endpoints)
endpoints := make([]network.Endpoint, len(stringEndpoints))
for i, e := range stringEndpoints {
endpoints[i] = covertEndPoint(e)
}
s.cfg.builderEndpoint = endpoints[0] // Use the first one as the default.
return nil
}
}
func covertEndPoint(ep string) network.Endpoint {
return network.Endpoint{
Url: ep,
Auth: network.AuthorizationData{ // Not sure about authorization for now.
Method: authorization.None,
Value: "",
}}
}
func parseBuilderEndpoints(c *cli.Context) []string {
// Goal is to support multiple end points later.
return []string{c.String(flags.MevBuilderFlag.Name)}
}
func dedupEndpoints(endpoints []string) []string {
selectionMap := make(map[string]bool)
newEndpoints := make([]string, 0, len(endpoints))
for _, point := range endpoints {
if selectionMap[point] {
continue
}
newEndpoints = append(newEndpoints, point)
selectionMap[point] = true
}
return newEndpoints
}

View File

@@ -1,77 +0,0 @@
package builder
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/api/client/builder"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/network"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
// BlockBuilder defines the interface for interacting with the block builder
type BlockBuilder interface {
SubmitBlindedBlock(ctx context.Context, block *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error)
GetHeader(ctx context.Context, slot types.Slot, parentHash [32]byte, pubKey [48]byte) (*ethpb.SignedBuilderBid, error)
Status() error
RegisterValidator(ctx context.Context, reg *ethpb.SignedValidatorRegistrationV1) error
}
// config defines a config struct for dependencies into the service.
type config struct {
builderEndpoint network.Endpoint
}
// Service defines a service that provides a client for interacting with the beacon chain and MEV relay network.
type Service struct {
cfg *config
c *builder.Client
}
// NewService instantiates a new service.
func NewService(ctx context.Context, opts ...Option) (*Service, error) {
s := &Service{}
for _, opt := range opts {
if err := opt(s); err != nil {
return nil, err
}
}
if s.cfg.builderEndpoint.Url != "" {
c, err := builder.NewClient(s.cfg.builderEndpoint.Url)
if err != nil {
return nil, err
}
s.c = c
}
return s, nil
}
// Start initializes the service.
func (*Service) Start() {}
// Stop halts the service.
func (*Service) Stop() error {
return nil
}
// SubmitBlindedBlock is currently a stub.
func (*Service) SubmitBlindedBlock(context.Context, *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error) {
return nil, errors.New("not implemented")
}
// GetHeader is currently a stub.
func (*Service) GetHeader(context.Context, types.Slot, [32]byte, [48]byte) (*ethpb.SignedBuilderBid, error) {
return nil, errors.New("not implemented")
}
// Status is currently a stub.
func (*Service) Status() error {
return errors.New("not implemented")
}
// RegisterValidator is currently a stub.
func (*Service) RegisterValidator(context.Context, *ethpb.SignedValidatorRegistrationV1) error {
return errors.New("not implemented")
}
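For orientation, a minimal, hypothetical wiring sketch for the service above, assuming the single-endpoint WithBuilderEndpoints(string) option variant shown earlier and assuming NewService initializes its internal config before applying options (not visible in this hunk). The relay URL is a placeholder; all methods are stubs in this snapshot.

package main

import (
	"context"
	"log"

	"github.com/prysmaticlabs/prysm/beacon-chain/builder"
)

func main() {
	// Placeholder relay URL; real deployments pass this via the MEV relay flag.
	svc, err := builder.NewService(context.Background(),
		builder.WithBuilderEndpoints("http://localhost:18550"))
	if err != nil {
		log.Fatal(err)
	}
	// The methods are stubs here, so Status is expected to report "not implemented".
	if err := svc.Status(); err != nil {
		log.Printf("builder status: %v", err)
	}
}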

View File

@@ -1,29 +0,0 @@
package builder
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
)
func VerifyRegistrationSignature(
slot types.Slot,
fork *ethpb.Fork,
signed *ethpb.SignedValidatorRegistrationV1,
genesisRoot []byte,
) error {
if signed == nil || signed.Message == nil {
return errors.New("nil signed registration")
}
domain, err := signing.Domain(fork, slots.ToEpoch(slot), [4]byte{} /*TODO: Use registration signing domain */, genesisRoot)
if err != nil {
return err
}
if err := signing.VerifySigningRoot(signed, signed.Message.Pubkey, signed.Signature, domain); err != nil {
return signing.ErrSigFailedToVerify
}
return nil
}

View File

@@ -103,12 +103,9 @@ func (c *CommitteeCache) Committee(ctx context.Context, slot types.Slot, seed [3
// AddCommitteeShuffledList adds a Committees shuffled list object to the cache. This
// method also trims the least recently used entry once the cache has reached its max size limit.
func (c *CommitteeCache) AddCommitteeShuffledList(ctx context.Context, committees *Committees) error {
func (c *CommitteeCache) AddCommitteeShuffledList(committees *Committees) error {
c.lock.Lock()
defer c.lock.Unlock()
if err := ctx.Err(); err != nil {
return err
}
key, err := committeeKeyFn(committees)
if err != nil {
return err

View File

@@ -27,7 +27,7 @@ func (c *FakeCommitteeCache) Committee(ctx context.Context, slot types.Slot, see
// AddCommitteeShuffledList adds a Committees shuffled list object to the cache. This
// method also trims the least recently used entry once the cache has reached its max size limit.
func (c *FakeCommitteeCache) AddCommitteeShuffledList(ctx context.Context, committees *Committees) error {
func (c *FakeCommitteeCache) AddCommitteeShuffledList(committees *Committees) error {
return nil
}

View File

@@ -31,7 +31,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), c))
require.NoError(t, cache.AddCommitteeShuffledList(c))
_, err := cache.Committee(context.Background(), 0, c.Seed, 0)
require.NoError(t, err)
}
@@ -46,7 +46,7 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), c))
require.NoError(t, cache.AddCommitteeShuffledList(c))
indices, err := cache.ActiveIndices(context.Background(), c.Seed)
require.NoError(t, err)

View File

@@ -50,7 +50,7 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
if indices != nil {
t.Error("Expected committee not to exist in empty cache")
}
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
require.NoError(t, cache.AddCommitteeShuffledList(item))
wantedIndex := types.CommitteeIndex(0)
indices, err = cache.Committee(context.Background(), slot, item.Seed, wantedIndex)
@@ -70,7 +70,7 @@ func TestCommitteeCache_ActiveIndices(t *testing.T) {
t.Error("Expected committee not to exist in empty cache")
}
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
require.NoError(t, cache.AddCommitteeShuffledList(item))
indices, err = cache.ActiveIndices(context.Background(), item.Seed)
require.NoError(t, err)
@@ -85,7 +85,7 @@ func TestCommitteeCache_ActiveCount(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
require.NoError(t, cache.AddCommitteeShuffledList(item))
count, err = cache.ActiveIndicesCount(context.Background(), item.Seed)
require.NoError(t, err)
@@ -101,7 +101,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
for i := start; i < end; i++ {
s := []byte(strconv.Itoa(i))
item := &Committees{Seed: bytesutil.ToBytes32(s)}
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
require.NoError(t, cache.AddCommitteeShuffledList(item))
}
k := cache.CommitteeCache.Keys()
@@ -134,20 +134,3 @@ func TestCommitteeCacheOutOfRange(t *testing.T) {
_, err = cache.Committee(context.Background(), 0, seed, math.MaxUint64) // Overflow!
require.NotNil(t, err, "Did not fail as expected")
}
func TestCommitteeCache_DoesNothingWhenCancelledContext(t *testing.T) {
cache := NewCommitteesCache()
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6}}
count, err := cache.ActiveIndicesCount(context.Background(), item.Seed)
require.NoError(t, err)
assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")
cancelled, cancel := context.WithCancel(context.Background())
cancel()
require.ErrorIs(t, cache.AddCommitteeShuffledList(cancelled, item), context.Canceled)
count, err = cache.ActiveIndicesCount(context.Background(), item.Seed)
require.NoError(t, err)
assert.Equal(t, 0, count)
}

View File

@@ -47,7 +47,7 @@ func InitializePrecomputeValidators(ctx context.Context, beaconState state.Beaco
return err
}
}
// Set validator's active status for previous epoch.
// Set validator's active status for preivous epoch.
if helpers.IsActiveValidatorUsingTrie(val, prevEpoch) {
v.IsActivePrevEpoch = true
bal.ActivePrevEpoch, err = math.Add64(bal.ActivePrevEpoch, val.EffectiveBalance())

View File

@@ -10,7 +10,6 @@ import (
"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
@@ -272,16 +271,3 @@ func ProcessPayloadHeader(st state.BeaconState, header *ethpb.ExecutionPayloadHe
}
return st, nil
}
// GetBlockPayloadHash returns the hash of the execution payload of the block
func GetBlockPayloadHash(blk interfaces.BeaconBlock) ([32]byte, error) {
payloadHash := [32]byte{}
if IsPreBellatrixVersion(blk.Version()) {
return payloadHash, nil
}
payload, err := blk.Body().ExecutionPayload()
if err != nil {
return payloadHash, err
}
return bytesutil.ToBytes32(payload.BlockHash), nil
}
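As a quick illustration of the helper removed above (on the side of the diff that still has it): a phase0 block short-circuits through IsPreBellatrixVersion and yields the zero hash. This sketch builds a phase0 block with the repo's test utility purely for demonstration; it is not production wiring.

package main

import (
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
	"github.com/prysmaticlabs/prysm/testing/util"
)

func main() {
	// A phase0 test block has no execution payload, so the helper returns [32]byte{}.
	wsb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
	if err != nil {
		log.Fatal(err)
	}
	payloadHash, err := blocks.GetBlockPayloadHash(wsb.Block())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("payload hash: %#x\n", payloadHash) // all zeros for pre-Bellatrix blocks
}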

View File

@@ -28,7 +28,6 @@ go_library(
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
@@ -44,13 +43,11 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -2,7 +2,6 @@ package precompute
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -10,24 +9,6 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"
)
var errNilState = errors.New("nil state")
// UnrealizedCheckpoints returns the justification and finalization checkpoints of the
// given state as if it was progressed with empty slots until the next epoch.
func UnrealizedCheckpoints(st state.BeaconState) (*ethpb.Checkpoint, *ethpb.Checkpoint, error) {
if st == nil || st.IsNil() {
return nil, nil, errNilState
}
activeBalance, prevTarget, currentTarget, err := st.UnrealizedCheckpointBalances()
if err != nil {
return nil, nil, err
}
justification := processJustificationBits(st, activeBalance, prevTarget, currentTarget)
return computeCheckpoints(st, justification)
}
// ProcessJustificationAndFinalizationPreCompute processes justification and finalization during
// epoch processing. This is where a beacon node can justify and finalize a new epoch.
// Note: this is an optimized version by passing in precomputed total and attesting balances.
@@ -53,55 +34,12 @@ func ProcessJustificationAndFinalizationPreCompute(state state.BeaconState, pBal
return state, nil
}
newBits := processJustificationBits(state, pBal.ActiveCurrentEpoch, pBal.PrevEpochTargetAttested, pBal.CurrentEpochTargetAttested)
return weighJustificationAndFinalization(state, newBits)
return weighJustificationAndFinalization(state, pBal.ActiveCurrentEpoch, pBal.PrevEpochTargetAttested, pBal.CurrentEpochTargetAttested)
}
// processJustificationBits processes the justification bits during epoch processing.
func processJustificationBits(state state.BeaconState, totalActiveBalance, prevEpochTargetBalance, currEpochTargetBalance uint64) bitfield.Bitvector4 {
newBits := state.JustificationBits()
newBits.Shift(1)
// If 2/3 or more of total balance attested in the previous epoch.
if 3*prevEpochTargetBalance >= 2*totalActiveBalance {
newBits.SetBitAt(1, true)
}
if 3*currEpochTargetBalance >= 2*totalActiveBalance {
newBits.SetBitAt(0, true)
}
return newBits
}
// updateJustificationAndFinalization processes justification and finalization during
// weighJustificationAndFinalization processes justification and finalization during
// epoch processing. This is where a beacon node can justify and finalize a new epoch.
func weighJustificationAndFinalization(state state.BeaconState, newBits bitfield.Bitvector4) (state.BeaconState, error) {
jc, fc, err := computeCheckpoints(state, newBits)
if err != nil {
return nil, err
}
if err := state.SetPreviousJustifiedCheckpoint(state.CurrentJustifiedCheckpoint()); err != nil {
return nil, err
}
if err := state.SetCurrentJustifiedCheckpoint(jc); err != nil {
return nil, err
}
if err := state.SetJustificationBits(newBits); err != nil {
return nil, err
}
if err := state.SetFinalizedCheckpoint(fc); err != nil {
return nil, err
}
return state, nil
}
// computeCheckpoints computes the new Justification and Finalization
// checkpoints at epoch transition
//
// Spec pseudocode definition:
// def weigh_justification_and_finalization(state: BeaconState,
// total_active_balance: Gwei,
@@ -139,57 +77,88 @@ func weighJustificationAndFinalization(state state.BeaconState, newBits bitfield
// # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
// if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch:
// state.finalized_checkpoint = old_current_justified_checkpoint
func computeCheckpoints(state state.BeaconState, newBits bitfield.Bitvector4) (*ethpb.Checkpoint, *ethpb.Checkpoint, error) {
func weighJustificationAndFinalization(state state.BeaconState,
totalActiveBalance, prevEpochTargetBalance, currEpochTargetBalance uint64) (state.BeaconState, error) {
prevEpoch := time.PrevEpoch(state)
currentEpoch := time.CurrentEpoch(state)
oldPrevJustifiedCheckpoint := state.PreviousJustifiedCheckpoint()
oldCurrJustifiedCheckpoint := state.CurrentJustifiedCheckpoint()
justifiedCheckpoint := state.CurrentJustifiedCheckpoint()
finalizedCheckpoint := state.FinalizedCheckpoint()
// Process justifications
if err := state.SetPreviousJustifiedCheckpoint(state.CurrentJustifiedCheckpoint()); err != nil {
return nil, err
}
newBits := state.JustificationBits()
newBits.Shift(1)
if err := state.SetJustificationBits(newBits); err != nil {
return nil, err
}
// If 2/3 or more of the total balance attested in the current epoch.
if newBits.BitAt(0) {
blockRoot, err := helpers.BlockRoot(state, currentEpoch)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get block root for current epoch %d", currentEpoch)
}
justifiedCheckpoint.Epoch = currentEpoch
justifiedCheckpoint.Root = blockRoot
} else if newBits.BitAt(1) {
// If 2/3 or more of total balance attested in the previous epoch.
// Note: the spec refers to the bit index position starting at 1 instead of starting at zero.
// We will use that paradigm here for consistency with the godoc spec definition.
// If 2/3 or more of total balance attested in the previous epoch.
if 3*prevEpochTargetBalance >= 2*totalActiveBalance {
blockRoot, err := helpers.BlockRoot(state, prevEpoch)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get block root for previous epoch %d", prevEpoch)
return nil, errors.Wrapf(err, "could not get block root for previous epoch %d", prevEpoch)
}
if err := state.SetCurrentJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: prevEpoch, Root: blockRoot}); err != nil {
return nil, err
}
newBits = state.JustificationBits()
newBits.SetBitAt(1, true)
if err := state.SetJustificationBits(newBits); err != nil {
return nil, err
}
}
// If 2/3 or more of the total balance attested in the current epoch.
if 3*currEpochTargetBalance >= 2*totalActiveBalance {
blockRoot, err := helpers.BlockRoot(state, currentEpoch)
if err != nil {
return nil, errors.Wrapf(err, "could not get block root for current epoch %d", prevEpoch)
}
if err := state.SetCurrentJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: currentEpoch, Root: blockRoot}); err != nil {
return nil, err
}
newBits = state.JustificationBits()
newBits.SetBitAt(0, true)
if err := state.SetJustificationBits(newBits); err != nil {
return nil, err
}
justifiedCheckpoint.Epoch = prevEpoch
justifiedCheckpoint.Root = blockRoot
}
// Process finalization according to Ethereum Beacon Chain specification.
if len(newBits) == 0 {
return nil, nil, errors.New("empty justification bits")
}
justification := newBits.Bytes()[0]
justification := state.JustificationBits().Bytes()[0]
// 2nd/3rd/4th (0b1110) most recent epochs are justified, the 2nd using the 4th as source.
if justification&0x0E == 0x0E && (oldPrevJustifiedCheckpoint.Epoch+3) == currentEpoch {
finalizedCheckpoint = oldPrevJustifiedCheckpoint
if err := state.SetFinalizedCheckpoint(oldPrevJustifiedCheckpoint); err != nil {
return nil, err
}
}
// 2nd/3rd (0b0110) most recent epochs are justified, the 2nd using the 3rd as source.
if justification&0x06 == 0x06 && (oldPrevJustifiedCheckpoint.Epoch+2) == currentEpoch {
finalizedCheckpoint = oldPrevJustifiedCheckpoint
if err := state.SetFinalizedCheckpoint(oldPrevJustifiedCheckpoint); err != nil {
return nil, err
}
}
// 1st/2nd/3rd (0b0111) most recent epochs are justified, the 1st using the 3rd as source.
if justification&0x07 == 0x07 && (oldCurrJustifiedCheckpoint.Epoch+2) == currentEpoch {
finalizedCheckpoint = oldCurrJustifiedCheckpoint
if err := state.SetFinalizedCheckpoint(oldCurrJustifiedCheckpoint); err != nil {
return nil, err
}
}
// The 1st/2nd (0b0011) most recent epochs are justified, the 1st using the 2nd as source
if justification&0x03 == 0x03 && (oldCurrJustifiedCheckpoint.Epoch+1) == currentEpoch {
finalizedCheckpoint = oldCurrJustifiedCheckpoint
if err := state.SetFinalizedCheckpoint(oldCurrJustifiedCheckpoint); err != nil {
return nil, err
}
}
return justifiedCheckpoint, finalizedCheckpoint, nil
return state, nil
}
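To make the 2/3 checks above concrete, here is a toy, self-contained sketch of the shift-then-set pattern, operating on plain numbers rather than a BeaconState (this is not the production helper; 3*target >= 2*totalActive is simply the integer form of target >= (2/3)*totalActive).

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

// toyJustificationBits mirrors the pattern used during epoch processing: shift the
// justification bits by one, then set bit 1 if the previous epoch reached a 2/3
// supermajority of the active balance, and bit 0 if the current epoch did.
func toyJustificationBits(bits bitfield.Bitvector4, totalActive, prevTarget, currTarget uint64) bitfield.Bitvector4 {
	bits.Shift(1)
	if 3*prevTarget >= 2*totalActive {
		bits.SetBitAt(1, true)
	}
	if 3*currTarget >= 2*totalActive {
		bits.SetBitAt(0, true)
	}
	return bits
}

func main() {
	bits := bitfield.Bitvector4{0x00}
	// 100 units of active balance, 70 attested to the previous target, 50 to the current one.
	bits = toyJustificationBits(bits, 100, 70, 50)
	// Bit 1 ends up set (previous epoch justified), bit 0 stays clear (current epoch not justified).
	fmt.Printf("justification bits: %04b\n", bits.Bytes()[0])
}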

View File

@@ -1,14 +1,11 @@
package precompute_test
import (
"context"
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
@@ -126,127 +123,3 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyPrevEpoch(t *testi
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], newState.FinalizedCheckpoint().Root)
assert.Equal(t, types.Epoch(0), newState.FinalizedCheckpointEpoch(), "Unexpected finalized epoch")
}
func TestUnrealizedCheckpoints(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
balances := make([]uint64, len(validators))
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
pjr := [32]byte{'p'}
cjr := [32]byte{'c'}
je := types.Epoch(3)
fe := types.Epoch(2)
pjcp := &ethpb.Checkpoint{Root: pjr[:], Epoch: fe}
cjcp := &ethpb.Checkpoint{Root: cjr[:], Epoch: je}
fcp := &ethpb.Checkpoint{Root: pjr[:], Epoch: fe}
tests := []struct {
name string
slot types.Slot
prevVals, currVals int
expectedJustified, expectedFinalized types.Epoch // The expected unrealized checkpoint epochs
}{
{
"Not enough votes, keep previous justification",
129,
len(validators) / 3,
len(validators) / 3,
je,
fe,
},
{
"Not enough votes, keep previous justification, N+2",
161,
len(validators) / 3,
len(validators) / 3,
je,
fe,
},
{
"Enough to justify previous epoch but not current",
129,
2*len(validators)/3 + 3,
len(validators) / 3,
je,
fe,
},
{
"Enough to justify previous epoch but not current, N+2",
161,
2*len(validators)/3 + 3,
len(validators) / 3,
je + 1,
fe,
},
{
"Enough to justify current epoch",
129,
len(validators) / 3,
2*len(validators)/3 + 3,
je + 1,
fe,
},
{
"Enough to justify current epoch, but not previous",
161,
len(validators) / 3,
2*len(validators)/3 + 3,
je + 2,
fe,
},
{
"Enough to justify current and previous",
161,
2*len(validators)/3 + 3,
2*len(validators)/3 + 3,
je + 2,
fe,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
base := &ethpb.BeaconStateAltair{
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Validators: validators,
Slot: test.slot,
CurrentEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
PreviousEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
Balances: balances,
PreviousJustifiedCheckpoint: pjcp,
CurrentJustifiedCheckpoint: cjcp,
FinalizedCheckpoint: fcp,
InactivityScores: make([]uint64, len(validators)),
JustificationBits: make(bitfield.Bitvector4, 1),
}
for i := 0; i < test.prevVals; i++ {
base.PreviousEpochParticipation[i] = 0xFF
}
for i := 0; i < test.currVals; i++ {
base.CurrentEpochParticipation[i] = 0xFF
}
if test.slot > 130 {
base.JustificationBits.SetBitAt(2, true)
base.JustificationBits.SetBitAt(3, true)
} else {
base.JustificationBits.SetBitAt(1, true)
base.JustificationBits.SetBitAt(2, true)
}
state, err := v2.InitializeFromProto(base)
require.NoError(t, err)
_, _, err = altair.InitializePrecomputeValidators(context.Background(), state)
require.NoError(t, err)
jc, fc, err := precompute.UnrealizedCheckpoints(state)
require.NoError(t, err)
require.DeepEqual(t, test.expectedJustified, jc.Epoch)
require.DeepEqual(t, test.expectedFinalized, fc.Epoch)
})
}
}

View File

@@ -285,7 +285,7 @@ func ShuffledIndices(s state.ReadOnlyBeaconState, epoch types.Epoch) ([]types.Va
// UpdateCommitteeCache gets called at the beginning of every epoch to cache the committee shuffled indices
// list with committee index and epoch number. It caches the shuffled indices for current epoch and next epoch.
func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState, epoch types.Epoch) error {
func UpdateCommitteeCache(state state.ReadOnlyBeaconState, epoch types.Epoch) error {
for _, e := range []types.Epoch{epoch, epoch + 1} {
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
@@ -311,7 +311,7 @@ func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState,
return sortedIndices[i] < sortedIndices[j]
})
if err := committeeCache.AddCommitteeShuffledList(ctx, &cache.Committees{
if err := committeeCache.AddCommitteeShuffledList(&cache.Committees{
ShuffledIndices: shuffledIndices,
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
Seed: seed,

View File

@@ -386,7 +386,7 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
require.NoError(t, UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
require.NoError(t, UpdateCommitteeCache(state, time.CurrentEpoch(state)))
epoch := types.Epoch(1)
idx := types.CommitteeIndex(1)

View File

@@ -126,7 +126,7 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
return nil, err
}
if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
if err := UpdateCommitteeCache(s, epoch); err != nil {
return nil, errors.Wrap(err, "could not update committee cache")
}
@@ -175,7 +175,7 @@ func ActiveValidatorCount(ctx context.Context, s state.ReadOnlyBeaconState, epoc
return 0, err
}
if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
if err := UpdateCommitteeCache(s, epoch); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
}

View File

@@ -301,7 +301,7 @@ func TestActiveValidatorCount_Genesis(t *testing.T) {
// Preset cache to a bad count.
seed, err := Seed(beaconState, 0, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.NoError(t, committeeCache.AddCommitteeShuffledList(context.Background(), &cache.Committees{Seed: seed, ShuffledIndices: []types.ValidatorIndex{1, 2, 3}}))
require.NoError(t, committeeCache.AddCommitteeShuffledList(&cache.Committees{Seed: seed, ShuffledIndices: []types.ValidatorIndex{1, 2, 3}}))
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
require.NoError(t, err)
assert.Equal(t, uint64(c), validatorCount, "Did not get the correct validator count")

View File

@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"domain.go",
"signature.go",
"signing_root.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/signing",
@@ -25,7 +24,6 @@ go_test(
name = "go_default_test",
srcs = [
"domain_test.go",
"signature_test.go",
"signing_root_test.go",
],
embed = [":go_default_library"],
@@ -42,7 +40,6 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
],
)

View File

@@ -1,33 +0,0 @@
package signing
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
var ErrNilRegistration = errors.New("nil signed registration")
// VerifyRegistrationSignature verifies the signature of a validator's registration.
func VerifyRegistrationSignature(
e types.Epoch,
f *ethpb.Fork,
sr *ethpb.SignedValidatorRegistrationV1,
genesisRoot []byte,
) error {
if sr == nil || sr.Message == nil {
return ErrNilRegistration
}
d := params.BeaconConfig().DomainApplicationBuilder
sd, err := Domain(f, e, d, genesisRoot)
if err != nil {
return err
}
if err := VerifySigningRoot(sr.Message, sr.Message.Pubkey, sr.Signature, sd); err != nil {
return ErrSigFailedToVerify
}
return nil
}

View File

@@ -1,44 +0,0 @@
package signing_test
import (
"testing"
"time"
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
)
func TestVerifyRegistrationSignature(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
reg := &ethpb.ValidatorRegistrationV1{
FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
GasLimit: 123456,
Timestamp: uint64(time.Now().Unix()),
Pubkey: sk.PublicKey().Marshal(),
}
st, _ := util.DeterministicGenesisState(t, 1)
d := params.BeaconConfig().DomainApplicationBuilder
e := slots.ToEpoch(st.Slot())
sig, err := signing.ComputeDomainAndSign(st, e, reg, d, sk)
require.NoError(t, err)
sReg := &ethpb.SignedValidatorRegistrationV1{
Message: reg,
Signature: sig,
}
f := st.Fork()
g := st.GenesisValidatorsRoot()
require.NoError(t, signing.VerifyRegistrationSignature(e, f, sReg, g))
sReg.Signature = []byte("bad")
require.ErrorIs(t, signing.VerifyRegistrationSignature(e, f, sReg, g), signing.ErrSigFailedToVerify)
sReg.Message = nil
require.ErrorIs(t, signing.VerifyRegistrationSignature(e, f, sReg, g), signing.ErrNilRegistration)
}

View File

@@ -53,7 +53,7 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
// some attestations in block are from previous epoch
currentSlot := beaconState.Slot()
require.NoError(b, beaconState.SetSlot(beaconState.Slot()-params.BeaconConfig().SlotsPerEpoch))
require.NoError(b, helpers.UpdateCommitteeCache(context.Background(), beaconState, time.CurrentEpoch(beaconState)))
require.NoError(b, helpers.UpdateCommitteeCache(beaconState, time.CurrentEpoch(beaconState)))
require.NoError(b, beaconState.SetSlot(currentSlot))
// Run the state transition once to populate the cache.
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
@@ -81,7 +81,7 @@ func BenchmarkProcessEpoch_2FullEpochs(b *testing.B) {
// some attestations in block are from previous epoch
currentSlot := beaconState.Slot()
require.NoError(b, beaconState.SetSlot(beaconState.Slot()-params.BeaconConfig().SlotsPerEpoch))
require.NoError(b, helpers.UpdateCommitteeCache(context.Background(), beaconState, time.CurrentEpoch(beaconState)))
require.NoError(b, helpers.UpdateCommitteeCache(beaconState, time.CurrentEpoch(beaconState)))
require.NoError(b, beaconState.SetSlot(currentSlot))
b.ResetTimer()

View File

@@ -403,51 +403,33 @@ func (s *Store) HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]
ctx, span := trace.StartSpan(ctx, "BeaconDB.HighestSlotBlocksBelow")
defer span.End()
var root [32]byte
sk := bytesutil.Uint64ToBytesBigEndian(uint64(slot))
err := s.db.View(func(tx *bolt.Tx) error {
var best []byte
if err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blockSlotIndicesBucket)
// Iterate through the index, which is in byte sorted order.
c := bkt.Cursor()
// The documentation for Seek says:
// "If the key does not exist then the next key is used. If no keys follow, a nil key is returned."
sl, r := c.Seek(sk)
// So if there are slots in the index higher than the requested slot, sl will be equal to the key that is
// one higher than the value we want. If the slot argument is higher than the highest value in the index,
// we'll get a nil value for `sl`. In that case we'll go backwards from Cursor.Last().
if sl == nil {
sl, r = c.Last()
}
for {
for s, root := c.First(); s != nil; s, root = c.Next() {
if ctx.Err() != nil {
return ctx.Err()
}
// termination condition: when .Prev() rewinds past the beginning of the collection, key will be nil.
// breaking without setting `root` means `root` will be the zero value, so this function will return
// the genesis block.
if sl == nil {
break
}
if r == nil {
key := bytesutil.BytesToSlotBigEndian(s)
if root == nil {
continue
}
bs := bytesutil.BytesToSlotBigEndian(sl)
// Iterating through the index using .Prev will move from higher to lower, so the first key we find behind
// the requested slot must be the highest block below that slot.
if slot > bs {
root = bytesutil.ToBytes32(r)
if key >= slot {
break
}
sl, r = c.Prev()
best = root
}
return nil
})
if err != nil {
}); err != nil {
return nil, err
}
var blk interfaces.SignedBeaconBlock
if root != params.BeaconConfig().ZeroHash {
blk, err = s.Block(ctx, root)
var err error
if best != nil {
blk, err = s.Block(ctx, bytesutil.ToBytes32(best))
if err != nil {
return nil, err
}
@@ -458,6 +440,7 @@ func (s *Store) HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]
return nil, err
}
}
return []interfaces.SignedBeaconBlock{blk}, nil
}
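The removed version of this lookup leans on bbolt cursor semantics: Seek lands on the first key at or after the target (or nil past the end), so stepping backwards with Prev yields the highest key strictly below the requested one. A standalone sketch of that pattern follows; the bucket name, file path, and 8-byte big-endian keys are illustrative only, and the go.etcd.io/bbolt import path is assumed.

package main

import (
	"encoding/binary"
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

// highestKeyBelow returns the largest stored key strictly below slot, if any.
func highestKeyBelow(db *bolt.DB, bucket []byte, slot uint64) (uint64, bool, error) {
	var found uint64
	var ok bool
	err := db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(bucket)
		if bkt == nil {
			return nil // nothing indexed yet
		}
		c := bkt.Cursor()
		target := make([]byte, 8)
		binary.BigEndian.PutUint64(target, slot)
		k, _ := c.Seek(target)
		if k == nil { // requested slot is above every indexed key
			k, _ = c.Last()
		}
		// Walk backwards until we find a key below the requested slot.
		for k != nil {
			if binary.BigEndian.Uint64(k) < slot {
				found, ok = binary.BigEndian.Uint64(k), true
				return nil
			}
			k, _ = c.Prev()
		}
		return nil
	})
	return found, ok, err
}

func main() {
	db, err := bolt.Open("demo.db", 0600, nil) // creates a local demo file
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	bucket := []byte("slots")
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(bucket)
		if err != nil {
			return err
		}
		for _, s := range []uint64{1, 4, 9} {
			k := make([]byte, 8)
			binary.BigEndian.PutUint64(k, s)
			if err := b.Put(k, []byte{1}); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
	s, ok, err := highestKeyBelow(db, bucket, 8)
	fmt.Println(s, ok, err) // 4 true <nil>
}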

View File

@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"doc.go",
"error.go",
"interfaces.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice",
@@ -17,6 +16,5 @@ go_library(
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -20,8 +20,6 @@ go_library(
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -51,16 +49,13 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
],
)

View File

@@ -5,9 +5,6 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
@@ -382,74 +379,3 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *pbrpc.Checkpoint) error {
f.store.finalizedEpoch = fc.Epoch
return nil
}
// CommonAncestorRoot returns the common ancestor root between the two block roots r1 and r2.
func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "doublelinkedtree.CommonAncestorRoot")
defer span.End()
// Do nothing if the input roots are the same.
if r1 == r2 {
return r1, nil
}
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
n1, ok := f.store.nodeByRoot[r1]
if !ok || n1 == nil {
return [32]byte{}, ErrNilNode
}
n2, ok := f.store.nodeByRoot[r2]
if !ok || n2 == nil {
return [32]byte{}, ErrNilNode
}
for {
if ctx.Err() != nil {
return [32]byte{}, ctx.Err()
}
if n1.slot > n2.slot {
n1 = n1.parent
// Reaches the end of the tree and unable to find common ancestor.
if n1 == nil {
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
} else {
n2 = n2.parent
// Reaches the end of the tree and unable to find common ancestor.
if n2 == nil {
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
}
if n1 == n2 {
return n1.root, nil
}
}
}
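The walk above terminates because each iteration moves the deeper of the two nodes one step toward the root, so the pointers either meet or run off the tree. A stripped-down, self-contained sketch of the same idea on a toy node type (not the forkchoice store types) is:

package main

import (
	"errors"
	"fmt"
)

type node struct {
	root   string
	slot   uint64
	parent *node
}

var errNoCommonAncestor = errors.New("unknown common ancestor")

// commonAncestor steps the deeper of the two nodes to its parent until they meet.
func commonAncestor(n1, n2 *node) (*node, error) {
	for {
		if n1 == nil || n2 == nil {
			return nil, errNoCommonAncestor
		}
		if n1 == n2 {
			return n1, nil
		}
		if n1.slot > n2.slot {
			n1 = n1.parent
		} else {
			n2 = n2.parent
		}
	}
}

func main() {
	// a <- b <- d and a <- c
	a := &node{root: "a", slot: 0}
	b := &node{root: "b", slot: 1, parent: a}
	c := &node{root: "c", slot: 2, parent: a}
	d := &node{root: "d", slot: 3, parent: b}
	anc, err := commonAncestor(d, c)
	fmt.Println(anc.root, err) // a <nil>
}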
// InsertOptimisticChain inserts all nodes corresponding to blocks in the slice
// `blocks`. This slice must be ordered from parent to child. It includes all
// blocks **except** the first one. All blocks are assumed to be a strict chain
// where blocks[i].Parent = blocks[i+1]. Also we assume that the parent of the
// last block in this list is already included in forkchoice store.
func (f *ForkChoice) InsertOptimisticChain(ctx context.Context, chain []*forkchoicetypes.BlockAndCheckpoints) error {
if len(chain) == 0 {
return nil
}
for i := len(chain) - 1; i > 0; i-- {
b := chain[i].Block
r := bytesutil.ToBytes32(chain[i-1].Block.ParentRoot())
parentRoot := bytesutil.ToBytes32(b.ParentRoot())
payloadHash, err := blocks.GetBlockPayloadHash(b)
if err != nil {
return err
}
if err := f.store.insert(ctx,
b.Slot(), r, parentRoot, payloadHash,
chain[i].JustifiedEpoch, chain[i].FinalizedEpoch); err != nil {
return err
}
}
return nil
}

View File

@@ -5,17 +5,13 @@ import (
"encoding/binary"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
@@ -255,191 +251,3 @@ func TestStore_UpdateCheckpoints(t *testing.T) {
require.Equal(t, f.store.justifiedEpoch, jc.Epoch)
require.Equal(t, f.store.finalizedEpoch, fc.Epoch)
}
func TestStore_CommonAncestor(t *testing.T) {
ctx := context.Background()
f := setup(0, 0)
// /-- b -- d -- e
// a
// \-- c -- f
// \-- g
// \ -- h -- i -- j
require.NoError(t, f.InsertOptimisticBlock(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, [32]byte{'d'}, [32]byte{'b'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, [32]byte{'f'}, [32]byte{'c'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 6, [32]byte{'g'}, [32]byte{'c'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 7, [32]byte{'h'}, [32]byte{'c'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 8, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 9, [32]byte{'j'}, [32]byte{'i'}, [32]byte{}, 1, 1))
tests := []struct {
name string
r1 [32]byte
r2 [32]byte
wantRoot [32]byte
}{
{
name: "Common ancestor between c and b is a",
r1: [32]byte{'c'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between c and d is a",
r1: [32]byte{'c'},
r2: [32]byte{'d'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between c and e is a",
r1: [32]byte{'c'},
r2: [32]byte{'e'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between g and f is c",
r1: [32]byte{'g'},
r2: [32]byte{'f'},
wantRoot: [32]byte{'c'},
},
{
name: "Common ancestor between f and h is c",
r1: [32]byte{'f'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'c'},
},
{
name: "Common ancestor between g and h is c",
r1: [32]byte{'g'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'c'},
},
{
name: "Common ancestor between b and h is a",
r1: [32]byte{'b'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between e and h is a",
r1: [32]byte{'e'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between i and f is c",
r1: [32]byte{'i'},
r2: [32]byte{'f'},
wantRoot: [32]byte{'c'},
},
{
name: "Common ancestor between j and g is c",
r1: [32]byte{'j'},
r2: [32]byte{'g'},
wantRoot: [32]byte{'c'},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
require.NoError(t, err)
require.Equal(t, tc.wantRoot, gotRoot)
})
}
// a -- b -- c -- d
f = setup(0, 0)
require.NoError(t, f.InsertOptimisticBlock(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1))
tests = []struct {
name string
r1 [32]byte
r2 [32]byte
wantRoot [32]byte
}{
{
name: "Common ancestor between a and b is a",
r1: [32]byte{'a'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between b and d is b",
r1: [32]byte{'d'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'b'},
},
{
name: "Common ancestor between d and a is a",
r1: [32]byte{'d'},
r2: [32]byte{'a'},
wantRoot: [32]byte{'a'},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
require.NoError(t, err)
require.Equal(t, tc.wantRoot, gotRoot)
})
}
// Equal inputs should return the same root.
r, err := f.CommonAncestorRoot(ctx, [32]byte{'b'}, [32]byte{'b'})
require.NoError(t, err)
require.Equal(t, [32]byte{'b'}, r)
// Requesting finalized root (last node) should return the same root.
r, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'a'})
require.NoError(t, err)
require.Equal(t, [32]byte{'a'}, r)
// Requesting unknown root
_, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'z'})
require.ErrorIs(t, err, ErrNilNode)
_, err = f.CommonAncestorRoot(ctx, [32]byte{'z'}, [32]byte{'a'})
require.ErrorIs(t, err, ErrNilNode)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'y'}, [32]byte{'z'}, [32]byte{}, 1, 1))
// broken link
_, err = f.CommonAncestorRoot(ctx, [32]byte{'y'}, [32]byte{'a'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
}
func TestStore_InsertOptimisticChain(t *testing.T) {
f := setup(1, 1)
blks := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
blk := util.NewBeaconBlock()
blk.Block.Slot = 1
pr := [32]byte{}
blk.Block.ParentRoot = pr[:]
root, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(), JustifiedEpoch: 1,
FinalizedEpoch: 1})
for i := uint64(2); i < 11; i++ {
blk := util.NewBeaconBlock()
blk.Block.Slot = types.Slot(i)
copiedRoot := root
blk.Block.ParentRoot = copiedRoot[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(), JustifiedEpoch: 1,
FinalizedEpoch: 1})
root, err = blk.Block.HashTreeRoot()
require.NoError(t, err)
}
args := make([]*forkchoicetypes.BlockAndCheckpoints, 10)
for i := 0; i < len(blks); i++ {
args[i] = blks[10-i-1]
}
require.NoError(t, f.InsertOptimisticChain(context.Background(), args))
f = setup(1, 1)
require.NoError(t, f.InsertOptimisticChain(context.Background(), args[2:]))
}

View File

@@ -1,5 +0,0 @@
package forkchoice
import "github.com/pkg/errors"
var ErrUnknownCommonAncestor = errors.New("unknown common ancestor")

View File

@@ -37,7 +37,6 @@ type BlockProcessor interface {
justifiedEpoch types.Epoch,
finalizedEpoch types.Epoch,
) error
InsertOptimisticChain(context.Context, []*forkchoicetypes.BlockAndCheckpoints) error
}
// AttestationProcessor processes the attestation that's used for accounting fork choice.
@@ -63,7 +62,6 @@ type Getter interface {
ProposerBoost() [fieldparams.RootLength]byte
HasParent(root [32]byte) bool
AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([]byte, error)
CommonAncestorRoot(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, error)
IsCanonical(root [32]byte) bool
FinalizedEpoch() types.Epoch
JustifiedEpoch() types.Epoch

View File

@@ -20,8 +20,6 @@ go_library(
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -52,16 +50,13 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
],
)

View File

@@ -6,9 +6,6 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
@@ -198,52 +195,6 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
return f.store.nodes[i].root[:], nil
}
// CommonAncestorRoot returns the common ancestor root between the two block roots r1 and r2.
func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArray.CommonAncestorRoot")
defer span.End()
// Do nothing if the two input roots are the same.
if r1 == r2 {
return r1, nil
}
i1, ok := f.store.nodesIndices[r1]
if !ok || i1 >= uint64(len(f.store.nodes)) {
return [32]byte{}, errInvalidNodeIndex
}
i2, ok := f.store.nodesIndices[r2]
if !ok || i2 >= uint64(len(f.store.nodes)) {
return [32]byte{}, errInvalidNodeIndex
}
for {
if ctx.Err() != nil {
return [32]byte{}, ctx.Err()
}
if i1 > i2 {
n1 := f.store.nodes[i1]
i1 = n1.parent
// Reaches the end of the tree and unable to find common ancestor.
if i1 >= uint64(len(f.store.nodes)) {
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
} else {
n2 := f.store.nodes[i2]
i2 = n2.parent
// Reaches the end of the tree and unable to find common ancestor.
if i2 >= uint64(len(f.store.nodes)) {
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
}
if i1 == i2 {
n1 := f.store.nodes[i1]
return n1.root, nil
}
}
}
// PruneThreshold of fork choice store.
func (s *Store) PruneThreshold() uint64 {
return s.pruneThreshold
@@ -854,26 +805,3 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *pbrpc.Checkpoint) error {
f.store.finalizedEpoch = fc.Epoch
return nil
}
// InsertOptimisticChain inserts all nodes corresponding to blocks in the slice
// `blocks`. It includes all blocks **except** the first one.
func (f *ForkChoice) InsertOptimisticChain(ctx context.Context, chain []*forkchoicetypes.BlockAndCheckpoints) error {
if len(chain) == 0 {
return nil
}
for i := len(chain) - 1; i > 0; i-- {
b := chain[i].Block
r := bytesutil.ToBytes32(chain[i-1].Block.ParentRoot())
parentRoot := bytesutil.ToBytes32(b.ParentRoot())
payloadHash, err := blocks.GetBlockPayloadHash(b)
if err != nil {
return err
}
if err := f.store.insert(ctx,
b.Slot(), r, parentRoot, payloadHash,
chain[i].JustifiedEpoch, chain[i].FinalizedEpoch); err != nil {
return err
}
}
return nil
}

View File

@@ -4,16 +4,12 @@ import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestStore_PruneThreshold(t *testing.T) {
@@ -564,159 +560,6 @@ func TestStore_PruneBranched(t *testing.T) {
}
}
func TestStore_CommonAncestor(t *testing.T) {
ctx := context.Background()
f := setup(0, 0)
// /-- b -- d -- e
// a
// \-- c -- f
// \-- g
// \ -- h -- i -- j
require.NoError(t, f.InsertOptimisticBlock(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, [32]byte{'d'}, [32]byte{'b'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, [32]byte{'f'}, [32]byte{'c'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 6, [32]byte{'g'}, [32]byte{'c'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 7, [32]byte{'h'}, [32]byte{'c'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 8, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 9, [32]byte{'j'}, [32]byte{'i'}, [32]byte{}, 1, 1))
tests := []struct {
name string
r1 [32]byte
r2 [32]byte
wantRoot [32]byte
}{
{
name: "Common ancestor between c and b is a",
r1: [32]byte{'c'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between c and d is a",
r1: [32]byte{'c'},
r2: [32]byte{'d'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between c and e is a",
r1: [32]byte{'c'},
r2: [32]byte{'e'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between g and f is c",
r1: [32]byte{'g'},
r2: [32]byte{'f'},
wantRoot: [32]byte{'c'},
},
{
name: "Common ancestor between f and h is c",
r1: [32]byte{'f'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'c'},
},
{
name: "Common ancestor between g and h is c",
r1: [32]byte{'g'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'c'},
},
{
name: "Common ancestor between b and h is a",
r1: [32]byte{'b'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between e and h is a",
r1: [32]byte{'e'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between i and f is c",
r1: [32]byte{'i'},
r2: [32]byte{'f'},
wantRoot: [32]byte{'c'},
},
{
name: "Common ancestor between j and g is c",
r1: [32]byte{'j'},
r2: [32]byte{'g'},
wantRoot: [32]byte{'c'},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
require.NoError(t, err)
require.Equal(t, tc.wantRoot, gotRoot)
})
}
// a -- b -- c -- d
f = setup(0, 0)
require.NoError(t, f.InsertOptimisticBlock(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1))
tests = []struct {
name string
r1 [32]byte
r2 [32]byte
wantRoot [32]byte
}{
{
name: "Common ancestor between a and b is a",
r1: [32]byte{'a'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'a'},
},
{
name: "Common ancestor between b and d is b",
r1: [32]byte{'d'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'b'},
},
{
name: "Common ancestor between d and a is a",
r1: [32]byte{'d'},
r2: [32]byte{'a'},
wantRoot: [32]byte{'a'},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
require.NoError(t, err)
require.Equal(t, tc.wantRoot, gotRoot)
})
}
// Equal inputs should return the same root.
r, err := f.CommonAncestorRoot(ctx, [32]byte{'b'}, [32]byte{'b'})
require.NoError(t, err)
require.Equal(t, [32]byte{'b'}, r)
// Requesting finalized root (last node) should return the same root.
r, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'a'})
require.NoError(t, err)
require.Equal(t, [32]byte{'a'}, r)
// Requesting unknown root
_, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'z'})
require.ErrorIs(t, err, errInvalidNodeIndex)
_, err = f.CommonAncestorRoot(ctx, [32]byte{'z'}, [32]byte{'a'})
require.ErrorIs(t, err, errInvalidNodeIndex)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'y'}, [32]byte{'z'}, [32]byte{}, 1, 1))
// broken link
_, err = f.CommonAncestorRoot(ctx, [32]byte{'y'}, [32]byte{'a'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
}
func TestStore_LeadsToViableHead(t *testing.T) {
tests := []struct {
n *Node
@@ -979,38 +822,3 @@ func TestStore_UpdateCheckpoints(t *testing.T) {
require.Equal(t, f.store.justifiedEpoch, jc.Epoch)
require.Equal(t, f.store.finalizedEpoch, fc.Epoch)
}
func TestStore_InsertOptimisticChain(t *testing.T) {
f := setup(1, 1)
blks := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
blk := util.NewBeaconBlock()
blk.Block.Slot = 1
pr := [32]byte{}
blk.Block.ParentRoot = pr[:]
root, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(), JustifiedEpoch: 1,
FinalizedEpoch: 1})
for i := uint64(2); i < 11; i++ {
blk := util.NewBeaconBlock()
blk.Block.Slot = types.Slot(i)
copiedRoot := root
blk.Block.ParentRoot = copiedRoot[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(), JustifiedEpoch: 1,
FinalizedEpoch: 1})
root, err = blk.Block.HashTreeRoot()
require.NoError(t, err)
}
args := make([]*forkchoicetypes.BlockAndCheckpoints, 10)
for i := 0; i < len(blks); i++ {
args[i] = blks[10-i-1]
}
require.NoError(t, f.InsertOptimisticChain(context.Background(), args))
f = setup(1, 1)
require.NoError(t, f.InsertOptimisticChain(context.Background(), args[2:]))
}

View File

@@ -5,8 +5,5 @@ go_library(
srcs = ["types.go"],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types",
visibility = ["//visibility:public"],
deps = [
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
],
deps = ["//consensus-types/primitives:go_default_library"],
)

View File

@@ -1,7 +1,6 @@
package types
import (
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
)
@@ -12,10 +11,3 @@ type ProposerBoostRootArgs struct {
CurrentSlot types.Slot
SecondsIntoSlot uint64
}
// BlockAndCheckpoints is the argument type passed to the InsertOptimisticChain function
type BlockAndCheckpoints struct {
Block interfaces.BeaconBlock
JustifiedEpoch types.Epoch
FinalizedEpoch types.Epoch
}

View File

@@ -18,7 +18,6 @@ go_library(
"//api/gateway:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/db:go_default_library",

View File

@@ -129,25 +129,6 @@ func configureInteropConfig(cliCtx *cli.Context) error {
}
func configureExecutionSetting(cliCtx *cli.Context) error {
if cliCtx.IsSet(flags.TerminalTotalDifficultyOverride.Name) {
c := params.BeaconConfig()
c.TerminalTotalDifficulty = cliCtx.String(flags.TerminalTotalDifficultyOverride.Name)
log.WithField("terminal block difficult", c.TerminalTotalDifficulty).Warn("Terminal block difficult overridden")
params.OverrideBeaconConfig(c)
}
if cliCtx.IsSet(flags.TerminalBlockHashOverride.Name) {
c := params.BeaconConfig()
c.TerminalBlockHash = common.HexToHash(cliCtx.String(flags.TerminalBlockHashOverride.Name))
log.WithField("terminal block hash", c.TerminalBlockHash.Hex()).Warn("Terminal block hash overridden")
params.OverrideBeaconConfig(c)
}
if cliCtx.IsSet(flags.TerminalBlockHashActivationEpochOverride.Name) {
c := params.BeaconConfig()
c.TerminalBlockHashActivationEpoch = types.Epoch(cliCtx.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name))
log.WithField("terminal block hash activation epoch", c.TerminalBlockHashActivationEpoch).Warn("Terminal block hash activation epoch overridden")
params.OverrideBeaconConfig(c)
}
if !cliCtx.IsSet(flags.SuggestedFeeRecipient.Name) {
return nil
}

View File

@@ -92,13 +92,6 @@ func TestConfigureExecutionSetting(t *testing.T) {
app := cli.App{}
set := flag.NewFlagSet("test", 0)
set.String(flags.SuggestedFeeRecipient.Name, "", "")
set.Uint64(flags.TerminalTotalDifficultyOverride.Name, 0, "")
set.String(flags.TerminalBlockHashOverride.Name, "", "")
set.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name, 0, "")
require.NoError(t, set.Set(flags.TerminalTotalDifficultyOverride.Name, strconv.Itoa(100)))
require.NoError(t, set.Set(flags.TerminalBlockHashOverride.Name, "0xA"))
require.NoError(t, set.Set(flags.TerminalBlockHashActivationEpochOverride.Name, strconv.Itoa(200)))
require.NoError(t, set.Set(flags.SuggestedFeeRecipient.Name, "0xB"))
cliCtx := cli.NewContext(&app, set, nil)
err := configureExecutionSetting(cliCtx)
@@ -119,10 +112,6 @@ func TestConfigureExecutionSetting(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"), params.BeaconConfig().DefaultFeeRecipient)
assert.Equal(t, "100", params.BeaconConfig().TerminalTotalDifficulty)
assert.Equal(t, common.HexToHash("0xA"), params.BeaconConfig().TerminalBlockHash)
assert.Equal(t, types.Epoch(200), params.BeaconConfig().TerminalBlockHashActivationEpoch)
}
func TestConfigureNetwork(t *testing.T) {

View File

@@ -20,7 +20,6 @@ import (
apigateway "github.com/prysmaticlabs/prysm/api/gateway"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
@@ -77,7 +76,6 @@ const debugGrpcMaxMsgSize = 1 << 27
type serviceFlagOpts struct {
blockchainFlagOpts []blockchain.Option
powchainFlagOpts []powchain.Option
builderOpts []builder.Option
}
// BeaconNode defines a struct that handles the services running a random beacon chain
@@ -228,11 +226,6 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
log.Debugln("Starting Fork Choice")
beacon.startForkChoice()
log.Debugln("Registering builder service")
if err := beacon.registerBuilderService(); err != nil {
return nil, err
}
log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(); err != nil {
return nil, err
@@ -573,14 +566,6 @@ func (b *BeaconNode) fetchP2P() p2p.P2P {
return p
}
func (b *BeaconNode) fetchBuilderService() *builder.Service {
var s *builder.Service
if err := b.services.FetchService(&s); err != nil {
panic(err)
}
return s
}
func (b *BeaconNode) registerAttestationPool() error {
s, err := attestations.NewService(b.ctx, &attestations.Config{
Pool: b.attestationPool,
@@ -819,7 +804,6 @@ func (b *BeaconNode) registerRPCService() error {
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
BlockBuilder: b.fetchBuilderService(),
OptimisticModeFetcher: chainService,
AttestationsPool: b.attestationPool,
ExitPool: b.exitPool,
@@ -984,12 +968,3 @@ func (b *BeaconNode) registerValidatorMonitorService() error {
}
return b.services.RegisterService(svc)
}
func (b *BeaconNode) registerBuilderService() error {
options := b.serviceFlagOpts.builderOpts
svc, err := builder.NewService(b.ctx, options...)
if err != nil {
return err
}
return b.services.RegisterService(svc)
}

View File

@@ -2,7 +2,6 @@ package node
import (
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
)
@@ -24,11 +23,3 @@ func WithPowchainFlagOptions(opts []powchain.Option) Option {
return nil
}
}
// WithBuilderFlagOptions includes functional options for the builder service related to CLI flags.
func WithBuilderFlagOptions(opts []builder.Option) Option {
return func(bn *BeaconNode) error {
bn.serviceFlagOpts.builderOpts = opts
return nil
}
}

View File

@@ -296,7 +296,7 @@ func handleRPCError(err error) error {
return nil
}
if isTimeout(err) {
return ErrHTTPTimeout
return errors.Wrapf(ErrHTTPTimeout, "%s", err)
}
e, ok := err.(rpc.Error)
if !ok {
@@ -307,7 +307,7 @@ func handleRPCError(err error) error {
"here https://docs.prylabs.network/docs/execution-node/authentication")
return fmt.Errorf("could not authenticate connection to execution client: %v", err)
}
return errors.Wrapf(err, "got an unexpected error in JSON-RPC response")
return errors.Wrap(err, "got an unexpected error")
}
switch e.ErrorCode() {
case -32700:
@@ -330,7 +330,7 @@ func handleRPCError(err error) error {
// Only -32000 status codes are data errors in the RPC specification.
errWithData, ok := err.(rpc.DataError)
if !ok {
return errors.Wrapf(err, "got an unexpected error in JSON-RPC response")
return errors.Wrap(err, "got an unexpected error")
}
return errors.Wrapf(ErrServer, "%v", errWithData.ErrorData())
default:

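The change above wraps ErrHTTPTimeout instead of returning the bare sentinel, which preserves the underlying error text while still letting callers match the sentinel. A small self-contained sketch of that behaviour, assuming github.com/pkg/errors v0.9+ (which implements Unwrap) and using a stand-in sentinel rather than the real one defined elsewhere in the tree:

```go
package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
)

// errHTTPTimeout stands in for the package's own sentinel.
var errHTTPTimeout = stderrors.New("timeout from http.Client")

func main() {
	cause := stderrors.New("context deadline exceeded")
	wrapped := errors.Wrapf(errHTTPTimeout, "%s", cause)

	// The sentinel is still matchable, and the message now carries the cause.
	fmt.Println(stderrors.Is(wrapped, errHTTPTimeout)) // true
	fmt.Println(wrapped)                               // context deadline exceeded: timeout from http.Client
}
```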
View File

@@ -16,14 +16,14 @@ var (
getPayloadLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "get_payload_v1_latency_milliseconds",
Help: "Captures RPC latency for getPayloadV1 in milliseconds",
Help: "Captures RPC latency for newPayloadV1 in milliseconds",
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
},
)
forkchoiceUpdatedLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "forkchoice_updated_v1_latency_milliseconds",
Help: "Captures RPC latency for forkchoiceUpdatedV1 in milliseconds",
Help: "Captures RPC latency for newPayloadV1 in milliseconds",
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
},
)
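For context, histograms like these are typically fed by timing the RPC round trip and observing the elapsed milliseconds. A minimal, self-contained sketch; the wrapper function and the simulated call are assumptions for illustration, not code from this diff:

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var getPayloadLatency = promauto.NewHistogram(prometheus.HistogramOpts{
	Name:    "get_payload_v1_latency_milliseconds",
	Help:    "Captures RPC latency for getPayloadV1 in milliseconds",
	Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
})

// timeCall runs fn and records its wall-clock duration in milliseconds,
// matching the bucket units of the histogram above.
func timeCall(fn func() error) error {
	start := time.Now()
	err := fn()
	getPayloadLatency.Observe(float64(time.Since(start).Milliseconds()))
	return err
}

func main() {
	_ = timeCall(func() error {
		time.Sleep(3 * time.Millisecond) // stand-in for the engine API round trip
		return nil
	})
}
```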

View File

@@ -10,7 +10,6 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",

View File

@@ -123,9 +123,6 @@ func TestGetSpec(t *testing.T) {
var daap [4]byte
copy(daap[:], []byte{'0', '0', '0', '7'})
config.DomainAggregateAndProof = daap
var dam [4]byte
copy(dam[:], []byte{'1', '0', '0', '0'})
config.DomainApplicationMask = dam
params.OverrideBeaconConfig(config)
@@ -133,7 +130,7 @@ func TestGetSpec(t *testing.T) {
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
assert.Equal(t, 99, len(resp.Data))
assert.Equal(t, 98, len(resp.Data))
for k, v := range resp.Data {
switch k {
case "CONFIG_NAME":
@@ -318,8 +315,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "0x30303036", v)
case "DOMAIN_AGGREGATE_AND_PROOF":
assert.Equal(t, "0x30303037", v)
case "DOMAIN_APPLICATION_MASK":
assert.Equal(t, "0x31303030", v)
case "DOMAIN_SYNC_COMMITTEE":
assert.Equal(t, "0x07000000", v)
case "DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF":

View File

@@ -27,7 +27,6 @@ go_library(
deps = [
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/altair:go_default_library",

View File

@@ -19,7 +19,6 @@ import (
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -58,7 +57,12 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
return nil, err
}
return vs.getBellatrixBeaconBlock(ctx, req)
blk, err := vs.getBellatrixBeaconBlock(ctx, req)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not fetch Bellatrix beacon block: %v", err)
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Bellatrix{Bellatrix: blk}}, nil
}
// GetBlock is called by a proposer during its assigned slot to request a block to sign
@@ -129,20 +133,9 @@ func (vs *Server) PrepareBeaconProposer(
return &emptypb.Empty{}, nil
}
// SubmitValidatorRegistration submits validator registration.
func (vs *Server) SubmitValidatorRegistration(context.Context, *ethpb.SignedValidatorRegistrationV1) (*emptypb.Empty, error) {
return &emptypb.Empty{}, nil
}
func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.SignedBeaconBlock) (*ethpb.ProposeResponse, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.proposeGenericBeaconBlock")
defer span.End()
blk, err := vs.getBuilderBlock(ctx, blk)
if err != nil {
return nil, err
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, fmt.Errorf("could not tree hash block: %v", err)
@@ -175,75 +168,6 @@ func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.
}, nil
}
func (vs *Server) getBuilderBlock(ctx context.Context, b interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
if b.Version() != version.BellatrixBlind {
return b, nil
}
if vs.BlockBuilder.Status() != nil {
return nil, status.Errorf(codes.FailedPrecondition, "Block builder is not running")
}
agg, err := b.Block().Body().SyncAggregate()
if err != nil {
return nil, err
}
h, err := b.Block().Body().ExecutionPayloadHeader()
if err != nil {
return nil, err
}
sb := &ethpb.SignedBlindedBeaconBlockBellatrix{
Block: &ethpb.BlindedBeaconBlockBellatrix{
Slot: b.Block().Slot(),
ProposerIndex: b.Block().ProposerIndex(),
ParentRoot: b.Block().ParentRoot(),
StateRoot: b.Block().StateRoot(),
Body: &ethpb.BlindedBeaconBlockBodyBellatrix{
RandaoReveal: b.Block().Body().RandaoReveal(),
Eth1Data: b.Block().Body().Eth1Data(),
Graffiti: b.Block().Body().Graffiti(),
ProposerSlashings: b.Block().Body().ProposerSlashings(),
AttesterSlashings: b.Block().Body().AttesterSlashings(),
Attestations: b.Block().Body().Attestations(),
Deposits: b.Block().Body().Deposits(),
VoluntaryExits: b.Block().Body().VoluntaryExits(),
SyncAggregate: agg,
ExecutionPayloadHeader: h,
},
},
Signature: nil,
}
payload, err := vs.BlockBuilder.SubmitBlindedBlock(ctx, sb)
if err != nil {
return nil, err
}
bb := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: sb.Block.Slot,
ProposerIndex: sb.Block.ProposerIndex,
ParentRoot: sb.Block.ParentRoot,
StateRoot: sb.Block.StateRoot,
Body: &ethpb.BeaconBlockBodyBellatrix{
RandaoReveal: sb.Block.Body.RandaoReveal,
Eth1Data: sb.Block.Body.Eth1Data,
Graffiti: sb.Block.Body.Graffiti,
ProposerSlashings: sb.Block.Body.ProposerSlashings,
AttesterSlashings: sb.Block.Body.AttesterSlashings,
Attestations: sb.Block.Body.Attestations,
Deposits: sb.Block.Body.Deposits,
VoluntaryExits: sb.Block.Body.VoluntaryExits,
SyncAggregate: agg,
ExecutionPayload: payload,
},
},
Signature: nil,
}
wb, err := wrapper.WrappedSignedBeaconBlock(bb)
if err != nil {
return nil, err
}
return wb, nil
}
// computeStateRoot computes the state root after a block has been processed through a state transition and
// returns it to the validator client.
func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.SignedBeaconBlock) ([]byte, error) {
@@ -263,8 +187,3 @@ func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.SignedB
log.WithField("beaconStateRoot", fmt.Sprintf("%#x", root)).Debugf("Computed state root")
return root[:], nil
}
// SubmitValidatorRegistration submits validator registration.
func (vs *Server) SubmitValidatorRegistration(context.Context, *ethpb.SignedValidatorRegistrationV1) (*emptypb.Empty, error) {
return &emptypb.Empty{}, nil
}

View File

@@ -4,59 +4,18 @@ import (
"context"
"fmt"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition/interop"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
func (vs *Server) getBellatrixBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.GenericBeaconBlock, error) {
func (vs *Server) getBellatrixBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockBellatrix, error) {
altairBlk, err := vs.buildAltairBeaconBlock(ctx, req)
if err != nil {
return nil, err
}
h, exists, err := vs.getBuilderHeader(ctx, req.Slot, altairBlk.ProposerIndex)
if err != nil {
return nil, err
}
if exists {
blk := &ethpb.BlindedBeaconBlockBellatrix{
Slot: altairBlk.Slot,
ProposerIndex: altairBlk.ProposerIndex,
ParentRoot: altairBlk.ParentRoot,
StateRoot: params.BeaconConfig().ZeroHash[:],
Body: &ethpb.BlindedBeaconBlockBodyBellatrix{
RandaoReveal: altairBlk.Body.RandaoReveal,
Eth1Data: altairBlk.Body.Eth1Data,
Graffiti: altairBlk.Body.Graffiti,
ProposerSlashings: altairBlk.Body.ProposerSlashings,
AttesterSlashings: altairBlk.Body.AttesterSlashings,
Attestations: altairBlk.Body.Attestations,
Deposits: altairBlk.Body.Deposits,
VoluntaryExits: altairBlk.Body.VoluntaryExits,
SyncAggregate: altairBlk.Body.SyncAggregate,
ExecutionPayloadHeader: h,
},
}
wsb, err := wrapper.WrappedSignedBeaconBlock(
&ethpb.SignedBlindedBeaconBlockBellatrix{Block: blk, Signature: make([]byte, 96)},
)
if err != nil {
return nil, err
}
stateRoot, err := vs.computeStateRoot(ctx, wsb)
if err != nil {
interop.WriteBlockToDisk(wsb, true /*failed*/)
return nil, fmt.Errorf("could not compute state root: %v", err)
}
blk.StateRoot = stateRoot
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedBellatrix{BlindedBellatrix: blk}}, nil
}
payload, err := vs.getExecutionPayload(ctx, req.Slot, altairBlk.ProposerIndex)
if err != nil {
return nil, err
@@ -93,33 +52,5 @@ func (vs *Server) getBellatrixBeaconBlock(ctx context.Context, req *ethpb.BlockR
return nil, fmt.Errorf("could not compute state root: %v", err)
}
blk.StateRoot = stateRoot
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Bellatrix{Bellatrix: blk}}, nil
}
func (vs *Server) getBuilderHeader(ctx context.Context, slot types.Slot, idx types.ValidatorIndex) (*ethpb.ExecutionPayloadHeader, bool, error) {
if vs.BlockBuilder.Status() != nil {
log.WithError(vs.BlockBuilder.Status()).Error("Could not get builder status")
return nil, false, nil
}
b, err := vs.HeadFetcher.HeadBlock(ctx)
if err != nil {
return nil, false, err
}
if blocks.IsPreBellatrixVersion(b.Version()) {
return nil, false, nil
}
h, err := b.Block().Body().ExecutionPayload()
if err != nil {
return nil, false, err
}
pk, err := vs.HeadFetcher.HeadValidatorIndexToPublicKey(ctx, idx)
if err != nil {
return nil, false, err
}
bid, err := vs.BlockBuilder.GetHeader(ctx, slot, bytesutil.ToBytes32(h.BlockHash), pk)
if err != nil {
log.WithError(err).Error("Could not get builder header")
return nil, false, nil
}
return bid.Message.Header, true, nil
return blk, nil
}

View File

@@ -9,7 +9,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
@@ -70,7 +69,6 @@ type Server struct {
ReplayerBuilder stategen.ReplayerBuilder
BeaconDB db.HeadAccessDatabase
ExecutionEngineCaller powchain.EngineCaller
BlockBuilder builder.BlockBuilder
}
// WaitForActivation checks if a validator public key exists in the active validator registry of the current

View File

@@ -14,7 +14,6 @@ import (
grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
@@ -91,7 +90,6 @@ type Config struct {
POWChainInfoFetcher powchain.ChainInfoFetcher
GenesisTimeFetcher blockchain.TimeFetcher
GenesisFetcher blockchain.GenesisFetcher
BlockBuilder builder.BlockBuilder
EnableDebugRPCEndpoints bool
MockEth1Votes bool
AttestationsPool attestations.Pool
@@ -218,7 +216,6 @@ func (s *Service) Start() {
ExecutionEngineCaller: s.cfg.ExecutionEngineCaller,
BeaconDB: s.cfg.BeaconDB,
ProposerSlotIndexCache: s.cfg.ProposerIdsCache,
BlockBuilder: s.cfg.BlockBuilder,
}
validatorServerV1 := &validator.Server{
HeadFetcher: s.cfg.HeadFetcher,

View File

@@ -29,7 +29,6 @@ go_test(
"field_trie_test.go",
"helpers_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//beacon-chain/state/stateutil:go_default_library",

View File

@@ -10,11 +10,6 @@ import (
pmath "github.com/prysmaticlabs/prysm/math"
)
var (
ErrInvalidFieldTrie = errors.New("invalid field trie")
ErrEmptyFieldTrie = errors.New("empty field trie")
)
// FieldTrie is the representation of the representative
// trie of the particular field.
type FieldTrie struct {
@@ -193,12 +188,6 @@ func (f *FieldTrie) CopyTrie() *FieldTrie {
// TrieRoot returns the corresponding root of the trie.
func (f *FieldTrie) TrieRoot() ([32]byte, error) {
if f.Empty() {
return [32]byte{}, ErrEmptyFieldTrie
}
if len(f.fieldLayers[len(f.fieldLayers)-1]) == 0 {
return [32]byte{}, ErrInvalidFieldTrie
}
switch f.dataType {
case types.BasicArray:
return *f.fieldLayers[len(f.fieldLayers)-1][0], nil

View File

@@ -27,13 +27,6 @@ func TestFieldTrie_NewTrie(t *testing.T) {
assert.Equal(t, root, newRoot)
}
func TestFieldTrie_NewTrie_NilElements(t *testing.T) {
trie, err := fieldtrie.NewFieldTrie(stateTypes.FieldIndex(5), stateTypes.BasicArray, nil, 8234)
require.NoError(t, err)
_, err = trie.TrieRoot()
require.ErrorIs(t, err, fieldtrie.ErrEmptyFieldTrie)
}
func TestFieldTrie_RecomputeTrie(t *testing.T) {
newState, _ := util.DeterministicGenesisState(t, 32)
// 10 represents the enum value of validators
@@ -84,27 +77,3 @@ func TestFieldTrie_CopyTrieImmutable(t *testing.T) {
t.Errorf("Wanted roots to be different, but they are the same: %#x", root)
}
}
func FuzzFieldTrie(f *testing.F) {
newState, _ := util.DeterministicGenesisState(f, 40)
var data []byte
for _, root := range newState.StateRoots() {
data = append(data, root...)
}
f.Add(5, int(stateTypes.BasicArray), data, uint64(params.BeaconConfig().SlotsPerHistoricalRoot))
f.Fuzz(func(t *testing.T, idx, typ int, data []byte, slotsPerHistRoot uint64) {
var roots [][]byte
for i := 32; i < len(data); i += 32 {
roots = append(roots, data[i-32:i])
}
trie, err := fieldtrie.NewFieldTrie(stateTypes.FieldIndex(idx), stateTypes.DataType(typ), roots, slotsPerHistRoot)
if err != nil {
return // invalid inputs
}
_, err = trie.TrieRoot()
if err != nil {
return
}
})
}

View File

@@ -1,5 +0,0 @@
go test fuzz v1
int(5)
int(0)
[]byte("")
uint64(8234)
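The five lines above are a Go native fuzzing seed-corpus entry: the `go test fuzz v1` header is followed by one value per argument of the fuzz target, in the same order and types as the `f.Add(5, int(stateTypes.BasicArray), data, uint64(...))` call in FuzzFieldTrie. Assuming Go 1.18+ and a plain `go test` invocation outside Bazel, the fuzzer would be run with something like `go test -run=^FuzzFieldTrie$ -fuzz=FuzzFieldTrie ./beacon-chain/state/fieldtrie/`; the package path here is inferred from the BUILD targets elsewhere in this diff.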

View File

@@ -223,7 +223,6 @@ type FutureForkStub interface {
AppendInactivityScore(s uint64) error
CurrentEpochParticipation() ([]byte, error)
PreviousEpochParticipation() ([]byte, error)
UnrealizedCheckpointBalances() (uint64, uint64, uint64, error)
InactivityScores() ([]uint64, error)
SetInactivityScores(val []uint64) error
CurrentSyncCommittee() (*ethpb.SyncCommittee, error)

View File

@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"doc.go",
"error.go",
"getters_attestation.go",
"getters_block.go",
"getters_checkpoint.go",
@@ -55,7 +54,6 @@ go_library(
"//tools/pcli:__pkg__",
],
deps = [
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/fieldtrie:go_default_library",
"//beacon-chain/state/state-native/custom-types:go_default_library",
@@ -85,7 +83,6 @@ go_test(
"getters_attestation_test.go",
"getters_block_test.go",
"getters_checkpoint_test.go",
"getters_participation_test.go",
"getters_test.go",
"getters_validator_test.go",
"hasher_test.go",

View File

@@ -1,5 +0,0 @@
package state_native
import "errors"
var ErrNilParticipation = errors.New("Nil epoch participation in state")

View File

@@ -1,8 +1,6 @@
package state_native
import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/runtime/version"
)
@@ -38,28 +36,6 @@ func (b *BeaconState) PreviousEpochParticipation() ([]byte, error) {
return b.previousEpochParticipationVal(), nil
}
// UnrealizedCheckpointBalances returns the total balances: active, target attested in
// current epoch and target attested in previous epoch. This function is used to
// compute the "unrealized justification" that a synced Beacon Block will have.
func (b *BeaconState) UnrealizedCheckpointBalances() (uint64, uint64, uint64, error) {
if b.version == version.Phase0 {
return 0, 0, 0, errNotSupported("UnrealizedCheckpointBalances", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
cp := b.currentEpochParticipation
pp := b.previousEpochParticipation
if cp == nil || pp == nil {
return 0, 0, 0, ErrNilParticipation
}
currentEpoch := time.CurrentEpoch(b)
return stateutil.UnrealizedCheckpointBalances(cp, pp, b.validators, currentEpoch)
}
// currentEpochParticipationVal corresponding to participation bits on the beacon chain.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) currentEpochParticipationVal() []byte {

View File

@@ -1,64 +0,0 @@
package state_native
import (
"testing"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestState_UnrealizedCheckpointBalances(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
balances := make([]uint64, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
base := &ethpb.BeaconStateAltair{
Slot: 2,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Validators: validators,
CurrentEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
PreviousEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
Balances: balances,
}
state, err := InitializeFromProtoAltair(base)
require.NoError(t, err)
// No one voted in the last two epochs
allActive := params.BeaconConfig().MinGenesisActiveValidatorCount * params.BeaconConfig().MaxEffectiveBalance
active, previous, current, err := state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive, active)
require.Equal(t, uint64(0), current)
require.Equal(t, uint64(0), previous)
// Add some votes in the last two epochs:
base.CurrentEpochParticipation[0] = 0xFF
base.PreviousEpochParticipation[0] = 0xFF
base.PreviousEpochParticipation[1] = 0xFF
state, err = InitializeFromProtoAltair(base)
require.NoError(t, err)
active, previous, current, err = state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive, active)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, current)
require.Equal(t, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
// Slash some validators
validators[0].Slashed = true
state, err = InitializeFromProtoAltair(base)
require.NoError(t, err)
active, previous, current, err = state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive-params.BeaconConfig().MaxEffectiveBalance, active)
require.Equal(t, uint64(0), current)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, previous)
}

View File

@@ -15,7 +15,6 @@ go_library(
"state_hasher.go",
"sync_committee.root.go",
"trie_helpers.go",
"unrealized_justification.go",
"validator_map_handler.go",
"validator_root.go",
],
@@ -58,7 +57,6 @@ go_test(
"reference_bench_test.go",
"state_root_test.go",
"trie_helpers_test.go",
"unrealized_justification_test.go",
"validator_root_test.go",
],
embed = [":go_default_library"],

View File

@@ -1,43 +0,0 @@
package stateutil
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/math"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
func UnrealizedCheckpointBalances(cp, pp []byte, validators []*ethpb.Validator, currentEpoch types.Epoch) (uint64, uint64, uint64, error) {
targetIdx := params.BeaconConfig().TimelyTargetFlagIndex
activeBalance := uint64(0)
currentTarget := uint64(0)
prevTarget := uint64(0)
if len(cp) < len(validators) || len(pp) < len(validators) {
return 0, 0, 0, errors.New("participation does not match validator set")
}
var err error
for i, v := range validators {
active := v.ActivationEpoch <= currentEpoch && currentEpoch < v.ExitEpoch
if active && !v.Slashed {
activeBalance, err = math.Add64(activeBalance, v.EffectiveBalance)
if err != nil {
return 0, 0, 0, err
}
if ((cp[i] >> targetIdx) & 1) == 1 {
currentTarget, err = math.Add64(currentTarget, v.EffectiveBalance)
if err != nil {
return 0, 0, 0, err
}
}
if ((pp[i] >> targetIdx) & 1) == 1 {
prevTarget, err = math.Add64(prevTarget, v.EffectiveBalance)
if err != nil {
return 0, 0, 0, err
}
}
}
}
return activeBalance, prevTarget, currentTarget, nil
}
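The bit test `((cp[i] >> targetIdx) & 1) == 1` above reads the timely-target participation flag for validator i; in the Altair spec the timely-target flag index is 1, so the check inspects bit 1 of the participation byte. A small self-contained illustration with made-up participation values, not taken from this diff:

```go
package main

import "fmt"

func main() {
	const targetIdx = 1 // TIMELY_TARGET_FLAG_INDEX in the Altair spec

	// Validator attested to the correct target: bit 1 of its participation byte is set.
	withTarget := byte(1 << targetIdx) // 0b00000010
	fmt.Println(((withTarget >> targetIdx) & 1) == 1) // true: counted toward the target balance

	// Validator only has the timely-source flag (bit 0) set: not counted.
	sourceOnly := byte(1)
	fmt.Println(((sourceOnly >> targetIdx) & 1) == 1) // false
}
```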

View File

@@ -1,95 +0,0 @@
package stateutil
import (
"testing"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestState_UnrealizedCheckpointBalances(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
targetFlag := params.BeaconConfig().TimelyTargetFlagIndex
expectedActive := params.BeaconConfig().MinGenesisActiveValidatorCount * params.BeaconConfig().MaxEffectiveBalance
balances := make([]uint64, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
cp := make([]byte, len(validators))
pp := make([]byte, len(validators))
t.Run("No one voted last two epochs", func(tt *testing.T) {
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 0)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
require.Equal(tt, uint64(0), previous)
})
t.Run("bad votes in last two epochs", func(tt *testing.T) {
copy(cp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00})
copy(pp, []byte{0x00, 0x00, 0x00, 0x00})
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
require.Equal(tt, uint64(0), previous)
})
t.Run("two votes in last epoch", func(tt *testing.T) {
copy(cp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 1 << targetFlag, 1 << targetFlag})
copy(pp, []byte{0x00, 0x00, 0x00, 0x00, 0xFF ^ (1 << targetFlag)})
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, current)
require.Equal(tt, uint64(0), previous)
})
t.Run("two votes in previous epoch", func(tt *testing.T) {
copy(cp, []byte{0x00, 0x00, 0x00, 0x00, 0xFF ^ (1 << targetFlag), 0x00})
copy(pp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 1 << targetFlag, 1 << targetFlag})
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
require.NoError(tt, err)
require.Equal(tt, expectedActive, active)
require.Equal(tt, uint64(0), current)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
})
t.Run("votes in both epochs, decreased balance in first validator", func(tt *testing.T) {
validators[0].EffectiveBalance = params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().MinDepositAmount
copy(cp, []byte{0xFF, 0xFF, 0x00, 0x00, 0xFF ^ (1 << targetFlag), 0})
copy(pp, []byte{0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0xFF ^ (1 << targetFlag), 0x00, 0xFF, 0xFF})
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
require.NoError(tt, err)
expectedActive -= params.BeaconConfig().MinDepositAmount
require.Equal(tt, expectedActive, active)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance-params.BeaconConfig().MinDepositAmount, current)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
})
t.Run("slash a validator", func(tt *testing.T) {
validators[1].Slashed = true
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 1)
require.NoError(tt, err)
expectedActive -= params.BeaconConfig().MaxEffectiveBalance
require.Equal(tt, expectedActive, active)
require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance-params.BeaconConfig().MinDepositAmount, current)
require.Equal(tt, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
})
t.Run("Exit a validator", func(tt *testing.T) {
validators[4].ExitEpoch = 1
active, previous, current, err := UnrealizedCheckpointBalances(cp, pp, validators, 2)
require.NoError(tt, err)
expectedActive -= params.BeaconConfig().MaxEffectiveBalance
require.Equal(tt, expectedActive, active)
require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance-params.BeaconConfig().MinDepositAmount, current)
require.Equal(tt, params.BeaconConfig().MaxEffectiveBalance, previous)
})
}

View File

@@ -63,8 +63,6 @@ func TestNilState_NoPanic(t *testing.T) {
_ = st.PreviousJustifiedCheckpoint()
_ = st.CurrentJustifiedCheckpoint()
_ = st.FinalizedCheckpoint()
_, _, _, err = st.UnrealizedCheckpointBalances()
_ = err
}
func TestBeaconState_MatchCurrentJustifiedCheckpt(t *testing.T) {

View File

@@ -15,11 +15,6 @@ func (*BeaconState) PreviousEpochParticipation() ([]byte, error) {
return nil, errors.New("PreviousEpochParticipation is not supported for phase 0 beacon state")
}
// UnrealizedCheckpointBalances is not supported for phase 0 beacon state.
func (*BeaconState) UnrealizedCheckpointBalances() (uint64, uint64, uint64, error) {
return 0, 0, 0, errors.New("UnrealizedCheckpointBalances is not supported for phase0 beacon state")
}
// InactivityScores is not supported for phase 0 beacon state.
func (*BeaconState) InactivityScores() ([]uint64, error) {
return nil, errors.New("InactivityScores is not supported for phase 0 beacon state")

View File

@@ -5,7 +5,6 @@ go_library(
srcs = [
"deprecated_getters.go",
"deprecated_setters.go",
"error.go",
"field_roots.go",
"getters_block.go",
"getters_checkpoint.go",
@@ -33,7 +32,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/v2",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/fieldtrie:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
@@ -64,7 +62,6 @@ go_test(
"deprecated_setters_test.go",
"getters_block_test.go",
"getters_checkpoint_test.go",
"getters_participation_test.go",
"getters_test.go",
"getters_validator_test.go",
"proofs_test.go",

View File

@@ -1,5 +0,0 @@
package v2
import "errors"
var ErrNilParticipation = errors.New("Nil epoch participation in state")

View File

@@ -1,10 +1,5 @@
package v2
import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
)
// CurrentEpochParticipation corresponding to participation bits on the beacon chain.
func (b *BeaconState) CurrentEpochParticipation() ([]byte, error) {
if !b.hasInnerState() {
@@ -35,26 +30,6 @@ func (b *BeaconState) PreviousEpochParticipation() ([]byte, error) {
return b.previousEpochParticipation(), nil
}
// UnrealizedCheckpointBalances returns the total balances: active, target attested in
// current epoch and target attested in previous epoch. This function is used to
// compute the "unrealized justification" that a synced Beacon Block will have.
func (b *BeaconState) UnrealizedCheckpointBalances() (uint64, uint64, uint64, error) {
if !b.hasInnerState() {
return 0, 0, 0, ErrNilInnerState
}
b.lock.RLock()
defer b.lock.RUnlock()
cp := b.state.CurrentEpochParticipation
pp := b.state.PreviousEpochParticipation
if cp == nil || pp == nil {
return 0, 0, 0, ErrNilParticipation
}
currentEpoch := time.CurrentEpoch(b)
return stateutil.UnrealizedCheckpointBalances(cp, pp, b.state.Validators, currentEpoch)
}
// currentEpochParticipation corresponding to participation bits on the beacon chain.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) currentEpochParticipation() []byte {

View File

@@ -1,64 +0,0 @@
package v2
import (
"testing"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestState_UnrealizedCheckpointBalances(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
balances := make([]uint64, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
base := &ethpb.BeaconStateAltair{
Slot: 2,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Validators: validators,
CurrentEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
PreviousEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
Balances: balances,
}
state, err := InitializeFromProto(base)
require.NoError(t, err)
// No one voted in the last two epochs
allActive := params.BeaconConfig().MinGenesisActiveValidatorCount * params.BeaconConfig().MaxEffectiveBalance
active, previous, current, err := state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive, active)
require.Equal(t, uint64(0), current)
require.Equal(t, uint64(0), previous)
// Add some votes in the last two epochs:
base.CurrentEpochParticipation[0] = 0xFF
base.PreviousEpochParticipation[0] = 0xFF
base.PreviousEpochParticipation[1] = 0xFF
state, err = InitializeFromProto(base)
require.NoError(t, err)
active, previous, current, err = state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive, active)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, current)
require.Equal(t, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
// Slash some validators
validators[0].Slashed = true
state, err = InitializeFromProto(base)
require.NoError(t, err)
active, previous, current, err = state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive-params.BeaconConfig().MaxEffectiveBalance, active)
require.Equal(t, uint64(0), current)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, previous)
}

View File

@@ -74,8 +74,6 @@ func TestNilState_NoPanic(t *testing.T) {
require.ErrorIs(t, ErrNilInnerState, err)
_, err = st.NextSyncCommittee()
require.ErrorIs(t, ErrNilInnerState, err)
_, _, _, err = st.UnrealizedCheckpointBalances()
require.ErrorIs(t, ErrNilInnerState, err)
}
func TestBeaconState_MatchCurrentJustifiedCheckpt(t *testing.T) {

View File

@@ -5,7 +5,6 @@ go_library(
srcs = [
"deprecated_getters.go",
"deprecated_setters.go",
"error.go",
"field_roots.go",
"getters_block.go",
"getters_checkpoint.go",
@@ -35,7 +34,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/v3",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/fieldtrie:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
@@ -66,7 +64,6 @@ go_test(
"deprecated_setters_test.go",
"getters_block_test.go",
"getters_checkpoint_test.go",
"getters_participation_test.go",
"getters_test.go",
"getters_validator_test.go",
"proofs_test.go",

View File

@@ -1,5 +0,0 @@
package v3
import "errors"
var ErrNilParticipation = errors.New("Nil epoch participation in state")

View File

@@ -1,10 +1,5 @@
package v3
import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
)
// CurrentEpochParticipation corresponding to participation bits on the beacon chain.
func (b *BeaconState) CurrentEpochParticipation() ([]byte, error) {
if !b.hasInnerState() {
@@ -35,26 +30,6 @@ func (b *BeaconState) PreviousEpochParticipation() ([]byte, error) {
return b.previousEpochParticipation(), nil
}
// UnrealizedCheckpointBalances returns the total balances: active, target attested in
// current epoch and target attested in previous epoch. This function is used to
// compute the "unrealized justification" that a synced Beacon Block will have.
func (b *BeaconState) UnrealizedCheckpointBalances() (uint64, uint64, uint64, error) {
if !b.hasInnerState() {
return 0, 0, 0, ErrNilInnerState
}
b.lock.RLock()
defer b.lock.RUnlock()
cp := b.state.CurrentEpochParticipation
pp := b.state.PreviousEpochParticipation
if cp == nil || pp == nil {
return 0, 0, 0, ErrNilParticipation
}
currentEpoch := time.CurrentEpoch(b)
return stateutil.UnrealizedCheckpointBalances(cp, pp, b.state.Validators, currentEpoch)
}
// currentEpochParticipation corresponding to participation bits on the beacon chain.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) currentEpochParticipation() []byte {

View File

@@ -1,64 +0,0 @@
package v3
import (
"testing"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestState_UnrealizedCheckpointBalances(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
balances := make([]uint64, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalance
}
base := &ethpb.BeaconStateBellatrix{
Slot: 2,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Validators: validators,
CurrentEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
PreviousEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
Balances: balances,
}
state, err := InitializeFromProto(base)
require.NoError(t, err)
// No one voted in the last two epochs
allActive := params.BeaconConfig().MinGenesisActiveValidatorCount * params.BeaconConfig().MaxEffectiveBalance
active, previous, current, err := state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive, active)
require.Equal(t, uint64(0), current)
require.Equal(t, uint64(0), previous)
// Add some votes in the last two epochs:
base.CurrentEpochParticipation[0] = 0xFF
base.PreviousEpochParticipation[0] = 0xFF
base.PreviousEpochParticipation[1] = 0xFF
state, err = InitializeFromProto(base)
require.NoError(t, err)
active, previous, current, err = state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive, active)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, current)
require.Equal(t, 2*params.BeaconConfig().MaxEffectiveBalance, previous)
// Slash some validators
validators[0].Slashed = true
state, err = InitializeFromProto(base)
require.NoError(t, err)
active, previous, current, err = state.UnrealizedCheckpointBalances()
require.NoError(t, err)
require.Equal(t, allActive-params.BeaconConfig().MaxEffectiveBalance, active)
require.Equal(t, uint64(0), current)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, previous)
}

View File

@@ -75,8 +75,6 @@ func TestNilState_NoPanic(t *testing.T) {
require.ErrorIs(t, ErrNilInnerState, err)
_, err = st.LatestExecutionPayloadHeader()
require.ErrorIs(t, ErrNilInnerState, err)
_, _, _, err = st.UnrealizedCheckpointBalances()
require.ErrorIs(t, ErrNilInnerState, err)
}

View File

@@ -132,8 +132,6 @@ type Service struct {
seenSyncContributionCache *lru.Cache
badBlockCache *lru.Cache
badBlockLock sync.RWMutex
syncContributionBitsOverlapLock sync.RWMutex
syncContributionBitsOverlapCache *lru.Cache
signatureChan chan *signatureVerifier
}
@@ -223,7 +221,6 @@ func (s *Service) initCaches() {
s.seenUnAggregatedAttestationCache = lruwrpr.New(seenUnaggregatedAttSize)
s.seenSyncMessageCache = lruwrpr.New(seenSyncMsgSize)
s.seenSyncContributionCache = lruwrpr.New(seenSyncContributionSize)
s.syncContributionBitsOverlapCache = lruwrpr.New(seenSyncContributionSize)
s.seenExitCache = lruwrpr.New(seenExitSize)
s.seenAttesterSlashingCache = make(map[uint64]bool)
s.seenProposerSlashingCache = lruwrpr.New(seenProposerSlashingSize)

View File

@@ -252,7 +252,7 @@ func (s *Service) setSeenCommitteeIndicesSlot(slot types.Slot, committeeID types
s.seenUnAggregatedAttestationLock.Lock()
defer s.seenUnAggregatedAttestationLock.Unlock()
b := append(bytesutil.Bytes32(uint64(slot)), bytesutil.Bytes32(uint64(committeeID))...)
b = append(b, bytesutil.SafeCopyBytes(aggregateBits)...)
b = append(b, aggregateBits...)
s.seenUnAggregatedAttestationCache.Add(string(b), true)
}

View File

@@ -339,30 +339,3 @@ func TestServiceValidateCommitteeIndexBeaconAttestation_Optimistic(t *testing.T)
valid := res == pubsub.ValidationIgnore
assert.Equal(t, true, valid, "Should have ignore this message")
}
func TestService_setSeenCommitteeIndicesSlot(t *testing.T) {
chainService := &mockChain.ChainService{
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
}
s := NewService(context.Background(), WithP2P(p2ptest.NewTestP2P(t)), WithStateNotifier(chainService.StateNotifier()))
s.initCaches()
// Empty cache
b0 := []byte{9} // 1001
require.Equal(t, false, s.hasSeenCommitteeIndicesSlot(0, 0, b0))
// Cache some entries but same key
s.setSeenCommitteeIndicesSlot(0, 0, b0)
require.Equal(t, true, s.hasSeenCommitteeIndicesSlot(0, 0, b0))
b1 := []byte{14} // 1110
s.setSeenCommitteeIndicesSlot(0, 0, b1)
require.Equal(t, true, s.hasSeenCommitteeIndicesSlot(0, 0, b0))
require.Equal(t, true, s.hasSeenCommitteeIndicesSlot(0, 0, b1))
// Cache some entries with diff keys
s.setSeenCommitteeIndicesSlot(1, 2, b1)
require.Equal(t, false, s.hasSeenCommitteeIndicesSlot(1, 0, b1))
require.Equal(t, false, s.hasSeenCommitteeIndicesSlot(0, 2, b1))
require.Equal(t, true, s.hasSeenCommitteeIndicesSlot(1, 2, b1))
}

View File

@@ -88,11 +88,7 @@ func (s *Service) validateSyncContributionAndProof(ctx context.Context, pid peer
return result, err
}
con := m.Message.Contribution
if err := s.setSyncContributionBits(con); err != nil {
return pubsub.ValidationIgnore, err
}
s.setSyncContributionIndexSlotSeen(con.Slot, m.Message.AggregatorIndex, types.CommitteeIndex(con.SubcommitteeIndex))
s.setSyncContributionIndexSlotSeen(m.Message.Contribution.Slot, m.Message.AggregatorIndex, types.CommitteeIndex(m.Message.Contribution.SubcommitteeIndex))
msg.ValidatorData = m
@@ -153,15 +149,7 @@ func rejectEmptyContribution(m *ethpb.SignedContributionAndProof) validationFn {
func (s *Service) ignoreSeenSyncContribution(m *ethpb.SignedContributionAndProof) validationFn {
return func(ctx context.Context) (pubsub.ValidationResult, error) {
c := m.Message.Contribution
seen, err := s.hasSeenSyncContributionBits(c)
if err != nil {
return pubsub.ValidationIgnore, err
}
if seen {
return pubsub.ValidationIgnore, nil
}
seen = s.hasSeenSyncContributionIndexSlot(c.Slot, m.Message.AggregatorIndex, types.CommitteeIndex(c.SubcommitteeIndex))
seen := s.hasSeenSyncContributionIndexSlot(m.Message.Contribution.Slot, m.Message.AggregatorIndex, types.CommitteeIndex(m.Message.Contribution.SubcommitteeIndex))
if seen {
return pubsub.ValidationIgnore, nil
}
@@ -330,68 +318,6 @@ func (s *Service) setSyncContributionIndexSlotSeen(slot types.Slot, aggregatorIn
s.seenSyncContributionCache.Add(string(b), true)
}
// Set sync contribution's slot, root, committee index and bits.
func (s *Service) setSyncContributionBits(c *ethpb.SyncCommitteeContribution) error {
s.syncContributionBitsOverlapLock.Lock()
defer s.syncContributionBitsOverlapLock.Unlock()
// Copying due to how pb unmarshalling is carried out, prevent mutation.
b := append(bytesutil.SafeCopyBytes(c.BlockRoot), bytesutil.Bytes32(uint64(c.Slot))...)
b = append(b, bytesutil.Bytes32(c.SubcommitteeIndex)...)
v, ok := s.syncContributionBitsOverlapCache.Get(string(b))
if !ok {
s.syncContributionBitsOverlapCache.Add(string(b), [][]byte{c.AggregationBits.Bytes()})
return nil
}
bitsList, ok := v.([][]byte)
if !ok {
return errors.New("could not covert cached value to []bitfield.Bitvector")
}
has, err := bitListOverlaps(bitsList, c.AggregationBits)
if err != nil {
return err
}
if has {
return nil
}
s.syncContributionBitsOverlapCache.Add(string(b), append(bitsList, c.AggregationBits.Bytes()))
return nil
}
// Check sync contribution bits don't have an overlap with one's in cache.
func (s *Service) hasSeenSyncContributionBits(c *ethpb.SyncCommitteeContribution) (bool, error) {
s.syncContributionBitsOverlapLock.RLock()
defer s.syncContributionBitsOverlapLock.RUnlock()
b := append(c.BlockRoot, bytesutil.Bytes32(uint64(c.Slot))...)
b = append(b, bytesutil.Bytes32(c.SubcommitteeIndex)...)
v, ok := s.syncContributionBitsOverlapCache.Get(string(b))
if !ok {
return false, nil
}
bitsList, ok := v.([][]byte)
if !ok {
return false, errors.New("could not covert cached value to []bitfield.Bitvector128")
}
return bitListOverlaps(bitsList, c.AggregationBits.Bytes())
}
// bitListOverlaps returns true if there's an overlap between two bitlists.
func bitListOverlaps(bitLists [][]byte, b []byte) (bool, error) {
for _, bitList := range bitLists {
if bitList == nil {
return false, errors.New("nil bitfield")
}
bl := ethpb.ConvertToSyncContributionBitVector(bitList)
overlaps, err := bl.Overlaps(ethpb.ConvertToSyncContributionBitVector(b))
if err != nil {
return false, err
}
if overlaps {
return true, nil
}
}
return false, nil
}
// verifySyncSelectionData verifies that the provided sync contribution has a valid
// selection proof.
func (s *Service) verifySyncSelectionData(ctx context.Context, m *ethpb.ContributionAndProof) error {

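For reference, the overlap test that the removed cache performed through ConvertToSyncContributionBitVector(...).Overlaps(...) amounts to checking whether any aggregation bit is set in both contributions. A library-free sketch of that check; the helper below is illustrative, not the project's implementation:

```go
package main

import "fmt"

// overlaps reports whether any bit is set in both byte slices, which is the
// essence of the aggregation-bits overlap check described above.
func overlaps(a, b []byte) bool {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	for i := 0; i < n; i++ {
		if a[i]&b[i] != 0 {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(overlaps([]byte{0b0001}, []byte{0b0010})) // false: disjoint aggregation bits
	fmt.Println(overlaps([]byte{0b0011}, []byte{0b0010})) // true: bit 1 was already seen
}
```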
View File

@@ -1095,121 +1095,3 @@ func syncSelectionProofSigningRoot(st state.BeaconState, slot types.Slot, comIdx
selectionData := &ethpb.SyncAggregatorSelectionData{Slot: slot, SubcommitteeIndex: uint64(comIdx)}
return signing.ComputeSigningRoot(selectionData, dom)
}
func TestService_setSyncContributionIndexSlotSeen(t *testing.T) {
chainService := &mockChain.ChainService{
Genesis: time.Now(),
ValidatorsRoot: [32]byte{'A'},
}
s := NewService(context.Background(), WithP2P(mockp2p.NewTestP2P(t)), WithStateNotifier(chainService.StateNotifier()))
s.initCaches()
// Empty cache
b0 := bitfield.NewBitvector128()
b0.SetBitAt(0, true)
has, err := s.hasSeenSyncContributionBits(&ethpb.SyncCommitteeContribution{
AggregationBits: b0,
})
require.NoError(t, err)
require.Equal(t, false, has)
// Cache with entries but same key
require.NoError(t, s.setSyncContributionBits(&ethpb.SyncCommitteeContribution{
AggregationBits: b0,
}))
has, err = s.hasSeenSyncContributionBits(&ethpb.SyncCommitteeContribution{
AggregationBits: b0,
})
require.NoError(t, err)
require.Equal(t, true, has)
b1 := bitfield.NewBitvector128()
b1.SetBitAt(1, true)
has, err = s.hasSeenSyncContributionBits(&ethpb.SyncCommitteeContribution{
AggregationBits: b1,
})
require.NoError(t, err)
require.Equal(t, false, has)
b2 := bitfield.NewBitvector128()
b2.SetBitAt(1, true)
b2.SetBitAt(2, true)
has, err = s.hasSeenSyncContributionBits(&ethpb.SyncCommitteeContribution{
AggregationBits: b2,
})
require.NoError(t, err)
require.Equal(t, false, has)
b2.SetBitAt(0, true)
has, err = s.hasSeenSyncContributionBits(&ethpb.SyncCommitteeContribution{
AggregationBits: b2,
})
require.NoError(t, err)
require.Equal(t, true, has)
// Make sure set doesn't contain existing overlaps
require.Equal(t, 1, s.syncContributionBitsOverlapCache.Len())
// Cache with entries but different key
has, err = s.hasSeenSyncContributionBits(&ethpb.SyncCommitteeContribution{
Slot: 1,
SubcommitteeIndex: 2,
BlockRoot: []byte{'A'},
AggregationBits: b0,
})
require.NoError(t, err)
require.Equal(t, false, has)
require.NoError(t, s.setSyncContributionBits(&ethpb.SyncCommitteeContribution{
Slot: 1,
SubcommitteeIndex: 2,
BlockRoot: []byte{'A'},
AggregationBits: b2,
}))
has, err = s.hasSeenSyncContributionBits(&ethpb.SyncCommitteeContribution{
Slot: 1,
SubcommitteeIndex: 2,
BlockRoot: []byte{'A'},
AggregationBits: b0,
})
require.NoError(t, err)
require.Equal(t, true, has)
has, err = s.hasSeenSyncContributionBits(&ethpb.SyncCommitteeContribution{
Slot: 1,
SubcommitteeIndex: 2,
BlockRoot: []byte{'A'},
AggregationBits: b1,
})
require.NoError(t, err)
require.Equal(t, true, has)
has, err = s.hasSeenSyncContributionBits(&ethpb.SyncCommitteeContribution{
Slot: 1,
SubcommitteeIndex: 2,
BlockRoot: []byte{'A'},
AggregationBits: b2,
})
require.NoError(t, err)
require.Equal(t, true, has)
// Check invariant with the keys
has, err = s.hasSeenSyncContributionBits(&ethpb.SyncCommitteeContribution{
Slot: 2,
SubcommitteeIndex: 2,
BlockRoot: []byte{'A'},
AggregationBits: b0,
})
require.NoError(t, err)
require.Equal(t, false, has)
has, err = s.hasSeenSyncContributionBits(&ethpb.SyncCommitteeContribution{
Slot: 1,
SubcommitteeIndex: 2,
BlockRoot: []byte{'B'},
AggregationBits: b0,
})
require.NoError(t, err)
require.Equal(t, false, has)
has, err = s.hasSeenSyncContributionBits(&ethpb.SyncCommitteeContribution{
Slot: 1,
SubcommitteeIndex: 3,
BlockRoot: []byte{'A'},
AggregationBits: b0,
})
require.NoError(t, err)
require.Equal(t, false, has)
}

View File

@@ -15,7 +15,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/cmd/beacon-chain",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/builder:go_default_library",
"//beacon-chain/node:go_default_library",
"//cmd:go_default_library",
"//cmd/beacon-chain/blockchain:go_default_library",

View File

@@ -11,12 +11,6 @@ import (
)
var (
// MevRelayEndpoint provides an HTTP access endpoint to a MEV builder network.
MevRelayEndpoint = &cli.StringFlag{
Name: "http-mev-relay",
Usage: "A MEV builder relay string http endpoint, this wil be used to interact MEV builder network using API defined in: https://ethereum.github.io/builder-specs/#/Builder",
Value: "",
}
// HTTPWeb3ProviderFlag provides an HTTP access endpoint to an ETH 1.0 RPC.
HTTPWeb3ProviderFlag = &cli.StringFlag{
Name: "http-web3provider",
@@ -217,25 +211,4 @@ var (
Usage: "Post bellatrix, this address will receive the transaction fees produced by any blocks from this node. Default to junk whilst bellatrix is in development state. Validator client can override this value through the preparebeaconproposer api.",
Value: fieldparams.EthBurnAddressHex,
}
// TerminalTotalDifficultyOverride specifies a total difficulty that manually overrides the `TERMINAL_TOTAL_DIFFICULTY` parameter.
TerminalTotalDifficultyOverride = &cli.StringFlag{
Name: "terminal-total-difficulty-override",
Usage: "Manually overrides the default TERMINAL_TOTAL_DIFFICULTY value. " +
"WARNING: use this flag only if you clearly understand that the community has decided to override the terminal total difficulty. " +
"Incorrect usage will result in your node experiencing a consensus failure.",
}
// TerminalBlockHashOverride specifies a terminal block hash that manually overrides the `TERMINAL_BLOCK_HASH` parameter.
TerminalBlockHashOverride = &cli.StringFlag{
Name: "terminal-block-hash-override",
Usage: "Manually overrides the default TERMINAL_BLOCK_HASH value. " +
"WARNING: use this flag only if you clearly understand that the community has decided to override the terminal block hash. " +
"Incorrect usage will result in your node experiencing a consensus failure.",
}
// TerminalBlockHashActivationEpochOverride specifies an activation epoch that manually overrides the `TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH` parameter.
TerminalBlockHashActivationEpochOverride = &cli.Uint64Flag{
Name: "terminal-block-hash-epoch-override",
Usage: "Manually overrides the default TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH value. " +
"WARNING: use this flag only if you clearly understand that the community has decided to override the terminal block hash activation epoch. " +
"Incorrect usage will result in your node experiencing a consensus failure.",
}
)
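The entries removed above all follow the same urfave/cli pattern: a package-level StringFlag that is registered with the app and read back inside the action. The snippet below is a minimal, self-contained illustration of that pattern; the app, the flag wiring, and the behaviour are assumptions made for the example and are not taken from Prysm.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

// exampleOverride is a hypothetical flag in the same style as the ones
// removed above; its name and behaviour are illustrative only.
var exampleOverride = &cli.StringFlag{
	Name:  "terminal-total-difficulty-override",
	Usage: "Overrides the default TERMINAL_TOTAL_DIFFICULTY value.",
	Value: "",
}

func main() {
	app := &cli.App{
		Name:  "example",
		Flags: []cli.Flag{exampleOverride},
		Action: func(ctx *cli.Context) error {
			// Only act when the operator explicitly set the flag.
			if ctx.IsSet(exampleOverride.Name) {
				fmt.Println("override requested:", ctx.String(exampleOverride.Name))
			}
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}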

View File

@@ -11,7 +11,6 @@ import (
gethlog "github.com/ethereum/go-ethereum/log"
golog "github.com/ipfs/go-log/v2"
joonix "github.com/joonix/log"
"github.com/prysmaticlabs/prysm/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/beacon-chain/node"
"github.com/prysmaticlabs/prysm/cmd"
blockchaincmd "github.com/prysmaticlabs/prysm/cmd/beacon-chain/blockchain"
@@ -70,10 +69,6 @@ var appFlags = []cli.Flag{
flags.Eth1HeaderReqLimit,
flags.MinPeersPerSubnet,
flags.SuggestedFeeRecipient,
flags.TerminalTotalDifficultyOverride,
flags.TerminalBlockHashOverride,
flags.TerminalBlockHashActivationEpochOverride,
flags.MevRelayEndpoint,
cmd.EnableBackupWebhookFlag,
cmd.BackupWebhookOutputDir,
cmd.MinimalConfigFlag,
@@ -253,14 +248,9 @@ func startNode(ctx *cli.Context) error {
if err != nil {
return err
}
builderFlagOpts, err := builder.FlagOptions(ctx)
if err != nil {
return err
}
opts := []node.Option{
node.WithBlockchainFlagOptions(blockchainFlagOpts),
node.WithPowchainFlagOptions(powchainFlagOpts),
node.WithBuilderFlagOptions(builderFlagOpts),
}
optFuncs := []func(*cli.Context) (node.Option, error){

View File

@@ -126,7 +126,6 @@ var appHelpFlagGroups = []flagGroup{
flags.WeakSubjectivityCheckpoint,
flags.Eth1HeaderReqLimit,
flags.MinPeersPerSubnet,
flags.MevRelayEndpoint,
checkpoint.BlockPath,
checkpoint.StatePath,
checkpoint.RemoteURL,
@@ -138,9 +137,6 @@ var appHelpFlagGroups = []flagGroup{
Name: "merge",
Flags: []cli.Flag{
flags.SuggestedFeeRecipient,
flags.TerminalTotalDifficultyOverride,
flags.TerminalBlockHashOverride,
flags.TerminalBlockHashActivationEpochOverride,
},
},
{

View File

@@ -110,8 +110,6 @@ type BeaconChainConfig struct {
DomainSyncCommittee [4]byte `yaml:"DOMAIN_SYNC_COMMITTEE" spec:"true"` // DomainSyncCommittee defines the BLS signature domain for sync committee.
DomainSyncCommitteeSelectionProof [4]byte `yaml:"DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF" spec:"true"` // DomainSyncCommitteeSelectionProof defines the BLS signature domain for sync committee selection proof.
DomainContributionAndProof [4]byte `yaml:"DOMAIN_CONTRIBUTION_AND_PROOF" spec:"true"` // DomainContributionAndProof defines the BLS signature domain for contribution and proof.
DomainApplicationMask [4]byte `yaml:"DOMAIN_APPLICATION_MASK" spec:"true"` // DomainApplicationMask defines the BLS signature domain for application mask.
DomainApplicationBuilder [4]byte // DomainApplicationBuilder defines the BLS signature domain for application builder.
// Prysm constants.
GweiPerEth uint64 // GweiPerEth is the amount of gwei corresponding to 1 eth.
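The fields above carry spec:"true" struct tags marking values that come from the consensus spec, while Prysm-only constants such as GweiPerEth carry no tag. The sketch below shows one way such tags can be enumerated with reflection; the exampleConfig type and specFields helper are hypothetical, not part of Prysm.

package main

import (
	"fmt"
	"reflect"
)

// exampleConfig is a two-field stand-in for the real BeaconChainConfig.
type exampleConfig struct {
	DomainSyncCommittee [4]byte `yaml:"DOMAIN_SYNC_COMMITTEE" spec:"true"`
	GweiPerEth          uint64  // Prysm-only constant, no spec tag.
}

// specFields returns the yaml names of all fields tagged spec:"true".
func specFields(v interface{}) []string {
	t := reflect.TypeOf(v)
	var names []string
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if f.Tag.Get("spec") == "true" {
			names = append(names, f.Tag.Get("yaml"))
		}
	}
	return names
}

func main() {
	fmt.Println(specFields(exampleConfig{})) // [DOMAIN_SYNC_COMMITTEE]
}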

View File

@@ -155,18 +155,16 @@ var mainnetBeaconConfig = &BeaconChainConfig{
MaxVoluntaryExits: 16,
// BLS domain values.
DomainBeaconProposer: bytesutil.Uint32ToBytes4(0x00000000),
DomainBeaconAttester: bytesutil.Uint32ToBytes4(0x01000000),
DomainRandao: bytesutil.Uint32ToBytes4(0x02000000),
DomainDeposit: bytesutil.Uint32ToBytes4(0x03000000),
DomainVoluntaryExit: bytesutil.Uint32ToBytes4(0x04000000),
DomainSelectionProof: bytesutil.Uint32ToBytes4(0x05000000),
DomainAggregateAndProof: bytesutil.Uint32ToBytes4(0x06000000),
DomainSyncCommittee: bytesutil.Uint32ToBytes4(0x07000000),
DomainSyncCommitteeSelectionProof: bytesutil.Uint32ToBytes4(0x08000000),
DomainContributionAndProof: bytesutil.Uint32ToBytes4(0x09000000),
DomainApplicationMask: bytesutil.Uint32ToBytes4(0x00000001),
DomainApplicationBuilder: bytesutil.Uint32ToBytes4(0x00000001),
DomainBeaconProposer: bytesutil.ToBytes4(bytesutil.Bytes4(0)),
DomainBeaconAttester: bytesutil.ToBytes4(bytesutil.Bytes4(1)),
DomainRandao: bytesutil.ToBytes4(bytesutil.Bytes4(2)),
DomainDeposit: bytesutil.ToBytes4(bytesutil.Bytes4(3)),
DomainVoluntaryExit: bytesutil.ToBytes4(bytesutil.Bytes4(4)),
DomainSelectionProof: bytesutil.ToBytes4(bytesutil.Bytes4(5)),
DomainAggregateAndProof: bytesutil.ToBytes4(bytesutil.Bytes4(6)),
DomainSyncCommittee: bytesutil.ToBytes4(bytesutil.Bytes4(7)),
DomainSyncCommitteeSelectionProof: bytesutil.ToBytes4(bytesutil.Bytes4(8)),
DomainContributionAndProof: bytesutil.ToBytes4(bytesutil.Bytes4(9)),
// Prysm constants.
GweiPerEth: 1000000000,
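The replacement lines above switch from writing the spec's big-endian domain constants (for example 0x01000000 for the attester domain) to building the same four bytes from a small integer. Assuming the bytesutil helpers encode as their names suggest (big-endian for Uint32ToBytes4, little-endian for Bytes4), the two forms are byte-for-byte identical; the standard-library check below illustrates that equivalence without depending on Prysm's helpers.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// The spec constant 0x01000000, written big-endian, yields [0x01 0x00 0x00 0x00].
	var bigEndian [4]byte
	binary.BigEndian.PutUint32(bigEndian[:], 0x01000000)

	// The small integer 1, written little-endian into 4 bytes, yields the same bytes.
	var littleEndian [4]byte
	binary.LittleEndian.PutUint32(littleEndian[:], 1)

	fmt.Printf("%x %x equal=%v\n", bigEndian, littleEndian, bigEndian == littleEndian)
	// Output: 01000000 01000000 equal=true
}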

View File

@@ -32,7 +32,7 @@ func RopstenConfig() *BeaconChainConfig {
cfg.AltairForkVersion = []byte{0x80, 0x00, 0x00, 0x70}
cfg.BellatrixForkEpoch = 750
cfg.BellatrixForkVersion = []byte{0x80, 0x00, 0x00, 0x71}
cfg.TerminalTotalDifficulty = "100000000000000000000000"
cfg.TerminalTotalDifficulty = "43531756765713534"
cfg.DepositContractAddress = "0x6f22fFbC56eFF051aECF839396DD1eD9aD6BBA9D"
cfg.InitializeForkSchedule()
return cfg
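TerminalTotalDifficulty is kept as a decimal string in the config because total difficulty values can exceed a uint64 (the replaced placeholder above does). The sketch below shows one way such a value could be parsed and compared with math/big; it is illustrative only and is not Prysm's parsing code.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Parse the configured terminal total difficulty into an arbitrary-precision integer.
	ttd, ok := new(big.Int).SetString("43531756765713534", 10)
	if !ok {
		panic("invalid terminal total difficulty")
	}

	// Compare a hypothetical accumulated chain difficulty against the threshold.
	current := new(big.Int).SetUint64(43531756765713000)
	fmt.Println("reached terminal total difficulty:", current.Cmp(ttd) >= 0) // false
}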

Some files were not shown because too many files have changed in this diff.