mirror of
https://github.com/OffchainLabs/prysm.git
synced 2026-01-11 14:28:09 -05:00
Compare commits
7 Commits
rename
...
set-invali
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ede21fb4c1 | ||
|
|
3f1e3cf82f | ||
|
|
60003c481b | ||
|
|
c1197d7881 | ||
|
|
693cc79cc9 | ||
|
|
23778959eb | ||
|
|
92278e2255 |
3
.bazelrc
3
.bazelrc
@@ -217,3 +217,6 @@ build:remote --remote_local_fallback
|
||||
|
||||
# Ignore GoStdLib with remote caching
|
||||
build --modify_execution_info='GoStdlib.*=+no-remote-cache'
|
||||
|
||||
# Set bazel gotag
|
||||
build --define gotags=bazel
|
||||
|
||||
@@ -16,6 +16,8 @@ exports_files([
|
||||
# gazelle:map_kind go_library go_library @prysm//tools/go:def.bzl
|
||||
# gazelle:map_kind go_test go_test @prysm//tools/go:def.bzl
|
||||
# gazelle:map_kind go_repository go_repository @prysm//tools/go:def.bzl
|
||||
# gazelle:build_tags bazel
|
||||
# gazelle:exclude tools/analyzers/**/testdata/**
|
||||
gazelle(
|
||||
name = "gazelle",
|
||||
prefix = prefix,
|
||||
@@ -125,6 +127,7 @@ nogo(
|
||||
"//tools/analyzers/ineffassign:go_default_library",
|
||||
"//tools/analyzers/properpermissions:go_default_library",
|
||||
"//tools/analyzers/recursivelock:go_default_library",
|
||||
"//tools/analyzers/uintcast:go_default_library",
|
||||
] + select({
|
||||
# nogo checks that fail with coverage enabled.
|
||||
":coverage_enabled": [],
|
||||
|
||||
@@ -288,11 +288,11 @@ func TestService_ChainHeads_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0,
|
||||
params.BeaconConfig().ZeroHash)}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0))
|
||||
|
||||
roots, slots := c.ChainHeads()
|
||||
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
|
||||
@@ -302,11 +302,11 @@ func TestService_ChainHeads_ProtoArray(t *testing.T) {
|
||||
func TestService_ChainHeads_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0))
|
||||
|
||||
roots, slots := c.ChainHeads()
|
||||
require.Equal(t, 3, len(roots))
|
||||
@@ -384,8 +384,8 @@ func TestService_IsOptimistic_ProtoArray(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
|
||||
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -400,8 +400,8 @@ func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
|
||||
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -419,8 +419,8 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
|
||||
func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
|
||||
|
||||
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
@@ -430,8 +430,8 @@ func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
|
||||
func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
|
||||
|
||||
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -83,8 +83,12 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
|
||||
} else {
|
||||
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
|
||||
}
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(10261) send optimistic status
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j, false /* optimistic status */); err != nil {
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, headStartRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,11 +25,11 @@ func TestService_newSlot(t *testing.T) {
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
require.NoError(t, fcs.ProcessBlock(ctx, 0, [32]byte{}, [32]byte{}, 0, 0, false)) // genesis
|
||||
require.NoError(t, fcs.ProcessBlock(ctx, 32, [32]byte{'a'}, [32]byte{}, 0, 0, false)) // finalized
|
||||
require.NoError(t, fcs.ProcessBlock(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, 0, 0, false)) // justified
|
||||
require.NoError(t, fcs.ProcessBlock(ctx, 96, [32]byte{'c'}, [32]byte{'a'}, 0, 0, false)) // best justified
|
||||
require.NoError(t, fcs.ProcessBlock(ctx, 97, [32]byte{'d'}, [32]byte{}, 0, 0, false)) // bad
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, 0, 0)) // genesis
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 32, [32]byte{'a'}, [32]byte{}, 0, 0)) // finalized
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, 0, 0)) // justified
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 96, [32]byte{'c'}, [32]byte{'a'}, 0, 0)) // best justified
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 97, [32]byte{'d'}, [32]byte{}, 0, 0)) // bad
|
||||
|
||||
type args struct {
|
||||
slot types.Slot
|
||||
|
||||
@@ -62,7 +62,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
|
||||
}
|
||||
|
||||
// payload attribute is only required when requesting payload, here we are just updating fork choice, so it is nil.
|
||||
payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, nil /*payload attribute*/)
|
||||
payloadID, validHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, nil /*payload attribute*/)
|
||||
if err != nil {
|
||||
switch err {
|
||||
case v1.ErrAcceptedSyncingPayloadStatus:
|
||||
@@ -72,6 +72,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
|
||||
"finalizedHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
|
||||
}).Info("Called fork choice updated with optimistic block")
|
||||
return payloadID, nil
|
||||
case v1.ErrInvalidPayloadStatus:
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, bytesutil.ToBytes32(validHash)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, errors.Wrap(err, "Received invalid hash from execution engine")
|
||||
default:
|
||||
return nil, errors.Wrap(err, "could not notify forkchoice update from execution engine")
|
||||
}
|
||||
@@ -103,7 +108,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int, hea
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get execution payload")
|
||||
}
|
||||
_, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
|
||||
validHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
|
||||
if err != nil {
|
||||
switch err {
|
||||
case v1.ErrAcceptedSyncingPayloadStatus:
|
||||
@@ -112,6 +117,11 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int, hea
|
||||
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
|
||||
}).Info("Called new payload with optimistic block")
|
||||
return nil
|
||||
case v1.ErrInvalidPayloadStatus:
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, bytesutil.ToBytes32(validHash)); err != nil {
|
||||
return err
|
||||
}
|
||||
return errors.Wrap(err, "Received invalid hash from execution engine")
|
||||
default:
|
||||
return errors.Wrap(err, "could not validate execution payload from execution engine")
|
||||
}
|
||||
|
||||
@@ -256,7 +256,7 @@ func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, 1, 1, false))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, 1, 1))
|
||||
require.NoError(t, service.OnAttestation(ctx, att[0]))
|
||||
}
|
||||
|
||||
@@ -282,7 +282,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, 1, 1, false))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, 1, 1))
|
||||
require.NoError(t, service.OnAttestation(ctx, att[0]))
|
||||
}
|
||||
|
||||
@@ -571,8 +571,8 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, 0, 0, false))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, 0, 0, false))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, b32.Block.Slot, r32, [32]byte{}, 0, 0))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, b33.Block.Slot, r33, r32, 0, 0))
|
||||
|
||||
_, err = service.cfg.ForkChoiceStore.Head(ctx, 0, r32, []uint64{}, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -112,7 +112,7 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
|
||||
}
|
||||
|
||||
// TODO(10261) Check optimistic status
|
||||
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */, false /*optimistic sync*/); err != nil {
|
||||
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -350,11 +350,15 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
|
||||
// handles a block after the block's batch has been verified, where we can save blocks
|
||||
// their state summaries and split them off to relative hot/cold storage.
|
||||
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed block.SignedBeaconBlock,
|
||||
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint, optimistic bool) error {
|
||||
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
|
||||
b := signed.Block()
|
||||
|
||||
s.saveInitSyncBlock(blockRoot, signed)
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint, optimistic); err != nil {
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO(10261) send optimistic status
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -442,13 +446,13 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
|
||||
// This feeds in the block and block's attestations to fork choice store. It's allows fork choice store
|
||||
// to gain information on the most current chain.
|
||||
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk block.BeaconBlock, root [32]byte,
|
||||
st state.BeaconState, optimistic bool) error {
|
||||
st state.BeaconState) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockAndAttestationsToForkChoiceStore")
|
||||
defer span.End()
|
||||
|
||||
fCheckpoint := st.FinalizedCheckpoint()
|
||||
jCheckpoint := st.CurrentJustifiedCheckpoint()
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint, optimistic); err != nil {
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
// Feed in block's attestations to fork choice store.
|
||||
@@ -467,24 +471,24 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
|
||||
}
|
||||
|
||||
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk block.BeaconBlock,
|
||||
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint, optimistic bool) error {
|
||||
//TODO(10261) check if the blocks are optimistic or not when filling fork choice
|
||||
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
|
||||
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
// Feed in block to fork choice store.
|
||||
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
|
||||
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
|
||||
blk.Slot(), root, bytesutil.ToBytes32(blk.ParentRoot()),
|
||||
jCheckpoint.Epoch,
|
||||
fCheckpoint.Epoch, optimistic); err != nil {
|
||||
fCheckpoint.Epoch); err != nil {
|
||||
return errors.Wrap(err, "could not process block for proto array fork choice")
|
||||
}
|
||||
return nil
|
||||
// TODO(10261) send optimistic status
|
||||
return s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root)
|
||||
}
|
||||
|
||||
// This saves post state info to DB or cache. This also saves post state info to fork choice store.
|
||||
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
|
||||
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState, initSync bool, optimistic bool) error {
|
||||
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState, initSync bool) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
|
||||
defer span.End()
|
||||
if initSync {
|
||||
@@ -495,7 +499,7 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.Sig
|
||||
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
|
||||
return errors.Wrap(err, "could not save state")
|
||||
}
|
||||
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st, optimistic); err != nil {
|
||||
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st); err != nil {
|
||||
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block().Slot())
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -366,14 +366,17 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk block.B
|
||||
for i := len(pendingNodes) - 1; i >= 0; i-- {
|
||||
b := pendingNodes[i]
|
||||
r := pendingRoots[i]
|
||||
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
|
||||
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
|
||||
b.Slot(), r, bytesutil.ToBytes32(b.ParentRoot()),
|
||||
jCheckpoint.Epoch,
|
||||
fCheckpoint.Epoch, false /* optimistic status */); err != nil {
|
||||
fCheckpoint.Epoch); err != nil {
|
||||
return errors.Wrap(err, "could not process block for proto array fork choice")
|
||||
}
|
||||
// TODO(10261) send optimistic status
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, r); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1035,7 +1035,7 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
|
||||
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), 0, 0, false)) // Saves blocks to fork choice store.
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), 0, 0)) // Saves blocks to fork choice store.
|
||||
}
|
||||
|
||||
r, err := service.ancestor(context.Background(), r200[:], 150)
|
||||
@@ -1080,7 +1080,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock))) // Saves blocks to DB.
|
||||
}
|
||||
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, 0, 0, false))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(context.Background(), 200, r200, r200, 0, 0))
|
||||
|
||||
r, err := service.ancestor(context.Background(), r200[:], 150)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -111,7 +111,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, 1, 1, false))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, 1, 1))
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
|
||||
service.processAttestations(ctx)
|
||||
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))
|
||||
|
||||
@@ -87,7 +87,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
|
||||
for i, b := range blocks {
|
||||
blockCopy := b.Copy()
|
||||
// TODO(10261) check optimistic status
|
||||
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i], false /*optimistic status*/); err != nil {
|
||||
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -166,7 +166,7 @@ func (s *Service) Status() error {
|
||||
|
||||
func (s *Service) startFromSavedState(saved state.BeaconState) error {
|
||||
log.Info("Blockchain data already exists in DB, initializing...")
|
||||
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0)
|
||||
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
|
||||
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())
|
||||
|
||||
originRoot, err := s.originRootFromSavedState(s.ctx)
|
||||
@@ -449,14 +449,17 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
|
||||
genesisCheckpoint := genesisState.FinalizedCheckpoint()
|
||||
s.store = store.New(genesisCheckpoint, genesisCheckpoint)
|
||||
|
||||
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
|
||||
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
|
||||
genesisBlk.Block().Slot(),
|
||||
genesisBlkRoot,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
genesisCheckpoint.Epoch,
|
||||
genesisCheckpoint.Epoch, false /* optimistic status */); err != nil {
|
||||
genesisCheckpoint.Epoch); err != nil {
|
||||
log.Fatalf("Could not process genesis block for fork choice: %v", err)
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
|
||||
log.Fatalf("Could not set optimistic status of genesis block to false: %v", err)
|
||||
}
|
||||
|
||||
s.setHead(genesisBlkRoot, genesisBlk, genesisState)
|
||||
return nil
|
||||
|
||||
@@ -460,7 +460,7 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
|
||||
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
|
||||
|
||||
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
|
||||
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
|
||||
@@ -479,7 +479,7 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
|
||||
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
|
||||
|
||||
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
|
||||
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
|
||||
@@ -547,7 +547,7 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
|
||||
bs := ðpb.BeaconState{FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}}
|
||||
beaconState, err := v1.InitializeFromProto(bs)
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
|
||||
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
@@ -568,7 +568,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
|
||||
bs := ðpb.BeaconState{FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}}
|
||||
beaconState, err := v1.InitializeFromProto(bs)
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
|
||||
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
9
beacon-chain/cache/active_balance.go
vendored
9
beacon-chain/cache/active_balance.go
vendored
@@ -1,3 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
@@ -17,10 +18,12 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
)
|
||||
|
||||
var (
|
||||
const (
|
||||
// maxBalanceCacheSize defines the max number of active balances can cache.
|
||||
maxBalanceCacheSize = uint64(4)
|
||||
maxBalanceCacheSize = int(4)
|
||||
)
|
||||
|
||||
var (
|
||||
// BalanceCacheMiss tracks the number of balance requests that aren't present in the cache.
|
||||
balanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "total_effective_balance_cache_miss",
|
||||
@@ -42,7 +45,7 @@ type BalanceCache struct {
|
||||
// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.
|
||||
func NewEffectiveBalanceCache() *BalanceCache {
|
||||
return &BalanceCache{
|
||||
cache: lruwrpr.New(int(maxBalanceCacheSize)),
|
||||
cache: lruwrpr.New(maxBalanceCacheSize),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
9
beacon-chain/cache/committee.go
vendored
9
beacon-chain/cache/committee.go
vendored
@@ -1,3 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
@@ -19,11 +20,13 @@ import (
|
||||
mathutil "github.com/prysmaticlabs/prysm/math"
|
||||
)
|
||||
|
||||
var (
|
||||
const (
|
||||
// maxCommitteesCacheSize defines the max number of shuffled committees on per randao basis can cache.
|
||||
// Due to reorgs and long finality, it's good to keep the old cache around for quickly switch over.
|
||||
maxCommitteesCacheSize = uint64(32)
|
||||
maxCommitteesCacheSize = int(32)
|
||||
)
|
||||
|
||||
var (
|
||||
// CommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
|
||||
CommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "committee_cache_miss",
|
||||
@@ -55,7 +58,7 @@ func committeeKeyFn(obj interface{}) (string, error) {
|
||||
// NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
|
||||
func NewCommitteesCache() *CommitteeCache {
|
||||
return &CommitteeCache{
|
||||
CommitteeCache: lruwrpr.New(int(maxCommitteesCacheSize)),
|
||||
CommitteeCache: lruwrpr.New(maxCommitteesCacheSize),
|
||||
inProgress: make(map[string]bool),
|
||||
}
|
||||
}
|
||||
|
||||
4
beacon-chain/cache/committee_fuzz_test.go
vendored
4
beacon-chain/cache/committee_fuzz_test.go
vendored
@@ -33,7 +33,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
|
||||
assert.Equal(t, maxCommitteesCacheSize, len(cache.CommitteeCache.Keys()), "Incorrect key size")
|
||||
}
|
||||
|
||||
func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
|
||||
@@ -50,5 +50,5 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
|
||||
assert.DeepEqual(t, c.SortedIndices, indices)
|
||||
}
|
||||
|
||||
assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
|
||||
assert.Equal(t, maxCommitteesCacheSize, len(cache.CommitteeCache.Keys()), "Incorrect key size")
|
||||
}
|
||||
|
||||
2
beacon-chain/cache/committee_test.go
vendored
2
beacon-chain/cache/committee_test.go
vendored
@@ -102,7 +102,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
|
||||
}
|
||||
|
||||
k := cache.CommitteeCache.Keys()
|
||||
assert.Equal(t, maxCommitteesCacheSize, uint64(len(k)))
|
||||
assert.Equal(t, maxCommitteesCacheSize, len(k))
|
||||
|
||||
sort.Slice(k, func(i, j int) bool {
|
||||
return k[i].(string) < k[j].(string)
|
||||
|
||||
2
beacon-chain/cache/subnet_ids.go
vendored
2
beacon-chain/cache/subnet_ids.go
vendored
@@ -27,7 +27,7 @@ var SubnetIDs = newSubnetIDs()
|
||||
func newSubnetIDs() *subnetIDs {
|
||||
// Given a node can calculate committee assignments of current epoch and next epoch.
|
||||
// Max size is set to 2 epoch length.
|
||||
cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2))
|
||||
cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2)) // lint:ignore uintcast -- constant values that would panic on startup if negative.
|
||||
attesterCache := lruwrpr.New(cacheSize)
|
||||
aggregatorCache := lruwrpr.New(cacheSize)
|
||||
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
|
||||
@@ -225,7 +225,7 @@ func verifyDeposit(beaconState state.ReadOnlyBeaconState, deposit *ethpb.Deposit
|
||||
if ok := trie.VerifyMerkleProofWithDepth(
|
||||
receiptRoot,
|
||||
leaf[:],
|
||||
int(beaconState.Eth1DepositIndex()),
|
||||
beaconState.Eth1DepositIndex(),
|
||||
deposit.Proof,
|
||||
params.BeaconConfig().DepositContractTreeDepth,
|
||||
); !ok {
|
||||
|
||||
@@ -92,7 +92,7 @@ func ExecuteStateTransition(
|
||||
func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.state.ProcessSlot")
|
||||
defer span.End()
|
||||
span.AddAttributes(trace.Int64Attribute("slot", int64(state.Slot())))
|
||||
span.AddAttributes(trace.Int64Attribute("slot", int64(state.Slot()))) // lint:ignore uintcast -- This is OK for tracing.
|
||||
|
||||
prevStateRoot, err := state.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
@@ -190,7 +190,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
|
||||
if state == nil || state.IsNil() {
|
||||
return nil, errors.New("nil state")
|
||||
}
|
||||
span.AddAttributes(trace.Int64Attribute("slots", int64(slot)-int64(state.Slot())))
|
||||
span.AddAttributes(trace.Int64Attribute("slots", int64(slot)-int64(state.Slot()))) // lint:ignore uintcast -- This is OK for tracing.
|
||||
|
||||
// The block must have a higher slot than parent state.
|
||||
if state.Slot() >= slot {
|
||||
@@ -354,7 +354,7 @@ func VerifyOperationLengths(_ context.Context, state state.BeaconState, b block.
|
||||
func ProcessEpochPrecompute(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.state.ProcessEpochPrecompute")
|
||||
defer span.End()
|
||||
span.AddAttributes(trace.Int64Attribute("epoch", int64(time.CurrentEpoch(state))))
|
||||
span.AddAttributes(trace.Int64Attribute("epoch", int64(time.CurrentEpoch(state)))) // lint:ignore uintcast -- This is OK for tracing.
|
||||
|
||||
if state == nil || state.IsNil() {
|
||||
return nil, errors.New("nil state")
|
||||
|
||||
@@ -27,9 +27,9 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
|
||||
// 2 <- justified: 1, finalized: 0
|
||||
// |
|
||||
// 3 <- justified: 2, finalized: 1
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 1, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(3), indexToHash(2), 2, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(3), indexToHash(2), 2, 1))
|
||||
|
||||
// With starting justified epoch at 0, the head should be 3:
|
||||
// 0 <- start
|
||||
@@ -89,17 +89,17 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
|
||||
// | |
|
||||
// justified: 2, finalized: 0 -> 9 10 <- justified: 2, finalized: 0
|
||||
// Left branch.
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(3), indexToHash(1), 1, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(5), indexToHash(3), 1, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(7), indexToHash(5), 1, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(9), indexToHash(7), 2, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(3), indexToHash(1), 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(5), indexToHash(3), 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(7), indexToHash(5), 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(9), indexToHash(7), 2, 0))
|
||||
// Right branch.
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(4), indexToHash(2), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(6), indexToHash(4), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(8), indexToHash(6), 1, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(10), indexToHash(8), 2, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(4), indexToHash(2), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(6), indexToHash(4), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(8), indexToHash(6), 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(10), indexToHash(8), 2, 0))
|
||||
|
||||
// With start at 0, the head should be 10:
|
||||
// 0 <-- start
|
||||
@@ -185,7 +185,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
|
||||
func setup(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
|
||||
ctx := context.Background()
|
||||
f := New(justifiedEpoch, finalizedEpoch)
|
||||
err := f.ProcessBlock(ctx, 0, params.BeaconConfig().ZeroHash, [32]byte{}, justifiedEpoch, finalizedEpoch, false)
|
||||
err := f.InsertOptimisticBlock(ctx, 0, params.BeaconConfig().ZeroHash, [32]byte{}, justifiedEpoch, finalizedEpoch)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -102,17 +102,17 @@ func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []
|
||||
processedAttestationCount.Inc()
|
||||
}
|
||||
|
||||
// ProcessBlock processes a new block by inserting it to the fork choice store.
|
||||
func (f *ForkChoice) ProcessBlock(
|
||||
// InsertOptimisticBlock processes a new block by inserting it to the fork choice store.
|
||||
func (f *ForkChoice) InsertOptimisticBlock(
|
||||
ctx context.Context,
|
||||
slot types.Slot,
|
||||
blockRoot, parentRoot [fieldparams.RootLength]byte,
|
||||
justifiedEpoch, finalizedEpoch types.Epoch, optimistic bool,
|
||||
justifiedEpoch, finalizedEpoch types.Epoch,
|
||||
) error {
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.ProcessBlock")
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.InsertOptimisticBlock")
|
||||
defer span.End()
|
||||
|
||||
return f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch, optimistic)
|
||||
return f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch)
|
||||
}
|
||||
|
||||
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
|
||||
|
||||
@@ -15,9 +15,9 @@ import (
|
||||
func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0))
|
||||
|
||||
f.votes = []Vote{
|
||||
{indexToHash(1), indexToHash(1), 0},
|
||||
@@ -37,9 +37,9 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
|
||||
func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0))
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
@@ -61,12 +61,12 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
func TestForkChoice_IsCanonical(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(5), indexToHash(4), 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 6, indexToHash(6), indexToHash(5), 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(4), 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 6, indexToHash(6), indexToHash(5), 1, 1))
|
||||
|
||||
require.Equal(t, true, f.IsCanonical(params.BeaconConfig().ZeroHash))
|
||||
require.Equal(t, false, f.IsCanonical(indexToHash(1)))
|
||||
@@ -80,12 +80,12 @@ func TestForkChoice_IsCanonical(t *testing.T) {
|
||||
func TestForkChoice_IsCanonicalReorg(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, 1, 1))
|
||||
|
||||
f.store.nodesLock.Lock()
|
||||
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
|
||||
@@ -114,9 +114,9 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
|
||||
func TestForkChoice_AncestorRoot(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(3), indexToHash(2), 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(3), indexToHash(2), 1, 1))
|
||||
f.store.treeRootNode = f.store.nodeByRoot[indexToHash(1)]
|
||||
f.store.treeRootNode.parent = nil
|
||||
|
||||
@@ -140,8 +140,8 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
|
||||
func TestForkChoice_AncestorEqualSlot(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, 1, 1))
|
||||
|
||||
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 100)
|
||||
require.NoError(t, err)
|
||||
@@ -152,8 +152,8 @@ func TestForkChoice_AncestorEqualSlot(t *testing.T) {
|
||||
func TestForkChoice_AncestorLowerSlot(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, 1, 1))
|
||||
|
||||
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 150)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -48,10 +48,10 @@ var (
|
||||
Help: "The number of times pruning happened.",
|
||||
},
|
||||
)
|
||||
optimisticCount = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "doublylinkedtree_optimistic_count",
|
||||
Help: "The number of blocks that have been optimistically synced.",
|
||||
validatedCount = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "doublylinkedtree_validated_count",
|
||||
Help: "The number of blocks that have been fully validated.",
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
@@ -24,7 +24,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// 0
|
||||
// /
|
||||
// 2 <- head
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
|
||||
@@ -33,7 +33,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// 0
|
||||
// / \
|
||||
// head -> 2 1
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
|
||||
@@ -44,7 +44,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// head -> 2 1
|
||||
// |
|
||||
// 3
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1))
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
|
||||
@@ -55,7 +55,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// 2 1
|
||||
// | |
|
||||
// head -> 4 3
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(2), 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(2), 1, 1))
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
|
||||
@@ -68,7 +68,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// head -> 4 3
|
||||
// |
|
||||
// 5 <- justified epoch = 2
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 1))
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
|
||||
@@ -107,7 +107,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// 5
|
||||
// |
|
||||
// 6 <- head
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(5), 2, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(5), 2, 1))
|
||||
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 2")
|
||||
|
||||
@@ -120,6 +120,7 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
|
||||
}
|
||||
|
||||
n.optimistic = false
|
||||
validatedCount.Inc()
|
||||
return n.parent.setNodeAndParentValidated(ctx)
|
||||
}
|
||||
|
||||
|
||||
@@ -13,9 +13,9 @@ import (
|
||||
func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0))
|
||||
|
||||
// The updated balances of each node is 100
|
||||
s := f.store
|
||||
@@ -36,9 +36,9 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
|
||||
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0))
|
||||
|
||||
// The updated balances of each node is 100
|
||||
s := f.store
|
||||
@@ -63,7 +63,7 @@ func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
// Input child is not viable.
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 2, 3, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 2, 3))
|
||||
|
||||
// Verify parent's best child and best descendant are `none`.
|
||||
s := f.store
|
||||
@@ -76,7 +76,7 @@ func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
// Input child is best descendant
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
|
||||
s := f.store
|
||||
assert.Equal(t, 1, len(s.treeRootNode.children))
|
||||
@@ -87,8 +87,8 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
// Input child is best descendant
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))

s := f.store
s.nodeByRoot[indexToHash(1)].weight = 100
@@ -103,8 +103,8 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))

s := f.store
s.nodeByRoot[indexToHash(1)].weight = 200
@@ -119,9 +119,9 @@ func TestNode_TestDepth(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1))

s := f.store
require.Equal(t, s.nodeByRoot[indexToHash(2)].depth(), uint64(2))
@@ -151,11 +151,11 @@ func TestNode_ViableForHead(t *testing.T) {
func TestNode_LeadsToViableHead(t *testing.T) {
f := setup(4, 3)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(5), indexToHash(3), 4, 3, false))
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(3), 4, 3))

require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3))
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3))
@@ -172,11 +172,13 @@ func TestNode_SetFullyValidated(t *testing.T) {
// \
// -- 5 (true)
//
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 4, indexToHash(4), indexToHash(3), 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(5), indexToHash(1), 1, 1, true))
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1))
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(3), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(1), 1, 1))

opt, err := f.IsOptimistic(ctx, indexToHash(5))
require.NoError(t, err)

@@ -51,18 +51,18 @@ func TestPruneInvalid(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)

require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))

require.NoError(t, f.store.removeNode(context.Background(), tc.root))
require.Equal(t, tc.wantedNodeNumber, f.NodeCount())

@@ -41,14 +41,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot := types.Slot(1)
newRoot := indexToHash(1)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
true,
),
)
f.ProcessAttestation(ctx, []uint64{0}, newRoot, fEpoch)
@@ -65,14 +64,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = types.Slot(2)
newRoot = indexToHash(2)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
true,
),
)
f.ProcessAttestation(ctx, []uint64{1}, newRoot, fEpoch)
@@ -91,14 +89,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = types.Slot(3)
newRoot = indexToHash(3)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
true,
),
)
f.ProcessAttestation(ctx, []uint64{2}, newRoot, fEpoch)
@@ -117,14 +114,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = types.Slot(3)
newRoot = indexToHash(4)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
true,
),
)
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
@@ -187,14 +183,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
honestBlockSlot := types.Slot(2)
honestBlock := indexToHash(2)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
honestBlockSlot,
honestBlock,
zeroHash,
jEpoch,
fEpoch,
true,
),
)
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
@@ -204,14 +199,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
maliciouslyWithheldBlockSlot := types.Slot(1)
maliciouslyWithheldBlock := indexToHash(1)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
jEpoch,
fEpoch,
true,
),
)

@@ -253,14 +247,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
honestBlockSlot := types.Slot(2)
honestBlock := indexToHash(2)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
honestBlockSlot,
honestBlock,
zeroHash,
jEpoch,
fEpoch,
true,
),
)

@@ -272,14 +265,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
maliciouslyWithheldBlockSlot := types.Slot(1)
maliciouslyWithheldBlock := indexToHash(1)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
jEpoch,
fEpoch,
true,
),
)

@@ -328,14 +320,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
cSlot := types.Slot(2)
c := indexToHash(2)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
cSlot,
c,
a, // parent
jEpoch,
fEpoch,
true,
),
)

@@ -352,14 +343,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
bSlot := types.Slot(1)
b := indexToHash(1)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
bSlot,
b,
a, // parent
jEpoch,
fEpoch,
true,
),
)

@@ -376,14 +366,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
dSlot := types.Slot(3)
d := indexToHash(3)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
dSlot,
d,
b, // parent
jEpoch,
fEpoch,
true,
),
)

@@ -97,7 +97,7 @@ func (s *Store) head(ctx context.Context, justifiedRoot [32]byte) ([32]byte, err
func (s *Store) insert(ctx context.Context,
slot types.Slot,
root, parentRoot [fieldparams.RootLength]byte,
justifiedEpoch, finalizedEpoch types.Epoch, optimistic bool) error {
justifiedEpoch, finalizedEpoch types.Epoch) error {
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
defer span.End()

@@ -117,7 +117,7 @@ func (s *Store) insert(ctx context.Context,
parent: parent,
justifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
optimistic: optimistic,
optimistic: true,
}

s.nodeByRoot[root] = n
@@ -128,14 +128,6 @@ func (s *Store) insert(ctx context.Context,
}
}

if !optimistic {
if err := n.setNodeAndParentValidated(ctx); err != nil {
return err
}
} else {
optimisticCount.Inc()
}

// Set the node as root if the store was empty
if s.treeRootNode == nil {
s.treeRootNode = n

@@ -33,14 +33,14 @@ func TestStore_FinalizedEpoch(t *testing.T) {
|
||||
|
||||
func TestStore_NodeCount(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.Equal(t, 2, f.NodeCount())
|
||||
}
|
||||
|
||||
func TestStore_NodeByRoot(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0))
|
||||
node0 := f.store.treeRootNode
|
||||
node1 := node0.children[0]
|
||||
node2 := node1.children[0]
|
||||
@@ -61,7 +61,7 @@ func TestStore_NodeByRoot(t *testing.T) {
|
||||
|
||||
func TestForkChoice_HasNode(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.Equal(t, true, f.HasNode(indexToHash(1)))
|
||||
}
|
||||
|
||||
@@ -74,7 +74,7 @@ func TestStore_Head_UnknownJustifiedRoot(t *testing.T) {
|
||||
|
||||
func TestStore_Head_Itself(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
|
||||
// Since the justified node does not have a best descendant so the best node
|
||||
// is itself.
|
||||
@@ -85,10 +85,10 @@ func TestStore_Head_Itself(t *testing.T) {
|
||||
|
||||
func TestStore_Head_BestDescendant(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(3), indexToHash(1), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(4), indexToHash(2), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(3), indexToHash(1), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(4), indexToHash(2), 0, 0))
|
||||
h, err := f.store.head(context.Background(), indexToHash(1))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, h, indexToHash(4))
|
||||
@@ -97,9 +97,9 @@ func TestStore_Head_BestDescendant(t *testing.T) {
|
||||
func TestStore_UpdateBestDescendant_ContextCancelled(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
f := setup(0, 0)
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
cancel()
|
||||
err := f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false)
|
||||
err := f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0)
|
||||
require.ErrorContains(t, "context canceled", err)
|
||||
}
|
||||
|
||||
@@ -108,7 +108,7 @@ func TestStore_Insert(t *testing.T) {
|
||||
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
|
||||
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
|
||||
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode}
|
||||
require.NoError(t, s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), 1, 1, false))
|
||||
require.NoError(t, s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), 1, 1))
|
||||
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
|
||||
assert.Equal(t, (*Node)(nil), treeRootNode.parent, "Incorrect parent")
|
||||
assert.Equal(t, 1, len(treeRootNode.children), "Incorrect children number")
|
||||
@@ -132,9 +132,9 @@ func TestStore_Prune_LessThanThreshold(t *testing.T) {
|
||||
numOfNodes := uint64(100)
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
for i := uint64(2); i < numOfNodes; i++ {
|
||||
require.NoError(t, f.ProcessBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0))
|
||||
}
|
||||
|
||||
s := f.store
|
||||
@@ -151,9 +151,9 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
|
||||
numOfNodes := uint64(100)
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
for i := uint64(2); i < numOfNodes; i++ {
|
||||
require.NoError(t, f.ProcessBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0))
|
||||
}
|
||||
|
||||
s := f.store
|
||||
@@ -169,9 +169,9 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
|
||||
numOfNodes := uint64(100)
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
for i := uint64(2); i < numOfNodes; i++ {
|
||||
require.NoError(t, f.ProcessBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0))
|
||||
}
|
||||
|
||||
s := f.store
|
||||
@@ -196,8 +196,8 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
|
||||
func TestStore_Prune_NoDanglingBranch(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
f.store.pruneThreshold = 0
|
||||
|
||||
s := f.store
|
||||
@@ -221,18 +221,18 @@ func TestStore_tips(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
|
||||
expectedMap := map[[32]byte]types.Slot{
|
||||
[32]byte{'f'}: 105,
|
||||
[32]byte{'i'}: 106,
|
||||
@@ -250,8 +250,8 @@ func TestStore_tips(t *testing.T) {
|
||||
func TestStore_PruneMapsNodes(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
|
||||
s := f.store
|
||||
s.pruneThreshold = 0
|
||||
@@ -263,9 +263,9 @@ func TestStore_PruneMapsNodes(t *testing.T) {
|
||||
func TestStore_HasParent(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1))
|
||||
require.Equal(t, false, f.HasParent(params.BeaconConfig().ZeroHash))
|
||||
require.Equal(t, true, f.HasParent(indexToHash(1)))
|
||||
require.Equal(t, true, f.HasParent(indexToHash(2)))
|
||||
|
||||
@@ -22,7 +22,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 0
|
||||
// /
|
||||
// 2 <- head
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -32,7 +32,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 0
|
||||
// / \
|
||||
// head -> 2 1
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -62,7 +62,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// head -> 2 1
|
||||
// |
|
||||
// 3
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -98,7 +98,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 3
|
||||
// |
|
||||
// 4 <- head
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(3), 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(3), 1, 1))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -114,7 +114,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 4 <- head
|
||||
// /
|
||||
// 5 <- justified epoch = 2
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 2, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 2))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -130,7 +130,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 4 <- head
|
||||
// / \
|
||||
// 5 6 <- justified epoch = 0
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1))
|
||||
|
||||
// Moved 2 votes to block 5:
|
||||
// 0
|
||||
@@ -142,7 +142,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 4
|
||||
// / \
|
||||
// 2 votes-> 5 6
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1))
|
||||
|
||||
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4)
|
||||
|
||||
@@ -163,9 +163,9 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 8
|
||||
// |
|
||||
// 9
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(7), indexToHash(5), 2, 2, true))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(8), indexToHash(7), 2, 2, true))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(9), indexToHash(8), 2, 2, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(7), indexToHash(5), 2, 2))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(8), indexToHash(7), 2, 2))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(9), indexToHash(8), 2, 2))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -210,7 +210,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// / \
|
||||
// 2 votes->9 10
|
||||
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(9), 5)
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(10), indexToHash(8), 2, 2, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(10), indexToHash(8), 2, 2))
|
||||
|
||||
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
|
||||
require.NoError(t, err)
|
||||
@@ -289,7 +289,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 9 10
|
||||
// |
|
||||
// head-> 11
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(11), indexToHash(9), 2, 2, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(11), indexToHash(9), 2, 2))
|
||||
|
||||
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -29,13 +29,13 @@ type HeadRetriever interface {

// BlockProcessor processes the block that's used for accounting fork choice.
type BlockProcessor interface {
ProcessBlock(ctx context.Context,
InsertOptimisticBlock(ctx context.Context,
slot types.Slot,
root [32]byte,
parentRoot [32]byte,
justifiedEpoch types.Epoch,
finalizedEpoch types.Epoch,
optimisticStatus bool) error
) error
}

// AttestationProcessor processes the attestation that's used for accounting fork choice.

@@ -21,6 +21,7 @@ go_library(
deps = [
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",

@@ -27,9 +27,9 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// 2 <- justified: 1, finalized: 0
// |
// 3 <- justified: 2, finalized: 1
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(3), indexToHash(2), 2, 1, false))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(3), indexToHash(2), 2, 1))

// With starting justified epoch at 0, the head should be 3:
// 0 <- start
@@ -89,17 +89,17 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
// | |
// justified: 2, finalized: 0 -> 9 10 <- justified: 2, finalized: 0
// Left branch.
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(3), indexToHash(1), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(5), indexToHash(3), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(7), indexToHash(5), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(9), indexToHash(7), 2, 0, false))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(3), indexToHash(1), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(5), indexToHash(3), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(7), indexToHash(5), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(9), indexToHash(7), 2, 0))
// Right branch.
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(4), indexToHash(2), 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(6), indexToHash(4), 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(8), indexToHash(6), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(10), indexToHash(8), 2, 0, false))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(4), indexToHash(2), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(6), indexToHash(4), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(8), indexToHash(6), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(10), indexToHash(8), 2, 0))

// With start at 0, the head should be 10:
// 0 <-- start

@@ -4,6 +4,7 @@ import (
"context"

"github.com/prysmaticlabs/prysm/config/params"
pmath "github.com/prysmaticlabs/prysm/math"
"go.opencensus.io/trace"
)

@@ -46,19 +47,27 @@ func computeDeltas(
if ok {
// Protection against out of bound, the `nextDeltaIndex` which defines
// the block location in the dag can not exceed the total `delta` length.
if int(nextDeltaIndex) >= len(deltas) {
if nextDeltaIndex >= uint64(len(deltas)) {
return nil, nil, errInvalidNodeDelta
}
deltas[nextDeltaIndex] += int(newBalance)
delta, err := pmath.Int(newBalance)
if err != nil {
return nil, nil, err
}
deltas[nextDeltaIndex] += delta
}

currentDeltaIndex, ok := blockIndices[vote.currentRoot]
if ok {
// Protection against out of bound (same as above)
if int(currentDeltaIndex) >= len(deltas) {
if currentDeltaIndex >= uint64(len(deltas)) {
return nil, nil, errInvalidNodeDelta
}
deltas[currentDeltaIndex] -= int(oldBalance)
delta, err := pmath.Int(oldBalance)
if err != nil {
return nil, nil, err
}
deltas[currentDeltaIndex] -= delta
}
}

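A minimal sketch of the overflow guard this hunk switches to. pmath.Int in prysm is assumed to behave roughly like safeInt below; this is not its actual implementation.

package main

import (
	"errors"
	"fmt"
	"math"
)

// safeInt converts a uint64 balance to int, returning an error instead of
// silently wrapping around when the value does not fit in a signed int.
func safeInt(v uint64) (int, error) {
	if v > uint64(math.MaxInt) {
		return 0, errors.New("uint64 value overflows int")
	}
	return int(v), nil
}

func main() {
	delta, err := safeInt(32_000_000_000) // a 32 ETH balance in Gwei fits comfortably
	fmt.Println(delta, err)
}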
@@ -24,7 +24,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 0
// /
// 2 <- head
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -33,7 +33,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 0
// / \
// head -> 2 1
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -44,7 +44,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// head -> 2 1
// |
// 3
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1, false))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -55,7 +55,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 2 1
// | |
// head -> 4 3
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(2), 1, 1, false))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(2), 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -68,7 +68,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// head -> 4 3
// |
// 5 <- justified epoch = 2
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 1, false))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -107,7 +107,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 5
// |
// 6 <- head
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(5), 2, 1, false))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(5), 2, 1))
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 2")

@@ -50,6 +50,7 @@ func (f *ForkChoice) IsOptimistic(ctx context.Context, root [32]byte) (bool, err
_, ok = f.syncedTips.validatedTips[root]
if ok {
f.syncedTips.RUnlock()
f.store.nodesLock.RUnlock()
return false, nil
}
f.syncedTips.RUnlock()
@@ -57,11 +58,13 @@ func (f *ForkChoice) IsOptimistic(ctx context.Context, root [32]byte) (bool, err
// If the slot is higher than the max synced tip, it's optimistic
min, max := f.boundarySyncedTips()
if slot > max {
f.store.nodesLock.RUnlock()
return true, nil
}

// If the slot is lower than the min synced tip, it's fully validated
if slot <= min {
f.store.nodesLock.RUnlock()
return false, nil
}

@@ -69,6 +72,7 @@ func (f *ForkChoice) IsOptimistic(ctx context.Context, root [32]byte) (bool, err
// optimistic
childIndex := node.BestChild()
if childIndex == NonExistentNode {
f.store.nodesLock.RUnlock()
return true, nil
}

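A generic sketch of the locking discipline this hunk enforces: the read lock has to be released on every early-return path, otherwise a later writer (and every reader queued behind it) blocks forever, which is the deadlock the regression test further below exercises. All names here are illustrative, not prysm's types.

package main

import (
	"fmt"
	"sync"
)

type tipStore struct {
	mu   sync.RWMutex
	tips map[[32]byte]uint64
}

// isValidatedTip releases the read lock before each return, mirroring the
// RUnlock calls added in the diff above.
func (s *tipStore) isValidatedTip(root [32]byte) bool {
	s.mu.RLock()
	if _, ok := s.tips[root]; ok {
		s.mu.RUnlock()
		return true
	}
	s.mu.RUnlock()
	return false
}

func main() {
	s := &tipStore{tips: map[[32]byte]uint64{{'a'}: 100}}
	fmt.Println(s.isValidatedTip([32]byte{'a'}))
	s.mu.Lock() // a writer can now make progress
	s.mu.Unlock()
}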
@@ -220,18 +220,18 @@ func TestSetOptimisticToValid(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
|
||||
tests := []struct {
|
||||
root [32]byte // the root of the new VALID block
|
||||
tips map[[32]byte]types.Slot // the old synced tips
|
||||
@@ -409,18 +409,18 @@ func TestSetOptimisticToInvalid(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
|
||||
weights := []uint64{10, 10, 9, 7, 1, 6, 2, 3, 1, 1, 1, 0, 0}
|
||||
f.syncedTips.Lock()
|
||||
f.syncedTips.validatedTips = tc.tips
|
||||
@@ -467,18 +467,18 @@ func TestFindSyncedTip(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
|
||||
tests := []struct {
|
||||
root [32]byte // the root of the block
|
||||
tips map[[32]byte]types.Slot // the synced tips
|
||||
@@ -548,3 +548,45 @@ func TestFindSyncedTip(t *testing.T) {
|
||||
syncedTips.RUnlock()
|
||||
}
|
||||
}

// This is a regression test (10341)
func TestIsOptimistic_DeadLock(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 90, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
tips := map[[32]byte]types.Slot{
[32]byte{'a'}: 100,
[32]byte{'d'}: 102,
}
f.syncedTips.validatedTips = tips
_, err := f.IsOptimistic(ctx, [32]byte{'a'})
require.NoError(t, err)

// Acquire a write lock, this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'e'})
require.NoError(t, err)

// Acquire a write lock, this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'b'})
require.NoError(t, err)

// Acquire a write lock, this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()

_, err = f.IsOptimistic(ctx, [32]byte{'c'})
require.NoError(t, err)

// Acquire a write lock, this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()

}

@@ -41,14 +41,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot := types.Slot(1)
newRoot := indexToHash(1)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
false,
),
)
f.ProcessAttestation(ctx, []uint64{0}, newRoot, fEpoch)
@@ -65,14 +64,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = types.Slot(2)
newRoot = indexToHash(2)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
false,
),
)
f.ProcessAttestation(ctx, []uint64{1}, newRoot, fEpoch)
@@ -91,14 +89,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = types.Slot(2)
newRoot = indexToHash(2)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
false,
),
)
f.ProcessAttestation(ctx, []uint64{2}, newRoot, fEpoch)
@@ -117,14 +114,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = types.Slot(3)
newRoot = indexToHash(4)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
false,
),
)
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
@@ -184,14 +180,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
honestBlockSlot := types.Slot(2)
honestBlock := indexToHash(2)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
honestBlockSlot,
honestBlock,
zeroHash,
jEpoch,
fEpoch,
false,
),
)
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
@@ -201,14 +196,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
maliciouslyWithheldBlockSlot := types.Slot(1)
maliciouslyWithheldBlock := indexToHash(1)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
jEpoch,
fEpoch,
false,
),
)

@@ -250,14 +244,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
honestBlockSlot := types.Slot(2)
honestBlock := indexToHash(2)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
honestBlockSlot,
honestBlock,
zeroHash,
jEpoch,
fEpoch,
false,
),
)

@@ -269,14 +262,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
maliciouslyWithheldBlockSlot := types.Slot(1)
maliciouslyWithheldBlock := indexToHash(1)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
jEpoch,
fEpoch,
false,
),
)

@@ -325,14 +317,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
cSlot := types.Slot(2)
c := indexToHash(2)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
cSlot,
c,
a, // parent
jEpoch,
fEpoch,
false,
),
)

@@ -349,14 +340,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
bSlot := types.Slot(1)
b := indexToHash(1)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
bSlot,
b,
a, // parent
jEpoch,
fEpoch,
false,
),
)

@@ -373,14 +363,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
dSlot := types.Slot(3)
d := indexToHash(3)
require.NoError(t,
f.ProcessBlock(
f.InsertOptimisticBlock(
ctx,
dSlot,
d,
b, // parent
jEpoch,
fEpoch,
false,
),
)

@@ -9,6 +9,7 @@ import (
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
pmath "github.com/prysmaticlabs/prysm/math"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)
@@ -143,23 +144,16 @@ func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
return f.store.proposerBoost()
}

// ProcessBlock processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) ProcessBlock(
// InsertOptimisticBlock processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) InsertOptimisticBlock(
ctx context.Context,
slot types.Slot,
blockRoot, parentRoot [32]byte,
justifiedEpoch, finalizedEpoch types.Epoch, optimistic bool) error {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.ProcessBlock")
justifiedEpoch, finalizedEpoch types.Epoch) error {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.InsertOptimisticBlock")
defer span.End()

if err := f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch); err != nil {
return err
}

if !optimistic {
return f.SetOptimisticToValid(ctx, blockRoot)
}
return nil
return f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch)
}

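Sketch of the call order implied by this hunk: every block now enters fork choice optimistically, and callers invoke SetOptimisticToValid separately once the payload is known to be valid, exactly as the updated doubly-linked-tree tests above do. The local interface, helper, and no-op stub are illustrative, not prysm's API.

package main

import (
	"context"
	"fmt"
)

// forkChoicer mirrors the two calls involved, with plain uint64 standing in
// for types.Slot and types.Epoch to keep the sketch dependency-free.
type forkChoicer interface {
	InsertOptimisticBlock(ctx context.Context, slot uint64, root, parentRoot [32]byte,
		justifiedEpoch, finalizedEpoch uint64) error
	SetOptimisticToValid(ctx context.Context, root [32]byte) error
}

// insertAndMaybeValidate inserts the block, then marks it valid only when the
// caller already knows the payload is verified.
func insertAndMaybeValidate(ctx context.Context, f forkChoicer, slot uint64,
	root, parent [32]byte, justified, finalized uint64, fullyValid bool) error {
	if err := f.InsertOptimisticBlock(ctx, slot, root, parent, justified, finalized); err != nil {
		return err
	}
	if !fullyValid {
		return nil // stays optimistic until the payload is verified
	}
	return f.SetOptimisticToValid(ctx, root)
}

type noopForkChoice struct{}

func (noopForkChoice) InsertOptimisticBlock(context.Context, uint64, [32]byte, [32]byte, uint64, uint64) error {
	return nil
}
func (noopForkChoice) SetOptimisticToValid(context.Context, [32]byte) error { return nil }

func main() {
	err := insertAndMaybeValidate(context.Background(), noopForkChoice{}, 1, [32]byte{'a'}, [32]byte{}, 0, 0, true)
	fmt.Println(err)
}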
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
@@ -446,7 +440,11 @@ func (s *Store) applyWeightChanges(
s.proposerBoostLock.Unlock()
return err
}
nodeDelta = nodeDelta + int(proposerScore)
iProposerScore, err := pmath.Int(proposerScore)
if err != nil {
return err
}
nodeDelta = nodeDelta + iProposerScore
}
s.proposerBoostLock.Unlock()

@@ -491,18 +491,18 @@ func TestStore_PruneSyncedTips(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)

require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
syncedTips := &optimisticStore{
validatedTips: map[[32]byte]types.Slot{
[32]byte{'b'}: 101,

@@ -23,7 +23,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 0
// /
// 2 <- head
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, true))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))

r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -33,7 +33,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 0
// / \
// head -> 2 1
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, true))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))

r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -63,7 +63,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// head -> 2 1
// |
// 3
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1, true))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1))

r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -99,7 +99,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 3
// |
// 4 <- head
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(3), 1, 1, true))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(3), 1, 1))

r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -115,7 +115,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 4 <- head
// /
// 5 <- justified epoch = 2
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 2, true))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 2))

r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -131,7 +131,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 4 <- head
// / \
// 5 6 <- justified epoch = 0
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1, true))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1))

// Moved 2 votes to block 5:
// 0
@@ -143,7 +143,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 4
// / \
// 2 votes-> 5 6
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1, true))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1))

f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4)

@@ -164,9 +164,9 @@ func TestVotes_CanFindHead(t *testing.T) {
// 8
// |
// 9
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(7), indexToHash(5), 2, 2, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(8), indexToHash(7), 2, 2, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(9), indexToHash(8), 2, 2, true))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(7), indexToHash(5), 2, 2))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(8), indexToHash(7), 2, 2))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(9), indexToHash(8), 2, 2))

r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -211,7 +211,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// / \
// 2 votes->9 10
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(9), 5)
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(10), indexToHash(8), 2, 2, true))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(10), indexToHash(8), 2, 2))

r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
@@ -290,7 +290,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 9 10
// |
// head-> 11
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(11), indexToHash(9), 2, 2, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(11), indexToHash(9), 2, 2))
|
||||
|
||||
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -35,7 +35,7 @@ func configureHistoricalSlasher(cliCtx *cli.Context) {
params.OverrideBeaconConfig(c)
cmdConfig := cmd.Get()
// Allow up to 4096 attestations at a time to be requested from the beacon node.
cmdConfig.MaxRPCPageSize = int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations))
cmdConfig.MaxRPCPageSize = int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations)) // lint:ignore uintcast -- Page size should not exceed int64 with these constants.
cmd.Init(cmdConfig)
log.Warnf(
"Setting %d slots per archive point and %d max RPC page size for historical slasher usage. This requires additional storage",
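For context, the page size above works out to 4096 on mainnet: SlotsPerEpoch (32) times MaxAttestations (128), matching the comment. A minimal sketch, assuming mainnet constants (the literals stand in for params.BeaconConfig()):

package main

import "fmt"

func main() {
	slotsPerEpoch := uint64(32)    // params.BeaconConfig().SlotsPerEpoch on mainnet
	maxAttestations := uint64(128) // params.BeaconConfig().MaxAttestations on mainnet
	fmt.Println(slotsPerEpoch * maxAttestations) // 4096, the page size referenced in the comment above
}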
@@ -104,8 +104,8 @@ func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *
|
||||
|
||||
span.AddAttributes(
|
||||
trace.BoolAttribute("hasPeer", hasPeer),
|
||||
trace.Int64Attribute("slot", int64(att.Data.Slot)),
|
||||
trace.Int64Attribute("subnet", int64(subnet)),
|
||||
trace.Int64Attribute("slot", int64(att.Data.Slot)), // lint:ignore uintcast -- It's safe to do this for tracing.
|
||||
trace.Int64Attribute("subnet", int64(subnet)), // lint:ignore uintcast -- It's safe to do this for tracing.
|
||||
)
|
||||
|
||||
if !hasPeer {
|
||||
@@ -160,8 +160,8 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
|
||||
|
||||
span.AddAttributes(
|
||||
trace.BoolAttribute("hasPeer", hasPeer),
|
||||
trace.Int64Attribute("slot", int64(sMsg.Slot)),
|
||||
trace.Int64Attribute("subnet", int64(subnet)),
|
||||
trace.Int64Attribute("slot", int64(sMsg.Slot)), // lint:ignore uintcast -- It's safe to do this for tracing.
|
||||
trace.Int64Attribute("subnet", int64(subnet)), // lint:ignore uintcast -- It's safe to do this for tracing.
|
||||
)
|
||||
|
||||
if !hasPeer {
|
||||
@@ -213,7 +213,9 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
|
||||
if span.IsRecordingEvents() {
|
||||
id := hash.FastSum64(buf.Bytes())
|
||||
messageLen := int64(buf.Len())
|
||||
span.AddMessageSendEvent(int64(id), messageLen /*uncompressed*/, messageLen /*compressed*/)
|
||||
// lint:ignore uintcast -- It's safe to do this for tracing.
|
||||
iid := int64(id)
|
||||
span.AddMessageSendEvent(iid, messageLen /*uncompressed*/, messageLen /*compressed*/)
|
||||
}
|
||||
if err := s.PublishToTopic(ctx, topic+s.Encoding().ProtocolSuffix(), buf.Bytes()); err != nil {
|
||||
err := errors.Wrap(err, "could not publish message")
|
||||
|
||||
@@ -14,6 +14,7 @@ go_library(
|
||||
],
|
||||
deps = [
|
||||
"//config/params:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"@com_github_ferranbt_fastssz//:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
|
||||
@@ -3,7 +3,6 @@ package encoder
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
fastssz "github.com/ferranbt/fastssz"
|
||||
@@ -11,6 +10,7 @@ import (
|
||||
"github.com/golang/snappy"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/math"
|
||||
)
|
||||
|
||||
var _ NetworkEncoding = (*SszNetworkEncoder)(nil)
|
||||
@@ -145,11 +145,11 @@ func (_ SszNetworkEncoder) ProtocolSuffix() string {
// MaxLength specifies the maximum possible length of an encoded
// chunk of data.
func (_ SszNetworkEncoder) MaxLength(length uint64) (int, error) {
// Defensive check to prevent potential issues when casting to int64.
if length > math.MaxInt64 {
return 0, errors.Errorf("invalid length provided: %d", length)
il, err := math.Int(length)
if err != nil {
return 0, errors.Wrap(err, "invalid length provided")
}
maxLen := snappy.MaxEncodedLen(int(length))
maxLen := snappy.MaxEncodedLen(il)
if maxLen < 0 {
return 0, errors.Errorf("max encoded length is negative: %d", maxLen)
}
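A hedged sketch of the MaxLength change above: the checked math.Int helper replaces the manual MaxInt64 comparison and the raw int cast. The wrapper name and sample value below are illustrative only; snappy.MaxEncodedLen and the Prysm math helper are the ones used in the diff:

package main

import (
	"fmt"

	"github.com/golang/snappy"
	"github.com/pkg/errors"
	prysmmath "github.com/prysmaticlabs/prysm/math"
)

func maxLength(length uint64) (int, error) {
	il, err := prysmmath.Int(length) // returns ErrOverflow instead of silently truncating
	if err != nil {
		return 0, errors.Wrap(err, "invalid length provided")
	}
	maxLen := snappy.MaxEncodedLen(il) // -1 when the input is too large to encode
	if maxLen < 0 {
		return 0, errors.Errorf("max encoded length is negative: %d", maxLen)
	}
	return maxLen, nil
}

func main() {
	n, err := maxLength(1 << 20)
	fmt.Println(n, err) // roughly 1 MiB plus snappy framing overhead, <nil>
}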
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/math"
|
||||
"github.com/prysmaticlabs/prysm/network/forks"
|
||||
)
|
||||
|
||||
@@ -72,8 +73,8 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsub_pb.Message) string {
|
||||
// the topic byte string, and the raw message data: i.e. SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + message.data)[:20].
|
||||
func postAltairMsgID(pmsg *pubsub_pb.Message, fEpoch types.Epoch) string {
|
||||
topic := *pmsg.Topic
|
||||
topicLen := uint64(len(topic))
|
||||
topicLenBytes := bytesutil.Uint64ToBytesLittleEndian(topicLen)
|
||||
topicLen := len(topic)
|
||||
topicLenBytes := bytesutil.Uint64ToBytesLittleEndian(uint64(topicLen)) // topicLen cannot be negative
|
||||
|
||||
// beyond Bellatrix epoch, allow 10 Mib gossip data size
|
||||
gossipPubSubSize := params.BeaconNetworkConfig().GossipMaxSize
|
||||
@@ -83,7 +84,25 @@ func postAltairMsgID(pmsg *pubsub_pb.Message, fEpoch types.Epoch) string {
|
||||
|
||||
decodedData, err := encoder.DecodeSnappy(pmsg.Data, gossipPubSubSize)
|
||||
if err != nil {
|
||||
totalLength := len(params.BeaconNetworkConfig().MessageDomainInvalidSnappy) + len(topicLenBytes) + int(topicLen) + len(pmsg.Data)
|
||||
totalLength, err := math.AddInt(
|
||||
len(params.BeaconNetworkConfig().MessageDomainValidSnappy),
|
||||
len(topicLenBytes),
|
||||
topicLen,
|
||||
len(pmsg.Data),
|
||||
)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to sum lengths of message domain and topic")
|
||||
// should never happen
|
||||
msg := make([]byte, 20)
|
||||
copy(msg, "invalid")
|
||||
return string(msg)
|
||||
}
|
||||
if uint64(totalLength) > gossipPubSubSize {
|
||||
// this should never happen
|
||||
msg := make([]byte, 20)
|
||||
copy(msg, "invalid")
|
||||
return string(msg)
|
||||
}
|
||||
combinedData := make([]byte, 0, totalLength)
|
||||
combinedData = append(combinedData, params.BeaconNetworkConfig().MessageDomainInvalidSnappy[:]...)
|
||||
combinedData = append(combinedData, topicLenBytes...)
|
||||
@@ -92,7 +111,19 @@ func postAltairMsgID(pmsg *pubsub_pb.Message, fEpoch types.Epoch) string {
|
||||
h := hash.Hash(combinedData)
|
||||
return string(h[:20])
|
||||
}
|
||||
totalLength := len(params.BeaconNetworkConfig().MessageDomainValidSnappy) + len(topicLenBytes) + int(topicLen) + len(decodedData)
|
||||
totalLength, err := math.AddInt(
|
||||
len(params.BeaconNetworkConfig().MessageDomainValidSnappy),
|
||||
len(topicLenBytes),
|
||||
topicLen,
|
||||
len(decodedData),
|
||||
)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to sum lengths of message domain and topic")
|
||||
// should never happen
|
||||
msg := make([]byte, 20)
|
||||
copy(msg, "invalid")
|
||||
return string(msg)
|
||||
}
|
||||
combinedData := make([]byte, 0, totalLength)
|
||||
combinedData = append(combinedData, params.BeaconNetworkConfig().MessageDomainValidSnappy[:]...)
|
||||
combinedData = append(combinedData, topicLenBytes...)
|
||||
|
||||
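For reference, a minimal standalone sketch of the message-ID formula quoted in the comment above (the shape is the same for the valid- and invalid-snappy branches). The domain value and topic string are placeholders, not the real network-config constants:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// Sketch of SHA256(domain + uint_to_bytes(uint64(len(topic))) + topic + data)[:20].
// The domain argument is a stand-in for params.BeaconNetworkConfig().MessageDomainValidSnappy.
func msgID(domain [4]byte, topic string, data []byte) []byte {
	topicLenBytes := make([]byte, 8)
	binary.LittleEndian.PutUint64(topicLenBytes, uint64(len(topic))) // topic length cannot be negative
	combined := make([]byte, 0, len(domain)+len(topicLenBytes)+len(topic)+len(data))
	combined = append(combined, domain[:]...)
	combined = append(combined, topicLenBytes...)
	combined = append(combined, topic...)
	combined = append(combined, data...)
	h := sha256.Sum256(combined)
	return h[:20]
}

func main() {
	fmt.Printf("%x\n", msgID([4]byte{0x01, 0x00, 0x00, 0x00}, "/eth2/example_topic/ssz_snappy", []byte("example")))
}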
@@ -2,7 +2,10 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["status.go"],
|
||||
srcs = [
|
||||
"log.go",
|
||||
"status.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
@@ -11,6 +14,7 @@ go_library(
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//crypto/rand:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/metadata:go_default_library",
|
||||
"//time:go_default_library",
|
||||
@@ -22,6 +26,7 @@ go_library(
|
||||
"@com_github_multiformats_go_multiaddr//net:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
5 beacon-chain/p2p/peers/log.go Normal file
@@ -0,0 +1,5 @@
|
||||
package peers
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
var log = logrus.WithField("prefix", "peers")
|
||||
@@ -40,6 +40,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/crypto/rand"
|
||||
pmath "github.com/prysmaticlabs/prysm/math"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/metadata"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/time"
|
||||
@@ -749,12 +750,12 @@ func (p *Status) PeersToPrune() []peer.ID {
|
||||
return p.deprecatedPeersToPrune()
|
||||
}
|
||||
connLimit := p.ConnectedPeerLimit()
|
||||
inBoundLimit := p.InboundLimit()
|
||||
inBoundLimit := uint64(p.InboundLimit())
|
||||
activePeers := p.Active()
|
||||
numInboundPeers := len(p.InboundConnected())
|
||||
numInboundPeers := uint64(len(p.InboundConnected()))
|
||||
// Exit early if we are still below our max
|
||||
// limit.
|
||||
if len(activePeers) <= int(connLimit) {
|
||||
if uint64(len(activePeers)) <= connLimit {
|
||||
return []peer.ID{}
|
||||
}
|
||||
p.store.Lock()
|
||||
@@ -784,10 +785,15 @@ func (p *Status) PeersToPrune() []peer.ID {

// Determine amount of peers to prune using our
// max connection limit.
amountToPrune := len(activePeers) - int(connLimit)
amountToPrune, err := pmath.Sub64(uint64(len(activePeers)), connLimit)
if err != nil {
// This should never happen.
log.WithError(err).Error("Failed to determine amount of peers to prune")
return []peer.ID{}
}

// Also check for inbound peers above our limit.
excessInbound := 0
excessInbound := uint64(0)
if numInboundPeers > inBoundLimit {
excessInbound = numInboundPeers - inBoundLimit
}
@@ -796,7 +802,7 @@ func (p *Status) PeersToPrune() []peer.ID {
if excessInbound > amountToPrune {
amountToPrune = excessInbound
}
if amountToPrune < len(peersToPrune) {
if amountToPrune < uint64(len(peersToPrune)) {
peersToPrune = peersToPrune[:amountToPrune]
}
ids := make([]peer.ID, 0, len(peersToPrune))
@@ -815,7 +821,7 @@ func (p *Status) deprecatedPeersToPrune() []peer.ID {
|
||||
numInboundPeers := len(p.InboundConnected())
|
||||
// Exit early if we are still below our max
|
||||
// limit.
|
||||
if len(activePeers) <= int(connLimit) {
|
||||
if uint64(len(activePeers)) <= connLimit {
|
||||
return []peer.ID{}
|
||||
}
|
||||
p.store.Lock()
|
||||
@@ -845,18 +851,23 @@ func (p *Status) deprecatedPeersToPrune() []peer.ID {
|
||||
|
||||
// Determine amount of peers to prune using our
|
||||
// max connection limit.
|
||||
amountToPrune := len(activePeers) - int(connLimit)
|
||||
amountToPrune, err := pmath.Sub64(uint64(len(activePeers)), connLimit)
|
||||
if err != nil {
|
||||
// This should never happen
|
||||
log.WithError(err).Error("Failed to determine amount of peers to prune")
|
||||
return []peer.ID{}
|
||||
}
|
||||
// Also check for inbound peers above our limit.
|
||||
excessInbound := 0
|
||||
excessInbound := uint64(0)
|
||||
if numInboundPeers > inBoundLimit {
|
||||
excessInbound = numInboundPeers - inBoundLimit
|
||||
excessInbound = uint64(numInboundPeers - inBoundLimit)
|
||||
}
|
||||
// Prune the largest amount between excess peers and
|
||||
// excess inbound peers.
|
||||
if excessInbound > amountToPrune {
|
||||
amountToPrune = excessInbound
|
||||
}
|
||||
if amountToPrune < len(peersToPrune) {
|
||||
if amountToPrune < uint64(len(peersToPrune)) {
|
||||
peersToPrune = peersToPrune[:amountToPrune]
|
||||
}
|
||||
ids := make([]peer.ID, 0, len(peersToPrune))
|
||||
|
||||
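A small sketch of the overflow-safe pruning arithmetic introduced above. The function name and peer counts are made up; pmath.Sub64 is the helper used in the diff:

package main

import (
	"fmt"

	pmath "github.com/prysmaticlabs/prysm/math"
)

// amountToPrune mirrors the logic above: prune down to the connection limit,
// but never below the inbound excess, and never underflow.
func amountToPrune(active, connLimit, inbound, inboundLimit uint64) (uint64, error) {
	toPrune, err := pmath.Sub64(active, connLimit) // errors instead of wrapping around
	if err != nil {
		return 0, err
	}
	if inbound > inboundLimit && inbound-inboundLimit > toPrune {
		toPrune = inbound - inboundLimit
	}
	return toPrune, nil
}

func main() {
	fmt.Println(amountToPrune(74, 70, 45, 37)) // 8 <nil>: the inbound excess wins over the plain surplus
}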
@@ -40,7 +40,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
|
||||
defer span.End()
|
||||
|
||||
span.AddAttributes(trace.Int64Attribute("index", int64(index)))
|
||||
span.AddAttributes(trace.Int64Attribute("index", int64(index))) // lint:ignore uintcast -- It's safe to do this for tracing.
|
||||
|
||||
if s.dv5Listener == nil {
|
||||
// return if discovery isn't set
|
||||
@@ -138,7 +138,7 @@ func (s *Service) hasPeerWithSubnet(topic string) bool {
|
||||
// In the event peer threshold is lower, we will choose the lower
|
||||
// threshold.
|
||||
minPeers := mathutil.Min(1, uint64(flags.Get().MinimumPeersPerSubnet))
|
||||
return len(s.pubsub.ListPeers(topic+s.Encoding().ProtocolSuffix())) >= int(minPeers)
|
||||
return len(s.pubsub.ListPeers(topic+s.Encoding().ProtocolSuffix())) >= int(minPeers) // lint:ignore uintcast -- Min peers can be safely cast to int.
|
||||
}
|
||||
|
||||
// Updates the service's discv5 listener record's attestation subnet
|
||||
@@ -195,6 +195,7 @@ func attSubnets(record *enr.Record) ([]uint64, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// lint:ignore uintcast -- subnet count can be safely cast to int.
|
||||
if len(bitV) != byteCount(int(attestationSubnetCount)) {
|
||||
return []uint64{}, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
|
||||
}
|
||||
@@ -214,6 +215,7 @@ func syncSubnets(record *enr.Record) ([]uint64, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// lint:ignore uintcast -- subnet count can be safely cast to int.
|
||||
if len(bitV) != byteCount(int(syncCommsSubnetCount)) {
|
||||
return []uint64{}, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
|
||||
}
|
||||
|
||||
@@ -122,7 +122,7 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
|
||||
}
|
||||
// Initialize a pointer to eth1 chain's history to start our search
|
||||
// from.
|
||||
cursorNum := big.NewInt(int64(latestBlkHeight))
|
||||
cursorNum := big.NewInt(0).SetUint64(latestBlkHeight)
|
||||
cursorTime := latestBlkTime
|
||||
|
||||
numOfBlocks := uint64(0)
|
||||
@@ -168,9 +168,9 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
|
||||
return s.retrieveHeaderInfo(ctx, cursorNum.Uint64())
|
||||
}
|
||||
if cursorTime > time {
|
||||
return s.findLessTargetEth1Block(ctx, big.NewInt(int64(estimatedBlk)), time)
|
||||
return s.findLessTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
|
||||
}
|
||||
return s.findMoreTargetEth1Block(ctx, big.NewInt(int64(estimatedBlk)), time)
|
||||
return s.findMoreTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
|
||||
}
|
||||
|
||||
// Performs a search to find a target eth1 block which is earlier than or equal to the
|
||||
@@ -214,7 +214,7 @@ func (s *Service) findMoreTargetEth1Block(ctx context.Context, startBlk *big.Int
|
||||
}
|
||||
|
||||
func (s *Service) retrieveHeaderInfo(ctx context.Context, bNum uint64) (*types.HeaderInfo, error) {
|
||||
bn := big.NewInt(int64(bNum))
|
||||
bn := big.NewInt(0).SetUint64(bNum)
|
||||
exists, info, err := s.headerCache.HeaderInfoByHeight(bn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
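The switch from big.NewInt(int64(x)) to big.NewInt(0).SetUint64(x) in the eth1 code above avoids a lossy uint64-to-int64 cast for block heights. A standalone illustration (the sample height is deliberately above math.MaxInt64):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	height := uint64(1) << 63 // larger than math.MaxInt64, purely illustrative

	lossy := big.NewInt(int64(height))      // the cast wraps to a negative value first
	safe := big.NewInt(0).SetUint64(height) // SetUint64 covers the full uint64 range

	fmt.Println(lossy.Sign() < 0, safe.Uint64() == height) // true true
}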
@@ -109,7 +109,7 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
|
||||
// This can happen sometimes when we receive the same log twice from the
|
||||
// ETH1.0 network, and prevents us from updating our trie
|
||||
// with the same log twice, causing an inconsistent state root.
|
||||
index := int64(binary.LittleEndian.Uint64(merkleTreeIndex))
|
||||
index := int64(binary.LittleEndian.Uint64(merkleTreeIndex)) // lint:ignore uintcast -- MerkleTreeIndex should not exceed int64 in your lifetime.
|
||||
if index <= s.lastReceivedMerkleIndex {
|
||||
return nil
|
||||
}
|
||||
@@ -211,7 +211,7 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
|
||||
s.chainStartData.Chainstarted = true
|
||||
s.chainStartData.GenesisBlock = blockNumber.Uint64()
|
||||
|
||||
chainStartTime := time.Unix(int64(genesisTime), 0)
|
||||
chainStartTime := time.Unix(int64(genesisTime), 0) // lint:ignore uintcast -- Genesis time wont exceed int64 in your lifetime.
|
||||
|
||||
for i := range s.chainStartData.ChainstartDeposits {
|
||||
proof, err := s.depositTrie.MerkleProof(i)
|
||||
@@ -303,8 +303,8 @@ func (s *Service) processPastLogs(ctx context.Context) error {
|
||||
Addresses: []common.Address{
|
||||
s.cfg.depositContractAddr,
|
||||
},
|
||||
FromBlock: big.NewInt(int64(start)),
|
||||
ToBlock: big.NewInt(int64(end)),
|
||||
FromBlock: big.NewInt(0).SetUint64(start),
|
||||
ToBlock: big.NewInt(0).SetUint64(end),
|
||||
}
|
||||
remainingLogs := logCount - uint64(s.lastReceivedMerkleIndex+1)
|
||||
// only change the end block if the remaining logs are below the required log limit.
|
||||
@@ -312,7 +312,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
|
||||
withinLimit := remainingLogs < depositlogRequestLimit
|
||||
aboveFollowHeight := end >= latestFollowHeight
|
||||
if withinLimit && aboveFollowHeight {
|
||||
query.ToBlock = big.NewInt(int64(latestFollowHeight))
|
||||
query.ToBlock = big.NewInt(0).SetUint64(latestFollowHeight)
|
||||
end = latestFollowHeight
|
||||
}
|
||||
logs, err := s.httpLogger.FilterLogs(ctx, query)
|
||||
@@ -391,7 +391,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
|
||||
}
|
||||
}
|
||||
if fState != nil && !fState.IsNil() && fState.Eth1DepositIndex() > 0 {
|
||||
s.cfg.depositCache.PrunePendingDeposits(ctx, int64(fState.Eth1DepositIndex()))
|
||||
s.cfg.depositCache.PrunePendingDeposits(ctx, int64(fState.Eth1DepositIndex())) // lint:ignore uintcast -- Deposit index should not exceed int64 in your lifetime.
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -413,11 +413,11 @@ func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
|
||||
}
|
||||
for i := s.latestEth1Data.LastRequestedBlock + 1; i <= requestedBlock; i++ {
|
||||
// Cache eth1 block header here.
|
||||
_, err := s.BlockHashByHeight(ctx, big.NewInt(int64(i)))
|
||||
_, err := s.BlockHashByHeight(ctx, big.NewInt(0).SetUint64(i))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = s.ProcessETH1Block(ctx, big.NewInt(int64(i)))
|
||||
err = s.ProcessETH1Block(ctx, big.NewInt(0).SetUint64(i))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -613,8 +613,10 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
|
||||
// accumulates. we finalize them here before we are ready to receive a block.
|
||||
// Otherwise, the first few blocks will be slower to compute as we will
|
||||
// hold the lock and be busy finalizing the deposits.
|
||||
s.cfg.depositCache.InsertFinalizedDeposits(ctx, int64(currIndex))
|
||||
s.cfg.depositCache.InsertFinalizedDeposits(ctx, int64(currIndex)) // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
|
||||
// Deposit proofs are only used during state transition and can be safely removed to save space.
|
||||
|
||||
// lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
|
||||
if err = s.cfg.depositCache.PruneProofs(ctx, int64(currIndex)); err != nil {
|
||||
return errors.Wrap(err, "could not prune deposit proofs")
|
||||
}
|
||||
@@ -662,7 +664,7 @@ func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*gethTypes
|
||||
err := error(nil)
|
||||
elems = append(elems, gethRPC.BatchElem{
|
||||
Method: "eth_getBlockByNumber",
|
||||
Args: []interface{}{hexutil.EncodeBig(big.NewInt(int64(i))), false},
|
||||
Args: []interface{}{hexutil.EncodeBig(big.NewInt(0).SetUint64(i)), false},
|
||||
Result: header,
|
||||
Error: err,
|
||||
})
|
||||
@@ -1088,7 +1090,7 @@ func dedupEndpoints(endpoints []string) []string {
|
||||
func eth1HeadIsBehind(timestamp uint64) bool {
|
||||
timeout := prysmTime.Now().Add(-eth1Threshold)
|
||||
// check that web3 client is syncing
|
||||
return time.Unix(int64(timestamp), 0).Before(timeout)
|
||||
return time.Unix(int64(timestamp), 0).Before(timeout) // lint:ignore uintcast -- timestamp will not exceed int64 in your lifetime.
|
||||
}
|
||||
|
||||
func (s *Service) primaryConnected() bool {
|
||||
|
||||
@@ -545,7 +545,7 @@ func (is *infostream) depositQueueTimestamp(eth1BlockNumber *big.Int) (uint64, e
|
||||
is.eth1BlocktimesMutex.Unlock()
|
||||
|
||||
followTime := time.Duration(params.BeaconConfig().Eth1FollowDistance*params.BeaconConfig().SecondsPerETH1Block) * time.Second
|
||||
eth1UnixTime := time.Unix(int64(blockTimestamp), 0).Add(followTime)
|
||||
eth1UnixTime := time.Unix(int64(blockTimestamp), 0).Add(followTime) // lint:ignore uintcast -- timestamp will not exceed int64 in your lifetime
|
||||
|
||||
period := uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
|
||||
votingPeriod := time.Duration(period*params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
|
||||
@@ -15,6 +15,7 @@ go_library(
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
|
||||
pmath "github.com/prysmaticlabs/prysm/math"
|
||||
)
|
||||
|
||||
// FieldTrie is the representation of the representative
|
||||
@@ -110,11 +111,15 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
iNumOfElems, err := pmath.Int(numOfElems)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
// We remove the duplicates here in order to prevent
|
||||
// duplicated insertions into the trie.
|
||||
newIndices := []uint64{}
|
||||
indexExists := make(map[uint64]bool)
|
||||
newRoots := make([][32]byte, 0, len(fieldRoots)/int(numOfElems))
|
||||
newRoots := make([][32]byte, 0, len(fieldRoots)/iNumOfElems)
|
||||
for i, idx := range indices {
|
||||
startIdx := idx / numOfElems
|
||||
if indexExists[startIdx] {
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
pmath "github.com/prysmaticlabs/prysm/math"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
)
|
||||
@@ -57,7 +58,7 @@ func validateElements(field types.FieldIndex, dataType types.DataType, elements
|
||||
length *= comLength
|
||||
}
|
||||
val := reflect.Indirect(reflect.ValueOf(elements))
|
||||
if val.Len() > int(length) {
|
||||
if uint64(val.Len()) > length {
|
||||
return errors.Errorf("elements length is larger than expected for field %s: %d > %d", field.String(version.Phase0), val.Len(), length)
|
||||
}
|
||||
return nil
|
||||
@@ -318,6 +319,10 @@ func handleBalanceSlice(val, indices []uint64, convertAll bool) ([][32]byte, err
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
iNumOfElems, err := pmath.Int(numOfElems)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
roots := [][32]byte{}
|
||||
for _, idx := range indices {
|
||||
// We split the indexes into their relevant groups. Balances
|
||||
@@ -325,14 +330,14 @@ func handleBalanceSlice(val, indices []uint64, convertAll bool) ([][32]byte, err
|
||||
startIdx := idx / numOfElems
|
||||
startGroup := startIdx * numOfElems
|
||||
chunk := [32]byte{}
|
||||
sizeOfElem := len(chunk) / int(numOfElems)
|
||||
sizeOfElem := len(chunk) / iNumOfElems
|
||||
for i, j := 0, startGroup; j < startGroup+numOfElems; i, j = i+sizeOfElem, j+1 {
|
||||
wantedVal := uint64(0)
|
||||
// We are adding chunks in sets of 4, if the set is at the edge of the array
|
||||
// then you will need to zero out the rest of the chunk. Ex : 41 indexes,
|
||||
// so 41 % 4 = 1 . There are 3 indexes, which do not exist yet but we
|
||||
// have to add in as a root. These 3 indexes are then given a 'zero' value.
|
||||
if int(j) < len(val) {
|
||||
if j < uint64(len(val)) {
|
||||
wantedVal = val[j]
|
||||
}
|
||||
binary.LittleEndian.PutUint64(chunk[i:i+sizeOfElem], wantedVal)
|
||||
|
||||
@@ -123,7 +123,7 @@ func (b *BeaconState) ValidatorIndexByPubkey(key [fieldparams.BLSPubkeyLength]by
|
||||
numOfVals := len(b.validators)
|
||||
|
||||
idx, ok := b.valMapHandler.Get(key)
|
||||
if ok && numOfVals <= int(idx) {
|
||||
if ok && types.ValidatorIndex(numOfVals) <= idx {
|
||||
return types.ValidatorIndex(0), false
|
||||
}
|
||||
return idx, ok
|
||||
|
||||
@@ -130,7 +130,7 @@ func (b *BeaconState) ValidatorIndexByPubkey(key [fieldparams.BLSPubkeyLength]by
|
||||
numOfVals := len(b.validators)
|
||||
|
||||
idx, ok := b.valMapHandler.Get(key)
|
||||
if ok && numOfVals <= int(idx) {
|
||||
if ok && types.ValidatorIndex(numOfVals) <= idx {
|
||||
return types.ValidatorIndex(0), false
|
||||
}
|
||||
return idx, ok
|
||||
|
||||
@@ -124,7 +124,7 @@ func (b *BeaconState) ValidatorIndexByPubkey(key [fieldparams.BLSPubkeyLength]by
|
||||
numOfVals := len(b.validators)
|
||||
|
||||
idx, ok := b.valMapHandler.Get(key)
|
||||
if ok && numOfVals <= int(idx) {
|
||||
if ok && types.ValidatorIndex(numOfVals) <= idx {
|
||||
return types.ValidatorIndex(0), false
|
||||
}
|
||||
return idx, ok
|
||||
|
||||
@@ -20,6 +20,7 @@ var ErrFutureSlotRequested = errors.New("cannot replay to future slots")
|
||||
var ErrNoCanonicalBlockForSlot = errors.New("none of the blocks found in the db slot index are canonical")
|
||||
var ErrNoBlocksBelowSlot = errors.New("no blocks found in db below slot")
|
||||
var ErrInvalidDBBlock = errors.New("invalid block found in database")
|
||||
var ErrReplayTargetSlotExceeded = errors.New("desired replay slot is less than state's slot")
|
||||
|
||||
// HistoryAccessor describes the minimum set of database methods needed to support the ReplayerBuilder.
|
||||
type HistoryAccessor interface {
|
||||
@@ -54,11 +55,10 @@ type Replayer interface {
|
||||
var _ Replayer = &stateReplayer{}
|
||||
|
||||
type stateReplayer struct {
|
||||
s state.BeaconState
|
||||
descendants []block.SignedBeaconBlock
|
||||
target types.Slot
|
||||
method retrievalMethod
|
||||
chainer chainer
|
||||
s state.BeaconState
|
||||
target types.Slot
|
||||
method retrievalMethod
|
||||
chainer chainer
|
||||
}
|
||||
|
||||
// ReplayBlocks applies all the blocks that were accumulated when building the Replayer.
|
||||
@@ -127,28 +127,31 @@ func (rs *stateReplayer) ReplayToSlot(ctx context.Context, replayTo types.Slot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to ReplayBlocks")
|
||||
}
|
||||
|
||||
if replayTo > s.Slot() {
|
||||
start := time.Now()
|
||||
log.WithFields(logrus.Fields{
|
||||
"startSlot": s.Slot(),
|
||||
"endSlot": replayTo,
|
||||
"diff": replayTo - s.Slot(),
|
||||
}).Debug("calling process_slots on remaining slots")
|
||||
|
||||
if replayTo > s.Slot() {
|
||||
// err will be handled after the bookend log
|
||||
s, err = ReplayProcessSlots(ctx, s, replayTo)
|
||||
}
|
||||
|
||||
duration := time.Since(start)
|
||||
log.WithFields(logrus.Fields{
|
||||
"duration": duration,
|
||||
}).Debug("time spent in process_slots")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("ReplayToSlot failed to seek to slot %d after applying blocks", replayTo))
|
||||
}
|
||||
if replayTo < s.Slot() {
|
||||
return nil, errors.Wrapf(ErrReplayTargetSlotExceeded, "slot desired=%d, state.slot=%d", replayTo, s.Slot())
|
||||
}
|
||||
if replayTo == s.Slot() {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
log.WithFields(logrus.Fields{
|
||||
"startSlot": s.Slot(),
|
||||
"endSlot": replayTo,
|
||||
"diff": replayTo - s.Slot(),
|
||||
}).Debug("calling process_slots on remaining slots")
|
||||
|
||||
// err will be handled after the bookend log
|
||||
s, err = ReplayProcessSlots(ctx, s, replayTo)
|
||||
|
||||
duration := time.Since(start)
|
||||
log.WithFields(logrus.Fields{
|
||||
"duration": duration,
|
||||
}).Debug("time spent in process_slots")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("ReplayToSlot failed to seek to slot %d after applying blocks", replayTo))
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -6,10 +6,9 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
|
||||
@@ -64,7 +64,7 @@ func ReturnTrieLayerVariable(elements [][32]byte, length uint64) [][]*[32]byte {
|
||||
buffer := bytes.NewBuffer([]byte{})
|
||||
buffer.Grow(64)
|
||||
|
||||
for i := 0; i < int(depth); i++ {
|
||||
for i := uint8(0); i < depth; i++ {
|
||||
layerLen := len(layers[i])
|
||||
oddNodeLength := layerLen%2 == 1
|
||||
if features.Get().EnableVectorizedHTR {
|
||||
@@ -126,10 +126,13 @@ func RecomputeFromLayer(changedLeaves [][32]byte, changedIdx []uint64, layer [][
|
||||
}
|
||||
|
||||
root := *layer[0][0]
|
||||
var err error
|
||||
|
||||
for _, idx := range changedIdx {
|
||||
root, layer, err = recomputeRootFromLayer(int(idx), layer, leaves, hasher)
|
||||
ii, err := math.Int(idx)
|
||||
if err != nil {
|
||||
return [32]byte{}, nil, err
|
||||
}
|
||||
root, layer, err = recomputeRootFromLayer(ii, layer, leaves, hasher)
|
||||
if err != nil {
|
||||
return [32]byte{}, nil, err
|
||||
}
|
||||
@@ -144,10 +147,13 @@ func RecomputeFromLayerVariable(changedLeaves [][32]byte, changedIdx []uint64, l
|
||||
return *layer[0][0], layer, nil
|
||||
}
|
||||
root := *layer[len(layer)-1][0]
|
||||
var err error
|
||||
|
||||
for i, idx := range changedIdx {
|
||||
root, layer, err = recomputeRootFromLayerVariable(int(idx), changedLeaves[i], layer, hasher)
|
||||
ii, err := math.Int(idx)
|
||||
if err != nil {
|
||||
return [32]byte{}, nil, err
|
||||
}
|
||||
root, layer, err = recomputeRootFromLayerVariable(ii, changedLeaves[i], layer, hasher)
|
||||
if err != nil {
|
||||
return [32]byte{}, nil, err
|
||||
}
|
||||
|
||||
@@ -138,7 +138,7 @@ func (b *BeaconState) ValidatorIndexByPubkey(key [fieldparams.BLSPubkeyLength]by
|
||||
numOfVals := len(b.state.Validators)
|
||||
|
||||
idx, ok := b.valMapHandler.Get(key)
|
||||
if ok && numOfVals <= int(idx) {
|
||||
if ok && types.ValidatorIndex(numOfVals) <= idx {
|
||||
return types.ValidatorIndex(0), false
|
||||
}
|
||||
return idx, ok
|
||||
|
||||
@@ -139,7 +139,7 @@ func (b *BeaconState) ValidatorIndexByPubkey(key [fieldparams.BLSPubkeyLength]by
|
||||
numOfVals := len(b.state.Validators)
|
||||
|
||||
idx, ok := b.valMapHandler.Get(key)
|
||||
if ok && numOfVals <= int(idx) {
|
||||
if ok && types.ValidatorIndex(numOfVals) <= idx {
|
||||
return types.ValidatorIndex(0), false
|
||||
}
|
||||
return idx, ok
|
||||
|
||||
@@ -139,7 +139,7 @@ func (b *BeaconState) ValidatorIndexByPubkey(key [fieldparams.BLSPubkeyLength]by
|
||||
numOfVals := len(b.state.Validators)
|
||||
|
||||
idx, ok := b.valMapHandler.Get(key)
|
||||
if ok && numOfVals <= int(idx) {
|
||||
if ok && types.ValidatorIndex(numOfVals) <= idx {
|
||||
return types.ValidatorIndex(0), false
|
||||
}
|
||||
return idx, ok
|
||||
|
||||
@@ -108,7 +108,7 @@ func (f *blocksFetcher) nonSkippedSlotAfterWithPeersTarget(
|
||||
return 0, err
|
||||
}
|
||||
for ind := slot + 1; ind < upperBoundSlot; ind += (slotsPerEpoch * slotsPerEpoch) / 2 {
|
||||
start := ind.Add(uint64(f.rand.Intn(int(slotsPerEpoch))))
|
||||
start := ind.Add(uint64(f.rand.Intn(int(slotsPerEpoch)))) // lint:ignore uintcast -- Slots per epoch will never exceed int64.
|
||||
nextSlot, err := fetch(peers[pidInd%len(peers)], start, uint64(slotsPerEpoch/2), uint64(slotsPerEpoch))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
|
||||
@@ -151,8 +151,8 @@ func (s *Service) Resync() error {
|
||||
|
||||
// Set it to false since we are syncing again.
|
||||
s.synced.UnSet()
|
||||
defer func() { s.synced.Set() }() // Reset it at the end of the method.
|
||||
genesis := time.Unix(int64(headState.GenesisTime()), 0)
|
||||
defer func() { s.synced.Set() }() // Reset it at the end of the method.
|
||||
genesis := time.Unix(int64(headState.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
|
||||
|
||||
s.waitForMinimumPeers()
|
||||
if err = s.roundRobinSync(genesis); err != nil {
|
||||
|
||||
@@ -69,7 +69,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
}
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "processPendingBlocks.InnerLoop")
|
||||
span.AddAttributes(trace.Int64Attribute("slot", int64(slot)))
|
||||
span.AddAttributes(trace.Int64Attribute("slot", int64(slot))) // lint:ignore uintcast -- This conversion is OK for tracing.
|
||||
|
||||
s.pendingQueueLock.RLock()
|
||||
bs := s.pendingBlocksInCache(slot)
|
||||
|
||||
@@ -60,8 +60,8 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
|
||||
}
|
||||
remainingBucketCapacity := blockLimiter.Remaining(stream.Conn().RemotePeer().String())
|
||||
span.AddAttributes(
|
||||
trace.Int64Attribute("start", int64(startSlot)),
|
||||
trace.Int64Attribute("end", int64(endReqSlot)),
|
||||
trace.Int64Attribute("start", int64(startSlot)), // lint:ignore uintcast -- This conversion is OK for tracing.
|
||||
trace.Int64Attribute("end", int64(endReqSlot)), // lint:ignore uintcast -- This conversion is OK for tracing.
|
||||
trace.Int64Attribute("step", int64(m.Step)),
|
||||
trace.Int64Attribute("count", int64(m.Count)),
|
||||
trace.StringAttribute("peer", stream.Conn().RemotePeer().Pretty()),
|
||||
|
||||
@@ -79,7 +79,7 @@ func (s *Service) maintainPeerStatuses() {
// resyncIfBehind checks periodically to see if we are in normal sync but have fallen behind our peers
// by more than an epoch, in which case we attempt a resync using the initial sync method to catch up.
func (s *Service) resyncIfBehind() {
millisecondsPerEpoch := int64(params.BeaconConfig().SlotsPerEpoch.Mul(1000).Mul(params.BeaconConfig().SecondsPerSlot))
millisecondsPerEpoch := params.BeaconConfig().SlotsPerEpoch.Mul(1000).Mul(params.BeaconConfig().SecondsPerSlot)
// Run sixteen times per epoch.
interval := time.Duration(millisecondsPerEpoch/16) * time.Millisecond
async.RunEvery(s.ctx, interval, func() {
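With mainnet constants the interval above comes out to 24 seconds: 32 slots times 12 seconds times 1000 ms is 384000 ms per epoch, divided by 16. A minimal sketch, assuming mainnet values:

package main

import (
	"fmt"
	"time"
)

func main() {
	slotsPerEpoch := uint64(32)  // params.BeaconConfig().SlotsPerEpoch on mainnet
	secondsPerSlot := uint64(12) // params.BeaconConfig().SecondsPerSlot on mainnet
	millisecondsPerEpoch := slotsPerEpoch * 1000 * secondsPerSlot
	interval := time.Duration(millisecondsPerEpoch/16) * time.Millisecond
	fmt.Println(interval) // 24s: the resync check runs sixteen times per epoch
}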
23 build/bazel/BUILD.bazel Normal file
@@ -0,0 +1,23 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
testonly = True,
|
||||
srcs = [
|
||||
"bazel.go",
|
||||
"data_path.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/build/bazel",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//testing/require:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = ["bazel_test.go"],
|
||||
deps = [":go_default_library"],
|
||||
)
|
||||
103 build/bazel/bazel.go Normal file
@@ -0,0 +1,103 @@
|
||||
// Copyright 2015 The Cockroach Authors.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
//go:build bazel
|
||||
// +build bazel
|
||||
|
||||
package bazel
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
inner "github.com/bazelbuild/rules_go/go/tools/bazel"
|
||||
)
|
||||
|
||||
// Return true iff this library was built with Bazel.
|
||||
func BuiltWithBazel() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// FindBinary is a convenience wrapper around the rules_go variant.
|
||||
func FindBinary(pkg, name string) (string, bool) {
|
||||
return inner.FindBinary(pkg, name)
|
||||
}
|
||||
|
||||
// Runfile is a convenience wrapper around the rules_go variant.
|
||||
func Runfile(path string) (string, error) {
|
||||
return inner.Runfile(path)
|
||||
}
|
||||
|
||||
// RunfilesPath is a convenience wrapper around the rules_go variant.
|
||||
func RunfilesPath() (string, error) {
|
||||
return inner.RunfilesPath()
|
||||
}
|
||||
|
||||
// TestTmpDir is a convenience wrapper around the rules_go variant.
|
||||
func TestTmpDir() string {
|
||||
return inner.TestTmpDir()
|
||||
}
|
||||
|
||||
// NewTmpDir is a convenience wrapper around the rules_go variant.
|
||||
// The caller is responsible for cleaning the directory up after use.
|
||||
func NewTmpDir(prefix string) (string, error) {
|
||||
return inner.NewTmpDir(prefix)
|
||||
}
|
||||
|
||||
// Updates the current environment to use the Go toolchain that Bazel built this
|
||||
// binary/test with (updates the `PATH`/`GOROOT`/`GOCACHE` environment
|
||||
// variables).
|
||||
// If you want to use this function, your binary/test target MUST have
|
||||
// `@go_sdk//:files` in its `data` -- this will make sure the whole toolchain
|
||||
// gets pulled into the sandbox as well. Generally, this function should only
|
||||
// be called in init().
|
||||
func SetGoEnv() {
|
||||
gobin, err := Runfile("bin/go")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := os.Setenv("PATH", fmt.Sprintf("%s%c%s", filepath.Dir(gobin), os.PathListSeparator, os.Getenv("PATH"))); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// GOPATH has to be set to some value (not equal to GOROOT) in order for `go env` to work.
|
||||
// See https://github.com/golang/go/issues/43938 for the details.
|
||||
// Specify a name under the system TEMP/TMP directory in order to be platform agnostic.
|
||||
if err := os.Setenv("GOPATH", filepath.Join(os.TempDir(), "nonexist-gopath")); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := os.Setenv("GOROOT", filepath.Dir(filepath.Dir(gobin))); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := os.Setenv("GOCACHE", path.Join(inner.TestTmpDir(), ".gocache")); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Name of the environment variable containing the bazel target path
|
||||
// (//pkg/cmd/foo:bar).
|
||||
const testTargetEnv = "TEST_TARGET"
|
||||
|
||||
// RelativeTestTargetPath returns relative path to the package
|
||||
// of the current test.
|
||||
func RelativeTestTargetPath() string {
|
||||
target := os.Getenv(testTargetEnv)
|
||||
if target == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Drop target name.
|
||||
if last := strings.LastIndex(target, ":"); last > 0 {
|
||||
target = target[:last]
|
||||
}
|
||||
return strings.TrimPrefix(target, "//")
|
||||
}
|
||||
13 build/bazel/bazel_test.go Normal file
@@ -0,0 +1,13 @@
|
||||
package bazel_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/build/bazel"
|
||||
)
|
||||
|
||||
func TestBuildWithBazel(t *testing.T) {
|
||||
if !bazel.BuiltWithBazel() {
|
||||
t.Error("not built with Bazel")
|
||||
}
|
||||
}
|
||||
41 build/bazel/data_path.go Normal file
@@ -0,0 +1,41 @@
|
||||
// Copyright 2020 The Cockroach Authors.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
package bazel
|
||||
|
||||
import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
// TestDataPath returns a path to an asset in the testdata directory. It knows
// to access the right path when executing under bazel.
|
||||
//
|
||||
// For example, if there is a file testdata/a.txt, you can get a path to that
|
||||
// file using TestDataPath(t, "a.txt").
|
||||
func TestDataPath(t testing.TB, relative ...string) string {
|
||||
relative = append([]string{"testdata"}, relative...)
|
||||
// dev notifies the library that the test is running in a subdirectory of the
|
||||
// workspace with the environment variable below.
|
||||
if BuiltWithBazel() {
|
||||
runfiles, err := RunfilesPath()
|
||||
require.NoError(t, err)
|
||||
return path.Join(runfiles, RelativeTestTargetPath(), path.Join(relative...))
|
||||
}
|
||||
|
||||
// Otherwise we're in the package directory and can just return a relative path.
|
||||
ret := path.Join(relative...)
|
||||
ret, err := filepath.Abs(ret)
|
||||
require.NoError(t, err)
|
||||
return ret
|
||||
}
|
||||
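A short usage sketch for TestDataPath above; the fixture name comes from the doc comment and is hypothetical, so the file would need to exist under testdata/ for the test to pass:

package bazel_test

import (
	"os"
	"testing"

	"github.com/prysmaticlabs/prysm/build/bazel"
	"github.com/prysmaticlabs/prysm/testing/require"
)

// TestReadFixture resolves testdata/a.txt the same way under `go test` and under Bazel.
func TestReadFixture(t *testing.T) {
	p := bazel.TestDataPath(t, "a.txt")
	b, err := os.ReadFile(p)
	require.NoError(t, err)
	require.Equal(t, true, len(b) > 0)
}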
57 build/bazel/non_bazel.go Normal file
@@ -0,0 +1,57 @@
|
||||
// Copyright 2015 The Cockroach Authors.
|
||||
//
|
||||
// Use of this software is governed by the Business Source License
|
||||
// included in the file licenses/BSL.txt.
|
||||
//
|
||||
// As of the Change Date specified in that file, in accordance with
|
||||
// the Business Source License, use of this software will be governed
|
||||
// by the Apache License, Version 2.0, included in the file
|
||||
// licenses/APL.txt.
|
||||
|
||||
//go:build !bazel
|
||||
// +build !bazel
|
||||
|
||||
package bazel
|
||||
|
||||
// This file contains stub implementations for non-bazel builds.
|
||||
// See bazel.go for full documentation on the contracts of these functions.
|
||||
|
||||
// BuiltWithBazel returns true iff this library was built with Bazel.
|
||||
func BuiltWithBazel() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// FindBinary is not implemented.
|
||||
func FindBinary(pkg, name string) (string, bool) {
|
||||
panic("not build with Bazel")
|
||||
}
|
||||
|
||||
// Runfile is not implemented.
|
||||
func Runfile(string) (string, error) {
|
||||
panic("not built with Bazel")
|
||||
}
|
||||
|
||||
// RunfilesPath is not implemented.
|
||||
func RunfilesPath() (string, error) {
|
||||
panic("not built with Bazel")
|
||||
}
|
||||
|
||||
// TestTmpDir is not implemented.
|
||||
func TestTmpDir() string {
|
||||
panic("not built with Bazel")
|
||||
}
|
||||
|
||||
// NewTmpDir is not implemented.
|
||||
func NewTmpDir(prefix string) (string, error) {
|
||||
panic("not built with Bazel")
|
||||
}
|
||||
|
||||
// RelativeTestTargetPath is not implemented.
|
||||
func RelativeTestTargetPath() string {
|
||||
panic("not built with Bazel")
|
||||
}
|
||||
|
||||
// SetGoEnv is not implemented.
|
||||
func SetGoEnv() {
|
||||
panic("not built with Bazel")
|
||||
}
|
||||
@@ -181,13 +181,13 @@ func (m *SparseMerkleTrie) ToProto() *protodb.SparseMerkleTrie {
}

// VerifyMerkleProofWithDepth verifies a Merkle branch against a root of a trie.
func VerifyMerkleProofWithDepth(root, item []byte, merkleIndex int, proof [][]byte, depth uint64) bool {
if len(proof) != int(depth)+1 {
func VerifyMerkleProofWithDepth(root, item []byte, merkleIndex uint64, proof [][]byte, depth uint64) bool {
if uint64(len(proof)) != depth+1 {
return false
}
node := bytesutil.ToBytes32(item)
for i := 0; i <= int(depth); i++ {
if (uint64(merkleIndex) / math.PowerOf2(uint64(i)) % 2) != 0 {
for i := uint64(0); i <= depth; i++ {
if (merkleIndex / math.PowerOf2(i) % 2) != 0 {
node = hash.Hash(append(proof[i], node[:]...))
} else {
node = hash.Hash(append(node[:], proof[i]...))
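To make the index walk above concrete, here is a standalone re-implementation of the same rule (bit i of merkleIndex decides whether the proof node is hashed on the left or the right) for a tiny depth-2 tree. It is not the Prysm function itself, which as shown expects depth+1 proof nodes:

package main

import (
	"crypto/sha256"
	"fmt"
)

func h(a, b [32]byte) [32]byte { return sha256.Sum256(append(a[:], b[:]...)) }

func main() {
	// Build a tiny depth-2 tree over four leaves.
	var leaves [4][32]byte
	for i := range leaves {
		leaves[i][0] = byte(i + 1)
	}
	n01, n23 := h(leaves[0], leaves[1]), h(leaves[2], leaves[3])
	root := h(n01, n23)

	// Proof for leaf index 2: sibling leaf 3, then sibling subtree n01.
	merkleIndex := uint64(2)
	proof := [][32]byte{leaves[3], n01}

	// Same walk as VerifyMerkleProofWithDepth: each index bit picks the hash order.
	node := leaves[2]
	for i := uint64(0); i < uint64(len(proof)); i++ {
		if ((merkleIndex >> i) & 1) != 0 {
			node = h(proof[i], node)
		} else {
			node = h(node, proof[i])
		}
	}
	fmt.Println(node == root) // true
}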
48 deps.bzl
@@ -1327,6 +1327,18 @@ def prysm_deps():
|
||||
sum = "h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=",
|
||||
version = "v1.4.2",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_gostaticanalysis_comment",
|
||||
importpath = "github.com/gostaticanalysis/comment",
|
||||
sum = "h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q=",
|
||||
version = "v1.4.2",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_gostaticanalysis_testutil",
|
||||
importpath = "github.com/gostaticanalysis/testutil",
|
||||
sum = "h1:d2/eIbH9XjD1fFwD5SHv8x168fjbQ9PB8hvs8DSEC08=",
|
||||
version = "v0.3.1-0.20210208050101-bfb5c8eec0e4",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_graph_gophers_graphql_go",
|
||||
@@ -1461,8 +1473,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_hashicorp_go_version",
|
||||
importpath = "github.com/hashicorp/go-version",
|
||||
sum = "h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=",
|
||||
version = "v1.2.0",
|
||||
sum = "h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI=",
|
||||
version = "v1.2.1",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
@@ -2822,6 +2834,25 @@ def prysm_deps():
|
||||
sum = "h1:nY8Hti+WKaP0cRsSeQ026wU03QsM762XBeCXBb9NAWI=",
|
||||
version = "v0.2.2",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_otiai10_copy",
|
||||
importpath = "github.com/otiai10/copy",
|
||||
sum = "h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k=",
|
||||
version = "v1.2.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_otiai10_curr",
|
||||
importpath = "github.com/otiai10/curr",
|
||||
sum = "h1:TJIWdbX0B+kpNagQrjgq8bCMrbhiuX73M2XwgtDMoOI=",
|
||||
version = "v1.0.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_otiai10_mint",
|
||||
importpath = "github.com/otiai10/mint",
|
||||
sum = "h1:BCmzIS3n71sGfHB5NMNDB3lHYPz8fWSkCAErHed//qc=",
|
||||
version = "v1.3.1",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_pact_foundation_pact_go",
|
||||
importpath = "github.com/pact-foundation/pact-go",
|
||||
@@ -3464,6 +3495,19 @@ def prysm_deps():
|
||||
sum = "h1:fj5tQ8acgNUr6O8LEplsxDhUIe2573iLkJc+PqnzZTI=",
|
||||
version = "v0.0.0-20191217153810-f85b25db303b",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_tenntenn_modver",
|
||||
importpath = "github.com/tenntenn/modver",
|
||||
sum = "h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA=",
|
||||
version = "v1.0.1",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_tenntenn_text_transform",
|
||||
importpath = "github.com/tenntenn/text/transform",
|
||||
sum = "h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag=",
|
||||
version = "v0.0.0-20200319021203-7eef512accb3",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_thomaso_mirodin_intmath",
|
||||
importpath = "github.com/thomaso-mirodin/intmath",
|
||||
|
||||
@@ -190,7 +190,7 @@ func ToLowInt64(x []byte) int64 {
|
||||
}
|
||||
// Use the first 8 bytes.
|
||||
x = x[:8]
|
||||
return int64(binary.LittleEndian.Uint64(x))
|
||||
return int64(binary.LittleEndian.Uint64(x)) // lint:ignore uintcast -- A negative number might be the expected result.
|
||||
}
|
||||
|
||||
// SafeCopyRootAtIndex takes a copy of an 32-byte slice in a slice of byte slices. Returns error if index out of range.
|
||||
|
||||
@@ -206,7 +206,7 @@ func MerkleizeVector(elements [][32]byte, length uint64) [32]byte {
|
||||
if len(elements) == 0 {
|
||||
return trie.ZeroHashes[depth]
|
||||
}
|
||||
for i := 0; i < int(depth); i++ {
|
||||
for i := uint8(0); i < depth; i++ {
|
||||
layerLen := len(elements)
|
||||
oddNodeLength := layerLen%2 == 1
|
||||
if oddNodeLength {
|
||||
|
||||
1 go.mod
@@ -25,6 +25,7 @@ require (
|
||||
github.com/google/gofuzz v1.2.0
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/gostaticanalysis/comment v1.4.2
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.0.1
|
||||
|
||||
13 go.sum
@@ -465,6 +465,9 @@ github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q=
github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM=
github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 h1:sezaKhEfPFg8W0Enm61B9Gs911H8iesGY5R8NDPtd1M=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/gregjones/httpcache v0.0.0-20170920190843-316c5e0ff04e/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
@@ -500,6 +503,7 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -1032,6 +1036,11 @@ github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTm
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
@@ -1247,6 +1256,8 @@ github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/templexxx/cpufeat v0.0.0-20180724012125-cef66df7f161/go.mod h1:wM7WEvslTq+iOEAMDLSzhVuOt5BRZ05WirO+b09GHQU=
github.com/templexxx/xor v0.0.0-20191217153810-f85b25db303b/go.mod h1:5XA7W9S6mni3h5uvOC75dA3m9CCCaS83lltmc0ukdi4=
github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e h1:cR8/SYRgyQCt5cNCMniB/ZScMkhI9nk8U5C7SbISXjo=
github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e/go.mod h1:Tu4lItkATkonrYuvtVjG0/rhy15qrNGNTjPdaphtZ/8=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
@@ -1431,6 +1442,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
@@ -1700,6 +1712,7 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.8 h1:P1HhGGuLW4aAclzjtmJdf0mJOjVUZUzOTqkAkWL+l6w=
golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=

@@ -9,6 +9,19 @@ import (
    "github.com/thomaso-mirodin/intmath/u64"
)

func init() {
    // The Int function assumes that the operating system is 64 bit. In any case, the Ethereum
    // consensus layer uses 64 bit values almost exclusively, so a 64 bit OS requirement should
    // already be established. This panic is strict fail-fast feedback to alert 32 bit users
    // that they are not supported.
    if stdmath.MaxUint < stdmath.MaxUint64 {
        panic("Prysm is only supported on 64 bit OS")
    }
}

// ErrOverflow occurs when an operation exceeds max or minimum values.
var ErrOverflow = errors.New("integer overflow")

// Common square root values.
var squareRootTable = map[uint64]uint64{
    4: 2,
@@ -121,3 +134,28 @@ func Sub64(a, b uint64) (uint64, error) {
    }
    return res, nil
}

// Int returns the integer value of the uint64 argument. If there is an overflow, then an error is
// returned.
func Int(u uint64) (int, error) {
    if u > stdmath.MaxInt {
        return 0, ErrOverflow
    }
    return int(u), nil // lint:ignore uintcast -- This is the preferred method of casting uint64 to int.
}

// AddInt adds two or more integers and checks for integer overflows.
func AddInt(i ...int) (int, error) {
    var sum int
    for _, ii := range i {
        if ii > 0 && sum > stdmath.MaxInt-ii {
            return 0, ErrOverflow
        } else if ii < 0 && sum < stdmath.MinInt-ii {
            return 0, ErrOverflow
        }

        sum += ii

    }
    return sum, nil
}

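For readers skimming the diff, here is a minimal usage sketch of the new checked helpers. It assumes the package is imported as github.com/prysmaticlabs/prysm/math, which is an inference from the repository layout rather than something shown in this hunk.

package main

import (
    "fmt"
    stdmath "math"

    "github.com/prysmaticlabs/prysm/math" // assumed import path for the helpers above
)

func main() {
    // Checked narrowing: values above MaxInt return ErrOverflow instead of wrapping.
    n, err := math.Int(uint64(42))
    fmt.Println(n, err) // 42 <nil>

    _, err = math.Int(stdmath.MaxUint64)
    fmt.Println(err) // integer overflow

    // Checked addition: overflow and underflow are reported, not silently wrapped around.
    sum, err := math.AddInt(1, 2, 3)
    fmt.Println(sum, err) // 6 <nil>

    _, err = math.AddInt(1, stdmath.MaxInt)
    fmt.Println(err) // integer overflow
}
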
@@ -1,6 +1,7 @@
package math_test

import (
    "fmt"
    stdmath "math"
    "testing"

@@ -366,3 +367,87 @@ func TestMath_Sub64(t *testing.T) {
        }
    }
}

func TestInt(t *testing.T) {
    tests := []struct {
        arg uint64
        want int
        wantErr bool
    }{
        {
            arg: 0,
            want: 0,
        },
        {
            arg: 10000000,
            want: 10000000,
        },
        {
            arg: stdmath.MaxInt64,
            want: stdmath.MaxInt64,
        },
        {
            arg: stdmath.MaxInt64 + 1,
            wantErr: true,
        },
    }
    for _, tt := range tests {
        t.Run(fmt.Sprint(tt.arg), func(t *testing.T) {
            got, err := math.Int(tt.arg)
            if (err != nil) != tt.wantErr {
                t.Errorf("Int() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if got != tt.want {
                t.Errorf("Int() = %v, want %v", got, tt.want)
            }
        })
    }
}

func TestAddInt(t *testing.T) {
    tests := []struct {
        name string
        args []int
        want int
        wantErr bool
    }{
        {
            name: "no overflow",
            args: []int{1, 2, 3, 4, 5},
            want: 15,
        },
        {
            name: "overflow",
            args: []int{1, stdmath.MaxInt},
            wantErr: true,
        },
        {
            name: "underflow",
            args: []int{-1, stdmath.MinInt},
            wantErr: true,
        },
        {
            name: "max int",
            args: []int{1, stdmath.MaxInt - 1},
            want: stdmath.MaxInt,
        },
        {
            name: "min int",
            args: []int{-1, stdmath.MinInt + 1},
            want: stdmath.MinInt,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := math.AddInt(tt.args...)
            if (err != nil) != tt.wantErr {
                t.Errorf("AddInt() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if got != tt.want {
                t.Errorf("AddInt() = %v, want %v", got, tt.want)
            }
        })
    }
}

@@ -163,5 +163,12 @@
      ".*_test\\.go": "Tests are ok",
      "io/file/fileutil.go": "Package which defines the proper rules"
    }
  },
  "uintcast": {
    "exclude_files": {
      "external/.*": "Third party code",
      "rules_go_work-.*": "Third party code",
      ".*_test\\.go": "Tests are ok"
    }
  }
}

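The new nogo entry above registers the uintcast analyzer and exempts third-party code and tests. Broadly, the analyzer exists to discourage unchecked uint-to-int conversions; the sketch below is a hypothetical illustration of that pattern and of the checked alternative, not output from the analyzer itself.

package example

import (
    prysmmath "github.com/prysmaticlabs/prysm/math" // assumed import path, matching the helpers added earlier in this diff
)

// badIndex shows the pattern the analyzer targets: a uint64 above MaxInt
// silently becomes a negative int.
func badIndex(v uint64) int {
    return int(v)
}

// goodIndex uses the checked helper instead, surfacing ErrOverflow on overflow.
func goodIndex(v uint64) (int, error) {
    return prysmmath.Int(v)
}

Where a cast is provably bounded, the rest of this change set instead annotates it with a lint:ignore uintcast comment as an explicit, reviewed escape hatch.
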
@@ -2,6 +2,7 @@ load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    testonly = True,
    srcs = ["bitlistutils.go"],
    importpath = "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation/aggregation/testing",
    visibility = ["//visibility:public"],

@@ -50,7 +50,7 @@ func BitlistsWithMultipleBitSet(t testing.TB, n, length, count uint64) []bitfiel
    lists := make([]bitfield.Bitlist, n)
    for i := uint64(0); i < n; i++ {
        b := bitfield.NewBitlist(length)
        keys := rand.Perm(int(length))
        keys := rand.Perm(int(length)) // lint:ignore uintcast -- This is safe in test code.
        for _, key := range keys[:count] {
            b.SetBitAt(uint64(key), true)
        }
@@ -67,7 +67,7 @@ func Bitlists64WithMultipleBitSet(t testing.TB, n, length, count uint64) []*bitf
    lists := make([]*bitfield.Bitlist64, n)
    for i := uint64(0); i < n; i++ {
        b := bitfield.NewBitlist64(length)
        keys := rand.Perm(int(length))
        keys := rand.Perm(int(length)) // lint:ignore uintcast -- This is safe in test code.
        for _, key := range keys[:count] {
            b.SetBitAt(uint64(key), true)
        }

@@ -27,6 +27,7 @@ func DeterministicallyGenerateKeys(startIndex, numKeys uint64) ([]bls.SecretKey,
        secrets []bls.SecretKey
        publics []bls.PublicKey
    }
    // lint:ignore uintcast -- this is safe because we can reasonably expect that the number of keys is less than max int64.
    results, err := async.Scatter(int(numKeys), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
        secs, pubs, err := deterministicallyGenerateKeys(uint64(offset)+startIndex, uint64(entries))
        return &keys{secrets: secs, publics: pubs}, err

@@ -197,7 +197,7 @@ func (m *Miner) Start(ctx context.Context) error {
    if err != nil {
        return err
    }
    txOpts.Nonce = big.NewInt(int64(nonce))
    txOpts.Nonce = big.NewInt(0).SetUint64(nonce)
    txOpts.Context = ctx
    contractAddr, tx, _, err := contracts.DeployDepositContract(txOpts, web3)
    if err != nil {

@@ -252,7 +252,7 @@ func sendDeposits(web3 *ethclient.Client, keystoreBytes []byte, num, offset int,
    if err != nil {
        return err
    }
    txOps.Nonce = big.NewInt(int64(nonce))
    txOps.Nonce = big.NewInt(0).SetUint64(nonce)

    contract, err := contracts.NewDepositContract(e2e.TestParams.ContractAddress, web3)
    if err != nil {

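The two nonce assignments above drop an unchecked uint64-to-int64 cast in favor of big.Int's SetUint64, which accepts the full uint64 range. A standalone sketch of the difference, using only the standard library:

package main

import (
    "fmt"
    "math"
    "math/big"
)

func main() {
    nonce := uint64(math.MaxUint64)

    // Old pattern: int64(nonce) wraps to -1 for values above MaxInt64,
    // exactly the kind of silent truncation this change set is hunting.
    wrapped := big.NewInt(int64(nonce))

    // New pattern: SetUint64 takes the unsigned value directly, no cast needed.
    exact := new(big.Int).SetUint64(nonce)

    fmt.Println(wrapped) // -1
    fmt.Println(exact)   // 18446744073709551615
}

In practice an Ethereum account nonce never approaches these values, so the change is presumably about removing the flagged cast pattern rather than fixing a live bug.
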
@@ -75,7 +75,7 @@ func DeterministicDepositsAndKeys(numDeposits uint64) ([]*ethpb.Deposit, []bls.S
        }
    }

    depositTrie, _, err := DeterministicDepositTrie(int(numDeposits))
    depositTrie, _, err := DeterministicDepositTrie(int(numDeposits)) // lint:ignore uintcast
    if err != nil {
        return nil, nil, errors.Wrap(err, "failed to create deposit trie")
    }
@@ -126,6 +126,7 @@ func DepositsWithBalance(balances []uint64) ([]*ethpb.Deposit, *trie.SparseMerkl
    // Create the new deposits and add them to the trie.
    for i := uint64(0); i < numDeposits; i++ {
        balance := params.BeaconConfig().MaxEffectiveBalance
        // lint:ignore uintcast -- test code
        if len(balances) == int(numDeposits) {
            balance = balances[i]
        }
@@ -140,12 +141,13 @@ func DepositsWithBalance(balances []uint64) ([]*ethpb.Deposit, *trie.SparseMerkl
            return nil, nil, errors.Wrap(err, "could not tree hash deposit data")
        }

        // lint:ignore uintcast -- test code
        if err = sparseTrie.Insert(hashedDeposit[:], int(i)); err != nil {
            return nil, nil, err
        }
    }

    depositTrie, _, err := DepositTrieSubset(sparseTrie, int(numDeposits))
    depositTrie, _, err := DepositTrieSubset(sparseTrie, int(numDeposits)) // lint:ignore uintcast -- test code
    if err != nil {
        return nil, nil, errors.Wrap(err, "failed to create deposit trie")
    }
@@ -374,6 +376,7 @@ func DeterministicDepositsAndKeysSameValidator(numDeposits uint64) ([]*ethpb.Dep
        }
    }

    // lint:ignore uintcast -- test code
    depositTrie, _, err := DeterministicDepositTrie(int(numDeposits))
    if err != nil {
        return nil, nil, errors.Wrap(err, "failed to create deposit trie")

@@ -258,6 +258,7 @@ func TestDeterministicGenesisState_100Validators(t *testing.T) {
    activeValidators, err := helpers.ActiveValidatorCount(context.Background(), beaconState, 0)
    require.NoError(t, err)

    // lint:ignore uintcast -- test code
    if len(privKeys) != int(validatorCount) {
        t.Fatalf("expected amount of private keys %d to match requested amount of validators %d", len(privKeys), validatorCount)
    }

@@ -20,7 +20,7 @@ const MaxSlotBuffer = uint64(1 << 7)
// value.
func StartTime(genesis uint64, slot types.Slot) time.Time {
    duration := time.Second * time.Duration(slot.Mul(params.BeaconConfig().SecondsPerSlot))
    startTime := time.Unix(int64(genesis), 0).Add(duration)
    startTime := time.Unix(int64(genesis), 0).Add(duration) // lint:ignore uintcast -- Genesis timestamp will not exceed int64 in your lifetime.
    return startTime
}

@@ -153,7 +153,7 @@ func ToTime(genesisTimeSec uint64, slot types.Slot) (time.Time, error) {
    if err != nil {
        return time.Unix(0, 0), fmt.Errorf("slot (%d) is in the far distant future: %w", slot, err)
    }
    return time.Unix(int64(sTime), 0), nil
    return time.Unix(int64(sTime), 0), nil // lint:ignore uintcast -- A timestamp will not exceed int64 in your lifetime.
}

// Since computes the number of time slots that have occurred since the given timestamp.
@@ -165,7 +165,7 @@ func Since(time time.Time) types.Slot {
// provided genesis time.
func CurrentSlot(genesisTimeSec uint64) types.Slot {
    now := prysmTime.Now().Unix()
    genesis := int64(genesisTimeSec)
    genesis := int64(genesisTimeSec) // lint:ignore uintcast -- Genesis timestamp will not exceed int64 in your lifetime.
    if now < genesis {
        return 0
    }

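The suppressions above rest on a bounds argument: a uint64 Unix timestamp only exceeds int64 after roughly 292 billion years. A standalone sketch of the same reasoning with an explicit guard instead of a lint:ignore comment (the helper name is illustrative, not from the Prysm codebase):

package main

import (
    "errors"
    "fmt"
    stdmath "math"
    "time"
)

// unixTime is a hypothetical checked variant of the casts shown above: it
// rejects timestamps that would not fit in int64 rather than suppressing the lint.
func unixTime(sec uint64) (time.Time, error) {
    if sec > stdmath.MaxInt64 {
        return time.Time{}, errors.New("timestamp overflows int64")
    }
    return time.Unix(int64(sec), 0), nil
}

func main() {
    t, err := unixTime(1606824023) // Beacon Chain mainnet genesis time
    fmt.Println(t.UTC(), err)

    _, err = unixTime(stdmath.MaxUint64)
    fmt.Println(err) // timestamp overflows int64
}
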
@@ -1,4 +1,4 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
@@ -12,4 +12,15 @@ go_library(
    ],
)

# gazelle:exclude analyzer_test.go
go_test(
    name = "go_default_test",
    srcs = ["analyzer_test.go"],
    data = glob(["testdata/**"]) + [
        "@go_sdk//:files",
    ],
    embed = [":go_default_library"],
    deps = [
        "//build/bazel:go_default_library",
        "@org_golang_x_tools//go/analysis/analysistest:go_default_library",
    ],
)

@@ -3,9 +3,18 @@ package comparesame
import (
    "testing"

    "github.com/prysmaticlabs/prysm/build/bazel"
    "golang.org/x/tools/go/analysis/analysistest"
)

func TestAnalyzer(t *testing.T) {
    analysistest.Run(t, analysistest.TestData(), Analyzer)
func init() {
    if bazel.BuiltWithBazel() {
        bazel.SetGoEnv()
    }
}

func TestAnalyzer(t *testing.T) {
    testdata := bazel.TestDataPath(t)
    analysistest.TestData = func() string { return testdata }
    analysistest.Run(t, testdata, Analyzer)
}

@@ -1,8 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["compare_len.go"],
    importpath = "github.com/prysmaticlabs/prysm/tools/analyzers/comparesame/testdata",
    visibility = ["//visibility:public"],
)

@@ -1,4 +1,4 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
@@ -12,4 +12,15 @@ go_library(
    ],
)

# gazelle:exclude analyzer_test.go
go_test(
    name = "go_default_test",
    srcs = ["analyzer_test.go"],
    data = glob(["testdata/**"]) + [
        "@go_sdk//:files",
    ],
    embed = [":go_default_library"],
    deps = [
        "//build/bazel:go_default_library",
        "@org_golang_x_tools//go/analysis/analysistest:go_default_library",
    ],
)

Some files were not shown because too many files have changed in this diff.