Compare commits

..

4 Commits

Author SHA1 Message Date
terence tsao  2e99b97146  More  2022-03-11 07:58:42 -08:00
terence tsao  ad8bbd59fc  More  2022-03-11 07:52:48 -08:00
terence tsao  500bee9e4c  Rename  2022-03-11 07:50:08 -08:00
Potuz  71af5ecea3  remove optimistic control flag  2022-03-10 17:35:36 -03:00
265 changed files with 789 additions and 3937 deletions

View File

@@ -217,6 +217,3 @@ build:remote --remote_local_fallback
# Ignore GoStdLib with remote caching
build --modify_execution_info='GoStdlib.*=+no-remote-cache'
# Set bazel gotag
build --define gotags=bazel

View File

@@ -1 +1 @@
5.0.0
4.2.2

View File

@@ -10,26 +10,26 @@
# Prysm specific remote-cache properties.
#build:remote-cache --disk_cache=
build:remote-cache --remote_download_minimal
build:remote-cache --remote_download_toplevel
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --remote_local_fallback
build:remote-cache --experimental_remote_cache_async
build:remote-cache --experimental_remote_merkle_tree_cache
build:remote-cache --experimental_action_cache_store_output_metadata
build:remote-cache --experimental_remote_cache_compression
# Enforce stricter environment rules, which eliminates some non-hermetic
# behavior and therefore improves both the remote cache hit rate and the
# correctness and repeatability of the build.
build:remote-cache --incompatible_strict_action_env=true
build --experimental_use_hermetic_linux_sandbox
# Import workspace options.
import %workspace%/.bazelrc
startup --host_jvm_args=-Xmx4g --host_jvm_args=-Xms2g
startup --host_jvm_args=-Xmx2g --host_jvm_args=-Xms2g
query --repository_cache=/tmp/repositorycache
query --experimental_repository_cache_hardlinks
build --repository_cache=/tmp/repositorycache
build --experimental_repository_cache_hardlinks
build --experimental_strict_action_env
build --disk_cache=/tmp/bazelbuilds
build --experimental_multi_threaded_digest
build --sandbox_tmpfs_path=/tmp
build --verbose_failures
build --announce_rc

View File

@@ -3,7 +3,7 @@ load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
load("@io_bazel_rules_go//go:def.bzl", "nogo")
load("@vaticle_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("@graknlabs_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("@bazel_skylib//rules:common_settings.bzl", "string_setting")
prefix = "github.com/prysmaticlabs/prysm"
@@ -16,8 +16,6 @@ exports_files([
# gazelle:map_kind go_library go_library @prysm//tools/go:def.bzl
# gazelle:map_kind go_test go_test @prysm//tools/go:def.bzl
# gazelle:map_kind go_repository go_repository @prysm//tools/go:def.bzl
# gazelle:build_tags bazel
# gazelle:exclude tools/analyzers/**/testdata/**
gazelle(
name = "gazelle",
prefix = prefix,
@@ -127,7 +125,6 @@ nogo(
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/properpermissions:go_default_library",
"//tools/analyzers/recursivelock:go_default_library",
"//tools/analyzers/uintcast:go_default_library",
] + select({
# nogo checks that fail with coverage enabled.
":coverage_enabled": [],

View File

@@ -117,6 +117,13 @@ http_archive(
urls = ["https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.76/fuzzit_Linux_x86_64.zip"],
)
git_repository(
name = "graknlabs_bazel_distribution",
commit = "962f3a7e56942430c0ec120c24f9e9f2a9c2ce1a",
remote = "https://github.com/graknlabs/bazel-distribution",
shallow_since = "1569509514 +0300",
)
load(
"@io_bazel_rules_docker//repositories:repositories.bzl",
container_repositories = "repositories",
@@ -215,7 +222,7 @@ filegroup(
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)
consensus_spec_version = "v1.1.10"
consensus_spec_version = "v1.1.9"
bls_test_version = "v0.1.1"
@@ -231,7 +238,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "28043009cc2f6fc9804e73c8c1fc2cb27062f1591e6884f3015ae1dd7a276883",
sha256 = "207d9c326ba4fa1f34bab7b6169201c32f2611755db030909a3405873445e0ba",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -247,7 +254,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "bc1a283ca068f310f04d70c4f6a8eaa0b8f7e9318073a8bdc2ee233111b4e339",
sha256 = "a3995b39f412db236b2f1db909f288218da53cb53b9923b71dda9d144d68f40a",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -263,7 +270,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "bbabb482c229ff9d4e2c7b77c992edb452f9d0af7c6d8dd4f922f06a7b101e81",
sha256 = "76cea7a4c8e32d458ad456b54bfbb30bc772481a91954a4cd97e229aa3023b1d",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -278,7 +285,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "408a5524548ad3fcf387f65ac7ec52781d9ee899499720bb12451b48a15818d4",
sha256 = "0fc429684775f943250dce1f9c485ac25e26c6395d7f585c8d1317becec2ace7",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -7,8 +7,6 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
@@ -325,44 +323,13 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
return false, nil
}
return s.IsOptimisticForRoot(ctx, s.head.root)
return s.cfg.ForkChoiceStore.IsOptimistic(ctx, s.head.root)
}
// IsOptimisticForRoot takes the root and slot as arguments instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, root)
if err == nil {
return optimistic, nil
}
if err != protoarray.ErrUnknownNodeRoot && err != doublylinkedtree.ErrNilNode {
return false, err
}
ss, err := s.cfg.BeaconDB.StateSummary(ctx, root)
if err != nil {
return false, err
}
if ss == nil {
return false, errInvalidNilSummary
}
validatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
if err != nil {
return false, err
}
if slots.ToEpoch(ss.Slot) > validatedCheckpoint.Epoch {
return true, nil
}
if slots.ToEpoch(ss.Slot)+1 < validatedCheckpoint.Epoch {
return false, nil
}
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, bytesutil.ToBytes32(validatedCheckpoint.Root))
if err != nil {
return false, err
}
return ss.Slot > lastValidated.Slot, nil
return s.cfg.ForkChoiceStore.IsOptimistic(ctx, root)
}
// SetGenesisTime sets the genesis time of beacon chain.

View File

@@ -16,13 +16,10 @@ func TestHeadSlot_DataRace(t *testing.T) {
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
s.HeadSlot()
<-wait
}
func TestHeadRoot_DataRace(t *testing.T) {
@@ -31,14 +28,11 @@ func TestHeadRoot_DataRace(t *testing.T) {
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{root: [32]byte{'A'}},
}
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err := s.HeadRoot(context.Background())
require.NoError(t, err)
<-wait
}
func TestHeadBlock_DataRace(t *testing.T) {
@@ -47,14 +41,11 @@ func TestHeadBlock_DataRace(t *testing.T) {
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
head: &head{block: wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{})},
}
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err := s.HeadBlock(context.Background())
require.NoError(t, err)
<-wait
}
func TestHeadState_DataRace(t *testing.T) {
@@ -62,12 +53,9 @@ func TestHeadState_DataRace(t *testing.T) {
s := &Service{
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
}
wait := make(chan struct{})
go func() {
defer close(wait)
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
}()
_, err := s.HeadState(context.Background())
require.NoError(t, err)
<-wait
}
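
The hunks above differ by a wait-channel synchronization around saveHead: one side runs the concurrent write in a goroutine that closes a channel, and the test blocks on that channel after its own read. A minimal, self-contained Go sketch of that pattern (the write/read callbacks here are hypothetical stand-ins for saveHead and the Head* accessors):

package main

import "sync/atomic"

// raceExercise shows the wait-channel pattern from the test hunks above: a goroutine
// performs the concurrent operation and closes `wait` when done, while the caller
// performs its own access and then blocks until the goroutine has finished, so both
// accesses are observed by the race detector before the test returns.
func raceExercise(write, read func()) {
    wait := make(chan struct{})
    go func() {
        defer close(wait)
        write()
    }()
    read()
    <-wait
}

func main() {
    var n int64
    raceExercise(
        func() { atomic.AddInt64(&n, 1) },
        func() { _ = atomic.LoadInt64(&n) },
    )
}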

View File

@@ -437,79 +437,3 @@ func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, opt)
}
func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 11
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(optimisticBlock)))
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(validatedBlock)))
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
}
func TestService_IsOptimisticForRoot__DB_DoublyLinkedTree(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
optimisticBlock := util.NewBeaconBlock()
optimisticBlock.Block.Slot = 11
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(optimisticBlock)))
validatedBlock := util.NewBeaconBlock()
validatedBlock.Block.Slot = 9
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(validatedBlock)))
validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
require.NoError(t, err)
require.Equal(t, true, optimistic)
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
require.NoError(t, err)
require.Equal(t, false, validated)
}

View File

@@ -1,4 +1,3 @@
//go:build !develop
// +build !develop
package blockchain

View File

@@ -9,8 +9,4 @@ var (
errNilBestJustifiedInStore = errors.New("nil best justified checkpoint returned from store")
// errNilFinalizedInStore is returned when a nil finalized checkpt is returned from store.
errNilFinalizedInStore = errors.New("nil finalized checkpoint returned from store")
// errInvalidNilSummary is returned when a nil summary is returned from the DB.
errInvalidNilSummary = errors.New("nil summary returned from the DB")
// errNilParentInDB is returned when a nil parent block is returned from the DB.
errNilParentInDB = errors.New("nil parent block in DB")
)

View File

@@ -6,7 +6,6 @@ import (
"time"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
@@ -18,7 +17,7 @@ import (
var log = logrus.WithField("prefix", "blockchain")
// logs state transition related data every slot.
func logStateTransitionData(b block.BeaconBlock) error {
func logStateTransitionData(b block.BeaconBlock) {
log := log.WithField("slot", b.Slot())
if len(b.Body().Attestations()) > 0 {
log = log.WithField("attestations", len(b.Body().Attestations()))
@@ -35,23 +34,13 @@ func logStateTransitionData(b block.BeaconBlock) error {
if len(b.Body().VoluntaryExits()) > 0 {
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
}
if b.Version() == version.Altair || b.Version() == version.Bellatrix {
if b.Version() == version.Altair {
agg, err := b.Body().SyncAggregate()
if err != nil {
return err
if err == nil {
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
if b.Version() == version.Bellatrix {
p, err := b.Body().ExecutionPayload()
if err != nil {
return err
}
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash)))
log = log.WithField("txCount", len(p.Transactions))
}
log.Info("Finished applying state transition")
return nil
}
func logBlockSyncStatus(block block.BeaconBlock, blockRoot [32]byte, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {

View File

@@ -3,7 +3,6 @@ package blockchain
import (
"testing"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
@@ -12,17 +11,6 @@ import (
)
func Test_logStateTransitionData(t *testing.T) {
payloadBlk := &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
SyncAggregate: &ethpb.SyncAggregate{},
ExecutionPayload: &enginev1.ExecutionPayload{
BlockHash: []byte{1, 2, 3},
Transactions: [][]byte{{}, {}},
},
},
}
wrappedPayloadBlk, err := wrapper.WrappedBeaconBlock(payloadBlk)
require.NoError(t, err)
tests := []struct {
name string
b block.BeaconBlock
@@ -67,15 +55,11 @@ func Test_logStateTransitionData(t *testing.T) {
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}}),
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 deposits=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
},
{name: "has payload",
b: wrappedPayloadBlk,
want: "\"Finished applying state transition\" payloadHash=0x010203 prefix=blockchain slot=0 syncBitsCount=0 txCount=2",
},
}
for _, tt := range tests {
hook := logTest.NewGlobal()
t.Run(tt.name, func(t *testing.T) {
require.NoError(t, logStateTransitionData(tt.b))
logStateTransitionData(tt.b)
require.LogsContain(t, hook, tt.want)
})
}

View File

@@ -76,9 +76,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
return nil, errors.Wrap(err, "could not notify forkchoice update from execution engine")
}
}
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, s.headRoot()); err != nil {
return nil, errors.Wrap(err, "could not set block to valid")
}
return payloadID, nil
}
@@ -122,9 +119,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int, hea
// During the transition event, the transition block should be verified for sanity.
if isPreBellatrix(preStateVersion) {
// Handle case where pre-state is Altair but block contains payload.
// To reach here, the block must have contained a valid payload.
return s.validateMergeBlock(ctx, blk)
return nil
}
atTransition, err := blocks.IsMergeTransitionBlockUsingPayloadHeader(header, body)
if err != nil {
@@ -145,38 +140,14 @@ func isPreBellatrix(v int) bool {
//
// Spec pseudocode definition:
// def is_optimistic_candidate_block(opt_store: OptimisticStore, current_slot: Slot, block: BeaconBlock) -> bool:
// if is_execution_block(opt_store.blocks[block.parent_root]):
// return True
//
// justified_root = opt_store.block_states[opt_store.head_block_root].current_justified_checkpoint.root
// if is_execution_block(opt_store.blocks[justified_root]):
// return True
//
// if block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot:
// return True
//
// return False
// justified_root = opt_store.block_states[opt_store.head_block_root].current_justified_checkpoint.root
// justified_is_execution_block = is_execution_block(opt_store.blocks[justified_root])
// block_is_deep = block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot
// return justified_is_execution_block or block_is_deep
func (s *Service) optimisticCandidateBlock(ctx context.Context, blk block.BeaconBlock) (bool, error) {
if blk.Slot()+params.BeaconConfig().SafeSlotsToImportOptimistically <= s.CurrentSlot() {
return true, nil
}
parent, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(blk.ParentRoot()))
if err != nil {
return false, err
}
if parent == nil {
return false, errNilParentInDB
}
parentIsExecutionBlock, err := blocks.ExecutionBlock(parent.Block().Body())
if err != nil {
return false, err
}
if parentIsExecutionBlock {
return true, nil
}
j := s.store.JustifiedCheckpt()
if j == nil {
return false, errNilJustifiedInStore
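
The pseudocode comment above shows two variants of is_optimistic_candidate_block; the shorter one reduces the decision to two conditions. A minimal Go sketch of that reduced rule, with hypothetical parameter names (the real service reads these values from fork choice, the beacon DB, and BeaconConfig):

package main

import "fmt"

// isOptimisticCandidate sketches the reduced rule: a block may be imported
// optimistically if the justified block is already an execution block, or if the
// block is at least SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY slots behind the current slot.
func isOptimisticCandidate(justifiedIsExecutionBlock bool, blockSlot, currentSlot, safeSlots uint64) bool {
    blockIsDeep := blockSlot+safeSlots <= currentSlot
    return justifiedIsExecutionBlock || blockIsDeep
}

func main() {
    // With SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY = 128, a block at slot 100 becomes a
    // candidate at slot 228 even when the justified block carries no payload.
    fmt.Println(isOptimisticCandidate(false, 100, 228, 128)) // true
    fmt.Println(isOptimisticCandidate(false, 100, 200, 128)) // false
}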

View File

@@ -36,6 +36,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, altairBlk))
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
@@ -44,7 +45,6 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, 0, 0))
tests := []struct {
name string
@@ -188,13 +188,6 @@ func Test_NotifyNewPayload(t *testing.T) {
},
},
}
a := &ethpb.SignedBeaconBlockAltair{
Block: &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{},
},
}
altairBlk, err := wrapper.WrappedSignedBeaconBlock(a)
require.NoError(t, err)
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service, err := NewService(ctx, opts...)
@@ -245,29 +238,10 @@ func Test_NotifyNewPayload(t *testing.T) {
errString: "could not validate execution payload from execution engine: payload status is INVALID",
},
{
name: "altair pre state, altair block",
name: "altair pre state",
postState: bellatrixState,
preState: altairState,
blk: altairBlk,
},
{
name: "altair pre state, happy case",
postState: bellatrixState,
preState: altairState,
blk: func() block.SignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
return b
}(),
blk: bellatrixBlk,
},
{
name: "could not get merge block",
@@ -330,29 +304,27 @@ func Test_NotifyNewPayload(t *testing.T) {
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
engine := &mockEngineService{newPayloadError: tt.newPayloadErr, blks: map[[32]byte]*v1.ExecutionBlock{}}
engine.blks[[32]byte{'a'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
TotalDifficulty: "0x2",
}
engine.blks[[32]byte{'b'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
TotalDifficulty: "0x1",
}
service.cfg.ExecutionEngineCaller = engine
var payload *ethpb.ExecutionPayloadHeader
if tt.preState.Version() == version.Bellatrix {
payload, err = tt.preState.LatestExecutionPayloadHeader()
require.NoError(t, err)
}
err := service.notifyNewPayload(ctx, tt.preState.Version(), payload, tt.postState, tt.blk)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
}
})
engine := &mockEngineService{newPayloadError: tt.newPayloadErr, blks: map[[32]byte]*v1.ExecutionBlock{}}
engine.blks[[32]byte{'a'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
TotalDifficulty: "0x2",
}
engine.blks[[32]byte{'b'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
TotalDifficulty: "0x1",
}
service.cfg.ExecutionEngineCaller = engine
var payload *ethpb.ExecutionPayloadHeader
if tt.preState.Version() == version.Bellatrix {
payload, err = tt.preState.LatestExecutionPayloadHeader()
require.NoError(t, err)
}
err := service.notifyNewPayload(ctx, tt.preState.Version(), payload, tt.postState, tt.blk)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
}
}
}
@@ -375,12 +347,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
params.BeaconConfig().SafeSlotsToImportOptimistically = 128
service.genesisTime = time.Now().Add(-time.Second * 12 * 2 * 128)
parentBlk := util.NewBeaconBlockBellatrix()
wrappedParentBlock, err := wrapper.WrappedBellatrixSignedBeaconBlock(parentBlk)
require.NoError(t, err)
parentRoot, err := wrappedParentBlock.Block().HashTreeRoot()
require.NoError(t, err)
tests := []struct {
name string
blk block.BeaconBlock
@@ -392,7 +358,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 1
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
@@ -400,7 +365,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 32
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
@@ -412,7 +376,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockAltair()
blk.Block.Slot = 200
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedAltairBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
@@ -420,7 +383,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockAltair()
blk.Block.Slot = 32
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedAltairSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
@@ -432,7 +394,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 200
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
@@ -440,7 +401,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 32
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
require.NoError(tt, err)
return wr
@@ -452,7 +412,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk: func(tt *testing.T) block.BeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 200
blk.Block.ParentRoot = parentRoot[:]
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
require.NoError(tt, err)
return wr
@@ -460,7 +419,6 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
justified: func(tt *testing.T) block.SignedBeaconBlock {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 32
blk.Block.ParentRoot = parentRoot[:]
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.FeeRecipientLength)
blk.Block.Body.ExecutionPayload.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
@@ -485,59 +443,8 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
Root: jroot[:],
Epoch: slots.ToEpoch(tt.justified.Block().Slot()),
})
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedParentBlock))
candidate, err := service.optimisticCandidateBlock(ctx, tt.blk)
require.NoError(t, err)
require.Equal(t, tt.want, candidate, tt.name)
}
}
func Test_IsOptimisticShallowExecutionParent(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
params.BeaconConfig().SafeSlotsToImportOptimistically = 128
service.genesisTime = time.Now().Add(-time.Second * 12 * 2 * 128)
payload := &v1.ExecutionPayload{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
BlockNumber: 100,
}
body := &ethpb.BeaconBlockBodyBellatrix{ExecutionPayload: payload}
block := &ethpb.BeaconBlockBellatrix{Body: body, Slot: 200}
rawSigned := &ethpb.SignedBeaconBlockBellatrix{Block: block}
blk := util.HydrateSignedBeaconBlockBellatrix(rawSigned)
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wr))
blkRoot, err := wr.Block().HashTreeRoot()
require.NoError(t, err)
childBlock := util.NewBeaconBlockBellatrix()
childBlock.Block.ParentRoot = blkRoot[:]
// shallow block
childBlock.Block.Slot = 201
wrappedChild, err := wrapper.WrappedBellatrixSignedBeaconBlock(childBlock)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedChild))
candidate, err := service.optimisticCandidateBlock(ctx, wrappedChild.Block())
require.NoError(t, err)
require.Equal(t, true, candidate)
}

View File

@@ -111,6 +111,7 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
return errors.Wrap(err, "could not verify new payload")
}
// TODO(10261) Check optimistic status
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
return err
}

View File

@@ -64,9 +64,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block block.SignedBeaconBloc
return err
}
// Log state transition data.
if err := logStateTransitionData(blockCopy.Block()); err != nil {
return err
}
logStateTransitionData(blockCopy.Block())
return nil
}

View File

@@ -127,7 +127,7 @@ func (s *Service) Start() {
saved := s.cfg.FinalizedStateAtStartUp
if saved != nil && !saved.IsNil() {
if err := s.StartFromSavedState(saved); err != nil {
if err := s.startFromSavedState(saved); err != nil {
log.Fatal(err)
}
} else {
@@ -164,9 +164,9 @@ func (s *Service) Status() error {
return nil
}
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
func (s *Service) startFromSavedState(saved state.BeaconState) error {
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0)
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())
originRoot, err := s.originRootFromSavedState(s.ctx)

View File

@@ -290,7 +290,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
require.NoError(t, err)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stategen.New(beaconDB)), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, c.StartFromSavedState(headState))
require.NoError(t, c.startFromSavedState(headState))
headBlk, err := c.HeadBlock(ctx)
require.NoError(t, err)
assert.DeepEqual(t, headBlock, headBlk.Proto(), "Head block incorrect")
@@ -333,7 +333,7 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
require.NoError(t, err)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stategen.New(beaconDB)), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}))
require.NoError(t, err)
require.NoError(t, c.StartFromSavedState(headState))
require.NoError(t, c.startFromSavedState(headState))
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
@@ -393,7 +393,7 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
require.NoError(t, err)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stategen.New(beaconDB)), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, c.StartFromSavedState(headState))
require.NoError(t, c.startFromSavedState(headState))
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")

View File

@@ -1,4 +1,3 @@
//go:build !fuzz
// +build !fuzz
package cache
@@ -18,12 +17,10 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
)
const (
// maxBalanceCacheSize defines the max number of active balances can cache.
maxBalanceCacheSize = int(4)
)
var (
// maxBalanceCacheSize defines the max number of active balances can cache.
maxBalanceCacheSize = uint64(4)
// BalanceCacheMiss tracks the number of balance requests that aren't present in the cache.
balanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "total_effective_balance_cache_miss",
@@ -45,7 +42,7 @@ type BalanceCache struct {
// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.
func NewEffectiveBalanceCache() *BalanceCache {
return &BalanceCache{
cache: lruwrpr.New(maxBalanceCacheSize),
cache: lruwrpr.New(int(maxBalanceCacheSize)),
}
}

View File

@@ -1,4 +1,3 @@
//go:build fuzz
// +build fuzz
package cache

View File

@@ -1,4 +1,3 @@
//go:build !fuzz
// +build !fuzz
package cache
@@ -20,13 +19,11 @@ import (
mathutil "github.com/prysmaticlabs/prysm/math"
)
const (
var (
// maxCommitteesCacheSize defines the max number of shuffled committees on per randao basis can cache.
// Due to reorgs and long finality, it's good to keep the old cache around for quickly switch over.
maxCommitteesCacheSize = int(32)
)
maxCommitteesCacheSize = uint64(32)
var (
// CommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
CommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "committee_cache_miss",
@@ -58,7 +55,7 @@ func committeeKeyFn(obj interface{}) (string, error) {
// NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
func NewCommitteesCache() *CommitteeCache {
return &CommitteeCache{
CommitteeCache: lruwrpr.New(maxCommitteesCacheSize),
CommitteeCache: lruwrpr.New(int(maxCommitteesCacheSize)),
inProgress: make(map[string]bool),
}
}

View File

@@ -1,4 +1,3 @@
//go:build fuzz
// +build fuzz
// This file is used in fuzzer builds to bypass global committee caches.

View File

@@ -33,7 +33,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
require.NoError(t, err)
}
assert.Equal(t, maxCommitteesCacheSize, len(cache.CommitteeCache.Keys()), "Incorrect key size")
assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
}
func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
@@ -50,5 +50,5 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
assert.DeepEqual(t, c.SortedIndices, indices)
}
assert.Equal(t, maxCommitteesCacheSize, len(cache.CommitteeCache.Keys()), "Incorrect key size")
assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
}

View File

@@ -102,7 +102,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
}
k := cache.CommitteeCache.Keys()
assert.Equal(t, maxCommitteesCacheSize, len(k))
assert.Equal(t, maxCommitteesCacheSize, uint64(len(k)))
sort.Slice(k, func(i, j int) bool {
return k[i].(string) < k[j].(string)

View File

@@ -1,4 +1,3 @@
//go:build !fuzz
// +build !fuzz
package cache

View File

@@ -1,4 +1,3 @@
//go:build fuzz
// +build fuzz
// This file is used in fuzzer builds to bypass proposer indices caches.

View File

@@ -27,7 +27,7 @@ var SubnetIDs = newSubnetIDs()
func newSubnetIDs() *subnetIDs {
// Given a node can calculate committee assignments of current epoch and next epoch.
// Max size is set to 2 epoch length.
cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2)) // lint:ignore uintcast -- constant values that would panic on startup if negative.
cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2))
attesterCache := lruwrpr.New(cacheSize)
aggregatorCache := lruwrpr.New(cacheSize)
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))

View File

@@ -1,4 +1,3 @@
//go:build !fuzz
// +build !fuzz
package cache

View File

@@ -1,4 +1,3 @@
//go:build fuzz
// +build fuzz
package cache

View File

@@ -225,7 +225,7 @@ func verifyDeposit(beaconState state.ReadOnlyBeaconState, deposit *ethpb.Deposit
if ok := trie.VerifyMerkleProofWithDepth(
receiptRoot,
leaf[:],
beaconState.Eth1DepositIndex(),
int(beaconState.Eth1DepositIndex()),
deposit.Proof,
params.BeaconConfig().DepositContractTreeDepth,
); !ok {

View File

@@ -17,15 +17,13 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["slot_epoch_test.go"],
embed = [":go_default_library"],
deps = [
":go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/params:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
],

View File

@@ -46,9 +46,9 @@ func NextEpoch(state state.ReadOnlyBeaconState) types.Epoch {
return slots.ToEpoch(state.Slot()) + 1
}
// HigherEqualThanAltairVersionAndEpoch returns if the input state `s` has a higher version number than Altair state and input epoch `e` is higher equal than fork epoch.
func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e types.Epoch) bool {
return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
// AltairCompatible returns if the input state `s` is altair compatible and input epoch `e` is higher equal than fork epoch.
func AltairCompatible(s state.BeaconState, e types.Epoch) bool {
return s.Version() == version.Altair && e >= params.BeaconConfig().AltairForkEpoch
}
// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
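
The two function bodies above differ in the comparison on the state version: == matches Altair states only, while >= also matches later forks such as Bellatrix. A tiny sketch with hypothetical stand-ins for the runtime/version constants:

package main

import "fmt"

// Hypothetical stand-ins for Prysm's runtime/version constants.
const (
    phase0 = iota
    altair
    bellatrix
)

func main() {
    for _, v := range []int{phase0, altair, bellatrix} {
        // "== altair" holds only for Altair; ">= altair" also holds for Bellatrix.
        fmt.Println(v, v == altair, v >= altair)
    }
}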

View File

@@ -1,17 +1,14 @@
package time_test
package time
import (
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
)
@@ -45,7 +42,7 @@ func TestCurrentEpoch_OK(t *testing.T) {
for _, tt := range tests {
state, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
require.NoError(t, err)
assert.Equal(t, tt.epoch, time.CurrentEpoch(state), "ActiveCurrentEpoch(%d)", state.Slot())
assert.Equal(t, tt.epoch, CurrentEpoch(state), "ActiveCurrentEpoch(%d)", state.Slot())
}
}
@@ -61,7 +58,7 @@ func TestPrevEpoch_OK(t *testing.T) {
for _, tt := range tests {
state, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
require.NoError(t, err)
assert.Equal(t, tt.epoch, time.PrevEpoch(state), "ActivePrevEpoch(%d)", state.Slot())
assert.Equal(t, tt.epoch, PrevEpoch(state), "ActivePrevEpoch(%d)", state.Slot())
}
}
@@ -79,7 +76,7 @@ func TestNextEpoch_OK(t *testing.T) {
for _, tt := range tests {
state, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
require.NoError(t, err)
assert.Equal(t, tt.epoch, time.NextEpoch(state), "NextEpoch(%d)", state.Slot())
assert.Equal(t, tt.epoch, NextEpoch(state), "NextEpoch(%d)", state.Slot())
}
}
@@ -111,7 +108,7 @@ func TestCanUpgradeToAltair(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToAltair(tt.slot); got != tt.want {
if got := CanUpgradeToAltair(tt.slot); got != tt.want {
t.Errorf("canUpgradeToAltair() = %v, want %v", got, tt.want)
}
})
@@ -146,7 +143,7 @@ func TestCanUpgradeBellatrix(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToBellatrix(tt.slot); got != tt.want {
if got := CanUpgradeToBellatrix(tt.slot); got != tt.want {
t.Errorf("CanUpgradeToBellatrix() = %v, want %v", got, tt.want)
}
})
@@ -181,85 +178,6 @@ func TestCanProcessEpoch_TrueOnEpochsLastSlot(t *testing.T) {
b := &eth.BeaconState{Slot: tt.slot}
s, err := v1.InitializeFromProto(b)
require.NoError(t, err)
assert.Equal(t, tt.canProcessEpoch, time.CanProcessEpoch(s), "CanProcessEpoch(%d)", tt.slot)
}
}
func TestAltairCompatible(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 1
cfg.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(cfg)
type args struct {
s state.BeaconState
e types.Epoch
}
tests := []struct {
name string
args args
want bool
}{
{
name: "phase0 state",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisState(t, 1)
return st
}(),
},
want: false,
},
{
name: "altair state, altair epoch",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateAltair(t, 1)
return st
}(),
e: params.BeaconConfig().AltairForkEpoch,
},
want: true,
},
{
name: "bellatrix state, bellatrix epoch",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
return st
}(),
e: params.BeaconConfig().BellatrixForkEpoch,
},
want: true,
},
{
name: "bellatrix state, altair epoch",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
return st
}(),
e: params.BeaconConfig().AltairForkEpoch,
},
want: true,
},
{
name: "bellatrix state, phase0 epoch",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
return st
}(),
},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.HigherEqualThanAltairVersionAndEpoch(tt.args.s, tt.args.e); got != tt.want {
t.Errorf("HigherEqualThanAltairVersionAndEpoch() = %v, want %v", got, tt.want)
}
})
assert.Equal(t, tt.canProcessEpoch, CanProcessEpoch(s), "CanProcessEpoch(%d)", tt.slot)
}
}

View File

@@ -92,7 +92,7 @@ func ExecuteStateTransition(
func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessSlot")
defer span.End()
span.AddAttributes(trace.Int64Attribute("slot", int64(state.Slot()))) // lint:ignore uintcast -- This is OK for tracing.
span.AddAttributes(trace.Int64Attribute("slot", int64(state.Slot())))
prevStateRoot, err := state.HashTreeRoot(ctx)
if err != nil {
@@ -190,7 +190,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
if state == nil || state.IsNil() {
return nil, errors.New("nil state")
}
span.AddAttributes(trace.Int64Attribute("slots", int64(slot)-int64(state.Slot()))) // lint:ignore uintcast -- This is OK for tracing.
span.AddAttributes(trace.Int64Attribute("slots", int64(slot)-int64(state.Slot())))
// The block must have a higher slot than parent state.
if state.Slot() >= slot {
@@ -354,7 +354,7 @@ func VerifyOperationLengths(_ context.Context, state state.BeaconState, b block.
func ProcessEpochPrecompute(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessEpochPrecompute")
defer span.End()
span.AddAttributes(trace.Int64Attribute("epoch", int64(time.CurrentEpoch(state)))) // lint:ignore uintcast -- This is OK for tracing.
span.AddAttributes(trace.Int64Attribute("epoch", int64(time.CurrentEpoch(state))))
if state == nil || state.IsNil() {
return nil, errors.New("nil state")

View File

@@ -45,13 +45,12 @@ type ReadOnlyDatabase interface {
HasArchivedPoint(ctx context.Context, slot types.Slot) bool
LastArchivedRoot(ctx context.Context) [32]byte
LastArchivedSlot(ctx context.Context) (types.Slot, error)
LastValidatedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, error)
// Deposit contract related handlers.
DepositContractAddress(ctx context.Context) ([]byte, error)
// Powchain operations.
PowchainData(ctx context.Context) (*ethpb.ETH1ChainData, error)
// Fee recipients operations.
FeeRecipientByValidatorID(ctx context.Context, id types.ValidatorIndex) (common.Address, error)
FeeRecipientByValidatorID(ctx context.Context, id uint64) (common.Address, error)
// origin checkpoint sync support
OriginBlockRoot(ctx context.Context) ([32]byte, error)
}
@@ -75,7 +74,6 @@ type NoHeadAccessDatabase interface {
// Checkpoint operations.
SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
SaveLastValidatedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
// Deposit contract related handlers.
SaveDepositContractAddress(ctx context.Context, addr common.Address) error
// Powchain operations.
@@ -83,7 +81,7 @@ type NoHeadAccessDatabase interface {
// Run any required database migrations.
RunMigrations(ctx context.Context) error
// Fee recipients operations.
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, addrs []common.Address) error
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []uint64, addrs []common.Address) error
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint types.Slot) error
}

View File

@@ -25,7 +25,6 @@ go_library(
"state_summary.go",
"state_summary_cache.go",
"utils.go",
"validated_checkpoint.go",
"wss.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/db/kv",
@@ -92,7 +91,6 @@ go_test(
"state_summary_test.go",
"state_test.go",
"utils_test.go",
"validated_checkpoint_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],

View File

@@ -395,13 +395,13 @@ func (s *Store) HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]
// FeeRecipientByValidatorID returns the fee recipient for a validator id.
// `ErrNotFoundFeeRecipient` is returned if the validator id is not found.
func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id types.ValidatorIndex) (common.Address, error) {
func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id uint64) (common.Address, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.FeeRecipientByValidatorID")
defer span.End()
var addr []byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(feeRecipientBucket)
addr = bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
addr = bkt.Get(bytesutil.Uint64ToBytesBigEndian(id))
if addr == nil {
return errors.Wrapf(ErrNotFoundFeeRecipient, "validator id %d", id)
}
@@ -412,7 +412,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id types.Validato
// SaveFeeRecipientsByValidatorIDs saves the fee recipients for validator ids.
// Error is returned if `ids` and `recipients` are not the same length.
func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, feeRecipients []common.Address) error {
func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []uint64, feeRecipients []common.Address) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveFeeRecipientByValidatorID")
defer span.End()
@@ -423,7 +423,7 @@ func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(feeRecipientBucket)
for i, id := range ids {
if err := bkt.Put(bytesutil.Uint64ToBytesBigEndian(uint64(id)), feeRecipients[i].Bytes()); err != nil {
if err := bkt.Put(bytesutil.Uint64ToBytesBigEndian(id), feeRecipients[i].Bytes()); err != nil {
return err
}
}
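
Both sides of the hunk above key the fee-recipient bucket by the validator ID encoded as 8 big-endian bytes, which makes keys sort numerically under BoltDB's byte-wise ordering. A standard-library-only sketch of that encoding (the helper name here is hypothetical; Prysm's own helper is bytesutil.Uint64ToBytesBigEndian):

package main

import (
    "encoding/binary"
    "fmt"
)

// uint64ToBytesBigEndian encodes v as 8 big-endian bytes, mirroring the bucket
// keying used in the hunk above.
func uint64ToBytesBigEndian(v uint64) []byte {
    b := make([]byte, 8)
    binary.BigEndian.PutUint64(b, v)
    return b
}

func main() {
    fmt.Printf("%x\n", uint64ToBytesBigEndian(3))   // 0000000000000003
    fmt.Printf("%x\n", uint64ToBytesBigEndian(256)) // 0000000000000100
}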

View File

@@ -595,11 +595,11 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
func TestStore_FeeRecipientByValidatorID(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
ids := []types.ValidatorIndex{0, 0, 0}
ids := []uint64{0, 0, 0}
feeRecipients := []common.Address{{}, {}, {}, {}}
require.ErrorContains(t, "validatorIDs and feeRecipients must be the same length", db.SaveFeeRecipientsByValidatorIDs(ctx, ids, feeRecipients))
ids = []types.ValidatorIndex{0, 1, 2}
ids = []uint64{0, 1, 2}
feeRecipients = []common.Address{{'a'}, {'b'}, {'c'}}
require.NoError(t, db.SaveFeeRecipientsByValidatorIDs(ctx, ids, feeRecipients))
f, err := db.FeeRecipientByValidatorID(ctx, 0)

View File

@@ -175,6 +175,7 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
powchainBucket,
stateSummaryBucket,
stateValidatorsBucket,
validatedTips,
// Indices buckets.
attestationHeadBlockRootBucket,
attestationSourceRootIndicesBucket,

View File

@@ -19,6 +19,7 @@ var (
powchainBucket = []byte("powchain")
stateValidatorsBucket = []byte("state-validators")
feeRecipientBucket = []byte("fee-recipient")
validatedTips = []byte("validated-synced-tips")
// Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.
slotsHasObjectBucket = []byte("slots-has-objects")
@@ -38,13 +39,12 @@ var (
blockRootValidatorHashesBucket = []byte("block-root-validator-hashes")
// Specific item keys.
headBlockRootKey = []byte("head-root")
genesisBlockRootKey = []byte("genesis-root")
depositContractAddressKey = []byte("deposit-contract")
justifiedCheckpointKey = []byte("justified-checkpoint")
finalizedCheckpointKey = []byte("finalized-checkpoint")
powchainDataKey = []byte("powchain-data")
lastValidatedCheckpointKey = []byte("last-validated-checkpoint")
headBlockRootKey = []byte("head-root")
genesisBlockRootKey = []byte("genesis-root")
depositContractAddressKey = []byte("deposit-contract")
justifiedCheckpointKey = []byte("justified-checkpoint")
finalizedCheckpointKey = []byte("finalized-checkpoint")
powchainDataKey = []byte("powchain-data")
// Below keys are used to identify objects are to be fork compatible.
// Objects that are only compatible with specific forks should be prefixed with such keys.

View File

@@ -1,49 +0,0 @@
package kv
import (
"context"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
// LastValidatedCheckpoint returns the latest fully validated checkpoint in beacon chain.
func (s *Store) LastValidatedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastValidatedCheckpoint")
defer span.End()
var checkpoint *ethpb.Checkpoint
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(checkpointBucket)
enc := bkt.Get(lastValidatedCheckpointKey)
if enc == nil {
var finErr error
checkpoint, finErr = s.FinalizedCheckpoint(ctx)
return finErr
}
checkpoint = &ethpb.Checkpoint{}
return decode(ctx, enc, checkpoint)
})
return checkpoint, err
}
// SaveLastValidatedCheckpoint saves the last validated checkpoint in beacon chain.
func (s *Store) SaveLastValidatedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastValidatedCheckpoint")
defer span.End()
enc, err := encode(ctx, checkpoint)
if err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
hasStateSummary := s.hasStateSummaryBytes(tx, bytesutil.ToBytes32(checkpoint.Root))
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
if !(hasStateInDB || hasStateSummary) {
return errMissingStateForCheckpoint
}
return bucket.Put(lastValidatedCheckpointKey, enc)
})
}

View File

@@ -1,67 +0,0 @@
package kv
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"google.golang.org/protobuf/proto"
)
func TestStore_LastValidatedCheckpoint_CanSaveRetrieve(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
root := bytesutil.ToBytes32([]byte{'A'})
cp := &ethpb.Checkpoint{
Epoch: 10,
Root: root[:],
}
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveLastValidatedCheckpoint(ctx, cp))
retrieved, err := db.LastValidatedCheckpoint(ctx)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(cp, retrieved), "Wanted %v, received %v", cp, retrieved)
}
func TestStore_LastValidatedCheckpoint_DefaultIsFinalized(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
genesis := bytesutil.ToBytes32([]byte{'G', 'E', 'N', 'E', 'S', 'I', 'S'})
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesis))
blk := util.NewBeaconBlock()
blk.Block.ParentRoot = genesis[:]
blk.Block.Slot = 40
root, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
cp := &ethpb.Checkpoint{
Epoch: 5,
Root: root[:],
}
// a valid chain is required to save finalized checkpoint.
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))
// a state is required to save checkpoint
require.NoError(t, db.SaveState(ctx, st, root))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
retrieved, err := db.LastValidatedCheckpoint(ctx)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(cp, retrieved), "Wanted %v, received %v", cp, retrieved)
}

View File

@@ -2,7 +2,7 @@ package doublylinkedtree
import "errors"
var ErrNilNode = errors.New("invalid nil or unknown node")
var errNilNode = errors.New("invalid nil or unknown node")
var errInvalidBalance = errors.New("invalid node balance")
var errInvalidProposerBoostRoot = errors.New("invalid proposer boost root")
var errUnknownFinalizedRoot = errors.New("unknown finalized root")

View File

@@ -174,7 +174,7 @@ func (f *ForkChoice) IsOptimistic(_ context.Context, root [32]byte) (bool, error
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return false, ErrNilNode
return false, errNilNode
}
return node.optimistic, nil
@@ -190,7 +190,7 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return nil, ErrNilNode
return nil, errNilNode
}
n := node
@@ -202,7 +202,7 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
}
if n == nil {
return nil, ErrNilNode
return nil, errNilNode
}
return n.root[:], nil
@@ -237,7 +237,7 @@ func (f *ForkChoice) updateBalances(newBalances []uint64) error {
if ok && vote.nextRoot != params.BeaconConfig().ZeroHash {
// Protection against nil node
if nextNode == nil {
return ErrNilNode
return errNilNode
}
nextNode.balance += newBalance
}
@@ -246,7 +246,7 @@ func (f *ForkChoice) updateBalances(newBalances []uint64) error {
if ok && vote.currentRoot != params.BeaconConfig().ZeroHash {
// Protection against nil node
if currentNode == nil {
return ErrNilNode
return errNilNode
}
if currentNode.balance < oldBalance {
return errInvalidBalance
@@ -279,7 +279,7 @@ func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams
defer f.store.nodesLock.Unlock()
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return ErrNilNode
return errNilNode
}
return node.setNodeAndParentValidated(ctx)
}

View File

@@ -125,7 +125,7 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
assert.Equal(t, bytesutil.ToBytes32(r), indexToHash(3))
_, err = f.AncestorRoot(ctx, indexToHash(3), 0)
assert.ErrorContains(t, ErrNilNode.Error(), err)
assert.ErrorContains(t, errNilNode.Error(), err)
root, err := f.AncestorRoot(ctx, indexToHash(3), 5)
require.NoError(t, err)

View File

@@ -55,7 +55,7 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
hasViableDescendant := false
for _, child := range n.children {
if child == nil {
return ErrNilNode
return errNilNode
}
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch); err != nil {
return err

View File

@@ -12,7 +12,7 @@ func (s *Store) removeNode(ctx context.Context, root [32]byte) error {
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
return ErrNilNode
return errNilNode
}
if !node.optimistic || node.parent == nil {
return errInvalidOptimisticStatus

View File

@@ -21,7 +21,6 @@ go_library(
deps = [
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -5,7 +5,7 @@ import "errors"
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
var errUnknownJustifiedRoot = errors.New("unknown justified root")
var errInvalidNodeIndex = errors.New("node index is invalid")
var ErrUnknownNodeRoot = errors.New("unknown block root")
var errUnknownNodeRoot = errors.New("unknown block root")
var errInvalidJustifiedIndex = errors.New("justified index is invalid")
var errInvalidBestChildIndex = errors.New("best child index is invalid")
var errInvalidBestDescendantIndex = errors.New("best descendant index is invalid")

View File

@@ -4,7 +4,6 @@ import (
"context"
"github.com/prysmaticlabs/prysm/config/params"
pmath "github.com/prysmaticlabs/prysm/math"
"go.opencensus.io/trace"
)
@@ -47,27 +46,19 @@ func computeDeltas(
if ok {
// Protection against out of bound, the `nextDeltaIndex` which defines
// the block location in the dag can not exceed the total `delta` length.
if nextDeltaIndex >= uint64(len(deltas)) {
if int(nextDeltaIndex) >= len(deltas) {
return nil, nil, errInvalidNodeDelta
}
delta, err := pmath.Int(newBalance)
if err != nil {
return nil, nil, err
}
deltas[nextDeltaIndex] += delta
deltas[nextDeltaIndex] += int(newBalance)
}
currentDeltaIndex, ok := blockIndices[vote.currentRoot]
if ok {
// Protection against out of bound (same as above)
if currentDeltaIndex >= uint64(len(deltas)) {
if int(currentDeltaIndex) >= len(deltas) {
return nil, nil, errInvalidNodeDelta
}
delta, err := pmath.Int(oldBalance)
if err != nil {
return nil, nil, err
}
deltas[currentDeltaIndex] -= delta
deltas[currentDeltaIndex] -= int(oldBalance)
}
}
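
One side of this hunk routes uint64 balances through a checked conversion before using them as signed deltas, while the other casts directly and relies on the index bound check. A minimal sketch of what such a checked conversion helper typically looks like (illustrative only, not the actual pmath implementation), assuming nothing beyond the standard library:

package example

import (
	"errors"
	"math"
)

// toInt converts a uint64 to int, rejecting values that would overflow
// int on the current platform instead of silently wrapping negative.
func toInt(v uint64) (int, error) {
	if v > uint64(math.MaxInt) {
		return 0, errors.New("value overflows int")
	}
	return int(v), nil
}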

View File

@@ -40,7 +40,7 @@ func (f *ForkChoice) IsOptimistic(ctx context.Context, root [32]byte) (bool, err
index, ok := f.store.nodesIndices[root]
if !ok {
f.store.nodesLock.RUnlock()
return false, ErrUnknownNodeRoot
return false, errUnknownNodeRoot
}
node := f.store.nodes[index]
slot := node.slot
@@ -50,7 +50,6 @@ func (f *ForkChoice) IsOptimistic(ctx context.Context, root [32]byte) (bool, err
_, ok = f.syncedTips.validatedTips[root]
if ok {
f.syncedTips.RUnlock()
f.store.nodesLock.RUnlock()
return false, nil
}
f.syncedTips.RUnlock()
@@ -58,13 +57,11 @@ func (f *ForkChoice) IsOptimistic(ctx context.Context, root [32]byte) (bool, err
// If the slot is higher than the max synced tip, it's optimistic
min, max := f.boundarySyncedTips()
if slot > max {
f.store.nodesLock.RUnlock()
return true, nil
}
// If the slot is lower than the min synced tip, it's fully validated
if slot <= min {
f.store.nodesLock.RUnlock()
return false, nil
}
@@ -72,7 +69,6 @@ func (f *ForkChoice) IsOptimistic(ctx context.Context, root [32]byte) (bool, err
// optimistic
childIndex := node.BestChild()
if childIndex == NonExistentNode {
f.store.nodesLock.RUnlock()
return true, nil
}

View File

@@ -147,10 +147,10 @@ func TestOptimistic(t *testing.T) {
// We first test nodes outside the Fork Choice store
_, err := f.IsOptimistic(ctx, root0)
require.ErrorIs(t, ErrUnknownNodeRoot, err)
require.ErrorIs(t, errUnknownNodeRoot, err)
_, err = f.IsOptimistic(ctx, root1)
require.ErrorIs(t, ErrUnknownNodeRoot, err)
require.ErrorIs(t, errUnknownNodeRoot, err)
// We check all nodes in the Fork Choice store.
op, err := f.IsOptimistic(ctx, nodeA.root)
@@ -548,45 +548,3 @@ func TestFindSyncedTip(t *testing.T) {
syncedTips.RUnlock()
}
}
// This is a regression test (10341)
func TestIsOptimistic_DeadLock(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 90, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
tips := map[[32]byte]types.Slot{
[32]byte{'a'}: 100,
[32]byte{'d'}: 102,
}
f.syncedTips.validatedTips = tips
_, err := f.IsOptimistic(ctx, [32]byte{'a'})
require.NoError(t, err)
// Acquire a write lock, this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'e'})
require.NoError(t, err)
// Acquire a write lock, this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'b'})
require.NoError(t, err)
// Acquire a write lock, this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'c'})
require.NoError(t, err)
// Acquire a write lock, this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
}

View File

@@ -9,7 +9,6 @@ import (
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
pmath "github.com/prysmaticlabs/prysm/math"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)
@@ -153,7 +152,10 @@ func (f *ForkChoice) InsertOptimisticBlock(
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.InsertOptimisticBlock")
defer span.End()
return f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch)
if err := f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch); err != nil {
return err
}
return nil
}
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
@@ -440,11 +442,7 @@ func (s *Store) applyWeightChanges(
s.proposerBoostLock.Unlock()
return err
}
iProposerScore, err := pmath.Int(proposerScore)
if err != nil {
return err
}
nodeDelta = nodeDelta + iProposerScore
nodeDelta = nodeDelta + int(proposerScore)
}
s.proposerBoostLock.Unlock()

View File

@@ -1,8 +1,6 @@
package node
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/cmd"
@@ -37,7 +35,7 @@ func configureHistoricalSlasher(cliCtx *cli.Context) {
params.OverrideBeaconConfig(c)
cmdConfig := cmd.Get()
// Allow up to 4096 attestations at a time to be requested from the beacon node.
cmdConfig.MaxRPCPageSize = int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations)) // lint:ignore uintcast -- Page size should not exceed int64 with these constants.
cmdConfig.MaxRPCPageSize = int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations))
cmd.Init(cmdConfig)
log.Warnf(
"Setting %d slots per archive point and %d max RPC page size for historical slasher usage. This requires additional storage",
@@ -107,17 +105,10 @@ func configureInteropConfig(cliCtx *cli.Context) {
}
}
func configureExecutionSetting(cliCtx *cli.Context) error {
if !cliCtx.IsSet(flags.FeeRecipient.Name) {
return nil
func configureExecutionSetting(cliCtx *cli.Context) {
if cliCtx.IsSet(flags.FeeRecipient.Name) {
c := params.BeaconConfig()
c.FeeRecipient = common.HexToAddress(cliCtx.String(flags.FeeRecipient.Name))
params.OverrideBeaconConfig(c)
}
c := params.BeaconConfig()
ha := cliCtx.String(flags.FeeRecipient.Name)
if !common.IsHexAddress(ha) {
return fmt.Errorf("%s is not a valid fee recipient address", ha)
}
c.DefaultFeeRecipient = common.HexToAddress(ha)
params.OverrideBeaconConfig(c)
return nil
}

View File

@@ -93,20 +93,9 @@ func TestConfigureExecutionSetting(t *testing.T) {
set.String(flags.FeeRecipient.Name, "", "")
require.NoError(t, set.Set(flags.FeeRecipient.Name, "0xB"))
cliCtx := cli.NewContext(&app, set, nil)
err := configureExecutionSetting(cliCtx)
require.ErrorContains(t, "0xB is not a valid fee recipient address", err)
require.NoError(t, set.Set(flags.FeeRecipient.Name, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"))
cliCtx = cli.NewContext(&app, set, nil)
err = configureExecutionSetting(cliCtx)
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
require.NoError(t, set.Set(flags.FeeRecipient.Name, "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"))
cliCtx = cli.NewContext(&app, set, nil)
err = configureExecutionSetting(cliCtx)
require.NoError(t, err)
assert.Equal(t, common.HexToAddress("0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
configureExecutionSetting(cliCtx)
assert.Equal(t, common.HexToAddress("0xB"), params.BeaconConfig().FeeRecipient)
}
func TestConfigureNetwork(t *testing.T) {

View File

@@ -120,9 +120,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
configureEth1Config(cliCtx)
configureNetwork(cliCtx)
configureInteropConfig(cliCtx)
if err := configureExecutionSetting(cliCtx); err != nil {
return nil, err
}
configureExecutionSetting(cliCtx)
// Initializes any forks here.
params.BeaconConfig().InitializeForkSchedule()

View File

@@ -104,8 +104,8 @@ func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *
span.AddAttributes(
trace.BoolAttribute("hasPeer", hasPeer),
trace.Int64Attribute("slot", int64(att.Data.Slot)), // lint:ignore uintcast -- It's safe to do this for tracing.
trace.Int64Attribute("subnet", int64(subnet)), // lint:ignore uintcast -- It's safe to do this for tracing.
trace.Int64Attribute("slot", int64(att.Data.Slot)),
trace.Int64Attribute("subnet", int64(subnet)),
)
if !hasPeer {
@@ -160,8 +160,8 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
span.AddAttributes(
trace.BoolAttribute("hasPeer", hasPeer),
trace.Int64Attribute("slot", int64(sMsg.Slot)), // lint:ignore uintcast -- It's safe to do this for tracing.
trace.Int64Attribute("subnet", int64(subnet)), // lint:ignore uintcast -- It's safe to do this for tracing.
trace.Int64Attribute("slot", int64(sMsg.Slot)),
trace.Int64Attribute("subnet", int64(subnet)),
)
if !hasPeer {
@@ -213,9 +213,7 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
if span.IsRecordingEvents() {
id := hash.FastSum64(buf.Bytes())
messageLen := int64(buf.Len())
// lint:ignore uintcast -- It's safe to do this for tracing.
iid := int64(id)
span.AddMessageSendEvent(iid, messageLen /*uncompressed*/, messageLen /*compressed*/)
span.AddMessageSendEvent(int64(id), messageLen /*uncompressed*/, messageLen /*compressed*/)
}
if err := s.PublishToTopic(ctx, topic+s.Encoding().ProtocolSuffix(), buf.Bytes()); err != nil {
err := errors.Wrap(err, "could not publish message")

View File

@@ -14,7 +14,6 @@ go_library(
],
deps = [
"//config/params:go_default_library",
"//math:go_default_library",
"@com_github_ferranbt_fastssz//:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_golang_snappy//:go_default_library",

View File

@@ -3,6 +3,7 @@ package encoder
import (
"fmt"
"io"
"math"
"sync"
fastssz "github.com/ferranbt/fastssz"
@@ -10,7 +11,6 @@ import (
"github.com/golang/snappy"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/math"
)
var _ NetworkEncoding = (*SszNetworkEncoder)(nil)
@@ -145,11 +145,11 @@ func (_ SszNetworkEncoder) ProtocolSuffix() string {
// MaxLength specifies the maximum possible length of an encoded
// chunk of data.
func (_ SszNetworkEncoder) MaxLength(length uint64) (int, error) {
il, err := math.Int(length)
if err != nil {
return 0, errors.Wrap(err, "invalid length provided")
// Defensive check to prevent potential issues when casting to int64.
if length > math.MaxInt64 {
return 0, errors.Errorf("invalid length provided: %d", length)
}
maxLen := snappy.MaxEncodedLen(il)
maxLen := snappy.MaxEncodedLen(int(length))
if maxLen < 0 {
return 0, errors.Errorf("max encoded length is negative: %d", maxLen)
}

View File

@@ -7,7 +7,6 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/math"
"github.com/prysmaticlabs/prysm/network/forks"
)
@@ -73,8 +72,8 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsub_pb.Message) string {
// the topic byte string, and the raw message data: i.e. SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + message.data)[:20].
func postAltairMsgID(pmsg *pubsub_pb.Message, fEpoch types.Epoch) string {
topic := *pmsg.Topic
topicLen := len(topic)
topicLenBytes := bytesutil.Uint64ToBytesLittleEndian(uint64(topicLen)) // topicLen cannot be negative
topicLen := uint64(len(topic))
topicLenBytes := bytesutil.Uint64ToBytesLittleEndian(topicLen)
// beyond Bellatrix epoch, allow 10 MiB gossip data size
gossipPubSubSize := params.BeaconNetworkConfig().GossipMaxSize
@@ -84,25 +83,7 @@ func postAltairMsgID(pmsg *pubsub_pb.Message, fEpoch types.Epoch) string {
decodedData, err := encoder.DecodeSnappy(pmsg.Data, gossipPubSubSize)
if err != nil {
totalLength, err := math.AddInt(
len(params.BeaconNetworkConfig().MessageDomainValidSnappy),
len(topicLenBytes),
topicLen,
len(pmsg.Data),
)
if err != nil {
log.WithError(err).Error("Failed to sum lengths of message domain and topic")
// should never happen
msg := make([]byte, 20)
copy(msg, "invalid")
return string(msg)
}
if uint64(totalLength) > gossipPubSubSize {
// this should never happen
msg := make([]byte, 20)
copy(msg, "invalid")
return string(msg)
}
totalLength := len(params.BeaconNetworkConfig().MessageDomainInvalidSnappy) + len(topicLenBytes) + int(topicLen) + len(pmsg.Data)
combinedData := make([]byte, 0, totalLength)
combinedData = append(combinedData, params.BeaconNetworkConfig().MessageDomainInvalidSnappy[:]...)
combinedData = append(combinedData, topicLenBytes...)
@@ -111,19 +92,7 @@ func postAltairMsgID(pmsg *pubsub_pb.Message, fEpoch types.Epoch) string {
h := hash.Hash(combinedData)
return string(h[:20])
}
totalLength, err := math.AddInt(
len(params.BeaconNetworkConfig().MessageDomainValidSnappy),
len(topicLenBytes),
topicLen,
len(decodedData),
)
if err != nil {
log.WithError(err).Error("Failed to sum lengths of message domain and topic")
// should never happen
msg := make([]byte, 20)
copy(msg, "invalid")
return string(msg)
}
totalLength := len(params.BeaconNetworkConfig().MessageDomainValidSnappy) + len(topicLenBytes) + int(topicLen) + len(decodedData)
combinedData := make([]byte, 0, totalLength)
combinedData = append(combinedData, params.BeaconNetworkConfig().MessageDomainValidSnappy[:]...)
combinedData = append(combinedData, topicLenBytes...)
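
The doc comment above spells out the post-Altair message-id formula. A small, self-contained sketch of that computation, with the 4-byte domain, topic, and payload passed in as plain arguments (the real code takes them from the network config and the pubsub message):

package example

import (
	"crypto/sha256"
	"encoding/binary"
)

// altairMsgID returns SHA256(domain + uint_to_bytes(uint64(len(topic))) + topic + data)[:20].
func altairMsgID(domain [4]byte, topic string, data []byte) []byte {
	topicLen := make([]byte, 8)
	binary.LittleEndian.PutUint64(topicLen, uint64(len(topic)))

	combined := make([]byte, 0, len(domain)+len(topicLen)+len(topic)+len(data))
	combined = append(combined, domain[:]...)
	combined = append(combined, topicLen...)
	combined = append(combined, topic...)
	combined = append(combined, data...)

	h := sha256.Sum256(combined)
	return h[:20]
}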

View File

@@ -2,10 +2,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"log.go",
"status.go",
],
srcs = ["status.go"],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
@@ -14,7 +11,6 @@ go_library(
"//config/features:go_default_library",
"//config/params:go_default_library",
"//crypto/rand:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
"//time:go_default_library",
@@ -26,7 +22,6 @@ go_library(
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -1,5 +0,0 @@
package peers
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "peers")

View File

@@ -40,7 +40,6 @@ import (
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/rand"
pmath "github.com/prysmaticlabs/prysm/math"
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/metadata"
prysmTime "github.com/prysmaticlabs/prysm/time"
@@ -750,12 +749,12 @@ func (p *Status) PeersToPrune() []peer.ID {
return p.deprecatedPeersToPrune()
}
connLimit := p.ConnectedPeerLimit()
inBoundLimit := uint64(p.InboundLimit())
inBoundLimit := p.InboundLimit()
activePeers := p.Active()
numInboundPeers := uint64(len(p.InboundConnected()))
numInboundPeers := len(p.InboundConnected())
// Exit early if we are still below our max
// limit.
if uint64(len(activePeers)) <= connLimit {
if len(activePeers) <= int(connLimit) {
return []peer.ID{}
}
p.store.Lock()
@@ -785,15 +784,10 @@ func (p *Status) PeersToPrune() []peer.ID {
// Determine amount of peers to prune using our
// max connection limit.
amountToPrune, err := pmath.Sub64(uint64(len(activePeers)), connLimit)
if err != nil {
// This should never happen.
log.WithError(err).Error("Failed to determine amount of peers to prune")
return []peer.ID{}
}
amountToPrune := len(activePeers) - int(connLimit)
// Also check for inbound peers above our limit.
excessInbound := uint64(0)
excessInbound := 0
if numInboundPeers > inBoundLimit {
excessInbound = numInboundPeers - inBoundLimit
}
@@ -802,7 +796,7 @@ func (p *Status) PeersToPrune() []peer.ID {
if excessInbound > amountToPrune {
amountToPrune = excessInbound
}
if amountToPrune < uint64(len(peersToPrune)) {
if amountToPrune < len(peersToPrune) {
peersToPrune = peersToPrune[:amountToPrune]
}
ids := make([]peer.ID, 0, len(peersToPrune))
@@ -821,7 +815,7 @@ func (p *Status) deprecatedPeersToPrune() []peer.ID {
numInboundPeers := len(p.InboundConnected())
// Exit early if we are still below our max
// limit.
if uint64(len(activePeers)) <= connLimit {
if len(activePeers) <= int(connLimit) {
return []peer.ID{}
}
p.store.Lock()
@@ -851,23 +845,18 @@ func (p *Status) deprecatedPeersToPrune() []peer.ID {
// Determine amount of peers to prune using our
// max connection limit.
amountToPrune, err := pmath.Sub64(uint64(len(activePeers)), connLimit)
if err != nil {
// This should never happen
log.WithError(err).Error("Failed to determine amount of peers to prune")
return []peer.ID{}
}
amountToPrune := len(activePeers) - int(connLimit)
// Also check for inbound peers above our limit.
excessInbound := uint64(0)
excessInbound := 0
if numInboundPeers > inBoundLimit {
excessInbound = uint64(numInboundPeers - inBoundLimit)
excessInbound = numInboundPeers - inBoundLimit
}
// Prune the largest amount between excess peers and
// excess inbound peers.
if excessInbound > amountToPrune {
amountToPrune = excessInbound
}
if amountToPrune < uint64(len(peersToPrune)) {
if amountToPrune < len(peersToPrune) {
peersToPrune = peersToPrune[:amountToPrune]
}
ids := make([]peer.ID, 0, len(peersToPrune))
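
The pruning logic in both variants reduces to a little arithmetic over the active and inbound peer counts. A hedged sketch of just that calculation, with plain ints standing in for the configured limits:

package example

// peersToPruneCount returns how many peers to prune given the active and
// inbound peer counts and the configured limits: the larger of the overall
// excess and the inbound excess, or zero while still under the limit.
func peersToPruneCount(active, inbound, connLimit, inboundLimit int) int {
	if active <= connLimit {
		return 0
	}
	amount := active - connLimit
	if excess := inbound - inboundLimit; excess > amount {
		amount = excess
	}
	return amount
}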

View File

@@ -40,7 +40,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
defer span.End()
span.AddAttributes(trace.Int64Attribute("index", int64(index))) // lint:ignore uintcast -- It's safe to do this for tracing.
span.AddAttributes(trace.Int64Attribute("index", int64(index)))
if s.dv5Listener == nil {
// return if discovery isn't set
@@ -138,7 +138,7 @@ func (s *Service) hasPeerWithSubnet(topic string) bool {
// In the event peer threshold is lower, we will choose the lower
// threshold.
minPeers := mathutil.Min(1, uint64(flags.Get().MinimumPeersPerSubnet))
return len(s.pubsub.ListPeers(topic+s.Encoding().ProtocolSuffix())) >= int(minPeers) // lint:ignore uintcast -- Min peers can be safely cast to int.
return len(s.pubsub.ListPeers(topic+s.Encoding().ProtocolSuffix())) >= int(minPeers)
}
// Updates the service's discv5 listener record's attestation subnet
@@ -195,7 +195,6 @@ func attSubnets(record *enr.Record) ([]uint64, error) {
if err != nil {
return nil, err
}
// lint:ignore uintcast -- subnet count can be safely cast to int.
if len(bitV) != byteCount(int(attestationSubnetCount)) {
return []uint64{}, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
}
@@ -215,7 +214,6 @@ func syncSubnets(record *enr.Record) ([]uint64, error) {
if err != nil {
return nil, err
}
// lint:ignore uintcast -- subnet count can be safely cast to int.
if len(bitV) != byteCount(int(syncCommsSubnetCount)) {
return []uint64{}, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
}

View File

@@ -122,7 +122,7 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
}
// Initialize a pointer to eth1 chain's history to start our search
// from.
cursorNum := big.NewInt(0).SetUint64(latestBlkHeight)
cursorNum := big.NewInt(int64(latestBlkHeight))
cursorTime := latestBlkTime
numOfBlocks := uint64(0)
@@ -168,9 +168,9 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
return s.retrieveHeaderInfo(ctx, cursorNum.Uint64())
}
if cursorTime > time {
return s.findLessTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
return s.findLessTargetEth1Block(ctx, big.NewInt(int64(estimatedBlk)), time)
}
return s.findMoreTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
return s.findMoreTargetEth1Block(ctx, big.NewInt(int64(estimatedBlk)), time)
}
// Performs a search to find a target eth1 block which is earlier than or equal to the
@@ -214,7 +214,7 @@ func (s *Service) findMoreTargetEth1Block(ctx context.Context, startBlk *big.Int
}
func (s *Service) retrieveHeaderInfo(ctx context.Context, bNum uint64) (*types.HeaderInfo, error) {
bn := big.NewInt(0).SetUint64(bNum)
bn := big.NewInt(int64(bNum))
exists, info, err := s.headerCache.HeaderInfoByHeight(bn)
if err != nil {
return nil, err
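
The call sites in this hunk switch between big.NewInt(int64(x)) and big.NewInt(0).SetUint64(x) when building block-height arguments. A short sketch of the SetUint64 form, which avoids squeezing the value through int64 on the way in (illustrative only):

package example

import "math/big"

// fromUint64 builds a *big.Int directly from a uint64, so heights above
// math.MaxInt64 round-trip correctly instead of becoming negative.
func fromUint64(height uint64) *big.Int {
	return new(big.Int).SetUint64(height)
}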

View File

@@ -109,7 +109,7 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
// This can happen sometimes when we receive the same log twice from the
// ETH1.0 network, and prevents us from updating our trie
// with the same log twice, causing an inconsistent state root.
index := int64(binary.LittleEndian.Uint64(merkleTreeIndex)) // lint:ignore uintcast -- MerkleTreeIndex should not exceed int64 in your lifetime.
index := int64(binary.LittleEndian.Uint64(merkleTreeIndex))
if index <= s.lastReceivedMerkleIndex {
return nil
}
@@ -211,7 +211,7 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
s.chainStartData.Chainstarted = true
s.chainStartData.GenesisBlock = blockNumber.Uint64()
chainStartTime := time.Unix(int64(genesisTime), 0) // lint:ignore uintcast -- Genesis time won't exceed int64 in your lifetime.
chainStartTime := time.Unix(int64(genesisTime), 0)
for i := range s.chainStartData.ChainstartDeposits {
proof, err := s.depositTrie.MerkleProof(i)
@@ -303,8 +303,8 @@ func (s *Service) processPastLogs(ctx context.Context) error {
Addresses: []common.Address{
s.cfg.depositContractAddr,
},
FromBlock: big.NewInt(0).SetUint64(start),
ToBlock: big.NewInt(0).SetUint64(end),
FromBlock: big.NewInt(int64(start)),
ToBlock: big.NewInt(int64(end)),
}
remainingLogs := logCount - uint64(s.lastReceivedMerkleIndex+1)
// only change the end block if the remaining logs are below the required log limit.
@@ -312,7 +312,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
withinLimit := remainingLogs < depositlogRequestLimit
aboveFollowHeight := end >= latestFollowHeight
if withinLimit && aboveFollowHeight {
query.ToBlock = big.NewInt(0).SetUint64(latestFollowHeight)
query.ToBlock = big.NewInt(int64(latestFollowHeight))
end = latestFollowHeight
}
logs, err := s.httpLogger.FilterLogs(ctx, query)
@@ -391,7 +391,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
}
}
if fState != nil && !fState.IsNil() && fState.Eth1DepositIndex() > 0 {
s.cfg.depositCache.PrunePendingDeposits(ctx, int64(fState.Eth1DepositIndex())) // lint:ignore uintcast -- Deposit index should not exceed int64 in your lifetime.
s.cfg.depositCache.PrunePendingDeposits(ctx, int64(fState.Eth1DepositIndex()))
}
return nil
}
@@ -413,11 +413,11 @@ func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
}
for i := s.latestEth1Data.LastRequestedBlock + 1; i <= requestedBlock; i++ {
// Cache eth1 block header here.
_, err := s.BlockHashByHeight(ctx, big.NewInt(0).SetUint64(i))
_, err := s.BlockHashByHeight(ctx, big.NewInt(int64(i)))
if err != nil {
return err
}
err = s.ProcessETH1Block(ctx, big.NewInt(0).SetUint64(i))
err = s.ProcessETH1Block(ctx, big.NewInt(int64(i)))
if err != nil {
return err
}

View File

@@ -613,10 +613,8 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
// accumulates. We finalize them here before we are ready to receive a block.
// Otherwise, the first few blocks will be slower to compute as we will
// hold the lock and be busy finalizing the deposits.
s.cfg.depositCache.InsertFinalizedDeposits(ctx, int64(currIndex)) // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
s.cfg.depositCache.InsertFinalizedDeposits(ctx, int64(currIndex))
// Deposit proofs are only used during state transition and can be safely removed to save space.
// lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
if err = s.cfg.depositCache.PruneProofs(ctx, int64(currIndex)); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
@@ -664,7 +662,7 @@ func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*gethTypes
err := error(nil)
elems = append(elems, gethRPC.BatchElem{
Method: "eth_getBlockByNumber",
Args: []interface{}{hexutil.EncodeBig(big.NewInt(0).SetUint64(i)), false},
Args: []interface{}{hexutil.EncodeBig(big.NewInt(int64(i))), false},
Result: header,
Error: err,
})
@@ -1090,7 +1088,7 @@ func dedupEndpoints(endpoints []string) []string {
func eth1HeadIsBehind(timestamp uint64) bool {
timeout := prysmTime.Now().Add(-eth1Threshold)
// check that web3 client is syncing
return time.Unix(int64(timestamp), 0).Before(timeout) // lint:ignore uintcast -- timestamp will not exceed int64 in your lifetime.
return time.Unix(int64(timestamp), 0).Before(timeout)
}
func (s *Service) primaryConnected() bool {

View File

@@ -181,14 +181,9 @@ type altairPublishBlockRequestJson struct {
Signature string `json:"signature" hex:"true"`
}
type bellatrixPublishBlockRequestJson struct {
BellatrixBlock *beaconBlockBellatrixJson `json:"bellatrix_block"`
Signature string `json:"signature" hex:"true"`
}
// setInitialPublishBlockPostRequest is triggered before we deserialize the request JSON into a struct.
// We don't know which version of the block got posted, but we can determine it from the slot.
// We know that blocks of all versions have a Message field with a Slot field,
// We know that both Phase 0 and Altair blocks have a Message field with a Slot field,
// so we deserialize the request into a struct s, which has the right fields, to obtain the slot.
// Once we know the slot, we can determine what the PostRequest field of the endpoint should be, and we set it appropriately.
func setInitialPublishBlockPostRequest(endpoint *apimiddleware.Endpoint,
@@ -212,20 +207,17 @@ func setInitialPublishBlockPostRequest(endpoint *apimiddleware.Endpoint,
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "slot is not an unsigned integer")
}
currentEpoch := slots.ToEpoch(types.Slot(slot))
if currentEpoch < params.BeaconConfig().AltairForkEpoch {
if slots.ToEpoch(types.Slot(slot)) < params.BeaconConfig().AltairForkEpoch {
endpoint.PostRequest = &signedBeaconBlockContainerJson{}
} else if currentEpoch < params.BeaconConfig().BellatrixForkEpoch {
endpoint.PostRequest = &signedBeaconBlockAltairContainerJson{}
} else {
endpoint.PostRequest = &signedBeaconBlockBellatrixContainerJson{}
endpoint.PostRequest = &signedBeaconBlockAltairContainerJson{}
}
req.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
return true, nil
}
// In preparePublishedBlock we transform the PostRequest.
// gRPC expects an XXX_block field in the JSON object, but we have a message field at this point.
// gRPC expects either a phase0_block or an altair_block field in the JSON object, but we have a message field at this point.
// We do a simple conversion depending on the type of endpoint.PostRequest
// (which was filled out previously in setInitialPublishBlockPostRequest).
func preparePublishedBlock(endpoint *apimiddleware.Endpoint, _ http.ResponseWriter, _ *http.Request) apimiddleware.ErrorJson {
@@ -247,15 +239,6 @@ func preparePublishedBlock(endpoint *apimiddleware.Endpoint, _ http.ResponseWrit
endpoint.PostRequest = actualPostReq
return nil
}
if block, ok := endpoint.PostRequest.(*signedBeaconBlockBellatrixContainerJson); ok {
// Prepare post request that can be properly decoded on gRPC side.
actualPostReq := &bellatrixPublishBlockRequestJson{
BellatrixBlock: block.Message,
Signature: block.Signature,
}
endpoint.PostRequest = actualPostReq
return nil
}
return apimiddleware.InternalServerError(errors.New("unsupported block type"))
}
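
Both versions of setInitialPublishBlockPostRequest pick the container type by converting the slot to an epoch and comparing it against the fork epochs. A minimal sketch of that selection, with hypothetical numeric fork epochs standing in for the real BeaconConfig values:

package example

// toEpoch converts a slot to its epoch; slotsPerEpoch stands in for the
// real config value.
func toEpoch(slot, slotsPerEpoch uint64) uint64 {
	return slot / slotsPerEpoch
}

// blockVersion picks which block container to decode for a given epoch,
// using hypothetical Altair and Bellatrix fork epochs.
func blockVersion(epoch, altairFork, bellatrixFork uint64) string {
	switch {
	case epoch < altairFork:
		return "phase0"
	case epoch < bellatrixFork:
		return "altair"
	default:
		return "bellatrix"
	}
}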

View File

@@ -363,11 +363,6 @@ func TestWrapSignedContributionAndProofsArray(t *testing.T) {
}
func TestSetInitialPublishBlockPostRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = params.BeaconConfig().AltairForkEpoch + 1
params.OverrideBeaconConfig(cfg)
endpoint := &apimiddleware.Endpoint{}
s := struct {
Message struct {
@@ -402,21 +397,6 @@ func TestSetInitialPublishBlockPostRequest(t *testing.T) {
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
assert.Equal(t, reflect.TypeOf(signedBeaconBlockAltairContainerJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
})
t.Run("Bellatrix", func(t *testing.T) {
slot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
require.NoError(t, err)
s.Message = struct{ Slot string }{Slot: strconv.FormatUint(uint64(slot), 10)}
j, err := json.Marshal(s)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(j)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := setInitialPublishBlockPostRequest(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
assert.Equal(t, reflect.TypeOf(signedBeaconBlockBellatrixContainerJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
})
}
func TestPreparePublishedBlock(t *testing.T) {
@@ -448,20 +428,6 @@ func TestPreparePublishedBlock(t *testing.T) {
assert.Equal(t, true, ok)
})
t.Run("Bellatrix", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &signedBeaconBlockBellatrixContainerJson{
Message: &beaconBlockBellatrixJson{
Body: &beaconBlockBodyBellatrixJson{},
},
},
}
errJson := preparePublishedBlock(endpoint, nil, nil)
require.Equal(t, true, errJson == nil)
_, ok := endpoint.PostRequest.(*bellatrixPublishBlockRequestJson)
assert.Equal(t, true, ok)
})
t.Run("unsupported block type", func(t *testing.T) {
errJson := preparePublishedBlock(&apimiddleware.Endpoint{}, nil, nil)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block type"))
@@ -505,7 +471,8 @@ func TestSerializeV2Block(t *testing.T) {
StateRoot: "root",
Body: &beaconBlockBodyJson{},
},
Signature: "sig",
AltairBlock: nil,
Signature: "sig",
},
}
runDefault, j, errJson := serializeV2Block(response)
@@ -528,6 +495,7 @@ func TestSerializeV2Block(t *testing.T) {
response := &blockV2ResponseJson{
Version: ethpbv2.Version_ALTAIR.String(),
Data: &signedBeaconBlockContainerV2Json{
Phase0Block: nil,
AltairBlock: &beaconBlockAltairJson{
Slot: "1",
ProposerIndex: "1",
@@ -554,36 +522,6 @@ func TestSerializeV2Block(t *testing.T) {
require.NotNil(t, beaconBlock.Body)
})
t.Run("Bellatrix", func(t *testing.T) {
response := &blockV2ResponseJson{
Version: ethpbv2.Version_BELLATRIX.String(),
Data: &signedBeaconBlockContainerV2Json{
BellatrixBlock: &beaconBlockBellatrixJson{
Slot: "1",
ProposerIndex: "1",
ParentRoot: "root",
StateRoot: "root",
Body: &beaconBlockBodyBellatrixJson{},
},
Signature: "sig",
},
}
runDefault, j, errJson := serializeV2Block(response)
require.Equal(t, nil, errJson)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.NotNil(t, j)
resp := &bellatrixBlockResponseJson{}
require.NoError(t, json.Unmarshal(j, resp))
require.NotNil(t, resp.Data)
require.NotNil(t, resp.Data.Message)
beaconBlock := resp.Data.Message
assert.Equal(t, "1", beaconBlock.Slot)
assert.Equal(t, "1", beaconBlock.ProposerIndex)
assert.Equal(t, "root", beaconBlock.ParentRoot)
assert.Equal(t, "root", beaconBlock.StateRoot)
require.NotNil(t, beaconBlock.Body)
})
t.Run("incorrect response type", func(t *testing.T) {
response := &types.Empty{}
runDefault, j, errJson := serializeV2Block(response)

View File

@@ -100,7 +100,7 @@ func TestGetSpec(t *testing.T) {
config.TerminalBlockHash = common.HexToHash("TerminalBlockHash")
config.TerminalBlockHashActivationEpoch = 72
config.TerminalTotalDifficulty = "73"
config.DefaultFeeRecipient = common.HexToAddress("DefaultFeeRecipient")
config.FeeRecipient = common.HexToAddress("FeeRecipient")
var dbp [4]byte
copy(dbp[:], []byte{'0', '0', '0', '1'})
@@ -329,8 +329,8 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, common.HexToHash("TerminalBlockHash"), common.HexToHash(v))
case "TERMINAL_TOTAL_DIFFICULTY":
assert.Equal(t, "73", v)
case "DefaultFeeRecipient":
assert.Equal(t, common.HexToAddress("DefaultFeeRecipient"), v)
case "FeeRecipient":
assert.Equal(t, common.HexToAddress("FeeRecipient"), v)
case "PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":
assert.Equal(t, "3", v)
case "MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":

View File

@@ -61,7 +61,6 @@ go_test(
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//beacon-chain/powchain/engine-api-client/v1/mocks:go_default_library",
"//beacon-chain/powchain/testing:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
@@ -72,7 +71,6 @@ go_test(
"//config/params:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/migration:go_default_library",

View File

@@ -252,11 +252,6 @@ func (vs *Server) ProduceBlock(ctx context.Context, req *ethpbv1.ProduceBlockReq
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlock")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
block, err := vs.v1BeaconBlock(ctx, req)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block: %v", err)
@@ -268,9 +263,18 @@ func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockR
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV2")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
epoch := slots.ToEpoch(req.Slot)
if epoch < params.BeaconConfig().AltairForkEpoch {
block, err := vs.v1BeaconBlock(ctx, req)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlockResponseV2{
Version: ethpbv2.Version_PHASE0,
Data: &ethpbv2.BeaconBlockContainerV2{
Block: &ethpbv2.BeaconBlockContainerV2_Phase0Block{Phase0Block: block},
},
}, nil
}
v1alpha1req := &ethpbalpha.BlockRequest{
@@ -283,46 +287,20 @@ func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockR
// We simply return err because it's already of a gRPC error type.
return nil, err
}
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
if ok {
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlockResponseV2{
Version: ethpbv2.Version_PHASE0,
Data: &ethpbv2.BeaconBlockContainerV2{
Block: &ethpbv2.BeaconBlockContainerV2_Phase0Block{Phase0Block: block},
},
}, nil
}
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
if ok {
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlockResponseV2{
Version: ethpbv2.Version_ALTAIR,
Data: &ethpbv2.BeaconBlockContainerV2{
Block: &ethpbv2.BeaconBlockContainerV2_AltairBlock{AltairBlock: block},
},
}, nil
if !ok {
return nil, status.Errorf(codes.Internal, "Could not get Altair block: %v", err)
}
bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
if ok {
block, err := migration.V1Alpha1BeaconBlockBellatrixToV2(bellatrixBlock.Bellatrix)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlockResponseV2{
Version: ethpbv2.Version_BELLATRIX,
Data: &ethpbv2.BeaconBlockContainerV2{
Block: &ethpbv2.BeaconBlockContainerV2_BellatrixBlock{BellatrixBlock: block},
},
}, nil
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
return &ethpbv2.ProduceBlockResponseV2{
Version: ethpbv2.Version_ALTAIR,
Data: &ethpbv2.BeaconBlockContainerV2{
Block: &ethpbv2.BeaconBlockContainerV2_AltairBlock{AltairBlock: block},
},
}, nil
}
// PrepareBeaconProposer --

View File

@@ -24,7 +24,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
p2pmock "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
p2pType "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1/mocks"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
v1alpha1validator "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/validator"
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/testutil"
@@ -35,7 +34,6 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
"github.com/prysmaticlabs/prysm/proto/migration"
@@ -612,7 +610,6 @@ func TestProduceBlock(t *testing.T) {
}
v1Server := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: false},
V1Alpha1Server: v1Alpha1Server,
}
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
@@ -644,17 +641,6 @@ func TestProduceBlock(t *testing.T) {
assert.DeepEqual(t, expectedAttSlashings, resp.Data.Body.AttesterSlashings)
}
func TestProduceBlock_SyncNotReady(t *testing.T) {
chainService := &mockChain.ChainService{}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
}
_, err := vs.ProduceBlock(context.Background(), &ethpbv1.ProduceBlockRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}
func TestProduceBlockV2(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
db := dbutil.SetupDB(t)
@@ -715,7 +701,6 @@ func TestProduceBlockV2(t *testing.T) {
v1Server := &Server{
V1Alpha1Server: v1Alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
require.NoError(t, err)
@@ -849,7 +834,6 @@ func TestProduceBlockV2(t *testing.T) {
v1Server := &Server{
V1Alpha1Server: v1Alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
require.NoError(t, err)
@@ -890,166 +874,6 @@ func TestProduceBlockV2(t *testing.T) {
assert.DeepEqual(t, expectedBits, blk.Body.SyncAggregate.SyncCommitteeBits)
assert.DeepEqual(t, aggregatedSig, blk.Body.SyncAggregate.SyncCommitteeSignature)
})
t.Run("Bellatrix", func(t *testing.T) {
db := dbutil.SetupDB(t)
ctx := context.Background()
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc.AltairForkEpoch = types.Epoch(0)
bc.BellatrixForkEpoch = types.Epoch(1)
params.OverrideBeaconConfig(bc)
beaconState, privKeys := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().SyncCommitteeSize)
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch))
syncCommittee, err := altair.NextSyncCommittee(context.Background(), beaconState)
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, beaconState.SetNextSyncCommittee(syncCommittee))
stateRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
genesisBlock := util.NewBeaconBlockBellatrix()
genesisBlock.Block.StateRoot = stateRoot[:]
wrappedBellatrixBlock, err := wrapper.WrappedBellatrixSignedBeaconBlock(genesisBlock)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wrappedBellatrixBlock))
parentRoot, err := genesisBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
v1Alpha1Server := &v1alpha1validator.Server{
ExecutionEngineCaller: &mocks.EngineClient{
ExecutionBlock: &enginev1.ExecutionBlock{
TotalDifficulty: "0x1",
},
},
TimeFetcher: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
ChainStartFetcher: &mockPOW.POWChain{},
Eth1InfoFetcher: &mockPOW.POWChain{},
Eth1BlockFetcher: &mockPOW.POWChain{},
MockEth1Votes: true,
AttPool: attestations.NewPool(),
SlashingsPool: slashings.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateGen: stategen.New(db),
SyncCommitteePool: synccommittee.NewStore(),
}
proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxProposerSlashings; i++ {
proposerSlashing, err := util.GenerateProposerSlashingForValidator(
beaconState,
privKeys[i],
i,
)
require.NoError(t, err)
proposerSlashings[i] = proposerSlashing
err = v1Alpha1Server.SlashingsPool.InsertProposerSlashing(context.Background(), beaconState, proposerSlashing)
require.NoError(t, err)
}
attSlashings := make([]*ethpbalpha.AttesterSlashing, params.BeaconConfig().MaxAttesterSlashings)
for i := uint64(0); i < params.BeaconConfig().MaxAttesterSlashings; i++ {
attesterSlashing, err := util.GenerateAttesterSlashingForValidator(
beaconState,
privKeys[i+params.BeaconConfig().MaxProposerSlashings],
types.ValidatorIndex(i+params.BeaconConfig().MaxProposerSlashings), /* validator index */
)
require.NoError(t, err)
attSlashings[i] = attesterSlashing
err = v1Alpha1Server.SlashingsPool.InsertAttesterSlashing(context.Background(), beaconState, attesterSlashing)
require.NoError(t, err)
}
aggregationBits := bitfield.NewBitvector128()
for i := range aggregationBits {
aggregationBits[i] = 0xAA
}
syncCommitteeIndices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
require.NoError(t, err)
sigs := make([]bls.Signature, 0, len(syncCommitteeIndices))
for i, indice := range syncCommitteeIndices {
if aggregationBits.BitAt(uint64(i)) {
b := p2pType.SSZBytes(parentRoot[:])
sb, err := signing.ComputeDomainAndSign(beaconState, coreTime.CurrentEpoch(beaconState), &b, params.BeaconConfig().DomainSyncCommittee, privKeys[indice])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
sigs = append(sigs, sig)
}
}
aggregatedSig := bls.AggregateSignatures(sigs).Marshal()
contribution := &ethpbalpha.SyncCommitteeContribution{
Slot: params.BeaconConfig().SlotsPerEpoch,
BlockRoot: parentRoot[:],
SubcommitteeIndex: 0,
AggregationBits: aggregationBits,
Signature: aggregatedSig,
}
require.NoError(t, v1Alpha1Server.SyncCommitteePool.SaveSyncCommitteeContribution(contribution))
v1Server := &Server{
V1Alpha1Server: v1Alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
randaoReveal, err := util.RandaoReveal(beaconState, 1, privKeys)
require.NoError(t, err)
graffiti := bytesutil.ToBytes32([]byte("eth2"))
req := &ethpbv1.ProduceBlockRequest{
Slot: params.BeaconConfig().SlotsPerEpoch + 1,
RandaoReveal: randaoReveal,
Graffiti: graffiti[:],
}
resp, err := v1Server.ProduceBlockV2(ctx, req)
require.NoError(t, err)
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
containerBlock, ok := resp.Data.Block.(*ethpbv2.BeaconBlockContainerV2_BellatrixBlock)
require.Equal(t, true, ok)
blk := containerBlock.BellatrixBlock
assert.Equal(t, req.Slot, blk.Slot, "Expected block to have slot of 1")
assert.DeepEqual(t, parentRoot[:], blk.ParentRoot, "Expected block to have correct parent root")
assert.DeepEqual(t, randaoReveal, blk.Body.RandaoReveal, "Expected block to have correct randao reveal")
assert.DeepEqual(t, req.Graffiti, blk.Body.Graffiti, "Expected block to have correct graffiti")
assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(blk.Body.ProposerSlashings)))
expectedPropSlashings := make([]*ethpbv1.ProposerSlashing, len(proposerSlashings))
for i, slash := range proposerSlashings {
expectedPropSlashings[i] = migration.V1Alpha1ProposerSlashingToV1(slash)
}
assert.DeepEqual(t, expectedPropSlashings, blk.Body.ProposerSlashings)
assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(blk.Body.AttesterSlashings)))
expectedAttSlashings := make([]*ethpbv1.AttesterSlashing, len(attSlashings))
for i, slash := range attSlashings {
expectedAttSlashings[i] = migration.V1Alpha1AttSlashingToV1(slash)
}
assert.DeepEqual(t, expectedAttSlashings, blk.Body.AttesterSlashings)
expectedBits := bitfield.NewBitvector512()
for i := 0; i <= 15; i++ {
expectedBits[i] = 0xAA
}
assert.DeepEqual(t, expectedBits, blk.Body.SyncAggregate.SyncCommitteeBits)
assert.DeepEqual(t, aggregatedSig, blk.Body.SyncAggregate.SyncCommitteeSignature)
})
}
func TestProduceBlockV2_SyncNotReady(t *testing.T) {
chainService := &mockChain.ChainService{}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
}
_, err := vs.ProduceBlockV2(context.Background(), &ethpbv1.ProduceBlockRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}
func TestProduceAttestationData(t *testing.T) {

View File

@@ -545,7 +545,7 @@ func (is *infostream) depositQueueTimestamp(eth1BlockNumber *big.Int) (uint64, e
is.eth1BlocktimesMutex.Unlock()
followTime := time.Duration(params.BeaconConfig().Eth1FollowDistance*params.BeaconConfig().SecondsPerETH1Block) * time.Second
eth1UnixTime := time.Unix(int64(blockTimestamp), 0).Add(followTime) // lint:ignore uintcast -- timestamp will not exceed int64 in your lifetime
eth1UnixTime := time.Unix(int64(blockTimestamp), 0).Add(followTime)
period := uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
votingPeriod := time.Duration(period*params.BeaconConfig().SecondsPerSlot) * time.Second

View File

@@ -42,7 +42,6 @@ go_library(
"//beacon-chain/core/transition/interop:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
@@ -118,7 +117,6 @@ go_test(
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/execution:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",

View File

@@ -187,7 +187,7 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
}
// Are the validators in current or next epoch sync committee.
if ok && coreTime.HigherEqualThanAltairVersionAndEpoch(s, req.Epoch) {
if ok && coreTime.AltairCompatible(s, req.Epoch) {
assignment.IsSyncCommittee, err = helpers.IsCurrentPeriodSyncCommittee(s, idx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine current epoch sync committee: %v", err)

View File

@@ -12,7 +12,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/beacon-chain/core/execution"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -201,111 +200,6 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) {
}
}
func TestGetBellatrixDuties_SyncCommitteeOK(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.MainnetConfig()
cfg.AltairForkEpoch = types.Epoch(0)
cfg.BellatrixForkEpoch = types.Epoch(1)
params.OverrideBeaconConfig(cfg)
genesis := util.NewBeaconBlock()
deposits, _, err := util.DeterministicDepositsAndKeys(params.BeaconConfig().SyncCommitteeSize)
require.NoError(t, err)
eth1Data, err := util.DeterministicEth1Data(len(deposits))
require.NoError(t, err)
bs, err := util.GenesisBeaconState(context.Background(), deposits, 0, eth1Data)
h := &ethpb.BeaconBlockHeader{
StateRoot: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
ParentRoot: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
BodyRoot: bytesutil.PadTo([]byte{'c'}, fieldparams.RootLength),
}
require.NoError(t, bs.SetLatestBlockHeader(h))
require.NoError(t, err, "Could not setup genesis bs")
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
syncCommittee, err := altair.NextSyncCommittee(context.Background(), bs)
require.NoError(t, err)
require.NoError(t, bs.SetCurrentSyncCommittee(syncCommittee))
pubKeys := make([][]byte, len(deposits))
indices := make([]uint64, len(deposits))
for i := 0; i < len(deposits); i++ {
pubKeys[i] = deposits[i].Data.PublicKey
indices[i] = uint64(i)
}
require.NoError(t, bs.SetSlot(params.BeaconConfig().SlotsPerEpoch*types.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)-1))
require.NoError(t, helpers.UpdateSyncCommitteeCache(bs))
bs, err = execution.UpgradeToBellatrix(context.Background(), bs)
require.NoError(t, err)
pubkeysAs48ByteType := make([][fieldparams.BLSPubkeyLength]byte, len(pubKeys))
for i, pk := range pubKeys {
pubkeysAs48ByteType[i] = bytesutil.ToBytes48(pk)
}
slot := uint64(params.BeaconConfig().SlotsPerEpoch) * uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod) * params.BeaconConfig().SecondsPerSlot
chain := &mockChain.ChainService{
State: bs, Root: genesisRoot[:], Genesis: time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second),
}
vs := &Server{
HeadFetcher: chain,
TimeFetcher: chain,
Eth1InfoFetcher: &mockPOW.POWChain{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
// Test the first validator in registry.
req := &ethpb.DutiesRequest{
PublicKeys: [][]byte{deposits[0].Data.PublicKey},
}
res, err := vs.GetDuties(context.Background(), req)
require.NoError(t, err, "Could not call epoch committee assignment")
if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Assigned slot %d can't be higher than %d",
res.CurrentEpochDuties[0].AttesterSlot, bs.Slot()+params.BeaconConfig().SlotsPerEpoch)
}
// Test the last validator in registry.
lastValidatorIndex := params.BeaconConfig().SyncCommitteeSize - 1
req = &ethpb.DutiesRequest{
PublicKeys: [][]byte{deposits[lastValidatorIndex].Data.PublicKey},
}
res, err = vs.GetDuties(context.Background(), req)
require.NoError(t, err, "Could not call epoch committee assignment")
if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Assigned slot %d can't be higher than %d",
res.CurrentEpochDuties[0].AttesterSlot, bs.Slot()+params.BeaconConfig().SlotsPerEpoch)
}
// We request for duties for all validators.
req = &ethpb.DutiesRequest{
PublicKeys: pubKeys,
Epoch: 0,
}
res, err = vs.GetDuties(context.Background(), req)
require.NoError(t, err, "Could not call epoch committee assignment")
for i := 0; i < len(res.CurrentEpochDuties); i++ {
assert.Equal(t, types.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex)
}
for i := 0; i < len(res.CurrentEpochDuties); i++ {
assert.Equal(t, true, res.CurrentEpochDuties[i].IsSyncCommittee)
// Current epoch and next epoch duties should be equal before the sync period epoch boundary.
assert.Equal(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
}
// Current epoch and next epoch duties should not be equal at the sync period epoch boundary.
req = &ethpb.DutiesRequest{
PublicKeys: pubKeys,
Epoch: params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1,
}
res, err = vs.GetDuties(context.Background(), req)
require.NoError(t, err, "Could not call epoch committee assignment")
for i := 0; i < len(res.CurrentEpochDuties); i++ {
assert.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
}
}
func TestGetAltairDuties_UnknownPubkey(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.MainnetConfig()

View File

@@ -16,7 +16,7 @@ func (vs *Server) getBellatrixBeaconBlock(ctx context.Context, req *ethpb.BlockR
return nil, err
}
payload, err := vs.getExecutionPayload(ctx, req.Slot, altairBlk.ProposerIndex)
payload, err := vs.getExecutionPayload(ctx, req.Slot)
if err != nil {
return nil, err
}

View File

@@ -13,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
@@ -25,7 +24,7 @@ import (
// This returns the execution payload of a given slot. The function is aware of both pre-merge and post-merge states.
// The payload is computed with respect to the time of the merge.
func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx types.ValidatorIndex) (*enginev1.ExecutionPayload, error) {
func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot) (*enginev1.ExecutionPayload, error) {
st, err := vs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, err
@@ -95,24 +94,10 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx
SafeBlockHash: parentHash,
FinalizedBlockHash: finalizedBlockHash,
}
feeRecipient := params.BeaconConfig().DefaultFeeRecipient
recipient, err := vs.BeaconDB.FeeRecipientByValidatorID(ctx, vIdx)
switch err == nil {
case true:
feeRecipient = recipient
case errors.As(err, kv.ErrNotFoundFeeRecipient): // If fee recipient is not found, use the default fee recipient.
logrus.WithFields(logrus.Fields{
"validatorIndex": vIdx,
"defaultFeeRecipient": feeRecipient,
}).Error("Fee recipient not found. Using default fee recipient")
default:
return nil, errors.Wrap(err, "could not get fee recipient in db")
}
p := &enginev1.PayloadAttributes{
Timestamp: uint64(t.Unix()),
PrevRandao: random,
SuggestedFeeRecipient: feeRecipient.Bytes(),
SuggestedFeeRecipient: params.BeaconConfig().FeeRecipient.Bytes(),
}
payloadID, _, err := vs.ExecutionEngineCaller.ForkchoiceUpdated(ctx, f, p)
if err != nil {
@@ -219,8 +204,6 @@ func (vs *Server) getPowBlockHashAtTerminalTotalDifficulty(ctx context.Context)
}).Info("Retrieved terminal block hash")
return blk.Hash, true, nil
}
} else {
return nil, false, nil
}
blk = parentBlk
}
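
One side of this hunk resolves the suggested fee recipient per validator index and falls back to the configured default when no entry exists. A hedged sketch of that lookup-with-fallback shape, using a plain map in place of the database:

package example

// feeRecipientFor returns the address registered for the validator index,
// or the default address when the validator has no stored entry.
func feeRecipientFor(byIndex map[uint64][20]byte, idx uint64, def [20]byte) [20]byte {
	if addr, ok := byIndex[idx]; ok {
		return addr
	}
	return def
}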

View File

@@ -79,7 +79,6 @@ func TestServer_getExecutionPayload(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
require.NoError(t, beaconDB.SaveBlock(context.Background(), b1))
require.NoError(t, beaconDB.SaveBlock(context.Background(), b2))
require.NoError(t, beaconDB.SaveFeeRecipientsByValidatorIDs(context.Background(), []types.ValidatorIndex{0}, []common.Address{{}}))
tests := []struct {
name string
@@ -89,7 +88,6 @@ func TestServer_getExecutionPayload(t *testing.T) {
payloadID *pb.PayloadIDBytes
terminalBlockHash common.Hash
activationEpoch types.Epoch
validatorIndx types.ValidatorIndex
}{
{
name: "transition completed, nil payload id",
@@ -97,16 +95,10 @@ func TestServer_getExecutionPayload(t *testing.T) {
errString: "nil payload id",
},
{
name: "transition completed, happy case (has fee recipient in Db)",
name: "transition completed, happy case",
st: transitionSt,
payloadID: &pb.PayloadIDBytes{0x1},
},
{
name: "transition completed, happy case (doesn't have fee recipient in Db)",
st: transitionSt,
payloadID: &pb.PayloadIDBytes{0x1},
validatorIndx: 1,
},
{
name: "transition completed, could not prepare payload",
st: transitionSt,
@@ -137,7 +129,7 @@ func TestServer_getExecutionPayload(t *testing.T) {
HeadFetcher: &chainMock.ChainService{State: tt.st},
BeaconDB: beaconDB,
}
_, err := vs.getExecutionPayload(context.Background(), tt.st.Slot(), tt.validatorIndx)
_, err := vs.getExecutionPayload(context.Background(), tt.st.Slot())
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
@@ -233,20 +225,6 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
wantExists: true,
wantTerminalBlockHash: []byte{'a'},
},
{
name: "ttd not reached",
paramsTd: "3",
currentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'a'},
ParentHash: []byte{'b'},
TotalDifficulty: "0x2",
},
parentPowBlock: &pb.ExecutionBlock{
Hash: []byte{'b'},
ParentHash: []byte{'c'},
TotalDifficulty: "0x1",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

View File

@@ -42,7 +42,6 @@ import (
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
@@ -2234,7 +2233,6 @@ func TestProposer_GetBeaconBlock_PostForkEpoch(t *testing.T) {
func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
db := dbutil.SetupDB(t)
ctx := context.Background()
hook := logTest.NewGlobal()
terminalBlockHash := bytesutil.PadTo([]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 32)
@@ -2335,7 +2333,6 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
PayloadIDBytes: &enginev1.PayloadIDBytes{1},
ExecutionPayload: payload,
},
BeaconDB: db,
}
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
@@ -2349,10 +2346,6 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
Graffiti: graffiti[:],
}
proposerIndex := types.ValidatorIndex(40)
addr := common.Address{'a'}
require.NoError(t, proposerServer.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []types.ValidatorIndex{proposerIndex}, []common.Address{addr}))
block, err := proposerServer.GetBeaconBlock(ctx, req)
require.NoError(t, err)
bellatrixBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Bellatrix)
@@ -2363,7 +2356,6 @@ func TestProposer_GetBeaconBlock_BellatrixEpoch(t *testing.T) {
assert.DeepEqual(t, randaoReveal, bellatrixBlk.Bellatrix.Body.RandaoReveal, "Expected block to have correct randao reveal")
assert.DeepEqual(t, req.Graffiti, bellatrixBlk.Bellatrix.Body.Graffiti, "Expected block to have correct Graffiti")
require.LogsDoNotContain(t, hook, "Fee recipient not found. Using default fee recipient.")
require.DeepEqual(t, payload, bellatrixBlk.Bellatrix.Body.ExecutionPayload) // Payload should equal.
}
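The removed hook lines rely on a global logrus test hook so the test can assert that the "Fee recipient not found" warning is never emitted. A minimal version of that assertion pattern, using only the logrus test package; the test name and the code under test below are placeholders.

```go
package example

import (
	"strings"
	"testing"

	"github.com/sirupsen/logrus"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestNoDefaultFeeRecipientWarning(t *testing.T) {
	// Capture everything logged through the global logrus logger.
	hook := logTest.NewGlobal()
	defer hook.Reset()

	logrus.Info("proposing block") // stand-in for the code under test

	// Fail if any captured entry contains the warning we expect to be absent.
	for _, e := range hook.AllEntries() {
		if strings.Contains(e.Message, "Fee recipient not found") {
			t.Fatalf("unexpected log entry: %q", e.Message)
		}
	}
}
```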

View File

@@ -15,7 +15,6 @@ go_library(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -7,7 +7,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/beacon-chain/state/types"
pmath "github.com/prysmaticlabs/prysm/math"
)
// FieldTrie is the representation of the representative
@@ -111,15 +110,11 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b
if err != nil {
return [32]byte{}, err
}
iNumOfElems, err := pmath.Int(numOfElems)
if err != nil {
return [32]byte{}, err
}
// We remove the duplicates here in order to prevent
// duplicated insertions into the trie.
newIndices := []uint64{}
indexExists := make(map[uint64]bool)
newRoots := make([][32]byte, 0, len(fieldRoots)/iNumOfElems)
newRoots := make([][32]byte, 0, len(fieldRoots)/int(numOfElems))
for i, idx := range indices {
startIdx := idx / numOfElems
if indexExists[startIdx] {
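One side of this hunk converts numOfElems with a checked helper (pmath.Int) while the other casts directly with int(...). A sketch of what such a checked uint64-to-int conversion presumably looks like, so the capacity arithmetic cannot silently overflow on 32-bit platforms; the helper below is illustrative, not Prysm's math package.

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

var errOverflow = errors.New("uint64 value overflows int")

// toInt refuses values that do not fit in the platform's int instead of
// silently truncating them.
func toInt(v uint64) (int, error) {
	if v > uint64(math.MaxInt) {
		return 0, errOverflow
	}
	return int(v), nil
}

func main() {
	n, err := toInt(1 << 10)
	fmt.Println(n, err) // 1024 <nil>
	_, err = toInt(math.MaxUint64)
	fmt.Println(err) // overflow error
}
```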

View File

@@ -12,7 +12,6 @@ import (
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
pmath "github.com/prysmaticlabs/prysm/math"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
)
@@ -58,7 +57,7 @@ func validateElements(field types.FieldIndex, dataType types.DataType, elements
length *= comLength
}
val := reflect.Indirect(reflect.ValueOf(elements))
if uint64(val.Len()) > length {
if val.Len() > int(length) {
return errors.Errorf("elements length is larger than expected for field %s: %d > %d", field.String(version.Phase0), val.Len(), length)
}
return nil
@@ -319,10 +318,6 @@ func handleBalanceSlice(val, indices []uint64, convertAll bool) ([][32]byte, err
if err != nil {
return nil, err
}
iNumOfElems, err := pmath.Int(numOfElems)
if err != nil {
return nil, err
}
roots := [][32]byte{}
for _, idx := range indices {
// We split the indexes into their relevant groups. Balances
@@ -330,14 +325,14 @@ func handleBalanceSlice(val, indices []uint64, convertAll bool) ([][32]byte, err
startIdx := idx / numOfElems
startGroup := startIdx * numOfElems
chunk := [32]byte{}
sizeOfElem := len(chunk) / iNumOfElems
sizeOfElem := len(chunk) / int(numOfElems)
for i, j := 0, startGroup; j < startGroup+numOfElems; i, j = i+sizeOfElem, j+1 {
wantedVal := uint64(0)
// We are adding chunks in sets of 4, if the set is at the edge of the array
// then you will need to zero out the rest of the chunk. Ex : 41 indexes,
// so 41 % 4 = 1 . There are 3 indexes, which do not exist yet but we
// have to add in as a root. These 3 indexes are then given a 'zero' value.
if j < uint64(len(val)) {
if int(j) < len(val) {
wantedVal = val[j]
}
binary.LittleEndian.PutUint64(chunk[i:i+sizeOfElem], wantedVal)
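The loop above packs 8-byte little-endian balances four to a 32-byte chunk, zero-filling the tail when the count is not a multiple of four (the "41 % 4 = 1" comment). A standalone sketch of just that chunking step, separate from the stateutil implementation:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// packBalances lays out uint64 balances four per 32-byte chunk, little-endian,
// zero-padding the final chunk when len(balances) is not a multiple of four.
func packBalances(balances []uint64) [][32]byte {
	const perChunk = 4 // 32 bytes / 8 bytes per balance
	nChunks := (len(balances) + perChunk - 1) / perChunk
	chunks := make([][32]byte, nChunks)
	for i, b := range balances {
		chunk := i / perChunk
		offset := (i % perChunk) * 8
		binary.LittleEndian.PutUint64(chunks[chunk][offset:offset+8], b)
	}
	return chunks
}

func main() {
	// 5 balances -> 2 chunks; the second chunk is zero-padded.
	for _, c := range packBalances([]uint64{1, 2, 3, 4, 5}) {
		fmt.Printf("%x\n", c)
	}
}
```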

View File

@@ -1,4 +1,3 @@
//go:build !minimal
// +build !minimal
package v1

View File

@@ -1,4 +1,3 @@
//go:build !minimal
// +build !minimal
package v1

View File

@@ -1,4 +1,3 @@
//go:build minimal
// +build minimal
package v1

View File

@@ -1,4 +1,3 @@
//go:build minimal
// +build minimal
package v1
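These single-line hunks touch the paired build constraints that select the minimal or mainnet preset at compile time. For reference, a file carrying both the newer //go:build form and the legacy form looks like the sketch below; the constant and package contents are illustrative only.

```go
//go:build minimal
// +build minimal

// Package v1 here is illustrative; the two constraint lines above must agree,
// and Go 1.17+ tooling keeps them in sync.
package v1

// PresetName is a hypothetical marker for which preset was compiled in.
const PresetName = "minimal"
```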

View File

@@ -3,43 +3,75 @@ package v1
import (
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
testtmpl "github.com/prysmaticlabs/prysm/beacon-chain/state/testing"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestBeaconState_LatestBlockHeader(t *testing.T) {
testtmpl.VerifyBeaconStateLatestBlockHeader(
t,
func() (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconState{})
},
func(BH *ethpb.BeaconBlockHeader) (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconState{LatestBlockHeader: BH})
},
)
s, err := InitializeFromProto(&ethpb.BeaconState{})
require.NoError(t, err)
got := s.LatestBlockHeader()
require.DeepEqual(t, (*ethpb.BeaconBlockHeader)(nil), got)
want := &ethpb.BeaconBlockHeader{Slot: 100}
s, err = InitializeFromProto(&ethpb.BeaconState{LatestBlockHeader: want})
require.NoError(t, err)
got = s.LatestBlockHeader()
require.DeepEqual(t, want, got)
// Test copy does not mutate.
got.Slot = 101
require.DeepNotEqual(t, want, got)
}
func TestBeaconState_BlockRoots(t *testing.T) {
testtmpl.VerifyBeaconStateBlockRootsNative(
t,
func() (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconState{})
},
func(BR [][]byte) (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconState{BlockRoots: BR})
},
)
s, err := InitializeFromProto(&ethpb.BeaconState{})
require.NoError(t, err)
got := s.BlockRoots()
want := make([][]byte, fieldparams.BlockRootsLength)
for i := range want {
want[i] = make([]byte, 32)
}
require.DeepEqual(t, want, got)
want = make([][]byte, fieldparams.BlockRootsLength)
for i := range want {
if i == 0 {
want[i] = bytesutil.PadTo([]byte{'a'}, 32)
} else {
want[i] = make([]byte, 32)
}
}
s, err = InitializeFromProto(&ethpb.BeaconState{BlockRoots: want})
require.NoError(t, err)
got = s.BlockRoots()
require.DeepEqual(t, want, got)
// Test copy does not mutate.
got[0][0] = 'b'
require.DeepNotEqual(t, want, got)
}
func TestBeaconState_BlockRootAtIndex(t *testing.T) {
testtmpl.VerifyBeaconStateBlockRootAtIndexNative(
t,
func() (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconState{})
},
func(BR [][]byte) (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconState{BlockRoots: BR})
},
)
s, err := InitializeFromProto(&ethpb.BeaconState{})
require.NoError(t, err)
got, err := s.BlockRootAtIndex(0)
require.NoError(t, err)
require.DeepEqual(t, bytesutil.PadTo([]byte{}, 32), got)
r := [fieldparams.BlockRootsLength][32]byte{{'a'}}
bRoots := make([][]byte, len(r))
for i, root := range r {
tmp := root
bRoots[i] = tmp[:]
}
s, err = InitializeFromProto(&ethpb.BeaconState{BlockRoots: bRoots})
require.NoError(t, err)
got, err = s.BlockRootAtIndex(0)
require.NoError(t, err)
want := bytesutil.PadTo([]byte{'a'}, 32)
require.DeepSSZEqual(t, want, got)
}
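One side of this hunk inlines the assertions; the other delegates to shared testtmpl helpers that take constructor closures, so the v1 and v2 state packages exercise identical checks. A generic sketch of that template pattern with stand-in types, not the real testtmpl signatures:

```go
package testtmpl

import "testing"

type Header struct{ Slot uint64 }

// BeaconState is a stand-in for the state interface the real templates accept.
type BeaconState interface{ LatestBlockHeader() *Header }

// VerifyLatestBlockHeader runs the same checks against any state version by
// asking the caller for construction closures.
func VerifyLatestBlockHeader(
	t *testing.T,
	empty func() (BeaconState, error),
	withHeader func(*Header) (BeaconState, error),
) {
	s, err := empty()
	if err != nil {
		t.Fatal(err)
	}
	if s.LatestBlockHeader() != nil {
		t.Fatal("expected nil header on empty state")
	}

	want := &Header{Slot: 100}
	s, err = withHeader(want)
	if err != nil {
		t.Fatal(err)
	}
	got := s.LatestBlockHeader()
	if got == nil || got.Slot != want.Slot {
		t.Fatalf("unexpected header: %+v", got)
	}
	// The getter must return a copy: mutating it should not touch the state.
	got.Slot = 101
	if s.LatestBlockHeader().Slot != want.Slot {
		t.Fatal("copy mutated underlying state")
	}
}
```

A version-specific test then passes closures that wrap its own InitializeFromProto, as the template-based side of the hunk above does.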

View File

@@ -9,13 +9,13 @@ import (
)
func TestBeaconState_SlotDataRace(t *testing.T) {
testtmpl.VerifyBeaconStateSlotDataRace(t, func() (state.BeaconState, error) {
testtmpl.VerifyBeaconState_SlotDataRace(t, func() (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconState{Slot: 1})
})
}
func TestBeaconState_MatchCurrentJustifiedCheckpt(t *testing.T) {
testtmpl.VerifyBeaconStateMatchCurrentJustifiedCheckptNative(
testtmpl.VerifyBeaconState_MatchCurrentJustifiedCheckptNative(
t,
func(cp *ethpb.Checkpoint) (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconState{CurrentJustifiedCheckpoint: cp})
@@ -24,7 +24,7 @@ func TestBeaconState_MatchCurrentJustifiedCheckpt(t *testing.T) {
}
func TestBeaconState_MatchPreviousJustifiedCheckpt(t *testing.T) {
testtmpl.VerifyBeaconStateMatchPreviousJustifiedCheckptNative(
testtmpl.VerifyBeaconState_MatchPreviousJustifiedCheckptNative(
t,
func(cp *ethpb.Checkpoint) (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconState{PreviousJustifiedCheckpoint: cp})
@@ -33,7 +33,7 @@ func TestBeaconState_MatchPreviousJustifiedCheckpt(t *testing.T) {
}
func TestBeaconState_ValidatorByPubkey(t *testing.T) {
testtmpl.VerifyBeaconStateValidatorByPubkey(t, func() (state.BeaconState, error) {
testtmpl.VerifyBeaconState_ValidatorByPubkey(t, func() (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconState{})
})
}
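Among the renamed helpers is a slot data-race template; run with the -race flag, such a test flags unsynchronized access by hammering the same state from concurrent readers and a writer. A sketch with a stand-in state type, not the real template:

```go
package example

import (
	"sync"
	"testing"
)

type state struct {
	mu   sync.RWMutex
	slot uint64
}

func (s *state) Slot() uint64     { s.mu.RLock(); defer s.mu.RUnlock(); return s.slot }
func (s *state) SetSlot(v uint64) { s.mu.Lock(); defer s.mu.Unlock(); s.slot = v }

// TestState_SlotDataRace only proves its point under `go test -race`.
func TestState_SlotDataRace(t *testing.T) {
	s := &state{slot: 1}
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(2)
		go func() { defer wg.Done(); _ = s.Slot() }()
		go func(v uint64) { defer wg.Done(); s.SetSlot(v) }(uint64(i))
	}
	wg.Wait()
}
```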

View File

@@ -123,7 +123,7 @@ func (b *BeaconState) ValidatorIndexByPubkey(key [fieldparams.BLSPubkeyLength]by
numOfVals := len(b.validators)
idx, ok := b.valMapHandler.Get(key)
if ok && types.ValidatorIndex(numOfVals) <= idx {
if ok && numOfVals <= int(idx) {
return types.ValidatorIndex(0), false
}
return idx, ok
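This hunk switches between widening the validator count to the index type and narrowing the index to int. Widening is the safer direction, since int(idx) can truncate a large uint64-backed index on 32-bit platforms; a small illustration with a stand-in ValidatorIndex type:

```go
package main

import "fmt"

type ValidatorIndex uint64

// indexInRange compares in the wider unsigned domain; numVals, a slice
// length, is never negative, so the conversion cannot change its value.
func indexInRange(numVals int, idx ValidatorIndex) bool {
	return idx < ValidatorIndex(numVals)
}

func main() {
	fmt.Println(indexInRange(10, 3))  // true
	fmt.Println(indexInRange(10, 10)) // false: index past the end
}
```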

View File

@@ -10,7 +10,7 @@ import (
)
func TestBeaconState_ValidatorAtIndexReadOnly_HandlesNilSlice(t *testing.T) {
testtmpl.VerifyBeaconStateValidatorAtIndexReadOnlyHandlesNilSlice(t, func() (state.BeaconState, error) {
testtmpl.VerifyBeaconState_ValidatorAtIndexReadOnly_HandlesNilSlice(t, func() (state.BeaconState, error) {
return v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{
Validators: nil,
})

View File

@@ -86,7 +86,6 @@ go_test(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/testing:go_default_library",
"//beacon-chain/state/types:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",

View File

@@ -1,4 +1,3 @@
//go:build !minimal
// +build !minimal
package v2

View File

@@ -1,4 +1,3 @@
//go:build !minimal
// +build !minimal
package v2

View File

@@ -1,4 +1,3 @@
//go:build minimal
// +build minimal
package v2

View File

@@ -1,4 +1,3 @@
//go:build minimal
// +build minimal
package v2

View File

@@ -3,43 +3,74 @@ package v2
import (
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
testtmpl "github.com/prysmaticlabs/prysm/beacon-chain/state/testing"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestBeaconState_LatestBlockHeader(t *testing.T) {
testtmpl.VerifyBeaconStateLatestBlockHeader(
t,
func() (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconStateAltair{})
},
func(BH *ethpb.BeaconBlockHeader) (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconStateAltair{LatestBlockHeader: BH})
},
)
s, err := InitializeFromProto(&ethpb.BeaconStateAltair{})
require.NoError(t, err)
got := s.LatestBlockHeader()
require.DeepEqual(t, (*ethpb.BeaconBlockHeader)(nil), got)
want := &ethpb.BeaconBlockHeader{Slot: 100}
s, err = InitializeFromProto(&ethpb.BeaconStateAltair{LatestBlockHeader: want})
require.NoError(t, err)
got = s.LatestBlockHeader()
require.DeepEqual(t, want, got)
// Test copy does not mutate.
got.Slot = 101
require.DeepNotEqual(t, want, got)
}
func TestBeaconState_BlockRoots(t *testing.T) {
testtmpl.VerifyBeaconStateBlockRootsNative(
t,
func() (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconStateAltair{})
},
func(BR [][]byte) (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconStateAltair{BlockRoots: BR})
},
)
s, err := InitializeFromProto(&ethpb.BeaconStateAltair{})
require.NoError(t, err)
got := s.BlockRoots()
want := make([][]byte, fieldparams.BlockRootsLength)
for i := range want {
want[i] = make([]byte, fieldparams.RootLength)
}
require.DeepEqual(t, want, got)
want = make([][]byte, fieldparams.BlockRootsLength)
for i := range want {
if i == 0 {
want[i] = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
} else {
want[i] = make([]byte, fieldparams.RootLength)
}
}
s, err = InitializeFromProto(&ethpb.BeaconStateAltair{BlockRoots: want})
require.NoError(t, err)
got = s.BlockRoots()
require.DeepEqual(t, want, got)
// Test copy does not mutate.
got[0][0] = 'b'
require.DeepNotEqual(t, want, got)
}
func TestBeaconState_BlockRootAtIndex(t *testing.T) {
testtmpl.VerifyBeaconStateBlockRootAtIndexNative(
t,
func() (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconStateAltair{})
},
func(BR [][]byte) (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconStateAltair{BlockRoots: BR})
},
)
s, err := InitializeFromProto(&ethpb.BeaconStateAltair{})
require.NoError(t, err)
got, err := s.BlockRootAtIndex(0)
require.NoError(t, err)
require.DeepEqual(t, bytesutil.PadTo([]byte{}, fieldparams.RootLength), got)
r := [fieldparams.BlockRootsLength][fieldparams.RootLength]byte{{'a'}}
bRoots := make([][]byte, len(r))
for i, root := range r {
tmp := root
bRoots[i] = tmp[:]
}
s, err = InitializeFromProto(&ethpb.BeaconStateAltair{BlockRoots: bRoots})
require.NoError(t, err)
got, err = s.BlockRootAtIndex(0)
require.NoError(t, err)
want := bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
require.DeepSSZEqual(t, want, got)
}
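The bRoots loop above copies the loop variable into tmp before slicing it; without that copy, every entry would alias the single reused range variable under pre-Go-1.22 loop semantics. A small standalone demonstration with toy sizes in place of the real field params:

```go
package main

import "fmt"

func main() {
	r := [3][4]byte{{'a'}, {'b'}, {'c'}}
	roots := make([][]byte, len(r))
	for i, root := range r {
		tmp := root // per-iteration copy, so the slice below does not alias the loop variable
		roots[i] = tmp[:]
	}
	fmt.Printf("%q %q %q\n", roots[0][:1], roots[1][:1], roots[2][:1]) // "a" "b" "c"
}
```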

View File

@@ -9,13 +9,13 @@ import (
)
func TestBeaconState_SlotDataRace(t *testing.T) {
testtmpl.VerifyBeaconStateSlotDataRace(t, func() (state.BeaconState, error) {
testtmpl.VerifyBeaconState_SlotDataRace(t, func() (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconStateAltair{Slot: 1})
})
}
func TestBeaconState_MatchCurrentJustifiedCheckpt(t *testing.T) {
testtmpl.VerifyBeaconStateMatchCurrentJustifiedCheckptNative(
testtmpl.VerifyBeaconState_MatchCurrentJustifiedCheckptNative(
t,
func(cp *ethpb.Checkpoint) (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconStateAltair{CurrentJustifiedCheckpoint: cp})
@@ -24,7 +24,7 @@ func TestBeaconState_MatchCurrentJustifiedCheckpt(t *testing.T) {
}
func TestBeaconState_MatchPreviousJustifiedCheckpt(t *testing.T) {
testtmpl.VerifyBeaconStateMatchPreviousJustifiedCheckptNative(
testtmpl.VerifyBeaconState_MatchPreviousJustifiedCheckptNative(
t,
func(cp *ethpb.Checkpoint) (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconStateAltair{PreviousJustifiedCheckpoint: cp})
@@ -33,7 +33,7 @@ func TestBeaconState_MatchPreviousJustifiedCheckpt(t *testing.T) {
}
func TestBeaconState_ValidatorByPubkey(t *testing.T) {
testtmpl.VerifyBeaconStateValidatorByPubkey(t, func() (state.BeaconState, error) {
testtmpl.VerifyBeaconState_ValidatorByPubkey(t, func() (state.BeaconState, error) {
return InitializeFromProto(&ethpb.BeaconStateAltair{})
})
}

View File

@@ -130,7 +130,7 @@ func (b *BeaconState) ValidatorIndexByPubkey(key [fieldparams.BLSPubkeyLength]by
numOfVals := len(b.validators)
idx, ok := b.valMapHandler.Get(key)
if ok && types.ValidatorIndex(numOfVals) <= idx {
if ok && numOfVals <= int(idx) {
return types.ValidatorIndex(0), false
}
return idx, ok

Some files were not shown because too many files have changed in this diff.