diff --git a/.bazelrc b/.bazelrc index 98140f33d0..e270301e3f 100644 --- a/.bazelrc +++ b/.bazelrc @@ -15,7 +15,7 @@ run --host_force_python=PY2 # Network sandboxing only works on linux. --experimental_sandbox_default_allow_network=false -# Use minimal protobufs at runtime +# Use mainnet protobufs at runtime run --define ssz=mainnet test --define ssz=mainnet build --define ssz=mainnet diff --git a/README.md b/README.md index e7b7674515..51ee99ec09 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # Prysm: An Ethereum 2.0 Client Written in Go [![Build status](https://badge.buildkite.com/b555891daf3614bae4284dcf365b2340cefc0089839526f096.svg?branch=master)](https://buildkite.com/prysmatic-labs/prysm) -[![ETH2.0_Spec_Version 0.9.3](https://img.shields.io/badge/ETH2.0%20Spec%20Version-v0.9.3-blue.svg)](https://github.com/ethereum/eth2.0-specs/tree/v0.9.3) +[![ETH2.0_Spec_Version 0.11.1](https://img.shields.io/badge/ETH2.0%20Spec%20Version-v0.11.1-blue.svg)](https://github.com/ethereum/eth2.0-specs/tree/v0.11.1) [![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/KSA7rPr) [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) diff --git a/WORKSPACE b/WORKSPACE index c6295516ed..72a88ed0ce 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -197,8 +197,8 @@ filegroup( visibility = ["//visibility:public"], ) """, - sha256 = "72c6ee3c20d19736b1203f364a6eb0ddee2c173073e20bee2beccd288fdc42be", - url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.4/general.tar.gz", + sha256 = "b90221d87b3b4cb17d7f195f8852f5dd8fec1cf623d42443b97bdb5a216ae61d", + url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.11.1/general.tar.gz", ) http_archive( @@ -213,8 +213,8 @@ filegroup( visibility = ["//visibility:public"], ) """, - sha256 = "a3cc860a3679f6f62ee57b65677a9b48a65fdebb151cdcbf50f23852632845ef", - url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.4/minimal.tar.gz", + sha256 = "316b227c0198f55872e46d601a578afeac88aab36ed38e3f01af753e98db156f", + url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.11.1/minimal.tar.gz", ) http_archive( @@ -229,8 +229,8 @@ filegroup( visibility = ["//visibility:public"], ) """, - sha256 = "8fc1b6220973ca30fa4ddc4ed24d66b1719abadca8bedb5e06c3bd9bc0df28e9", - url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.4/mainnet.tar.gz", + sha256 = "b9c52f60293bcc1acfd4f8ab7ddf8bf8222ddd6a105e93d384542d1396e1b07a", + url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.11.1/mainnet.tar.gz", ) http_archive( @@ -1305,7 +1305,7 @@ go_repository( go_repository( name = "com_github_prysmaticlabs_ethereumapis", - commit = "62fd1d2ec119bc93b0473fde17426c63a85197ed", + commit = "6607cc86ddb7c78acfe3b1f0dfb115489a96d46d", importpath = "github.com/prysmaticlabs/ethereumapis", patch_args = ["-p1"], patches = [ @@ -1639,6 +1639,14 @@ go_repository( version = "v1.20.0", ) +go_repository( + name = "com_github_wealdtech_eth2_signer_api", + build_file_proto_mode = "disable_global", + importpath = "github.com/wealdtech/eth2-signer-api", + sum = "h1:fqJYjKwG/FeUAJYYiZblIP6agiz3WWB+Hxpw85Fnr5I=", + version = "v1.0.1", +) + go_repository( name = "com_github_prysmaticlabs_prombbolt", importpath = "github.com/prysmaticlabs/prombbolt", @@ -1656,3 +1664,10 @@ go_repository( sum = 
"h1:GWsU1WjSE2rtvyTYGcndqmPPkQkBNV7pEuZdnGtwtu4=", version = "v0.0.0-20200321040036-d43e30eacb43", ) + +go_repository( + name = "org_golang_x_mod", + importpath = "golang.org/x/mod", + sum = "h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=", + version = "v0.2.0", +) diff --git a/beacon-chain/archiver/BUILD.bazel b/beacon-chain/archiver/BUILD.bazel index 06d93d2e1e..993e8dec4b 100644 --- a/beacon-chain/archiver/BUILD.bazel +++ b/beacon-chain/archiver/BUILD.bazel @@ -39,7 +39,6 @@ go_test( "//shared/testutil:go_default_library", "@com_github_gogo_protobuf//proto:go_default_library", "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", - "@com_github_prysmaticlabs_go_bitfield//:go_default_library", "@com_github_sirupsen_logrus//:go_default_library", "@com_github_sirupsen_logrus//hooks/test:go_default_library", ], diff --git a/beacon-chain/archiver/service_test.go b/beacon-chain/archiver/service_test.go index ac59b94450..428b1ecc69 100644 --- a/beacon-chain/archiver/service_test.go +++ b/beacon-chain/archiver/service_test.go @@ -9,7 +9,6 @@ import ( "github.com/gogo/protobuf/proto" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-bitfield" mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute" "github.com/prysmaticlabs/prysm/beacon-chain/core/feed" @@ -34,10 +33,8 @@ func TestArchiverService_ReceivesBlockProcessedEvent(t *testing.T) { hook := logTest.NewGlobal() svc, beaconDB := setupService(t) defer dbutil.TeardownDB(t, beaconDB) - st, err := stateTrie.InitializeFromProto(&pb.BeaconState{ - Slot: 1, - }) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(1); err != nil { t.Fatal(err) } svc.headFetcher = &mock.ChainService{ @@ -61,10 +58,8 @@ func TestArchiverService_OnlyArchiveAtEpochEnd(t *testing.T) { svc, beaconDB := setupService(t) defer dbutil.TeardownDB(t, beaconDB) // The head state is NOT an epoch end. - st, err := stateTrie.InitializeFromProto(&pb.BeaconState{ - Slot: params.BeaconConfig().SlotsPerEpoch - 2, - }) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(params.BeaconConfig().SlotsPerEpoch - 2); err != nil { t.Fatal(err) } svc.headFetcher = &mock.ChainService{ @@ -433,18 +428,20 @@ func setupState(validatorCount uint64) (*stateTrie.BeaconState, error) { // We initialize a head state that has attestations from participated // validators in a simulated fashion. 
- return stateTrie.InitializeFromProto(&pb.BeaconState{ - Slot: (2 * params.BeaconConfig().SlotsPerEpoch) - 1, - Validators: validators, - Balances: balances, - BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot), - Slashings: []uint64{0, 1e9, 1e9}, - RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), - CurrentEpochAttestations: atts, - FinalizedCheckpoint: &ethpb.Checkpoint{}, - JustificationBits: bitfield.Bitvector4{0x00}, - CurrentJustifiedCheckpoint: &ethpb.Checkpoint{}, - }) + st := testutil.NewBeaconState() + if err := st.SetSlot((2 * params.BeaconConfig().SlotsPerEpoch) - 1); err != nil { + return nil, err + } + if err := st.SetValidators(validators); err != nil { + return nil, err + } + if err := st.SetBalances(balances); err != nil { + return nil, err + } + if err := st.SetCurrentEpochAttestations(atts); err != nil { + return nil, err + } + return st, nil } func setupService(t *testing.T) (*Service, db.Database) { diff --git a/beacon-chain/blockchain/chain_info_norace_test.go b/beacon-chain/blockchain/chain_info_norace_test.go index 003724ea3b..1919ff52a0 100644 --- a/beacon-chain/blockchain/chain_info_norace_test.go +++ b/beacon-chain/blockchain/chain_info_norace_test.go @@ -5,7 +5,9 @@ import ( "testing" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" + "github.com/prysmaticlabs/prysm/beacon-chain/cache" testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" ) func TestHeadSlot_DataRace(t *testing.T) { @@ -28,6 +30,7 @@ func TestHeadRoot_DataRace(t *testing.T) { s := &Service{ beaconDB: db, head: &head{root: [32]byte{'A'}}, + stateGen: stategen.New(db, cache.NewStateSummaryCache()), } go func() { if err := s.saveHead(context.Background(), [32]byte{}, ); err != nil { @@ -45,6 +48,7 @@ func TestHeadBlock_DataRace(t *testing.T) { s := &Service{ beaconDB: db, head: &head{block: &ethpb.SignedBeaconBlock{}}, + stateGen: stategen.New(db, cache.NewStateSummaryCache()), } go func() { if err := s.saveHead(context.Background(), [32]byte{}, ); err != nil { @@ -61,6 +65,7 @@ func TestHeadState_DataRace(t *testing.T) { defer testDB.TeardownDB(t, db) s := &Service{ beaconDB: db, + stateGen: stategen.New(db, cache.NewStateSummaryCache()), } go func() { if err := s.saveHead(context.Background(), [32]byte{}, ); err != nil { diff --git a/beacon-chain/blockchain/chain_info_test.go b/beacon-chain/blockchain/chain_info_test.go index 7871ab7539..e74bace822 100644 --- a/beacon-chain/blockchain/chain_info_test.go +++ b/beacon-chain/blockchain/chain_info_test.go @@ -166,7 +166,7 @@ func TestHeadBlock_CanRetrieve(t *testing.T) { } func TestHeadState_CanRetrieve(t *testing.T) { - s, err := state.InitializeFromProto(&pb.BeaconState{Slot: 2}) + s, err := state.InitializeFromProto(&pb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]}) if err != nil { t.Fatal(err) } @@ -176,7 +176,7 @@ func TestHeadState_CanRetrieve(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(s.InnerStateUnsafe(), headState.InnerStateUnsafe()) { + if !proto.Equal(s.InnerStateUnsafe(), headState.InnerStateUnsafe()) { t.Error("incorrect head state received") } } diff --git a/beacon-chain/blockchain/head.go b/beacon-chain/blockchain/head.go index 7c70d06169..68f344b74e 100644 --- a/beacon-chain/blockchain/head.go +++ b/beacon-chain/blockchain/head.go @@ -59,7 +59,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error { // If the head state is not 
available, just return nil. // There's nothing to cache - if featureconfig.Get().NewStateMgmt { + if !featureconfig.Get().DisableNewStateMgmt { if !s.stateGen.StateSummaryExists(ctx, headRoot) { return nil } @@ -81,7 +81,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error { // Get the new head state from cached state or DB. var newHeadState *state.BeaconState - if featureconfig.Get().NewStateMgmt { + if !featureconfig.Get().DisableNewStateMgmt { newHeadState, err = s.stateGen.StateByRoot(ctx, headRoot) if err != nil { return errors.Wrap(err, "could not retrieve head state in DB") @@ -121,7 +121,7 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock, var headState *state.BeaconState var err error - if featureconfig.Get().NewStateMgmt { + if !featureconfig.Get().DisableNewStateMgmt { headState, err = s.stateGen.StateByRoot(ctx, r) if err != nil { return errors.Wrap(err, "could not retrieve head state in DB") diff --git a/beacon-chain/blockchain/head_test.go b/beacon-chain/blockchain/head_test.go index c22bb78306..6bf71e906c 100644 --- a/beacon-chain/blockchain/head_test.go +++ b/beacon-chain/blockchain/head_test.go @@ -9,8 +9,8 @@ import ( ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-ssz" testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" - "github.com/prysmaticlabs/prysm/beacon-chain/state" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" + "github.com/prysmaticlabs/prysm/shared/testutil" ) func TestSaveHead_Same(t *testing.T) { @@ -44,6 +44,7 @@ func TestSaveHead_Different(t *testing.T) { newHeadBlock := &ethpb.BeaconBlock{Slot: 1} newHeadSignedBlock := &ethpb.SignedBeaconBlock{Block: newHeadBlock} + if err := service.beaconDB.SaveBlock(context.Background(), newHeadSignedBlock); err != nil { t.Fatal(err) } @@ -51,8 +52,11 @@ func TestSaveHead_Different(t *testing.T) { if err != nil { t.Fatal(err) } - headState, err := state.InitializeFromProto(&pb.BeaconState{Slot: 1}) - if err != nil { + headState := testutil.NewBeaconState() + if err := headState.SetSlot(1); err != nil { + t.Fatal(err) + } + if err := service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}); err != nil { t.Fatal(err) } if err := service.beaconDB.SaveState(context.Background(), headState, newRoot); err != nil { diff --git a/beacon-chain/blockchain/process_attestation.go b/beacon-chain/blockchain/process_attestation.go index cbacc7e6d1..0cdea8635e 100644 --- a/beacon-chain/blockchain/process_attestation.go +++ b/beacon-chain/blockchain/process_attestation.go @@ -98,7 +98,7 @@ func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) ([]ui } // Verify Attestations cannot be from future epochs. - if err := helpers.VerifySlotTime(genesisTime, tgtSlot); err != nil { + if err := helpers.VerifySlotTime(genesisTime, tgtSlot, helpers.TimeShiftTolerance); err != nil { return nil, errors.Wrap(err, "could not verify attestation target slot") } @@ -108,7 +108,7 @@ func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) ([]ui } // Verify attestations can only affect the fork choice of subsequent slots. 
- if err := helpers.VerifySlotTime(genesisTime, a.Data.Slot); err != nil { + if err := helpers.VerifySlotTime(genesisTime, a.Data.Slot, helpers.TimeShiftTolerance); err != nil { return nil, err } @@ -125,6 +125,16 @@ func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) ([]ui } } + if indexedAtt.AttestingIndices == nil { + return nil, errors.New("nil attesting indices") + } + if a.Data == nil { + return nil, errors.New("nil att data") + } + if a.Data.Target == nil { + return nil, errors.New("nil att target") + } + // Update forkchoice store with the new attestation for updating weight. s.forkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch) diff --git a/beacon-chain/blockchain/process_attestation_helpers.go b/beacon-chain/blockchain/process_attestation_helpers.go index a21fb49cc4..6f497a28f7 100644 --- a/beacon-chain/blockchain/process_attestation_helpers.go +++ b/beacon-chain/blockchain/process_attestation_helpers.go @@ -32,7 +32,7 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*sta } var baseState *stateTrie.BeaconState - if featureconfig.Get().NewStateMgmt { + if !featureconfig.Get().DisableNewStateMgmt { baseState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root)) if err != nil { return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch)) @@ -123,21 +123,25 @@ func (s *Service) verifyAttestation(ctx context.Context, baseState *stateTrie.Be } indexedAtt := attestationutil.ConvertToIndexed(ctx, a, committee) if err := blocks.VerifyIndexedAttestation(ctx, baseState, indexedAtt); err != nil { - if err == blocks.ErrSigFailedToVerify { + if err == helpers.ErrSigFailedToVerify { // When sig fails to verify, check if there's a differences in committees due to // different seeds. 
var aState *stateTrie.BeaconState var err error - if featureconfig.Get().NewStateMgmt { + if !featureconfig.Get().DisableNewStateMgmt { aState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot)) - return nil, err + if err != nil { + return nil, err + } + } else { + aState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot)) + if err != nil { + return nil, err + } } - - aState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot)) - if err != nil { - return nil, err + if aState == nil { + return nil, fmt.Errorf("nil state for block root %#x", a.Data.BeaconBlockRoot) } - epoch := helpers.SlotToEpoch(a.Data.Slot) origSeed, err := helpers.Seed(baseState, epoch, params.BeaconConfig().DomainBeaconAttester) if err != nil { diff --git a/beacon-chain/blockchain/process_attestation_test.go b/beacon-chain/blockchain/process_attestation_test.go index 59cc0182ef..1d305fef6f 100644 --- a/beacon-chain/blockchain/process_attestation_test.go +++ b/beacon-chain/blockchain/process_attestation_test.go @@ -2,18 +2,19 @@ package blockchain import ( "context" - "reflect" "strings" "testing" + "github.com/gogo/protobuf/proto" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-ssz" + "github.com/prysmaticlabs/prysm/beacon-chain/cache" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/state" testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray" - beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state" stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/params" @@ -25,7 +26,11 @@ func TestStore_OnAttestation(t *testing.T) { db := testDB.SetupDB(t) defer testDB.TeardownDB(t, db) - cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})} + cfg := &Config{ + BeaconDB: db, + ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), + StateGen: stategen.New(db, cache.NewStateSummaryCache()), + } service, err := NewService(ctx, cfg) if err != nil { t.Fatal(err) @@ -54,7 +59,7 @@ func TestStore_OnAttestation(t *testing.T) { t.Fatal(err) } - s, err := beaconstate.InitializeFromProto(&pb.BeaconState{}) + s := testutil.NewBeaconState() if err := s.SetSlot(100 * params.BeaconConfig().SlotsPerEpoch); err != nil { t.Fatal(err) } @@ -66,19 +71,17 @@ func TestStore_OnAttestation(t *testing.T) { if err := db.SaveBlock(ctx, BlkWithValidState); err != nil { t.Fatal(err) } + BlkWithValidStateRoot, err := ssz.HashTreeRoot(BlkWithValidState.Block) if err != nil { t.Fatal(err) } - s, err = stateTrie.InitializeFromProto(&pb.BeaconState{ - Fork: &pb.Fork{ - Epoch: 0, - CurrentVersion: params.BeaconConfig().GenesisForkVersion, - PreviousVersion: params.BeaconConfig().GenesisForkVersion, - }, - RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), - }) - if err != nil { + s = testutil.NewBeaconState() + if err := s.SetFork(&pb.Fork{ + Epoch: 0, + CurrentVersion: params.BeaconConfig().GenesisForkVersion, + PreviousVersion: params.BeaconConfig().GenesisForkVersion, + }); err != nil { t.Fatal(err) } if err := service.beaconDB.SaveState(ctx, s, BlkWithValidStateRoot); err != nil { @@ -111,7 +114,7 @@ func TestStore_OnAttestation(t *testing.T) { a: 
&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}, s: &pb.BeaconState{}, wantErr: true, - wantErrString: "pre state of target block 0 does not exist", + wantErrString: "could not get pre state for slot 0: unknown boundary state", }, { name: "process attestation doesn't match current epoch", @@ -141,9 +144,11 @@ func TestStore_SaveCheckpointState(t *testing.T) { ctx := context.Background() db := testDB.SetupDB(t) defer testDB.TeardownDB(t, db) - params.UseDemoBeaconConfig() - cfg := &Config{BeaconDB: db} + cfg := &Config{ + BeaconDB: db, + StateGen: stategen.New(db, cache.NewStateSummaryCache()), + } service, err := NewService(ctx, cfg) if err != nil { t.Fatal(err) } @@ -172,15 +177,20 @@ func TestStore_SaveCheckpointState(t *testing.T) { if err := service.beaconDB.SaveState(ctx, s, r); err != nil { t.Fatal(err) } + service.justifiedCheckpt = &ethpb.Checkpoint{Root: r[:]} service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: r[:]} service.finalizedCheckpt = &ethpb.Checkpoint{Root: r[:]} service.prevFinalizedCheckpt = &ethpb.Checkpoint{Root: r[:]} + r = bytesutil.ToBytes32([]byte{'A'}) cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)} if err := service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})); err != nil { t.Fatal(err) } + if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, 32)}); err != nil { + t.Fatal(err) + } s1, err := service.getAttPreState(ctx, cp1) if err != nil { t.Fatal(err) } @@ -193,6 +203,9 @@ func TestStore_SaveCheckpointState(t *testing.T) { if err := service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})); err != nil { t.Fatal(err) } + if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, 32)}); err != nil { + t.Fatal(err) + } s2, err := service.getAttPreState(ctx, cp2) if err != nil { t.Fatal(err) } @@ -236,6 +249,9 @@ func TestStore_SaveCheckpointState(t *testing.T) { if err := service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})); err != nil { t.Fatal(err) } + if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, 32)}); err != nil { + t.Fatal(err) + } s3, err := service.getAttPreState(ctx, cp3) if err != nil { t.Fatal(err) } @@ -250,7 +266,10 @@ func TestStore_UpdateCheckpointState(t *testing.T) { db := testDB.SetupDB(t) defer testDB.TeardownDB(t, db) - cfg := &Config{BeaconDB: db} + cfg := &Config{ + BeaconDB: db, + StateGen: stategen.New(db, cache.NewStateSummaryCache()), + } service, err := NewService(ctx, cfg) if err != nil { t.Fatal(err) } @@ -302,7 +321,7 @@ func TestStore_UpdateCheckpointState(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(returned, cached) { + if !proto.Equal(returned.InnerStateUnsafe(), cached.InnerStateUnsafe()) { t.Error("Incorrectly cached base state") } } diff --git a/beacon-chain/blockchain/process_block.go b/beacon-chain/blockchain/process_block.go index 8f84abaaed..e3e75e0e9a 100644 --- a/beacon-chain/blockchain/process_block.go +++ b/beacon-chain/blockchain/process_block.go @@ -92,7 +92,7 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock) return nil, errors.Wrapf(err, "could not insert block %d to fork choice store", b.Slot) } - if featureconfig.Get().NewStateMgmt { + if !featureconfig.Get().DisableNewStateMgmt { if err := s.stateGen.SaveState(ctx, root, postState); err != nil { return nil, errors.Wrap(err, "could not save state") } @@ -122,7 +122,7 
@@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock) return nil, errors.Wrap(err, "could not save finalized checkpoint") } - if !featureconfig.Get().NewStateMgmt { + if featureconfig.Get().DisableNewStateMgmt { startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch) endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch) if endSlot > startSlot { @@ -147,7 +147,7 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock) return nil, errors.Wrap(err, "could not save new justified") } - if featureconfig.Get().NewStateMgmt { + if !featureconfig.Get().DisableNewStateMgmt { fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root) fBlock, err := s.beaconDB.Block(ctx, fRoot) if err != nil { @@ -233,7 +233,7 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Slot) } - if featureconfig.Get().NewStateMgmt { + if !featureconfig.Get().DisableNewStateMgmt { if err := s.stateGen.SaveState(ctx, root, postState); err != nil { return errors.Wrap(err, "could not save state") } @@ -268,7 +268,7 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed // Update finalized check point. Prune the block cache and helper caches on every new finalized epoch. if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch { - if !featureconfig.Get().NewStateMgmt { + if featureconfig.Get().DisableNewStateMgmt { startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch) endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch) if endSlot > startSlot { @@ -301,7 +301,7 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed return errors.Wrap(err, "could not save new justified") } - if featureconfig.Get().NewStateMgmt { + if !featureconfig.Get().DisableNewStateMgmt { fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root) fBlock, err := s.beaconDB.Block(ctx, fRoot) if err != nil { @@ -313,7 +313,7 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed } } - if !featureconfig.Get().NewStateMgmt { + if featureconfig.Get().DisableNewStateMgmt { numOfStates := len(s.boundaryRoots) if numOfStates > initialSyncCacheSize { if err = s.persistCachedStates(ctx, numOfStates); err != nil { @@ -338,7 +338,7 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed return err } - if !featureconfig.Get().NewStateMgmt && helpers.IsEpochStart(postState.Slot()) { + if featureconfig.Get().DisableNewStateMgmt && helpers.IsEpochStart(postState.Slot()) { if err := s.beaconDB.SaveState(ctx, postState, root); err != nil { return errors.Wrap(err, "could not save state") } diff --git a/beacon-chain/blockchain/process_block_helpers.go b/beacon-chain/blockchain/process_block_helpers.go index c6b11b51e9..2d39189211 100644 --- a/beacon-chain/blockchain/process_block_helpers.go +++ b/beacon-chain/blockchain/process_block_helpers.go @@ -38,7 +38,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (* } // Verify block slot time is not from the feature. 
- if err := helpers.VerifySlotTime(preState.GenesisTime(), b.Slot); err != nil { + if err := helpers.VerifySlotTime(preState.GenesisTime(), b.Slot, helpers.TimeShiftTolerance); err != nil { return nil, err } @@ -60,8 +60,12 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) ( ctx, span := trace.StartSpan(ctx, "chainService.verifyBlkPreState") defer span.End() - if featureconfig.Get().NewStateMgmt { - preState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(b.ParentRoot)) + if !featureconfig.Get().DisableNewStateMgmt { + parentRoot := bytesutil.ToBytes32(b.ParentRoot) + if !s.stateGen.StateSummaryExists(ctx, parentRoot) { + return nil, errors.New("provided block root does not have block saved in the db") + } + preState, err := s.stateGen.StateByRoot(ctx, parentRoot) if err != nil { return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot) } @@ -265,7 +269,7 @@ func (s *Service) updateJustified(ctx context.Context, state *stateTrie.BeaconSt s.justifiedCheckpt = cpt } - if !featureconfig.Get().NewStateMgmt { + if featureconfig.Get().DisableNewStateMgmt { justifiedRoot := bytesutil.ToBytes32(cpt.Root) justifiedState := s.initSyncState[justifiedRoot] diff --git a/beacon-chain/blockchain/process_block_test.go b/beacon-chain/blockchain/process_block_test.go index 87df9d576a..2a8a8e86ac 100644 --- a/beacon-chain/blockchain/process_block_test.go +++ b/beacon-chain/blockchain/process_block_test.go @@ -9,12 +9,14 @@ import ( ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-ssz" + "github.com/prysmaticlabs/prysm/beacon-chain/cache" "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/db" testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray" stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/params" @@ -26,7 +28,10 @@ func TestStore_OnBlock(t *testing.T) { db := testDB.SetupDB(t) defer testDB.TeardownDB(t, db) - cfg := &Config{BeaconDB: db} + cfg := &Config{ + BeaconDB: db, + StateGen: stategen.New(db, cache.NewStateSummaryCache()), + } service, err := NewService(ctx, cfg) if err != nil { t.Fatal(err) @@ -41,10 +46,7 @@ func TestStore_OnBlock(t *testing.T) { if err != nil { t.Error(err) } - st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{}) - if err != nil { - t.Fatal(err) - } + st := testutil.NewBeaconState() if err := service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot); err != nil { t.Fatal(err) } @@ -60,10 +62,16 @@ func TestStore_OnBlock(t *testing.T) { if err != nil { t.Error(err) } + if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}); err != nil { + t.Fatal(err) + } if err := service.beaconDB.SaveState(ctx, st.Copy(), randomParentRoot); err != nil { t.Fatal(err) } randomParentRoot2 := roots[1] + if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2[:]}); err != nil { + t.Fatal(err) + } if err := service.beaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)); err != nil { t.Fatal(err) } @@ -144,8 +152,8 @@ func TestRemoveStateSinceLastFinalized(t 
*testing.T) { if err != nil { t.Fatal(err) } - s, err := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: uint64(i)}) - if err != nil { + s := testutil.NewBeaconState() + if err := s.SetSlot(uint64(i)); err != nil { t.Fatal(err) } if err := service.beaconDB.SaveState(ctx, s, r); err != nil { @@ -290,24 +298,32 @@ func TestShouldUpdateJustified_ReturnFalse(t *testing.T) { } } -func TestCachedPreState_CanGetFromCache(t *testing.T) { +func TestCachedPreState_CanGetFromStateSummary(t *testing.T) { ctx := context.Background() db := testDB.SetupDB(t) defer testDB.TeardownDB(t, db) - cfg := &Config{BeaconDB: db} + cfg := &Config{ + BeaconDB: db, + StateGen: stategen.New(db, cache.NewStateSummaryCache()), + } service, err := NewService(ctx, cfg) if err != nil { t.Fatal(err) } - s, err := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 1}) + s, err := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]}) if err != nil { t.Fatal(err) } r := [32]byte{'A'} b := &ethpb.BeaconBlock{Slot: 1, ParentRoot: r[:]} - service.initSyncState[r] = s + if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: r[:]}); err != nil { + t.Fatal(err) + } + if err := service.stateGen.SaveState(ctx, r, s); err != nil { + t.Fatal(err) + } received, err := service.verifyBlkPreState(ctx, b) if err != nil { @@ -323,7 +339,10 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) { db := testDB.SetupDB(t) defer testDB.TeardownDB(t, db) - cfg := &Config{BeaconDB: db} + cfg := &Config{ + BeaconDB: db, + StateGen: stategen.New(db, cache.NewStateSummaryCache()), + } service, err := NewService(ctx, cfg) if err != nil { t.Fatal(err) } @@ -334,8 +353,8 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) { service.finalizedCheckpt = &ethpb.Checkpoint{Root: r[:]} _, err = service.verifyBlkPreState(ctx, b) - wanted := "pre state of slot 1 does not exist" - if err == nil || err.Error() != wanted { + wanted := "provided block root does not have block saved in the db" + if err.Error() != wanted { t.Error("Did not get wanted error") } @@ -343,7 +362,10 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) { if err != nil { t.Fatal(err) } - if err := service.beaconDB.SaveState(ctx, s, r); err != nil { + if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: r[:]}); err != nil { + t.Fatal(err) + } + if err := service.stateGen.SaveState(ctx, r, s); err != nil { t.Fatal(err) } @@ -351,7 +373,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(s, received) { + if s.Slot() != received.Slot() { t.Error("cached state not the same") } } @@ -369,8 +391,8 @@ func TestSaveInitState_CanSaveDelete(t *testing.T) { for i := uint64(0); i < 64; i++ { b := &ethpb.BeaconBlock{Slot: i} - s, err := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: i}) - if err != nil { + s := testutil.NewBeaconState() + if err := s.SetSlot(i); err != nil { t.Fatal(err) } r, err := ssz.HashTreeRoot(b) @@ -385,10 +407,9 @@ func TestSaveInitState_CanSaveDelete(t *testing.T) { if err != nil { t.Fatal(err) } - - s, err := stateTrie.InitializeFromProto(&pb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{ - Epoch: 1, Root: finalizedRoot[:]}}) - if err != nil { + s := testutil.NewBeaconState() + if err := s.SetFinalizedCheckpoint(&ethpb.Checkpoint{ + Epoch: 1, Root: finalizedRoot[:]}); err != nil { t.Fatal(err) } if err := service.saveInitState(ctx, s); err != nil { @@ -426,18 +447,15 @@ func 
TestUpdateJustified_CouldUpdateBest(t *testing.T) { } service.justifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}} service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}} - st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{}) - if err != nil { - t.Fatal(err) - } + st := testutil.NewBeaconState() service.initSyncState[r] = st.Copy() if err := db.SaveState(ctx, st.Copy(), r); err != nil { t.Fatal(err) } // Could update - s, err := stateTrie.InitializeFromProto(&pb.BeaconState{CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: r[:]}}) - if err != nil { + s := testutil.NewBeaconState() + if err := s.SetCurrentJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: 1, Root: r[:]}); err != nil { t.Fatal(err) } if err := service.updateJustified(context.Background(), s); err != nil { @@ -480,10 +498,7 @@ func TestFilterBlockRoots_CanFilter(t *testing.T) { if err != nil { t.Fatal(err) } - st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{}) - if err != nil { - t.Fatal(err) - } + st := testutil.NewBeaconState() if err := service.beaconDB.SaveBlock(ctx, &ethpb.SignedBeaconBlock{Block: fBlock}); err != nil { t.Fatal(err) } @@ -526,10 +541,7 @@ func TestPersistCache_CanSave(t *testing.T) { if err != nil { t.Fatal(err) } - st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{}) - if err != nil { - t.Fatal(err) - } + st := testutil.NewBeaconState() for i := uint64(0); i < initialSyncCacheSize; i++ { if err := st.SetSlot(i); err != nil { @@ -583,10 +595,8 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) { if err != nil { t.Error(err) } - st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{}) - if err != nil { - t.Fatal(err) - } + st := testutil.NewBeaconState() + if err := service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot); err != nil { t.Fatal(err) } @@ -640,10 +650,8 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) { if err != nil { t.Error(err) } - st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{}) - if err != nil { - t.Fatal(err) - } + st := testutil.NewBeaconState() + if err := service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot); err != nil { t.Fatal(err) } @@ -732,10 +740,8 @@ func blockTree1(db db.Database, genesisRoot []byte) ([][]byte, error) { if err != nil { return nil, err } - st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{}) - if err != nil { - return nil, err - } + st := testutil.NewBeaconState() + for _, b := range []*ethpb.BeaconBlock{b0, b1, b3, b4, b5, b6, b7, b8} { if err := db.SaveBlock(context.Background(), &ethpb.SignedBeaconBlock{Block: b}); err != nil { return nil, err diff --git a/beacon-chain/blockchain/receive_attestation.go b/beacon-chain/blockchain/receive_attestation.go index 81587c1007..94fc60869e 100644 --- a/beacon-chain/blockchain/receive_attestation.go +++ b/beacon-chain/blockchain/receive_attestation.go @@ -91,7 +91,7 @@ func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) { atts := s.attPool.ForkchoiceAttestations() for _, a := range atts { var hasState bool - if featureconfig.Get().NewStateMgmt { + if !featureconfig.Get().DisableNewStateMgmt { hasState = s.stateGen.StateSummaryExists(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot)) } else { hasState = s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot)) && s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.Target.Root)) diff --git a/beacon-chain/blockchain/service.go b/beacon-chain/blockchain/service.go index 912719d330..828c0322a2 100644 --- 
a/beacon-chain/blockchain/service.go +++ b/beacon-chain/blockchain/service.go @@ -140,7 +140,7 @@ func (s *Service) Start() { } if beaconState == nil { - if featureconfig.Get().NewStateMgmt { + if !featureconfig.Get().DisableNewStateMgmt { beaconState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(cp.Root)) if err != nil { log.Fatalf("Could not fetch beacon state by root: %v", err) @@ -181,7 +181,7 @@ func (s *Service) Start() { s.prevFinalizedCheckpt = stateTrie.CopyCheckpoint(finalizedCheckpoint) s.resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint) - if !featureconfig.Get().NewStateMgmt { + if featureconfig.Get().DisableNewStateMgmt { if finalizedCheckpoint.Epoch > 1 { if err := s.pruneGarbageState(ctx, helpers.StartSlot(finalizedCheckpoint.Epoch)-params.BeaconConfig().SlotsPerEpoch); err != nil { log.WithError(err).Warn("Could not prune old states") @@ -192,7 +192,8 @@ func (s *Service) Start() { s.stateNotifier.StateFeed().Send(&feed.Event{ Type: statefeed.Initialized, Data: &statefeed.InitializedData{ - StartTime: s.genesisTime, + StartTime: s.genesisTime, + GenesisValidatorsRoot: beaconState.GenesisValidatorRoot(), }, }) } else { @@ -237,13 +238,15 @@ func (s *Service) Start() { // deposit contract, initializes the beacon chain's state, and kicks off the beacon chain. func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Time) { preGenesisState := s.chainStartFetcher.PreGenesisState() - if err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.chainStartFetcher.ChainStartEth1Data()); err != nil { + initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.chainStartFetcher.ChainStartEth1Data()) + if err != nil { log.Fatalf("Could not initialize beacon chain: %v", err) } s.stateNotifier.StateFeed().Send(&feed.Event{ Type: statefeed.Initialized, Data: &statefeed.InitializedData{ - StartTime: genesisTime, + StartTime: genesisTime, + GenesisValidatorsRoot: initializedState.GenesisValidatorRoot(), }, }) } @@ -255,7 +258,7 @@ func (s *Service) initializeBeaconChain( ctx context.Context, genesisTime time.Time, preGenesisState *stateTrie.BeaconState, - eth1data *ethpb.Eth1Data) error { + eth1data *ethpb.Eth1Data) (*stateTrie.BeaconState, error) { _, span := trace.StartSpan(context.Background(), "beacon-chain.Service.initializeBeaconChain") defer span.End() s.genesisTime = genesisTime @@ -263,11 +266,11 @@ func (s *Service) initializeBeaconChain( genesisState, err := state.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data) if err != nil { - return errors.Wrap(err, "could not initialize genesis state") + return nil, errors.Wrap(err, "could not initialize genesis state") } if err := s.saveGenesisData(ctx, genesisState); err != nil { - return errors.Wrap(err, "could not save genesis data") + return nil, errors.Wrap(err, "could not save genesis data") } log.Info("Initialized beacon chain genesis state") @@ -277,15 +280,15 @@ func (s *Service) initializeBeaconChain( // Update committee shuffled indices for genesis epoch. if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil { - return err + return nil, err } if err := helpers.UpdateProposerIndicesInCache(genesisState, 0 /* genesis epoch */); err != nil { - return err + return nil, err } s.opsService.SetGenesisTime(genesisState.GenesisTime()) - return nil + return genesisState, nil } // Stop the blockchain service's main event loop and associated goroutines. 
@@ -324,7 +327,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState *stateTrie.B if err := s.beaconDB.SaveBlock(ctx, genesisBlk); err != nil { return errors.Wrap(err, "could not save genesis block") } - if featureconfig.Get().NewStateMgmt { + if !featureconfig.Get().DisableNewStateMgmt { if err := s.stateGen.SaveState(ctx, genesisBlkRoot, genesisState); err != nil { return errors.Wrap(err, "could not save genesis state") } @@ -412,7 +415,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error { } finalizedRoot := bytesutil.ToBytes32(finalized.Root) var finalizedState *stateTrie.BeaconState - if featureconfig.Get().NewStateMgmt { + if !featureconfig.Get().DisableNewStateMgmt { finalizedRoot = s.beaconDB.LastArchivedIndexRoot(ctx) finalizedState, err = s.stateGen.Resume(ctx) if err != nil { diff --git a/beacon-chain/blockchain/service_test.go b/beacon-chain/blockchain/service_test.go index 1a1759c317..479dcea3e9 100644 --- a/beacon-chain/blockchain/service_test.go +++ b/beacon-chain/blockchain/service_test.go @@ -12,6 +12,7 @@ import ( "github.com/gogo/protobuf/proto" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" ssz "github.com/prysmaticlabs/go-ssz" + "github.com/prysmaticlabs/prysm/beacon-chain/cache" "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache" b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks" "github.com/prysmaticlabs/prysm/beacon-chain/core/feed" @@ -26,6 +27,7 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/p2p" "github.com/prysmaticlabs/prysm/beacon-chain/powchain" beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" protodb "github.com/prysmaticlabs/prysm/proto/beacon/db" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/event" @@ -144,12 +146,10 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service { P2p: &mockBroadcaster{}, StateNotifier: &mockBeaconNode{}, AttPool: attestations.NewPool(), + StateGen: stategen.New(beaconDB, cache.NewStateSummaryCache()), ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash), OpsService: opsService, } - if err != nil { - t.Fatalf("could not register blockchain service: %v", err) - } chainService, err := NewService(ctx, cfg) if err != nil { @@ -231,8 +231,8 @@ func TestChainStartStop_Initialized(t *testing.T) { if err := db.SaveBlock(ctx, genesisBlk); err != nil { t.Fatal(err) } - s, err := beaconstate.InitializeFromProto(&pb.BeaconState{Slot: 1}) - if err != nil { + s := testutil.NewBeaconState() + if err := s.SetSlot(1); err != nil { t.Fatal(err) } if err := db.SaveState(ctx, s, blkRoot); err != nil { @@ -289,14 +289,14 @@ func TestChainService_InitializeBeaconChain(t *testing.T) { DepositRoot: hashTreeRoot[:], DepositCount: uint64(len(deposits)), }) - if err != nil { - t.Fatal(err) + for _, deposit := range deposits { + genState, err = b.ProcessPreGenesisDeposit(ctx, genState, deposit) + if err != nil { + t.Fatal(err) + } } - genState, err = b.ProcessDeposits(ctx, genState, &ethpb.BeaconBlockBody{Deposits: deposits}) - if err != nil { - t.Fatal(err) - } - if err := bc.initializeBeaconChain(ctx, time.Unix(0, 0), genState, &ethpb.Eth1Data{ + + if _, err := bc.initializeBeaconChain(ctx, time.Unix(0, 0), genState, &ethpb.Eth1Data{ DepositRoot: hashTreeRoot[:], }); err != nil { t.Fatal(err) } @@ -336,8 +336,11 @@ func TestChainService_InitializeChainInfo(t *testing.T) { finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1 
headBlock := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: finalizedSlot, ParentRoot: genesisRoot[:]}} - headState, err := beaconstate.InitializeFromProto(&pb.BeaconState{Slot: finalizedSlot}) - if err != nil { + headState := testutil.NewBeaconState() + if err := headState.SetSlot(finalizedSlot); err != nil { + t.Fatal(err) + } + if err := headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]); err != nil { t.Fatal(err) } headRoot, err := ssz.HashTreeRoot(headBlock.Block) @@ -347,6 +350,9 @@ func TestChainService_InitializeChainInfo(t *testing.T) { if err := db.SaveState(ctx, headState, headRoot); err != nil { t.Fatal(err) } + if err := db.SaveState(ctx, headState, genesisRoot); err != nil { + t.Fatal(err) + } if err := db.SaveBlock(ctx, headBlock); err != nil { t.Fatal(err) } @@ -359,7 +365,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) { if err := db.SaveBlock(ctx, headBlock); err != nil { t.Fatal(err) } - c := &Service{beaconDB: db} + c := &Service{beaconDB: db, stateGen: stategen.New(db, cache.NewStateSummaryCache())} if err := c.initializeChainInfo(ctx); err != nil { t.Fatal(err) } @@ -398,17 +404,18 @@ func TestChainService_SaveHeadNoDB(t *testing.T) { ctx := context.Background() s := &Service{ beaconDB: db, + stateGen: stategen.New(db, cache.NewStateSummaryCache()), } b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}} r, err := ssz.HashTreeRoot(b) if err != nil { t.Fatal(err) } - state := &pb.BeaconState{} - newState, err := beaconstate.InitializeFromProto(state) - if err := s.beaconDB.SaveState(ctx, newState, r); err != nil { + newState := testutil.NewBeaconState() + if err := s.stateGen.SaveState(ctx, r, newState); err != nil { t.Fatal(err) } + if err := s.saveHeadNoDB(ctx, b, r); err != nil { t.Fatal(err) } @@ -439,9 +446,8 @@ func TestChainService_PruneOldStates(t *testing.T) { if err != nil { t.Fatal(err) } - state := &pb.BeaconState{Slot: uint64(i)} - newState, err := beaconstate.InitializeFromProto(state) - if err != nil { + newState := testutil.NewBeaconState() + if err := newState.SetSlot(uint64(i)); err != nil { t.Fatal(err) } if err := s.beaconDB.SaveState(ctx, newState, r); err != nil { diff --git a/beacon-chain/cache/BUILD.bazel b/beacon-chain/cache/BUILD.bazel index 9b508498b3..87454c83b8 100644 --- a/beacon-chain/cache/BUILD.bazel +++ b/beacon-chain/cache/BUILD.bazel @@ -41,6 +41,7 @@ go_test( srcs = [ "attestation_data_test.go", "checkpoint_state_test.go", "committee_fuzz_test.go", + "committee_ids_test.go", "committee_test.go", "eth1_data_test.go", "feature_flag_test.go", diff --git a/beacon-chain/cache/checkpoint_state_test.go b/beacon-chain/cache/checkpoint_state_test.go index 17ce73d417..8c64aaba5d 100644 --- a/beacon-chain/cache/checkpoint_state_test.go +++ b/beacon-chain/cache/checkpoint_state_test.go @@ -4,10 +4,12 @@ import ( "reflect" "testing" - "github.com/prysmaticlabs/prysm/shared/bytesutil" + "github.com/gogo/protobuf/proto" + "github.com/prysmaticlabs/prysm/shared/params" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" + "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/hashutil" ) @@ -48,7 +50,8 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) { cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)} st, err := stateTrie.InitializeFromProto(&pb.BeaconState{ - Slot: 64, + GenesisValidatorsRoot: 
params.BeaconConfig().ZeroHash[:], + Slot: 64, }) if err != nil { t.Fatal(err) @@ -72,7 +75,7 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) { if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(state.InnerStateUnsafe(), info1.State.InnerStateUnsafe()) { + if !proto.Equal(state.InnerStateUnsafe(), info1.State.InnerStateUnsafe()) { t.Error("incorrectly cached state") } diff --git a/beacon-chain/cache/committee_ids.go b/beacon-chain/cache/committee_ids.go index 85672884f1..82cbdfb4e7 100644 --- a/beacon-chain/cache/committee_ids.go +++ b/beacon-chain/cache/committee_ids.go @@ -4,39 +4,82 @@ import ( "sync" lru "github.com/hashicorp/golang-lru" + "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/sliceutil" ) type committeeIDs struct { - cache *lru.Cache - lock sync.RWMutex + attester *lru.Cache + attesterLock sync.RWMutex + aggregator *lru.Cache + aggregatorLock sync.RWMutex } -// CommitteeIDs for attestations. +// CommitteeIDs for attester and aggregator. var CommitteeIDs = newCommitteeIDs() func newCommitteeIDs() *committeeIDs { - cache, err := lru.New(8) + // Given a node can calculate committee assignments of current epoch and next epoch. + // Max size is set to 2 epoch length. + cacheSize := int(params.BeaconConfig().MaxCommitteesPerSlot * params.BeaconConfig().SlotsPerEpoch * 2) + attesterCache, err := lru.New(cacheSize) if err != nil { panic(err) } - return &committeeIDs{cache: cache} -} - -// AddIDs to the cache for attestation committees by epoch. -func (t *committeeIDs) AddIDs(indices []uint64, epoch uint64) { - t.lock.Lock() - defer t.lock.Unlock() - val, exists := t.cache.Get(epoch) - if exists { - indices = sliceutil.UnionUint64(append(indices, val.([]uint64)...)) + aggregatorCache, err := lru.New(cacheSize) + if err != nil { + panic(err) } - t.cache.Add(epoch, indices) + return &committeeIDs{attester: attesterCache, aggregator: aggregatorCache} } -// GetIDs from the cache for attestation committees by epoch. -func (t *committeeIDs) GetIDs(epoch uint64) []uint64 { - val, exists := t.cache.Get(epoch) +// AddAttesterCommiteeID adds committee ID for subscribing subnet for the attester of a given slot. +func (c *committeeIDs) AddAttesterCommiteeID(slot uint64, committeeID uint64) { + c.attesterLock.Lock() + defer c.attesterLock.Unlock() + + ids := []uint64{committeeID} + val, exists := c.attester.Get(slot) + if exists { + ids = sliceutil.UnionUint64(append(val.([]uint64), ids...)) + } + c.attester.Add(slot, ids) +} + +// GetAttesterCommitteeIDs gets the committee ID for subscribing subnet for attester of the slot. +func (c *committeeIDs) GetAttesterCommitteeIDs(slot uint64) []uint64 { + c.attesterLock.RLock() + defer c.attesterLock.RUnlock() + + val, exists := c.attester.Get(slot) + if !exists { + return nil + } + if v, ok := val.([]uint64); ok { + return v + } + return nil +} + +// AddAggregatorCommiteeID adds committee ID for subscribing subnet for the aggregator of a given slot. +func (c *committeeIDs) AddAggregatorCommiteeID(slot uint64, committeeID uint64) { + c.aggregatorLock.Lock() + defer c.aggregatorLock.Unlock() + + ids := []uint64{committeeID} + val, exists := c.aggregator.Get(slot) + if exists { + ids = sliceutil.UnionUint64(append(val.([]uint64), ids...)) + } + c.aggregator.Add(slot, ids) +} + +// GetAggregatorCommitteeIDs gets the committee ID for subscribing subnet for aggregator of the slot. 
+func (c *committeeIDs) GetAggregatorCommitteeIDs(slot uint64) []uint64 { + c.aggregatorLock.RLock() + defer c.aggregatorLock.RUnlock() + + val, exists := c.aggregator.Get(slot) if !exists { return []uint64{} } diff --git a/beacon-chain/cache/committee_ids_test.go b/beacon-chain/cache/committee_ids_test.go new file mode 100644 index 0000000000..3ae97533a8 --- /dev/null +++ b/beacon-chain/cache/committee_ids_test.go @@ -0,0 +1,56 @@ +package cache + +import ( + "reflect" + "testing" +) + +func TestCommitteeIDCache_RoundTrip(t *testing.T) { + c := newCommitteeIDs() + slot := uint64(100) + committeeIDs := c.GetAggregatorCommitteeIDs(slot) + if len(committeeIDs) != 0 { + t.Errorf("Empty cache returned an object: %v", committeeIDs) + } + + c.AddAggregatorCommiteeID(slot, 1) + res := c.GetAggregatorCommitteeIDs(slot) + if !reflect.DeepEqual(res, []uint64{1}) { + t.Error("Expected equal value to return from cache") + } + + c.AddAggregatorCommiteeID(slot, 2) + res = c.GetAggregatorCommitteeIDs(slot) + if !reflect.DeepEqual(res, []uint64{1, 2}) { + t.Error("Expected equal value to return from cache") + } + + c.AddAggregatorCommiteeID(slot, 3) + res = c.GetAggregatorCommitteeIDs(slot) + if !reflect.DeepEqual(res, []uint64{1, 2, 3}) { + t.Error("Expected equal value to return from cache") + } + + committeeIDs = c.GetAttesterCommitteeIDs(slot) + if len(committeeIDs) != 0 { + t.Errorf("Empty cache returned an object: %v", committeeIDs) + } + + c.AddAttesterCommiteeID(slot, 11) + res = c.GetAttesterCommitteeIDs(slot) + if !reflect.DeepEqual(res, []uint64{11}) { + t.Error("Expected equal value to return from cache") + } + + c.AddAttesterCommiteeID(slot, 22) + res = c.GetAttesterCommitteeIDs(slot) + if !reflect.DeepEqual(res, []uint64{11, 22}) { + t.Error("Expected equal value to return from cache") + } + + c.AddAttesterCommiteeID(slot, 33) + res = c.GetAttesterCommitteeIDs(slot) + if !reflect.DeepEqual(res, []uint64{11, 22, 33}) { + t.Error("Expected equal value to return from cache") + } +} diff --git a/beacon-chain/core/blocks/BUILD.bazel b/beacon-chain/core/blocks/BUILD.bazel index 33810c18b8..66a0c9c248 100644 --- a/beacon-chain/core/blocks/BUILD.bazel +++ b/beacon-chain/core/blocks/BUILD.bazel @@ -42,6 +42,7 @@ go_test( srcs = [ "block_operations_fuzz_test.go", "block_operations_test.go", + "block_regression_test.go", "block_test.go", "eth1_data_test.go", ], @@ -49,6 +50,7 @@ go_test( deps = [ "//beacon-chain/core/helpers:go_default_library", "//beacon-chain/state:go_default_library", + "//beacon-chain/state/stateutil:go_default_library", "//proto/beacon/p2p/v1:go_default_library", "//shared/attestationutil:go_default_library", "//shared/bls:go_default_library", diff --git a/beacon-chain/core/blocks/block_operations.go b/beacon-chain/core/blocks/block_operations.go index be7b8e3e06..c64349e2d4 100644 --- a/beacon-chain/core/blocks/block_operations.go +++ b/beacon-chain/core/blocks/block_operations.go @@ -35,50 +35,8 @@ var log = logrus.WithField("prefix", "blocks") var eth1DataCache = cache.NewEth1DataVoteCache() -// ErrSigFailedToVerify returns when a signature of a block object(ie attestation, slashing, exit... etc) -// failed to verify. 
-var ErrSigFailedToVerify = errors.New("signature did not verify") - -func verifySigningRoot(obj interface{}, pub []byte, signature []byte, domain uint64) error { - publicKey, err := bls.PublicKeyFromBytes(pub) - if err != nil { - return errors.Wrap(err, "could not convert bytes to public key") - } - sig, err := bls.SignatureFromBytes(signature) - if err != nil { - return errors.Wrap(err, "could not convert bytes to signature") - } - root, err := ssz.HashTreeRoot(obj) - if err != nil { - return errors.Wrap(err, "could not get signing root") - } - if !sig.Verify(root[:], publicKey, domain) { - return ErrSigFailedToVerify - } - return nil -} - -func verifyBlockRoot(blk *ethpb.BeaconBlock, pub []byte, signature []byte, domain uint64) error { - publicKey, err := bls.PublicKeyFromBytes(pub) - if err != nil { - return errors.Wrap(err, "could not convert bytes to public key") - } - sig, err := bls.SignatureFromBytes(signature) - if err != nil { - return errors.Wrap(err, "could not convert bytes to signature") - } - root, err := stateutil.BlockRoot(blk) - if err != nil { - return errors.Wrap(err, "could not get signing root") - } - if !sig.Verify(root[:], publicKey, domain) { - return ErrSigFailedToVerify - } - return nil -} - // Deprecated: This method uses deprecated ssz.SigningRoot. -func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, pub []byte, signature []byte, domain uint64) error { +func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, pub []byte, signature []byte, domain []byte) error { publicKey, err := bls.PublicKeyFromBytes(pub) if err != nil { return errors.Wrap(err, "could not convert bytes to public key") @@ -91,13 +49,21 @@ func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, pub []byte, signature if err != nil { return errors.Wrap(err, "could not get signing root") } - if !sig.Verify(root[:], publicKey, domain) { - return ErrSigFailedToVerify + sigRoot := &pb.SigningRoot{ + ObjectRoot: root[:], + Domain: domain, + } + ctrRoot, err := ssz.HashTreeRoot(sigRoot) + if err != nil { + return errors.Wrap(err, "could not get container root") + } + if !sig.Verify(ctrRoot[:], publicKey) { + return helpers.ErrSigFailedToVerify } return nil } -func verifySignature(signedData []byte, pub []byte, signature []byte, domain uint64) error { +func verifySignature(signedData []byte, pub []byte, signature []byte, domain []byte) error { publicKey, err := bls.PublicKeyFromBytes(pub) if err != nil { return errors.Wrap(err, "could not convert bytes to public key") @@ -106,8 +72,16 @@ func verifySignature(signedData []byte, pub []byte, signature []byte, domain uin if err != nil { return errors.Wrap(err, "could not convert bytes to signature") } - if !sig.Verify(signedData, publicKey, domain) { - return ErrSigFailedToVerify + ctr := &pb.SigningRoot{ + ObjectRoot: signedData, + Domain: domain, + } + root, err := ssz.HashTreeRoot(ctr) + if err != nil { + return errors.Wrap(err, "could not hash container") + } + if !sig.Verify(root[:], publicKey) { + return helpers.ErrSigFailedToVerify } return nil } @@ -119,7 +93,7 @@ func verifySignature(signedData []byte, pub []byte, signature []byte, domain uin // Official spec definition: // def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None: // state.eth1_data_votes.append(body.eth1_data) -// if state.eth1_data_votes.count(body.eth1_data) * 2 > SLOTS_PER_ETH1_VOTING_PERIOD: +// if state.eth1_data_votes.count(body.eth1_data) * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH: // state.latest_eth1_data = body.eth1_data func 
ProcessEth1DataInBlock(beaconState *stateTrie.BeaconState, block *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) { if beaconState == nil { @@ -170,7 +144,6 @@ func Eth1DataHasEnoughSupport(beaconState *stateTrie.BeaconState, data *ethpb.Et if err != nil { return false, errors.Wrap(err, "could not retrieve eth1 data vote cache") } - } if voteCount == 0 { for _, vote := range beaconState.Eth1DataVotes() { @@ -193,7 +166,8 @@ func Eth1DataHasEnoughSupport(beaconState *stateTrie.BeaconState, data *ethpb.Et // If 50+% majority converged on the same eth1data, then it has enough support to update the // state. - return voteCount*2 > params.BeaconConfig().SlotsPerEth1VotingPeriod, nil + support := params.BeaconConfig().EpochsPerEth1VotingPeriod * params.BeaconConfig().SlotsPerEpoch + return voteCount*2 > support, nil } // ProcessBlockHeader validates a block by its header. @@ -203,6 +177,8 @@ func Eth1DataHasEnoughSupport(beaconState *stateTrie.BeaconState, data *ethpb.Et // def process_block_header(state: BeaconState, block: BeaconBlock) -> None: // # Verify that the slots match // assert block.slot == state.slot +// # Verify that proposer index is the correct index +// assert block.proposer_index == get_beacon_proposer_index(state) // # Verify that the parent matches // assert block.parent_root == signing_root(state.latest_block_header) // # Save current block as the new latest block @@ -227,28 +203,29 @@ func ProcessBlockHeader( return nil, err } - idx, err := helpers.BeaconProposerIndex(beaconState) - if err != nil { - return nil, err - } - proposer, err := beaconState.ValidatorAtIndex(idx) - if err != nil { - return nil, err - } - // Verify proposer signature. - currentEpoch := helpers.SlotToEpoch(beaconState.Slot()) - domain, err := helpers.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer) - if err != nil { + if err := VerifyBlockHeaderSignature(beaconState, block); err != nil { return nil, err } - if err := verifyBlockRoot(block.Block, proposer.PublicKey, block.Signature, domain); err != nil { - return nil, ErrSigFailedToVerify - } return beaconState, nil } +// VerifyBlockHeaderSignature verifies the proposer signature of a beacon block. +func VerifyBlockHeaderSignature(beaconState *stateTrie.BeaconState, block *ethpb.SignedBeaconBlock) error { + proposer, err := beaconState.ValidatorAtIndex(block.Block.ProposerIndex) + if err != nil { + return err + } + + currentEpoch := helpers.SlotToEpoch(beaconState.Slot()) + domain, err := helpers.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot()) + if err != nil { + return err + } + return helpers.VerifySigningRoot(block.Block, proposer.PublicKey, block.Signature, domain) +} + // ProcessBlockHeaderNoVerify validates a block by its header but skips proposer // signature verification. 
// @@ -259,6 +236,8 @@ func ProcessBlockHeader( // def process_block_header(state: BeaconState, block: BeaconBlock) -> None: // # Verify that the slots match // assert block.slot == state.slot +// # Verify that proposer index is the correct index +// assert block.proposer_index == get_beacon_proposer_index(state) // # Verify that the parent matches // assert block.parent_root == signing_root(state.latest_block_header) // # Save current block as the new latest block @@ -280,7 +259,14 @@ func ProcessBlockHeaderNoVerify( return nil, errors.New("nil block") } if beaconState.Slot() != block.Slot { - return nil, fmt.Errorf("state slot: %d is different then block slot: %d", beaconState.Slot(), block.Slot) + return nil, fmt.Errorf("state slot: %d is different than block slot: %d", beaconState.Slot(), block.Slot) + } + idx, err := helpers.BeaconProposerIndex(beaconState) + if err != nil { + return nil, err + } + if block.ProposerIndex != idx { + return nil, fmt.Errorf("proposer index: %d is different than calculated: %d", block.ProposerIndex, idx) } parentRoot, err := stateutil.BlockHeaderRoot(beaconState.LatestBlockHeader()) if err != nil { @@ -293,10 +279,6 @@ func ProcessBlockHeaderNoVerify( block.ParentRoot, parentRoot) } - idx, err := helpers.BeaconProposerIndex(beaconState) - if err != nil { - return nil, err - } proposer, err := beaconState.ValidatorAtIndex(idx) if err != nil { return nil, err @@ -310,10 +292,11 @@ func ProcessBlockHeaderNoVerify( return nil, err } if err := beaconState.SetLatestBlockHeader(ðpb.BeaconBlockHeader{ - Slot: block.Slot, - ParentRoot: block.ParentRoot, - StateRoot: params.BeaconConfig().ZeroHash[:], - BodyRoot: bodyRoot[:], + Slot: block.Slot, + ProposerIndex: block.ProposerIndex, + ParentRoot: block.ParentRoot, + StateRoot: params.BeaconConfig().ZeroHash[:], + BodyRoot: bodyRoot[:], }); err != nil { return nil, err } @@ -353,7 +336,7 @@ func ProcessRandao( buf := make([]byte, 32) binary.LittleEndian.PutUint64(buf, currentEpoch) - domain, err := helpers.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainRandao) + domain, err := helpers.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorRoot()) if err != nil { return nil, err } @@ -434,17 +417,14 @@ func ProcessProposerSlashings( if slashing == nil { return nil, errors.New("nil proposer slashings in block body") } - if int(slashing.ProposerIndex) >= beaconState.NumValidators() { - return nil, fmt.Errorf("invalid proposer index given in slashing %d", slashing.ProposerIndex) - } if err = VerifyProposerSlashing(beaconState, slashing); err != nil { return nil, errors.Wrapf(err, "could not verify proposer slashing %d", idx) } beaconState, err = v.SlashValidator( - beaconState, slashing.ProposerIndex, 0, /* proposer is whistleblower */ + beaconState, slashing.Header_1.Header.ProposerIndex, 0, /* proposer is whistleblower */ ) if err != nil { - return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.ProposerIndex) + return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex) } } return beaconState, nil @@ -455,30 +435,33 @@ func VerifyProposerSlashing( beaconState *stateTrie.BeaconState, slashing *ethpb.ProposerSlashing, ) error { - proposer, err := beaconState.ValidatorAtIndex(slashing.ProposerIndex) - if err != nil { - return err - } if slashing.Header_1 == nil || slashing.Header_1.Header == nil || slashing.Header_2 == nil || slashing.Header_2.Header == nil { return errors.New("nil 
header cannot be verified") } if slashing.Header_1.Header.Slot != slashing.Header_2.Header.Slot { return fmt.Errorf("mismatched header slots, received %d == %d", slashing.Header_1.Header.Slot, slashing.Header_2.Header.Slot) } + if slashing.Header_1.Header.ProposerIndex != slashing.Header_2.Header.ProposerIndex { + return fmt.Errorf("mismatched indices, received %d == %d", slashing.Header_1.Header.ProposerIndex, slashing.Header_2.Header.ProposerIndex) + } if proto.Equal(slashing.Header_1, slashing.Header_2) { return errors.New("expected slashing headers to differ") } + proposer, err := beaconState.ValidatorAtIndex(slashing.Header_1.Header.ProposerIndex) + if err != nil { + return err + } if !helpers.IsSlashableValidator(proposer, helpers.SlotToEpoch(beaconState.Slot())) { return fmt.Errorf("validator with key %#x is not slashable", proposer.PublicKey) } // Using headerEpoch1 here because both of the headers should have the same epoch. - domain, err := helpers.Domain(beaconState.Fork(), helpers.SlotToEpoch(slashing.Header_1.Header.Slot), params.BeaconConfig().DomainBeaconProposer) + domain, err := helpers.Domain(beaconState.Fork(), helpers.SlotToEpoch(slashing.Header_1.Header.Slot), params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot()) if err != nil { return err } headers := []*ethpb.SignedBeaconBlockHeader{slashing.Header_1, slashing.Header_2} for _, header := range headers { - if err := verifySigningRoot(header.Header, proposer.PublicKey, header.Signature, domain); err != nil { + if err := helpers.VerifySigningRoot(header.Header, proposer.PublicKey, header.Signature, domain); err != nil { return errors.Wrap(err, "could not verify beacon block header") } } @@ -596,7 +579,7 @@ func slashableAttesterIndices(slashing *ethpb.AttesterSlashing) []uint64 { return nil } indices1 := slashing.Attestation_1.AttestingIndices - indices2 := slashing.Attestation_1.AttestingIndices + indices2 := slashing.Attestation_2.AttestingIndices return sliceutil.IntersectionUint64(indices1, indices2) } @@ -827,30 +810,25 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState *stateTrie.Beacon return errors.New("attesting indices is not uniquely sorted") } - domain, err := helpers.Domain(beaconState.Fork(), indexedAtt.Data.Target.Epoch, params.BeaconConfig().DomainBeaconAttester) + domain, err := helpers.Domain(beaconState.Fork(), indexedAtt.Data.Target.Epoch, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot()) if err != nil { return err } - var pubkey *bls.PublicKey + pubkeys := []*bls.PublicKey{} if len(indices) > 0 { - pubkeyAtIdx := beaconState.PubkeyAtIndex(indices[0]) - pubkey, err = bls.PublicKeyFromBytes(pubkeyAtIdx[:]) - if err != nil { - return errors.Wrap(err, "could not deserialize validator public key") - } - for i := 1; i < len(indices); i++ { - pubkeyAtIdx = beaconState.PubkeyAtIndex(indices[i]) + for i := 0; i < len(indices); i++ { + pubkeyAtIdx := beaconState.PubkeyAtIndex(indices[i]) pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx[:]) if err != nil { return errors.Wrap(err, "could not deserialize validator public key") } - pubkey.Aggregate(pk) + pubkeys = append(pubkeys, pk) } } - messageHash, err := ssz.HashTreeRoot(indexedAtt.Data) + messageHash, err := helpers.ComputeSigningRoot(indexedAtt.Data, domain) if err != nil { - return errors.Wrap(err, "could not tree hash att data") + return errors.Wrap(err, "could not get signing root of object") } sig, err := bls.SignatureFromBytes(indexedAtt.Signature) @@ -859,8 +837,8 @@ func 
VerifyIndexedAttestation(ctx context.Context, beaconState *stateTrie.Beacon } voted := len(indices) > 0 - if voted && !sig.Verify(messageHash[:], pubkey, domain) { - return ErrSigFailedToVerify + if voted && !sig.FastAggregateVerify(pubkeys, messageHash) { + return helpers.ErrSigFailedToVerify } return nil } @@ -1002,7 +980,10 @@ func ProcessDeposit( index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey)) numVals := beaconState.NumValidators() if !ok { - domain := bls.ComputeDomain(params.BeaconConfig().DomainDeposit) + domain, err := helpers.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil) + if err != nil { + return nil, err + } depositSig := deposit.Data.Signature if err := verifyDepositDataSigningRoot(deposit.Data, pubKey, depositSig, domain); err != nil { // Ignore this error as in the spec pseudo code. @@ -1112,7 +1093,7 @@ func ProcessVoluntaryExits( if err != nil { return nil, err } - if err := VerifyExit(val, beaconState.Slot(), beaconState.Fork(), exit); err != nil { + if err := VerifyExit(val, beaconState.Slot(), beaconState.Fork(), exit, beaconState.GenesisValidatorRoot()); err != nil { return nil, errors.Wrapf(err, "could not verify exit %d", idx) } beaconState, err = v.InitiateValidatorExit(beaconState, exit.Exit.ValidatorIndex) @@ -1163,7 +1144,7 @@ func ProcessVoluntaryExitsNoVerify( // # Verify signature // domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, exit.epoch) // assert bls_verify(validator.pubkey, signing_root(exit), exit.signature, domain) -func VerifyExit(validator *ethpb.Validator, currentSlot uint64, fork *pb.Fork, signed *ethpb.SignedVoluntaryExit) error { +func VerifyExit(validator *ethpb.Validator, currentSlot uint64, fork *pb.Fork, signed *ethpb.SignedVoluntaryExit, genesisRoot []byte) error { if signed == nil || signed.Exit == nil { return errors.New("nil exit") } @@ -1190,12 +1171,12 @@ func VerifyExit(validator *ethpb.Validator, currentSlot uint64, fork *pb.Fork, s validator.ActivationEpoch+params.BeaconConfig().PersistentCommitteePeriod, ) } - domain, err := helpers.Domain(fork, exit.Epoch, params.BeaconConfig().DomainVoluntaryExit) + domain, err := helpers.Domain(fork, exit.Epoch, params.BeaconConfig().DomainVoluntaryExit, genesisRoot) if err != nil { return err } - if err := verifySigningRoot(exit, validator.PublicKey, signed.Signature, domain); err != nil { - return ErrSigFailedToVerify + if err := helpers.VerifySigningRoot(exit, validator.PublicKey, signed.Signature, domain); err != nil { + return helpers.ErrSigFailedToVerify } return nil } diff --git a/beacon-chain/core/blocks/block_operations_fuzz_test.go b/beacon-chain/core/blocks/block_operations_fuzz_test.go index 9f404ddb4f..a83c586297 100644 --- a/beacon-chain/core/blocks/block_operations_fuzz_test.go +++ b/beacon-chain/core/blocks/block_operations_fuzz_test.go @@ -4,12 +4,11 @@ import ( "context" "testing" + fuzz "github.com/google/gofuzz" eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" - - fuzz "github.com/google/gofuzz" - "github.com/prysmaticlabs/prysm/shared/bytesutil" + "github.com/prysmaticlabs/prysm/shared/params" //"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks" beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state" @@ -54,35 +53,6 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) { } } -func TestFuzzverifySigningRoot_10000(t *testing.T) { - fuzzer := fuzz.NewWithSeed(0) - state := 
ðereum_beacon_p2p_v1.BeaconState{} - pubkey := [48]byte{} - sig := [96]byte{} - domain := [4]byte{} - p := []byte{} - s := []byte{} - d := uint64(0) - for i := 0; i < 10000; i++ { - fuzzer.Fuzz(state) - fuzzer.Fuzz(&pubkey) - fuzzer.Fuzz(&sig) - fuzzer.Fuzz(&domain) - fuzzer.Fuzz(state) - fuzzer.Fuzz(&p) - fuzzer.Fuzz(&s) - fuzzer.Fuzz(&d) - domain := bytesutil.FromBytes4(domain[:]) - if err := verifySigningRoot(state, pubkey[:], sig[:], domain); err != nil { - t.Log(err) - } - if err := verifySigningRoot(state, p, s, d); err != nil { - t.Log(err) - } - - } -} - func TestFuzzverifyDepositDataSigningRoot_10000(t *testing.T) { fuzzer := fuzz.NewWithSeed(0) ba := []byte{} @@ -91,7 +61,7 @@ func TestFuzzverifyDepositDataSigningRoot_10000(t *testing.T) { domain := [4]byte{} p := []byte{} s := []byte{} - d := uint64(0) + d := []byte{} for i := 0; i < 10000; i++ { fuzzer.Fuzz(&ba) fuzzer.Fuzz(&pubkey) @@ -100,13 +70,13 @@ func TestFuzzverifyDepositDataSigningRoot_10000(t *testing.T) { fuzzer.Fuzz(&p) fuzzer.Fuzz(&s) fuzzer.Fuzz(&d) - domain := bytesutil.FromBytes4(domain[:]) - if err := verifySignature(ba, pubkey[:], sig[:], domain); err != nil { + if err := verifySignature(ba, pubkey[:], sig[:], domain[:]); err != nil { t.Log(err) } if err := verifySignature(ba, p, s, d); err != nil { t.Log(err) } + } } @@ -525,7 +495,7 @@ func TestFuzzVerifyExit_10000(t *testing.T) { fuzzer.Fuzz(val) fuzzer.Fuzz(fork) fuzzer.Fuzz(&slot) - if err := VerifyExit(val, slot, fork, ve); err != nil { + if err := VerifyExit(val, slot, fork, ve, params.BeaconConfig().ZeroHash[:]); err != nil { t.Log(err) } } diff --git a/beacon-chain/core/blocks/block_operations_test.go b/beacon-chain/core/blocks/block_operations_test.go index 3ae40b6cbf..80a138c899 100644 --- a/beacon-chain/core/blocks/block_operations_test.go +++ b/beacon-chain/core/blocks/block_operations_test.go @@ -17,9 +17,11 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/attestationutil" "github.com/prysmaticlabs/prysm/shared/bls" + "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/testutil" "github.com/prysmaticlabs/prysm/shared/trieutil" @@ -37,7 +39,7 @@ func TestProcessBlockHeader_WrongProposerSig(t *testing.T) { t.Fatal(err) } - lbhsr, err := ssz.HashTreeRoot(beaconState.LatestBlockHeader()) + lbhdr, err := stateutil.BlockHeaderRoot(beaconState.LatestBlockHeader()) if err != nil { t.Error(err) } @@ -49,22 +51,23 @@ func TestProcessBlockHeader_WrongProposerSig(t *testing.T) { block := ðpb.SignedBeaconBlock{ Block: ðpb.BeaconBlock{ - Slot: 0, + ProposerIndex: proposerIdx, + Slot: 0, Body: ðpb.BeaconBlockBody{ RandaoReveal: []byte{'A', 'B', 'C'}, }, - ParentRoot: lbhsr[:], + ParentRoot: lbhdr[:], }, } - signingRoot, err := ssz.HashTreeRoot(block.Block) - if err != nil { - t.Fatalf("Failed to get signing root of block: %v", err) - } - dt, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconProposer) + dt, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot()) if err != nil { t.Fatalf("Failed to get domain form state: %v", err) } - blockSig := 
privKeys[proposerIdx+1].Sign(signingRoot[:], dt) + signingRoot, err := helpers.ComputeSigningRoot(block.Block, dt) + if err != nil { + t.Fatalf("Failed to get signing root of block: %v", err) + } + blockSig := privKeys[proposerIdx+1].Sign(signingRoot[:]) block.Signature = blockSig.Marshal()[:] _, err = blocks.ProcessBlockHeader(beaconState, block) @@ -103,12 +106,16 @@ func TestProcessBlockHeader_DifferentSlots(t *testing.T) { t.Error(err) } currentEpoch := helpers.CurrentEpoch(state) - dt, err := helpers.Domain(state.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer) + dt, err := helpers.Domain(state.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, state.GenesisValidatorRoot()) if err != nil { t.Fatalf("Failed to get domain form state: %v", err) } priv := bls.RandKey() - blockSig := priv.Sign([]byte("hello"), dt) + root, err := helpers.ComputeSigningRoot([]byte("hello"), dt) + if err != nil { + t.Error(err) + } + blockSig := priv.Sign(root[:]) validators[5896].PublicKey = priv.PublicKey().Marshal() block := ðpb.SignedBeaconBlock{ Block: ðpb.BeaconBlock{ @@ -122,7 +129,7 @@ func TestProcessBlockHeader_DifferentSlots(t *testing.T) { } _, err = blocks.ProcessBlockHeader(state, block) - want := "is different then block slot" + want := "is different than block slot" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("Expected %v, received %v", want, err) } @@ -152,16 +159,21 @@ func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) { } currentEpoch := helpers.CurrentEpoch(state) - dt, err := helpers.Domain(state.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer) + dt, err := helpers.Domain(state.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, state.GenesisValidatorRoot()) if err != nil { t.Fatalf("Failed to get domain form state: %v", err) } priv := bls.RandKey() - blockSig := priv.Sign([]byte("hello"), dt) + root, err := helpers.ComputeSigningRoot([]byte("hello"), dt) + if err != nil { + t.Error(err) + } + blockSig := priv.Sign(root[:]) validators[5896].PublicKey = priv.PublicKey().Marshal() block := ðpb.SignedBeaconBlock{ Block: ðpb.BeaconBlock{ - Slot: 0, + ProposerIndex: 5669, + Slot: 0, Body: ðpb.BeaconBlockBody{ RandaoReveal: []byte{'A', 'B', 'C'}, }, @@ -200,21 +212,26 @@ func TestProcessBlockHeader_SlashedProposer(t *testing.T) { t.Fatal(err) } - parentRoot, err := ssz.HashTreeRoot(state.LatestBlockHeader()) + parentRoot, err := stateutil.BlockHeaderRoot(state.LatestBlockHeader()) if err != nil { t.Error(err) } currentEpoch := helpers.CurrentEpoch(state) - dt, err := helpers.Domain(state.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer) + dt, err := helpers.Domain(state.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, state.GenesisValidatorRoot()) if err != nil { t.Fatalf("Failed to get domain form state: %v", err) } priv := bls.RandKey() - blockSig := priv.Sign([]byte("hello"), dt) + root, err := helpers.ComputeSigningRoot([]byte("hello"), dt) + if err != nil { + t.Error(err) + } + blockSig := priv.Sign(root[:]) validators[12683].PublicKey = priv.PublicKey().Marshal() block := ðpb.SignedBeaconBlock{ Block: ðpb.BeaconBlock{ - Slot: 0, + ProposerIndex: 5669, + Slot: 0, Body: ðpb.BeaconBlockBody{ RandaoReveal: []byte{'A', 'B', 'C'}, }, @@ -253,30 +270,32 @@ func TestProcessBlockHeader_OK(t *testing.T) { t.Fatal(err) } - latestBlockSignedRoot, err := ssz.HashTreeRoot(state.LatestBlockHeader()) + latestBlockSignedRoot, err := 
stateutil.BlockHeaderRoot(state.LatestBlockHeader()) if err != nil { t.Error(err) } + currentEpoch := helpers.CurrentEpoch(state) - dt, err := helpers.Domain(state.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer) + dt, err := helpers.Domain(state.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, state.GenesisValidatorRoot()) if err != nil { t.Fatalf("Failed to get domain form state: %v", err) } priv := bls.RandKey() block := ðpb.SignedBeaconBlock{ Block: ðpb.BeaconBlock{ - Slot: 0, + ProposerIndex: 5669, + Slot: 0, Body: ðpb.BeaconBlockBody{ RandaoReveal: []byte{'A', 'B', 'C'}, }, ParentRoot: latestBlockSignedRoot[:], }, } - signingRoot, err := ssz.HashTreeRoot(block.Block) + signingRoot, err := helpers.ComputeSigningRoot(block.Block, dt) if err != nil { t.Fatalf("Failed to get signing root of block: %v", err) } - blockSig := priv.Sign(signingRoot[:], dt) + blockSig := priv.Sign(signingRoot[:]) block.Signature = blockSig.Marshal()[:] bodyRoot, err := ssz.HashTreeRoot(block.Block.Body) if err != nil { @@ -301,10 +320,11 @@ func TestProcessBlockHeader_OK(t *testing.T) { var zeroHash [32]byte nsh := newState.LatestBlockHeader() expected := ðpb.BeaconBlockHeader{ - Slot: block.Block.Slot, - ParentRoot: latestBlockSignedRoot[:], - BodyRoot: bodyRoot[:], - StateRoot: zeroHash[:], + ProposerIndex: 5669, + Slot: block.Block.Slot, + ParentRoot: latestBlockSignedRoot[:], + BodyRoot: bodyRoot[:], + StateRoot: zeroHash[:], } if !proto.Equal(nsh, expected) { t.Errorf("Expected %v, received %v", expected, nsh) @@ -321,12 +341,16 @@ func TestProcessRandao_IncorrectProposerFailsVerification(t *testing.T) { epoch := uint64(0) buf := make([]byte, 32) binary.LittleEndian.PutUint64(buf, epoch) - domain, err := helpers.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainRandao) + domain, err := helpers.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorRoot()) + if err != nil { + t.Fatal(err) + } + root, err := ssz.HashTreeRoot(&pb.SigningRoot{ObjectRoot: buf, Domain: domain}) if err != nil { t.Fatal(err) } // We make the previous validator's index sign the message instead of the proposer. 
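	// The reveal is now signed over the SigningRoot container root computed
	// above (object root plus 32-byte domain) instead of over the raw epoch
	// bytes with a uint64 domain. A rough sketch of the helper this pattern
	// factors into elsewhere in this diff (helpers.ComputeSigningRoot),
	// assuming the pb.SigningRoot message used above:
	//
	//	func computeSigningRoot(object interface{}, domain []byte) ([32]byte, error) {
	//		objRoot, err := ssz.HashTreeRoot(object)
	//		if err != nil {
	//			return [32]byte{}, err
	//		}
	//		return ssz.HashTreeRoot(&pb.SigningRoot{ObjectRoot: objRoot[:], Domain: domain})
	//	}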
- epochSignature := privKeys[proposerIdx-1].Sign(buf, domain) + epochSignature := privKeys[proposerIdx-1].Sign(root[:]) block := ðpb.BeaconBlock{ Body: ðpb.BeaconBlockBody{ RandaoReveal: epochSignature.Marshal(), @@ -391,7 +415,9 @@ func TestProcessEth1Data_SetsCorrectly(t *testing.T) { }, }, } - for i := uint64(0); i < params.BeaconConfig().SlotsPerEth1VotingPeriod; i++ { + + period := params.BeaconConfig().EpochsPerEth1VotingPeriod * params.BeaconConfig().SlotsPerEpoch + for i := uint64(0); i < period; i++ { beaconState, err = blocks.ProcessEth1DataInBlock(beaconState, block) if err != nil { t.Fatal(err) @@ -415,15 +441,16 @@ func TestProcessProposerSlashings_UnmatchedHeaderSlots(t *testing.T) { currentSlot := uint64(0) slashings := []*ethpb.ProposerSlashing{ { - ProposerIndex: 1, Header_1: ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: params.BeaconConfig().SlotsPerEpoch + 1, + ProposerIndex: 1, + Slot: params.BeaconConfig().SlotsPerEpoch + 1, }, }, Header_2: ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 0, + ProposerIndex: 1, + Slot: 0, }, }, }, @@ -449,15 +476,16 @@ func TestProcessProposerSlashings_SameHeaders(t *testing.T) { currentSlot := uint64(0) slashings := []*ethpb.ProposerSlashing{ { - ProposerIndex: 1, Header_1: ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 0, + ProposerIndex: 1, + Slot: 0, }, }, Header_2: ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 0, + ProposerIndex: 1, + Slot: 0, }, }, }, @@ -490,16 +518,17 @@ func TestProcessProposerSlashings_ValidatorNotSlashable(t *testing.T) { currentSlot := uint64(0) slashings := []*ethpb.ProposerSlashing{ { - ProposerIndex: 0, Header_1: ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 0, + ProposerIndex: 0, + Slot: 0, }, Signature: []byte("A"), }, Header_2: ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 0, + ProposerIndex: 0, + Slot: 0, }, Signature: []byte("B"), }, @@ -535,39 +564,40 @@ func TestProcessProposerSlashings_AppliesCorrectStatus(t *testing.T) { beaconState, privKeys := testutil.DeterministicGenesisState(t, 100) proposerIdx := uint64(1) - domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconProposer) + domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } header1 := ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 0, - StateRoot: []byte("A"), + ProposerIndex: proposerIdx, + Slot: 0, + StateRoot: []byte("A"), }, } - signingRoot, err := ssz.HashTreeRoot(header1.Header) + signingRoot, err := helpers.ComputeSigningRoot(header1.Header, domain) if err != nil { t.Errorf("Could not get signing root of beacon block header: %v", err) } - header1.Signature = privKeys[proposerIdx].Sign(signingRoot[:], domain).Marshal()[:] + header1.Signature = privKeys[proposerIdx].Sign(signingRoot[:]).Marshal()[:] header2 := ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 0, - StateRoot: []byte("B"), + ProposerIndex: proposerIdx, + Slot: 0, + StateRoot: []byte("B"), }, } - signingRoot, err = ssz.HashTreeRoot(header2.Header) + signingRoot, err = helpers.ComputeSigningRoot(header2.Header, domain) if err != nil { t.Errorf("Could not get signing root of beacon block header: %v", err) } - header2.Signature = privKeys[proposerIdx].Sign(signingRoot[:], domain).Marshal()[:] + header2.Signature = privKeys[proposerIdx].Sign(signingRoot[:]).Marshal()[:] 
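	// The proposer index now lives inside each BeaconBlockHeader, so the
	// slashing below no longer carries a separate ProposerIndex field;
	// VerifyProposerSlashing requires Header_1 and Header_2 to declare the
	// same proposer and slashes Header_1.Header.ProposerIndex.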
slashings := []*ethpb.ProposerSlashing{ { - ProposerIndex: proposerIdx, - Header_1: header1, - Header_2: header2, + Header_1: header1, + Header_2: header2, }, } @@ -706,16 +736,16 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) { }, AttestingIndices: []uint64{0, 1}, } - hashTreeRoot, err := ssz.HashTreeRoot(att1.Data) - if err != nil { - t.Error(err) - } - domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester) + domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } - sig0 := privKeys[0].Sign(hashTreeRoot[:], domain) - sig1 := privKeys[1].Sign(hashTreeRoot[:], domain) + signingRoot, err := helpers.ComputeSigningRoot(att1.Data, domain) + if err != nil { + t.Errorf("Could not get signing root of beacon block header: %v", err) + } + sig0 := privKeys[0].Sign(signingRoot[:]) + sig1 := privKeys[1].Sign(signingRoot[:]) aggregateSig := bls.AggregateSignatures([]*bls.Signature{sig0, sig1}) att1.Signature = aggregateSig.Marshal()[:] @@ -726,12 +756,12 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) { }, AttestingIndices: []uint64{0, 1}, } - hashTreeRoot, err = ssz.HashTreeRoot(att2.Data) + signingRoot, err = helpers.ComputeSigningRoot(att2.Data, domain) if err != nil { - t.Error(err) + t.Errorf("Could not get signing root of beacon block header: %v", err) } - sig0 = privKeys[0].Sign(hashTreeRoot[:], domain) - sig1 = privKeys[1].Sign(hashTreeRoot[:], domain) + sig0 = privKeys[0].Sign(signingRoot[:]) + sig1 = privKeys[1].Sign(signingRoot[:]) aggregateSig = bls.AggregateSignatures([]*bls.Signature{sig0, sig1}) att2.Signature = aggregateSig.Marshal()[:] @@ -1020,17 +1050,17 @@ func TestProcessAttestations_OK(t *testing.T) { if err != nil { t.Error(err) } - hashTreeRoot, err := ssz.HashTreeRoot(att.Data) - if err != nil { - t.Error(err) - } - domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester) + domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } + hashTreeRoot, err := helpers.ComputeSigningRoot(att.Data, domain) + if err != nil { + t.Error(err) + } sigs := make([]*bls.Signature, len(attestingIndices)) for i, indice := range attestingIndices { - sig := privKeys[indice].Sign(hashTreeRoot[:], domain) + sig := privKeys[indice].Sign(hashTreeRoot[:]) sigs[i] = sig } att.Signature = bls.AggregateSignatures(sigs).Marshal()[:] @@ -1054,7 +1084,7 @@ func TestProcessAttestations_OK(t *testing.T) { func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) { beaconState, privKeys := testutil.DeterministicGenesisState(t, 100) - domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester) + domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } @@ -1088,13 +1118,13 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) { if err != nil { t.Fatal(err) } - hashTreeRoot, err := ssz.HashTreeRoot(att1.Data) + hashTreeRoot, err := helpers.ComputeSigningRoot(att1.Data, domain) if err != nil { - t.Fatal(err) + t.Error(err) } sigs := make([]*bls.Signature, len(attestingIndices1)) for i, indice := range attestingIndices1 { - sig := privKeys[indice].Sign(hashTreeRoot[:], domain) + sig := 
privKeys[indice].Sign(hashTreeRoot[:]) sigs[i] = sig } att1.Signature = bls.AggregateSignatures(sigs).Marshal()[:] @@ -1116,13 +1146,13 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) { if err != nil { t.Fatal(err) } - hashTreeRoot, err = ssz.HashTreeRoot(data) + hashTreeRoot, err = helpers.ComputeSigningRoot(data, domain) if err != nil { - t.Fatal(err) + t.Error(err) } sigs = make([]*bls.Signature, len(attestingIndices2)) for i, indice := range attestingIndices2 { - sig := privKeys[indice].Sign(hashTreeRoot[:], domain) + sig := privKeys[indice].Sign(hashTreeRoot[:]) sigs[i] = sig } att2.Signature = bls.AggregateSignatures(sigs).Marshal()[:] @@ -1135,7 +1165,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) { func TestProcessAggregatedAttestation_NoOverlappingBits(t *testing.T) { beaconState, privKeys := testutil.DeterministicGenesisState(t, 300) - domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester) + domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } @@ -1170,13 +1200,13 @@ func TestProcessAggregatedAttestation_NoOverlappingBits(t *testing.T) { if err != nil { t.Fatal(err) } - hashTreeRoot, err := ssz.HashTreeRoot(data) + hashTreeRoot, err := helpers.ComputeSigningRoot(data, domain) if err != nil { - t.Fatal(err) + t.Error(err) } sigs := make([]*bls.Signature, len(attestingIndices1)) for i, indice := range attestingIndices1 { - sig := privKeys[indice].Sign(hashTreeRoot[:], domain) + sig := privKeys[indice].Sign(hashTreeRoot[:]) sigs[i] = sig } att1.Signature = bls.AggregateSignatures(sigs).Marshal()[:] @@ -1197,13 +1227,13 @@ func TestProcessAggregatedAttestation_NoOverlappingBits(t *testing.T) { if err != nil { t.Fatal(err) } - hashTreeRoot, err = ssz.HashTreeRoot(data) + hashTreeRoot, err = helpers.ComputeSigningRoot(data, domain) if err != nil { - t.Fatal(err) + t.Error(err) } sigs = make([]*bls.Signature, len(attestingIndices2)) for i, indice := range attestingIndices2 { - sig := privKeys[indice].Sign(hashTreeRoot[:], domain) + sig := privKeys[indice].Sign(hashTreeRoot[:]) sigs[i] = sig } att2.Signature = bls.AggregateSignatures(sigs).Marshal()[:] @@ -1412,18 +1442,17 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) { } for _, tt := range tests { - domain, err := helpers.Domain(state.Fork(), tt.attestation.Data.Target.Epoch, params.BeaconConfig().DomainBeaconAttester) + domain, err := helpers.Domain(state.Fork(), tt.attestation.Data.Target.Epoch, params.BeaconConfig().DomainBeaconAttester, state.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } - root, err := ssz.HashTreeRoot(tt.attestation.Data) + root, err := helpers.ComputeSigningRoot(tt.attestation.Data, domain) if err != nil { - t.Errorf("Could not find the ssz root: %v", err) - continue + t.Error(err) } var sig []*bls.Signature for _, idx := range tt.attestation.AttestingIndices { - validatorSig := keys[idx].Sign(root[:], domain) + validatorSig := keys[idx].Sign(root[:]) sig = append(sig, validatorSig) } aggSig := bls.AggregateSignatures(sig) @@ -1604,11 +1633,11 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T) Amount: 1000, }, } - sr, err := ssz.HashTreeRoot(deposit.Data) + sr, err := helpers.ComputeSigningRoot(deposit.Data, bytesutil.ToBytes(3, 8)) if err != nil { t.Fatal(err) } - sig := sk.Sign(sr[:], 3) + sig := sk.Sign(sr[:]) deposit.Data.Signature = sig.Marshal() leaf, err := 
ssz.HashTreeRoot(deposit.Data) if err != nil { @@ -1915,15 +1944,15 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) { if err := state.UpdateValidatorAtIndex(0, val); err != nil { t.Fatal(err) } - signingRoot, err := ssz.HashTreeRoot(exits[0].Exit) - if err != nil { - t.Error(err) - } - domain, err := helpers.Domain(state.Fork(), helpers.CurrentEpoch(state), params.BeaconConfig().DomainVoluntaryExit) + domain, err := helpers.Domain(state.Fork(), helpers.CurrentEpoch(state), params.BeaconConfig().DomainVoluntaryExit, state.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } - sig := priv.Sign(signingRoot[:], domain) + signingRoot, err := helpers.ComputeSigningRoot(exits[0].Exit, domain) + if err != nil { + t.Error(err) + } + sig := priv.Sign(signingRoot[:]) exits[0].Signature = sig.Marshal() block := ðpb.BeaconBlock{ Body: ðpb.BeaconBlockBody{ diff --git a/beacon-chain/core/blocks/block_regression_test.go b/beacon-chain/core/blocks/block_regression_test.go new file mode 100644 index 0000000000..a52a8f8a4b --- /dev/null +++ b/beacon-chain/core/blocks/block_regression_test.go @@ -0,0 +1,113 @@ +package blocks_test + +import ( + "context" + "testing" + + ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" + "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" + "github.com/prysmaticlabs/prysm/shared/bls" + "github.com/prysmaticlabs/prysm/shared/params" + "github.com/prysmaticlabs/prysm/shared/testutil" +) + +func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) { + testutil.ResetCache() + beaconState, privKeys := testutil.DeterministicGenesisState(t, 5500) + for _, vv := range beaconState.Validators() { + vv.WithdrawableEpoch = 1 * params.BeaconConfig().SlotsPerEpoch + } + // This set of indices is very similar to the one from our sapphire testnet + // when close to 100 validators were incorrectly slashed. The set is from 0 -5500, + // instead of 55000 as it would take too long to generate a state. + setA := []uint64{21, 92, 236, 244, 281, 321, 510, 524, + 538, 682, 828, 858, 913, 920, 922, 959, 1176, 1207, + 1222, 1229, 1354, 1394, 1436, 1454, 1510, 1550, + 1552, 1576, 1645, 1704, 1842, 1967, 2076, 2111, 2134, 2307, + 2343, 2354, 2417, 2524, 2532, 2555, 2740, 2749, 2759, 2762, + 2800, 2809, 2824, 2987, 3110, 3125, 3559, 3583, 3599, 3608, + 3657, 3685, 3723, 3756, 3759, 3761, 3820, 3826, 3979, 4030, + 4141, 4170, 4205, 4247, 4257, 4479, 4492, 4569, 5091, + } + // Only 2800 is the slashable index. 
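	// (The slashable set is the intersection of the two attestations'
	// attesting indices, i.e. sliceutil.IntersectionUint64(setA, setB);
	// 2800 is the only index present in both setA above and setB below,
	// so it is the only validator expected to end up slashed.)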
+ setB := []uint64{1361, 1438, 2383, 2800} + expectedSlashedVal := 2800 + + root1 := [32]byte{'d', 'o', 'u', 'b', 'l', 'e', '1'} + att1 := ðpb.IndexedAttestation{ + Data: ðpb.AttestationData{ + Source: ðpb.Checkpoint{Epoch: 0}, + Target: ðpb.Checkpoint{Epoch: 0, Root: root1[:]}, + }, + AttestingIndices: setA, + } + domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot()) + if err != nil { + t.Fatal(err) + } + signingRoot, err := helpers.ComputeSigningRoot(att1.Data, domain) + if err != nil { + t.Errorf("Could not get signing root of beacon block header: %v", err) + } + aggSigs := []*bls.Signature{} + for _, index := range setA { + sig := privKeys[index].Sign(signingRoot[:]) + aggSigs = append(aggSigs, sig) + } + aggregateSig := bls.AggregateSignatures(aggSigs) + att1.Signature = aggregateSig.Marshal()[:] + + root2 := [32]byte{'d', 'o', 'u', 'b', 'l', 'e', '2'} + att2 := ðpb.IndexedAttestation{ + Data: ðpb.AttestationData{ + Source: ðpb.Checkpoint{Epoch: 0}, + Target: ðpb.Checkpoint{Epoch: 0, Root: root2[:]}, + }, + AttestingIndices: setB, + } + signingRoot, err = helpers.ComputeSigningRoot(att2.Data, domain) + if err != nil { + t.Errorf("Could not get signing root of beacon block header: %v", err) + } + aggSigs = []*bls.Signature{} + for _, index := range setB { + sig := privKeys[index].Sign(signingRoot[:]) + aggSigs = append(aggSigs, sig) + } + aggregateSig = bls.AggregateSignatures(aggSigs) + att2.Signature = aggregateSig.Marshal()[:] + + slashings := []*ethpb.AttesterSlashing{ + { + Attestation_1: att1, + Attestation_2: att2, + }, + } + + currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch + if err := beaconState.SetSlot(currentSlot); err != nil { + t.Fatal(err) + } + + block := ðpb.BeaconBlock{ + Body: ðpb.BeaconBlockBody{ + AttesterSlashings: slashings, + }, + } + + newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, block.Body) + if err != nil { + t.Fatal(err) + } + newRegistry := newState.Validators() + if !newRegistry[expectedSlashedVal].Slashed { + t.Errorf("Validator with index %d was not slashed despite performing a double vote", expectedSlashedVal) + } + + for idx, val := range newRegistry { + if val.Slashed && idx != expectedSlashedVal { + t.Errorf("validator with index: %d was unintentionally slashed", idx) + } + } +} diff --git a/beacon-chain/core/blocks/eth1_data_test.go b/beacon-chain/core/blocks/eth1_data_test.go index f5648d80a9..cd1406a795 100644 --- a/beacon-chain/core/blocks/eth1_data_test.go +++ b/beacon-chain/core/blocks/eth1_data_test.go @@ -11,6 +11,17 @@ import ( "github.com/prysmaticlabs/prysm/shared/params" ) +func FakeDeposits(n int) []*ethpb.Eth1Data { + deposits := make([]*ethpb.Eth1Data, n) + for i := 0; i < n; i++ { + deposits[i] = ðpb.Eth1Data{ + DepositCount: 1, + DepositRoot: []byte("root"), + } + } + return deposits +} + func TestEth1DataHasEnoughSupport(t *testing.T) { tests := []struct { stateVotes []*ethpb.Eth1Data @@ -19,21 +30,7 @@ func TestEth1DataHasEnoughSupport(t *testing.T) { votingPeriodLength uint64 }{ { - stateVotes: []*ethpb.Eth1Data{ - { - DepositCount: 1, - DepositRoot: []byte("root"), - }, { - DepositCount: 1, - DepositRoot: []byte("root"), - }, { - DepositCount: 1, - DepositRoot: []byte("root"), - }, { - DepositCount: 1, - DepositRoot: []byte("root"), - }, - }, + stateVotes: FakeDeposits(4 * int(params.BeaconConfig().SlotsPerEpoch)), data: ðpb.Eth1Data{ DepositCount: 1, DepositRoot: []byte("root"), @@ -41,21 +38,7 @@ func 
TestEth1DataHasEnoughSupport(t *testing.T) { hasSupport: true, votingPeriodLength: 7, }, { - stateVotes: []*ethpb.Eth1Data{ - { - DepositCount: 1, - DepositRoot: []byte("root"), - }, { - DepositCount: 1, - DepositRoot: []byte("root"), - }, { - DepositCount: 1, - DepositRoot: []byte("root"), - }, { - DepositCount: 1, - DepositRoot: []byte("root"), - }, - }, + stateVotes: FakeDeposits(4 * int(params.BeaconConfig().SlotsPerEpoch)), data: ðpb.Eth1Data{ DepositCount: 1, DepositRoot: []byte("root"), @@ -63,21 +46,7 @@ func TestEth1DataHasEnoughSupport(t *testing.T) { hasSupport: false, votingPeriodLength: 8, }, { - stateVotes: []*ethpb.Eth1Data{ - { - DepositCount: 1, - DepositRoot: []byte("root"), - }, { - DepositCount: 1, - DepositRoot: []byte("root"), - }, { - DepositCount: 1, - DepositRoot: []byte("root"), - }, { - DepositCount: 1, - DepositRoot: []byte("root"), - }, - }, + stateVotes: FakeDeposits(4 * int(params.BeaconConfig().SlotsPerEpoch)), data: ðpb.Eth1Data{ DepositCount: 1, DepositRoot: []byte("root"), @@ -90,7 +59,7 @@ func TestEth1DataHasEnoughSupport(t *testing.T) { for i, tt := range tests { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { c := params.BeaconConfig() - c.SlotsPerEth1VotingPeriod = tt.votingPeriodLength + c.EpochsPerEth1VotingPeriod = tt.votingPeriodLength params.OverrideBeaconConfig(c) s, err := beaconstate.InitializeFromProto(&pb.BeaconState{ @@ -106,8 +75,7 @@ func TestEth1DataHasEnoughSupport(t *testing.T) { if result != tt.hasSupport { t.Errorf( - "blocks.Eth1DataHasEnoughSupport(%+v, %+v) = %t, wanted %t", - s, + "blocks.Eth1DataHasEnoughSupport(%+v) = %t, wanted %t", tt.data, result, tt.hasSupport, diff --git a/beacon-chain/core/blocks/spectest/block_processing_test.go b/beacon-chain/core/blocks/spectest/block_processing_test.go index 5268e2ff95..6f5b613b56 100644 --- a/beacon-chain/core/blocks/spectest/block_processing_test.go +++ b/beacon-chain/core/blocks/spectest/block_processing_test.go @@ -98,8 +98,8 @@ func runBlockProcessingTest(t *testing.T, config string) { t.Fatalf("Failed to unmarshal: %v", err) } - if !proto.Equal(beaconState.CloneInnerState(), postBeaconState) { - diff, _ := messagediff.PrettyDiff(beaconState.CloneInnerState(), postBeaconState) + if !proto.Equal(beaconState.InnerStateUnsafe(), postBeaconState) { + diff, _ := messagediff.PrettyDiff(beaconState.InnerStateUnsafe(), postBeaconState) t.Log(diff) t.Fatal("Post state does not match expected") } diff --git a/beacon-chain/core/epoch/epoch_processing.go b/beacon-chain/core/epoch/epoch_processing.go index 94dff9123b..811e0ec4c7 100644 --- a/beacon-chain/core/epoch/epoch_processing.go +++ b/beacon-chain/core/epoch/epoch_processing.go @@ -194,15 +194,18 @@ func ProcessSlashings(state *stateTrie.BeaconState) (*stateTrie.BeaconState, err // current_epoch = get_current_epoch(state) // next_epoch = Epoch(current_epoch + 1) // # Reset eth1 data votes -// if (state.slot + 1) % SLOTS_PER_ETH1_VOTING_PERIOD == 0: +// if next_epoch % EPOCHS_PER_ETH1_VOTING_PERIOD == 0: // state.eth1_data_votes = [] // # Update effective balances with hysteresis // for index, validator in enumerate(state.validators): // balance = state.balances[index] -// HALF_INCREMENT = EFFECTIVE_BALANCE_INCREMENT // 2 -// if balance < validator.effective_balance or validator.effective_balance + 3 * HALF_INCREMENT < balance: -// validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) -// # Set active index root +// HYSTERESIS_INCREMENT = EFFECTIVE_BALANCE_INCREMENT // 
HYSTERESIS_QUOTIENT +// DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER +// UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER +// if ( +// balance + DOWNWARD_THRESHOLD < validator.effective_balance +// or validator.effective_balance + UPWARD_THRESHOLD < balance +// ): // index_epoch = Epoch(next_epoch + ACTIVATION_EXIT_DELAY) // index_root_position = index_epoch % EPOCHS_PER_HISTORICAL_VECTOR // indices_list = List[ValidatorIndex, VALIDATOR_REGISTRY_LIMIT](get_active_validator_indices(state, index_epoch)) @@ -228,7 +231,7 @@ func ProcessFinalUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconState, nextEpoch := currentEpoch + 1 // Reset ETH1 data votes. - if (state.Slot()+1)%params.BeaconConfig().SlotsPerEth1VotingPeriod == 0 { + if nextEpoch%params.BeaconConfig().EpochsPerEth1VotingPeriod == 0 { if err := state.SetEth1DataVotes([]*ethpb.Eth1Data{}); err != nil { return nil, err } @@ -244,8 +247,11 @@ func ProcessFinalUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconState, return false, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(state.Balances())) } balance := bals[idx] - halfInc := params.BeaconConfig().EffectiveBalanceIncrement / 2 - if balance < val.EffectiveBalance || val.EffectiveBalance+3*halfInc < balance { + hysteresisInc := params.BeaconConfig().EffectiveBalanceIncrement / params.BeaconConfig().HysteresisQuotient + downwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisDownwardMultiplier + upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier + + if balance+downwardThreshold < val.EffectiveBalance || val.EffectiveBalance+upwardThreshold < balance { val.EffectiveBalance = params.BeaconConfig().MaxEffectiveBalance if val.EffectiveBalance > balance-balance%params.BeaconConfig().EffectiveBalanceIncrement { val.EffectiveBalance = balance - balance%params.BeaconConfig().EffectiveBalanceIncrement diff --git a/beacon-chain/core/epoch/epoch_processing_test.go b/beacon-chain/core/epoch/epoch_processing_test.go index 329e037926..055f770deb 100644 --- a/beacon-chain/core/epoch/epoch_processing_test.go +++ b/beacon-chain/core/epoch/epoch_processing_test.go @@ -317,10 +317,12 @@ func TestProcessFinalUpdates_CanProcess(t *testing.T) { t.Fatal(err) } balances := s.Balances() - balances[0] = 29 * 1e9 + balances[0] = 31.75 * 1e9 + balances[1] = 31.74 * 1e9 if err := s.SetBalances(balances); err != nil { t.Fatal(err) } + slashings := s.Slashings() slashings[ce] = 0 if err := s.SetSlashings(slashings); err != nil { @@ -337,9 +339,12 @@ func TestProcessFinalUpdates_CanProcess(t *testing.T) { } // Verify effective balance is correctly updated. - if newS.Validators()[0].EffectiveBalance != 29*1e9 { + if newS.Validators()[0].EffectiveBalance != params.BeaconConfig().MaxEffectiveBalance { t.Errorf("effective balance incorrectly updated, got %d", s.Validators()[0].EffectiveBalance) } + if newS.Validators()[1].EffectiveBalance != 31*1e9 { + t.Errorf("effective balance incorrectly updated, got %d", s.Validators()[1].EffectiveBalance) + } // Verify slashed balances correctly updated. 
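	// The effective-balance expectations above follow from the hysteresis
	// thresholds: with EFFECTIVE_BALANCE_INCREMENT = 1 ETH,
	// HYSTERESIS_QUOTIENT = 4, HYSTERESIS_DOWNWARD_MULTIPLIER = 1 and
	// HYSTERESIS_UPWARD_MULTIPLIER = 5, and assuming the pre-state effective
	// balance is MaxEffectiveBalance, the downward threshold is 0.25 ETH:
	// 31.75 ETH is not more than 0.25 ETH below 32 ETH and keeps
	// MaxEffectiveBalance, while 31.74 ETH crosses the threshold and is
	// rounded down to 31 ETH.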
if newS.Slashings()[ce] != newS.Slashings()[ne] { diff --git a/beacon-chain/core/epoch/precompute/reward_penalty.go b/beacon-chain/core/epoch/precompute/reward_penalty.go index 1b34cbb142..b3a076992b 100644 --- a/beacon-chain/core/epoch/precompute/reward_penalty.go +++ b/beacon-chain/core/epoch/precompute/reward_penalty.go @@ -83,7 +83,9 @@ func attestationDelta(state *stateTrie.BeaconState, bp *Balance, v *Validator) ( // Process source reward / penalty if v.IsPrevEpochAttester && !v.IsSlashed { - r += br * bp.PrevEpochAttesters / bp.CurrentEpoch + inc := params.BeaconConfig().EffectiveBalanceIncrement + rewardNumerator := br * bp.PrevEpochAttesters / inc + r += rewardNumerator / (bp.CurrentEpoch / inc) proposerReward := br / params.BeaconConfig().ProposerRewardQuotient maxAtteserReward := br - proposerReward r += maxAtteserReward / v.InclusionDistance @@ -93,14 +95,18 @@ func attestationDelta(state *stateTrie.BeaconState, bp *Balance, v *Validator) ( // Process target reward / penalty if v.IsPrevEpochTargetAttester && !v.IsSlashed { - r += br * bp.PrevEpochTargetAttesters / bp.CurrentEpoch + inc := params.BeaconConfig().EffectiveBalanceIncrement + rewardNumerator := br * bp.PrevEpochAttesters / inc + r += rewardNumerator / (bp.CurrentEpoch / inc) } else { p += br } // Process head reward / penalty if v.IsPrevEpochHeadAttester && !v.IsSlashed { - r += br * bp.PrevEpochHeadAttesters / bp.CurrentEpoch + inc := params.BeaconConfig().EffectiveBalanceIncrement + rewardNumerator := br * bp.PrevEpochAttesters / inc + r += rewardNumerator / (bp.CurrentEpoch / inc) } else { p += br } diff --git a/beacon-chain/core/feed/state/events.go b/beacon-chain/core/feed/state/events.go index 24b9e54ebb..6789267002 100644 --- a/beacon-chain/core/feed/state/events.go +++ b/beacon-chain/core/feed/state/events.go @@ -31,4 +31,6 @@ type ChainStartedData struct { type InitializedData struct { // StartTime is the time at which the chain started. StartTime time.Time + // GenesisValidatorsRoot represents ssz.HashTreeRoot(state.validators). 
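	// It is carried to subscribers so that signing domains (helpers.Domain
	// above) can commit to the genesis validator set as well as the fork
	// version.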
+ GenesisValidatorsRoot []byte } diff --git a/beacon-chain/core/helpers/BUILD.bazel b/beacon-chain/core/helpers/BUILD.bazel index ff4df28b37..15c3639705 100644 --- a/beacon-chain/core/helpers/BUILD.bazel +++ b/beacon-chain/core/helpers/BUILD.bazel @@ -9,6 +9,7 @@ go_library( "randao.go", "rewards_penalties.go", "shuffle.go", + "signing_root.go", "slot_epoch.go", "validators.go", ], @@ -17,9 +18,12 @@ go_library( "//beacon-chain:__subpackages__", "//shared/benchutil/benchmark_files:__subpackages__", "//shared/testutil:__pkg__", + "//shared/keystore:__pkg__", + "//shared/interop:__pkg__", "//slasher:__subpackages__", "//tools:__subpackages__", "//validator:__subpackages__", + "//endtoend/evaluators:__pkg__", ], deps = [ "//beacon-chain/cache:go_default_library", @@ -48,6 +52,7 @@ go_test( "randao_test.go", "rewards_penalties_test.go", "shuffle_test.go", + "signing_root_test.go", "slot_epoch_test.go", "validators_test.go", ], @@ -64,6 +69,7 @@ go_test( "//shared/params:go_default_library", "//shared/sliceutil:go_default_library", "//shared/testutil:go_default_library", + "@com_github_google_gofuzz//:go_default_library", "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", "@com_github_prysmaticlabs_go_bitfield//:go_default_library", "@com_github_prysmaticlabs_go_ssz//:go_default_library", diff --git a/beacon-chain/core/helpers/attestation.go b/beacon-chain/core/helpers/attestation.go index fc33ac1982..ff653efd95 100644 --- a/beacon-chain/core/helpers/attestation.go +++ b/beacon-chain/core/helpers/attestation.go @@ -5,7 +5,6 @@ import ( "github.com/pkg/errors" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-ssz" stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" "github.com/prysmaticlabs/prysm/shared/bls" "github.com/prysmaticlabs/prysm/shared/hashutil" @@ -124,15 +123,15 @@ func AggregateAttestation(a1 *ethpb.Attestation, a2 *ethpb.Attestation) (*ethpb. // domain = get_domain(state, DOMAIN_BEACON_ATTESTER, compute_epoch_at_slot(slot)) // return bls_sign(privkey, hash_tree_root(slot), domain) func SlotSignature(state *stateTrie.BeaconState, slot uint64, privKey *bls.SecretKey) (*bls.Signature, error) { - d, err := Domain(state.Fork(), CurrentEpoch(state), params.BeaconConfig().DomainBeaconAttester) + d, err := Domain(state.Fork(), CurrentEpoch(state), params.BeaconConfig().DomainBeaconAttester, state.GenesisValidatorRoot()) if err != nil { return nil, err } - s, err := ssz.HashTreeRoot(slot) + s, err := ComputeSigningRoot(slot, d) if err != nil { return nil, err } - return privKey.Sign(s[:], d), nil + return privKey.Sign(s[:]), nil } // IsAggregator returns true if the signature is from the input validator. 
The committee diff --git a/beacon-chain/core/helpers/attestation_test.go b/beacon-chain/core/helpers/attestation_test.go index 6debb168f8..b81496a93e 100644 --- a/beacon-chain/core/helpers/attestation_test.go +++ b/beacon-chain/core/helpers/attestation_test.go @@ -202,7 +202,7 @@ func TestAggregateAttestations(t *testing.T) { atts := make([]*ethpb.Attestation, len(bl)) for i, b := range bl { sk := bls.RandKey() - sig := sk.Sign([]byte("dummy_test_data"), 0 /*domain*/) + sig := sk.Sign([]byte("dummy_test_data")) atts[i] = ðpb.Attestation{ AggregationBits: b, Data: nil, @@ -258,15 +258,15 @@ func TestSlotSignature_Verify(t *testing.T) { t.Fatal(err) } - domain, err := helpers.Domain(state.Fork(), helpers.CurrentEpoch(state), params.BeaconConfig().DomainBeaconAttester) + domain, err := helpers.Domain(state.Fork(), helpers.CurrentEpoch(state), params.BeaconConfig().DomainBeaconAttester, state.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } - msg, err := ssz.HashTreeRoot(slot) + msg, err := helpers.ComputeSigningRoot(slot, domain) if err != nil { t.Fatal(err) } - if !sig.Verify(msg[:], pub, domain) { + if !sig.Verify(msg[:], pub) { t.Error("Could not verify slot signature") } } @@ -278,7 +278,7 @@ func TestIsAggregator_True(t *testing.T) { if err != nil { t.Fatal(err) } - sig := privKeys[0].Sign([]byte{}, 0) + sig := privKeys[0].Sign([]byte{'A'}) agg, err := helpers.IsAggregator(uint64(len(committee)), sig.Marshal()) if err != nil { t.Fatal(err) @@ -297,7 +297,7 @@ func TestIsAggregator_False(t *testing.T) { if err != nil { t.Fatal(err) } - sig := privKeys[0].Sign([]byte{}, 0) + sig := privKeys[0].Sign([]byte{'A'}) agg, err := helpers.IsAggregator(uint64(len(committee)), sig.Marshal()) if err != nil { t.Fatal(err) @@ -310,11 +310,11 @@ func TestIsAggregator_False(t *testing.T) { func TestAggregateSignature_True(t *testing.T) { pubkeys := make([]*bls.PublicKey, 0, 100) atts := make([]*ethpb.Attestation, 0, 100) - msg := []byte("hello") + msg := bytesutil.ToBytes32([]byte("hello")) for i := 0; i < 100; i++ { priv := bls.RandKey() pub := priv.PublicKey() - sig := priv.Sign(msg[:], 0) + sig := priv.Sign(msg[:]) pubkeys = append(pubkeys, pub) att := ðpb.Attestation{Signature: sig.Marshal()} atts = append(atts, att) @@ -323,7 +323,7 @@ func TestAggregateSignature_True(t *testing.T) { if err != nil { t.Fatal(err) } - if !aggSig.VerifyAggregateCommon(pubkeys, bytesutil.ToBytes32(msg), 0) { + if !aggSig.FastAggregateVerify(pubkeys, msg) { t.Error("Signature did not verify") } } @@ -335,7 +335,7 @@ func TestAggregateSignature_False(t *testing.T) { for i := 0; i < 100; i++ { priv := bls.RandKey() pub := priv.PublicKey() - sig := priv.Sign(msg[:], 0) + sig := priv.Sign(msg[:]) pubkeys = append(pubkeys, pub) att := ðpb.Attestation{Signature: sig.Marshal()} atts = append(atts, att) @@ -344,7 +344,7 @@ func TestAggregateSignature_False(t *testing.T) { if err != nil { t.Fatal(err) } - if aggSig.VerifyAggregateCommon(pubkeys, bytesutil.ToBytes32(msg), 0) { + if aggSig.FastAggregateVerify(pubkeys, bytesutil.ToBytes32(msg)) { t.Error("Signature not suppose to verify") } } diff --git a/beacon-chain/core/helpers/committee.go b/beacon-chain/core/helpers/committee.go index 2485c794e4..1f16ce9ca5 100644 --- a/beacon-chain/core/helpers/committee.go +++ b/beacon-chain/core/helpers/committee.go @@ -181,7 +181,10 @@ type CommitteeAssignmentContainer struct { // 2. Compute all committees. // 3. Determine the attesting slot for each committee. // 4. 
Construct a map of validator indices pointing to the respective committees. -func CommitteeAssignments(state *stateTrie.BeaconState, epoch uint64) (map[uint64]*CommitteeAssignmentContainer, map[uint64]uint64, error) { +func CommitteeAssignments( + state *stateTrie.BeaconState, + epoch uint64, +) (map[uint64]*CommitteeAssignmentContainer, map[uint64][]uint64, error) { nextEpoch := NextEpoch(state) if epoch > nextEpoch { return nil, nil, fmt.Errorf( @@ -191,9 +194,11 @@ func CommitteeAssignments(state *stateTrie.BeaconState, epoch uint64) (map[uint6 ) } - // Track which slot has which proposer. + // We determine the slots in which proposers are supposed to act. + // Some validators may need to propose multiple times per epoch, so + // we use a map of proposer idx -> []slot to keep track of this possibility. startSlot := StartSlot(epoch) - proposerIndexToSlot := make(map[uint64]uint64) + proposerIndexToSlots := make(map[uint64][]uint64) for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ { if err := state.SetSlot(slot); err != nil { return nil, nil, err @@ -202,7 +207,7 @@ func CommitteeAssignments(state *stateTrie.BeaconState, epoch uint64) (map[uint6 if err != nil { return nil, nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot()) } - proposerIndexToSlot[i] = slot + proposerIndexToSlots[i] = append(proposerIndexToSlots[i], slot) } activeValidatorIndices, err := ActiveValidatorIndices(state, epoch) @@ -235,85 +240,7 @@ func CommitteeAssignments(state *stateTrie.BeaconState, epoch uint64) (map[uint6 } } - return validatorIndexToCommittee, proposerIndexToSlot, nil -} - -// CommitteeAssignment is used to query committee assignment from -// current and previous epoch. -// -// Deprecated: Consider using CommitteeAssignments, especially when computing more than one -// validator assignment as this method is O(n^2) in computational complexity. This method exists to -// ensure spec definition conformance and otherwise should probably not be used. -// -// Spec pseudocode definition: -// def get_committee_assignment(state: BeaconState, -// epoch: Epoch, -// validator_index: ValidatorIndex -// ) -> Optional[Tuple[Sequence[ValidatorIndex], CommitteeIndex, Slot]]: -// """ -// Return the committee assignment in the ``epoch`` for ``validator_index``. -// ``assignment`` returned is a tuple of the following form: -// * ``assignment[0]`` is the list of validators in the committee -// * ``assignment[1]`` is the index to which the committee is assigned -// * ``assignment[2]`` is the slot at which the committee is assigned -// Return None if no assignment. -// """ -// next_epoch = get_current_epoch(state) + 1 -// assert epoch <= next_epoch -// -// start_slot = compute_start_slot_at_epoch(epoch) -// for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH): -// for index in range(get_committee_count_at_slot(state, Slot(slot))): -// committee = get_beacon_committee(state, Slot(slot), CommitteeIndex(index)) -// if validator_index in committee: -// return committee, CommitteeIndex(index), Slot(slot) -// return None -func CommitteeAssignment( - state *stateTrie.BeaconState, - epoch uint64, - validatorIndex uint64, -) ([]uint64, uint64, uint64, uint64, error) { - nextEpoch := NextEpoch(state) - if epoch > nextEpoch { - return nil, 0, 0, 0, fmt.Errorf( - "epoch %d can't be greater than next epoch %d", - epoch, nextEpoch) - } - - // Track which slot has which proposer. 
- startSlot := StartSlot(epoch) - proposerIndexToSlot := make(map[uint64]uint64) - for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ { - if err := state.SetSlot(slot); err != nil { - return nil, 0, 0, 0, err - } - i, err := BeaconProposerIndex(state) - if err != nil { - return nil, 0, 0, 0, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot()) - } - proposerIndexToSlot[i] = slot - } - - activeValidatorIndices, err := ActiveValidatorIndices(state, epoch) - if err != nil { - return nil, 0, 0, 0, err - } - for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ { - countAtSlot := SlotCommitteeCount(uint64(len(activeValidatorIndices))) - for i := uint64(0); i < countAtSlot; i++ { - committee, err := BeaconCommitteeFromState(state, slot, i) - if err != nil { - return nil, 0, 0, 0, errors.Wrapf(err, "could not get crosslink committee at slot %d", slot) - } - for _, v := range committee { - if validatorIndex == v { - proposerSlot, _ := proposerIndexToSlot[v] - return committee, i, slot, proposerSlot, nil - } - } - } - } - return []uint64{}, 0, 0, 0, fmt.Errorf("validator with index %d not found in assignments", validatorIndex) + return validatorIndexToCommittee, proposerIndexToSlots, nil } // VerifyBitfieldLength verifies that a bitfield length matches the given committee size. @@ -409,7 +336,6 @@ func UpdateCommitteeCache(state *stateTrie.BeaconState, epoch uint64) error { // UpdateProposerIndicesInCache updates proposer indices entry of the committee cache. func UpdateProposerIndicesInCache(state *stateTrie.BeaconState, epoch uint64) error { - indices, err := ActiveValidatorIndices(state, epoch) if err != nil { return nil diff --git a/beacon-chain/core/helpers/committee_test.go b/beacon-chain/core/helpers/committee_test.go index 9eb4b6efae..22e8d751e5 100644 --- a/beacon-chain/core/helpers/committee_test.go +++ b/beacon-chain/core/helpers/committee_test.go @@ -4,7 +4,6 @@ import ( "fmt" "reflect" "strconv" - "strings" "testing" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" @@ -193,160 +192,6 @@ func TestVerifyBitfieldLength_OK(t *testing.T) { } } -func TestCommitteeAssignment_CanRetrieve(t *testing.T) { - ClearCache() - // Initialize test with 128 validators, each slot and each index gets 2 validators. 
- validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch) - for i := 0; i < len(validators); i++ { - validators[i] = ðpb.Validator{ - ExitEpoch: params.BeaconConfig().FarFutureEpoch, - } - } - state, err := beaconstate.InitializeFromProto(&pb.BeaconState{ - Validators: validators, - Slot: params.BeaconConfig().SlotsPerEpoch, - RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), - }) - if err != nil { - t.Fatal(err) - } - - tests := []struct { - index uint64 - slot uint64 - committee []uint64 - committeeIndex uint64 - isProposer bool - proposerSlot uint64 - }{ - { - index: 0, - slot: 78, - committee: []uint64{0, 38}, - committeeIndex: 0, - isProposer: false, - }, - { - index: 1, - slot: 71, - committee: []uint64{1, 4}, - committeeIndex: 0, - isProposer: true, - proposerSlot: 79, - }, - { - index: 11, - slot: 90, - committee: []uint64{31, 11}, - committeeIndex: 0, - isProposer: false, - }, - } - - for i, tt := range tests { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - committee, committeeIndex, slot, proposerSlot, err := CommitteeAssignment(state, tt.slot/params.BeaconConfig().SlotsPerEpoch, tt.index) - if err != nil { - t.Fatalf("failed to execute NextEpochCommitteeAssignment: %v", err) - } - if committeeIndex != tt.committeeIndex { - t.Errorf("wanted committeeIndex %d, got committeeIndex %d for validator index %d", - tt.committeeIndex, committeeIndex, tt.index) - } - if slot != tt.slot { - t.Errorf("wanted slot %d, got slot %d for validator index %d", - tt.slot, slot, tt.index) - } - if proposerSlot != tt.proposerSlot { - t.Errorf("wanted proposer slot %d, got proposer slot %d for validator index %d", - tt.proposerSlot, proposerSlot, tt.index) - } - if !reflect.DeepEqual(committee, tt.committee) { - t.Errorf("wanted committee %v, got committee %v for validator index %d", - tt.committee, committee, tt.index) - } - if proposerSlot != tt.proposerSlot { - t.Errorf("wanted proposer slot slot %d, got slot %d for validator index %d", - tt.slot, slot, tt.index) - } - }) - } -} - -func TestCommitteeAssignment_CantFindValidator(t *testing.T) { - ClearCache() - validators := make([]*ethpb.Validator, 1) - for i := 0; i < len(validators); i++ { - validators[i] = ðpb.Validator{ - ExitEpoch: params.BeaconConfig().FarFutureEpoch, - } - } - state, err := beaconstate.InitializeFromProto(&pb.BeaconState{ - Validators: validators, - Slot: params.BeaconConfig().SlotsPerEpoch, - RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), - }) - if err != nil { - t.Fatal(err) - } - - index := uint64(10000) - _, _, _, _, err = CommitteeAssignment(state, 1, index) - if err != nil && !strings.Contains(err.Error(), "not found in assignments") { - t.Errorf("Wanted 'not found in assignments', received %v", err) - } -} - -// Test helpers.CommitteeAssignments against the results of helpers.CommitteeAssignment by validator -// index. Warning: this test is a bit slow! -func TestCommitteeAssignments_AgreesWithSpecDefinitionMethod(t *testing.T) { - ClearCache() - // Initialize test with 256 validators, each slot and each index gets 4 validators. 
- validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch) - for i := 0; i < len(validators); i++ { - validators[i] = ðpb.Validator{ - ExitEpoch: params.BeaconConfig().FarFutureEpoch, - } - } - state, err := beaconstate.InitializeFromProto(&pb.BeaconState{ - Validators: validators, - Slot: params.BeaconConfig().SlotsPerEpoch, - RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), - }) - if err != nil { - t.Fatal(err) - } - // Test for 2 epochs. - for epoch := uint64(0); epoch < 2; epoch++ { - state, err := beaconstate.InitializeFromProto(state.CloneInnerState()) - if err != nil { - t.Fatal(err) - } - assignments, proposers, err := CommitteeAssignments(state, epoch) - if err != nil { - t.Fatal(err) - } - for i := uint64(0); int(i) < len(validators); i++ { - committee, committeeIndex, slot, proposerSlot, err := CommitteeAssignment(state, epoch, i) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(committee, assignments[i].Committee) { - t.Errorf("Computed different committees for validator %d", i) - } - if committeeIndex != assignments[i].CommitteeIndex { - t.Errorf("Computed different committee index for validator %d", i) - } - if slot != assignments[i].AttesterSlot { - t.Errorf("Computed different attesting slot for validator %d", i) - } - if proposerSlot != proposers[i] { - t.Errorf("Computed different proposing slot for validator %d", i) - } - } - } -} - func TestCommitteeAssignments_CanRetrieve(t *testing.T) { // Initialize test with 256 validators, each slot and each index gets 4 validators. validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch) @@ -412,7 +257,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) { for i, tt := range tests { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { ClearCache() - validatorIndexToCommittee, proposerIndexToSlot, err := CommitteeAssignments(state, SlotToEpoch(tt.slot)) + validatorIndexToCommittee, proposerIndexToSlots, err := CommitteeAssignments(state, SlotToEpoch(tt.slot)) if err != nil { t.Fatalf("failed to determine CommitteeAssignments: %v", err) } @@ -425,9 +270,9 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) { t.Errorf("wanted slot %d, got slot %d for validator index %d", tt.slot, cac.AttesterSlot, tt.index) } - if proposerIndexToSlot[tt.index] != tt.proposerSlot { + if len(proposerIndexToSlots[tt.index]) > 0 && proposerIndexToSlots[tt.index][0] != tt.proposerSlot { t.Errorf("wanted proposer slot %d, got proposer slot %d for validator index %d", - tt.proposerSlot, proposerIndexToSlot[tt.index], tt.index) + tt.proposerSlot, proposerIndexToSlots[tt.index][0], tt.index) } if !reflect.DeepEqual(cac.Committee, tt.committee) { t.Errorf("wanted committee %v, got committee %v for validator index %d", diff --git a/beacon-chain/core/helpers/rewards_penalties.go b/beacon-chain/core/helpers/rewards_penalties.go index 16bac0ecde..a655abcc3d 100644 --- a/beacon-chain/core/helpers/rewards_penalties.go +++ b/beacon-chain/core/helpers/rewards_penalties.go @@ -2,6 +2,7 @@ package helpers import ( stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" + "github.com/prysmaticlabs/prysm/shared/params" ) // TotalBalance returns the total amount at stake in Gwei @@ -10,9 +11,10 @@ import ( // Spec pseudocode definition: // def get_total_balance(state: BeaconState, indices: Set[ValidatorIndex]) -> Gwei: // """ -// Return the combined effective balance of the ``indices``. (1 Gwei minimum to avoid divisions by zero.) 
+// Return the combined effective balance of the ``indices``. +// ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. // """ -// return Gwei(max(1, sum([state.validators[index].effective_balance for index in indices]))) +// return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices]))) func TotalBalance(state *stateTrie.BeaconState, indices []uint64) uint64 { total := uint64(0) @@ -24,9 +26,9 @@ func TotalBalance(state *stateTrie.BeaconState, indices []uint64) uint64 { total += val.EffectiveBalance() } - // Return 1 Gwei minimum to avoid divisions by zero + // Return EFFECTIVE_BALANCE_INCREMENT to avoid divisions by zero. if total == 0 { - return 1 + return params.BeaconConfig().EffectiveBalanceIncrement } return total diff --git a/beacon-chain/core/helpers/rewards_penalties_test.go b/beacon-chain/core/helpers/rewards_penalties_test.go index 9c84a2b37b..ec3c41de3d 100644 --- a/beacon-chain/core/helpers/rewards_penalties_test.go +++ b/beacon-chain/core/helpers/rewards_penalties_test.go @@ -27,14 +27,14 @@ func TestTotalBalance_OK(t *testing.T) { } } -func TestTotalBalance_ReturnsOne(t *testing.T) { +func TestTotalBalance_ReturnsEffectiveBalanceIncrement(t *testing.T) { state, err := beaconstate.InitializeFromProto(&pb.BeaconState{Validators: []*ethpb.Validator{}}) if err != nil { t.Fatal(err) } balance := TotalBalance(state, []uint64{}) - wanted := uint64(1) + wanted := params.BeaconConfig().EffectiveBalanceIncrement if balance != wanted { t.Errorf("Incorrect TotalBalance. Wanted: %d, got: %d", wanted, balance) diff --git a/beacon-chain/core/helpers/signing_root.go b/beacon-chain/core/helpers/signing_root.go new file mode 100644 index 0000000000..abdd658ba0 --- /dev/null +++ b/beacon-chain/core/helpers/signing_root.go @@ -0,0 +1,146 @@ +package helpers + +import ( + "github.com/pkg/errors" + "github.com/prysmaticlabs/go-ssz" + p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" + "github.com/prysmaticlabs/prysm/shared/bls" + "github.com/prysmaticlabs/prysm/shared/bytesutil" + "github.com/prysmaticlabs/prysm/shared/params" +) + +// ForkVersionByteLength length of fork version byte array. +const ForkVersionByteLength = 4 + +// DomainByteLength length of domain byte array. +const DomainByteLength = 4 + +// ErrSigFailedToVerify returns when a signature of a block object(ie attestation, slashing, exit... etc) +// failed to verify. +var ErrSigFailedToVerify = errors.New("signature did not verify") + +// ComputeSigningRoot computes the root of the object by calculating the root of the object domain tree. +// +// Spec pseudocode definition: +// def compute_signing_root(ssz_object: SSZObject, domain: Domain) -> Root: +// """ +// Return the signing root of an object by calculating the root of the object-domain tree. +// """ +// domain_wrapped_object = SigningRoot( +// object_root=hash_tree_root(ssz_object), +// domain=domain, +// ) +// return hash_tree_root(domain_wrapped_object) +func ComputeSigningRoot(object interface{}, domain []byte) ([32]byte, error) { + objRoot, err := ssz.HashTreeRoot(object) + if err != nil { + return [32]byte{}, err + } + container := &p2ppb.SigningRoot{ + ObjectRoot: objRoot[:], + Domain: domain, + } + return ssz.HashTreeRoot(container) +} + +// VerifySigningRoot verifies the signing root of an object given it's public key, signature and domain. 
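A short sketch of the new signing flow from the signer's side, matching the pattern the updated tests use further down this diff: compute the signing root under a domain, then sign only that 32-byte root (the domain is folded into the root, not passed to the BLS layer). The wrapper name, the demo package, and the *bls.SecretKey parameter type are assumptions for illustration.

package demo

import (
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/shared/bls"
)

// signAttestationData computes compute_signing_root(data, domain) and signs
// the resulting root. Sign now takes only the message, because the domain is
// already mixed into the signing root itself.
func signAttestationData(data *ethpb.AttestationData, domain []byte, priv *bls.SecretKey) ([]byte, error) {
	root, err := helpers.ComputeSigningRoot(data, domain)
	if err != nil {
		return nil, err
	}
	return priv.Sign(root[:]).Marshal(), nil
}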
+func VerifySigningRoot(obj interface{}, pub []byte, signature []byte, domain []byte) error { + publicKey, err := bls.PublicKeyFromBytes(pub) + if err != nil { + return errors.Wrap(err, "could not convert bytes to public key") + } + sig, err := bls.SignatureFromBytes(signature) + if err != nil { + return errors.Wrap(err, "could not convert bytes to signature") + } + root, err := ComputeSigningRoot(obj, domain) + if err != nil { + return errors.Wrap(err, "could not compute signing root") + } + if !sig.Verify(root[:], publicKey) { + return ErrSigFailedToVerify + } + return nil +} + +// ComputeDomain returns the domain version for BLS private key to sign and verify with a zeroed 4-byte +// array as the fork version. +// +// def compute_domain(domain_type: DomainType, fork_version: Version=None, genesis_validators_root: Root=None) -> Domain: +// """ +// Return the domain for the ``domain_type`` and ``fork_version``. +// """ +// if fork_version is None: +// fork_version = GENESIS_FORK_VERSION +// if genesis_validators_root is None: +// genesis_validators_root = Root() # all bytes zero by default +// fork_data_root = compute_fork_data_root(fork_version, genesis_validators_root) +// return Domain(domain_type + fork_data_root[:28]) +func ComputeDomain(domainType [DomainByteLength]byte, forkVersion []byte, genesisValidatorsRoot []byte) ([]byte, error) { + if forkVersion == nil { + forkVersion = params.BeaconConfig().GenesisForkVersion + } + if genesisValidatorsRoot == nil { + genesisValidatorsRoot = params.BeaconConfig().ZeroHash[:] + } + forkBytes := [ForkVersionByteLength]byte{} + copy(forkBytes[:], forkVersion) + + forkDataRoot, err := computeForkDataRoot(forkBytes[:], genesisValidatorsRoot) + if err != nil { + return nil, err + } + + return domain(domainType, forkDataRoot[:]), nil +} + +// This returns the bls domain given by the domain type and fork data root. +func domain(domainType [DomainByteLength]byte, forkDataRoot []byte) []byte { + b := []byte{} + b = append(b, domainType[:4]...) + b = append(b, forkDataRoot[:28]...) + return b +} + +// this returns the 32byte fork data root for the ``current_version`` and ``genesis_validators_root``. +// This is used primarily in signature domains to avoid collisions across forks/chains. +// +// Spec pseudocode definition: +// def compute_fork_data_root(current_version: Version, genesis_validators_root: Root) -> Root: +// """ +// Return the 32-byte fork data root for the ``current_version`` and ``genesis_validators_root``. +// This is used primarily in signature domains to avoid collisions across forks/chains. +// """ +// return hash_tree_root(ForkData( +// current_version=current_version, +// genesis_validators_root=genesis_validators_root, +// )) +func computeForkDataRoot(version []byte, root []byte) ([32]byte, error) { + r, err := ssz.HashTreeRoot(&pb.ForkData{ + CurrentVersion: version, + GenesisValidatorsRoot: root, + }) + if err != nil { + return [32]byte{}, err + } + return r, nil +} + +// ComputeForkDigest returns the fork for the current version and genesis validator root +// +// Spec pseudocode definition: +// def compute_fork_digest(current_version: Version, genesis_validators_root: Root) -> ForkDigest: +// """ +// Return the 4-byte fork digest for the ``current_version`` and ``genesis_validators_root``. +// This is a digest primarily used for domain separation on the p2p layer. +// 4-bytes suffices for practical separation of forks/chains. 
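And the verification side, combining ComputeDomain with VerifySigningRoot. The wrapper is illustrative; it assumes the genesis fork version is the appropriate one for the message being checked, and that pub and sig are the serialized 48-byte BLS public key and 96-byte signature.

package demo

import (
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/shared/params"
)

// verifyProposerSignature derives the proposer domain for the genesis fork
// and the supplied genesis validators root, then checks the block signature.
// VerifySigningRoot recomputes the signing root internally and returns
// ErrSigFailedToVerify when the signature does not check out.
func verifyProposerSignature(blk *ethpb.BeaconBlock, pub, sig, genesisValidatorsRoot []byte) error {
	domain, err := helpers.ComputeDomain(
		params.BeaconConfig().DomainBeaconProposer,
		params.BeaconConfig().GenesisForkVersion,
		genesisValidatorsRoot,
	)
	if err != nil {
		return err
	}
	return helpers.VerifySigningRoot(blk, pub, sig, domain)
}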
+// """ +// return ForkDigest(compute_fork_data_root(current_version, genesis_validators_root)[:4]) +func ComputeForkDigest(version []byte, genesisValidatorsRoot []byte) ([4]byte, error) { + dataRoot, err := computeForkDataRoot(version, genesisValidatorsRoot) + if err != nil { + return [4]byte{}, nil + } + return bytesutil.ToBytes4(dataRoot[:]), nil +} diff --git a/beacon-chain/core/helpers/signing_root_test.go b/beacon-chain/core/helpers/signing_root_test.go new file mode 100644 index 0000000000..339bfe232a --- /dev/null +++ b/beacon-chain/core/helpers/signing_root_test.go @@ -0,0 +1,86 @@ +package helpers + +import ( + "bytes" + "testing" + + fuzz "github.com/google/gofuzz" + ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" + ethereum_beacon_p2p_v1 "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" + "github.com/prysmaticlabs/prysm/shared/params" +) + +func TestSigningRoot_ComputeOK(t *testing.T) { + emptyBlock := ðpb.BeaconBlock{} + _, err := ComputeSigningRoot(emptyBlock, []byte{'T', 'E', 'S', 'T'}) + if err != nil { + t.Errorf("Could not compute signing root of block: %v", err) + } +} + +func TestComputeDomain_OK(t *testing.T) { + tests := []struct { + epoch uint64 + domainType [4]byte + domain []byte + }{ + {epoch: 1, domainType: [4]byte{4, 0, 0, 0}, domain: []byte{4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + {epoch: 2, domainType: [4]byte{4, 0, 0, 0}, domain: []byte{4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + {epoch: 2, domainType: [4]byte{5, 0, 0, 0}, domain: []byte{5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + {epoch: 3, domainType: [4]byte{4, 0, 0, 0}, domain: []byte{4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + {epoch: 3, domainType: [4]byte{5, 0, 0, 0}, domain: []byte{5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + } + for _, tt := range tests { + if !bytes.Equal(domain(tt.domainType, params.BeaconConfig().ZeroHash[:]), tt.domain) { + t.Errorf("wanted domain version: %d, got: %d", tt.domain, domain(tt.domainType, params.BeaconConfig().ZeroHash[:])) + } + } +} + +func TestComputeForkDigest_OK(t *testing.T) { + tests := []struct { + version []byte + root [32]byte + result [4]byte + }{ + {version: []byte{'A', 'B', 'C', 'D'}, root: [32]byte{'i', 'o', 'p'}, result: [4]byte{0x69, 0x5c, 0x26, 0x47}}, + {version: []byte{'i', 'm', 'n', 'a'}, root: [32]byte{'z', 'a', 'b'}, result: [4]byte{0x1c, 0x38, 0x84, 0x58}}, + {version: []byte{'b', 'w', 'r', 't'}, root: [32]byte{'r', 'd', 'c'}, result: [4]byte{0x83, 0x34, 0x38, 0x88}}, + } + for _, tt := range tests { + digest, err := ComputeForkDigest(tt.version, tt.root[:]) + if err != nil { + t.Error(err) + } + if digest != tt.result { + t.Errorf("wanted domain version: %#x, got: %#x", digest, tt.result) + } + } +} + +func TestFuzzverifySigningRoot_10000(t *testing.T) { + fuzzer := fuzz.NewWithSeed(0) + state := ðereum_beacon_p2p_v1.BeaconState{} + pubkey := [48]byte{} + sig := [96]byte{} + domain := [4]byte{} + p := []byte{} + s := []byte{} + d := []byte{} + for i := 0; i < 10000; i++ { + fuzzer.Fuzz(state) + fuzzer.Fuzz(&pubkey) + fuzzer.Fuzz(&sig) + fuzzer.Fuzz(&domain) + fuzzer.Fuzz(state) + fuzzer.Fuzz(&p) + fuzzer.Fuzz(&s) + fuzzer.Fuzz(&d) + if err := VerifySigningRoot(state, pubkey[:], sig[:], domain[:]); err != nil { + t.Log(err) + } + if err := 
VerifySigningRoot(state, p, s, d); err != nil { + t.Log(err) + } + } +} diff --git a/beacon-chain/core/helpers/slot_epoch.go b/beacon-chain/core/helpers/slot_epoch.go index b6228ad9fe..1f69af56a2 100644 --- a/beacon-chain/core/helpers/slot_epoch.go +++ b/beacon-chain/core/helpers/slot_epoch.go @@ -89,15 +89,17 @@ func SlotsSinceEpochStarts(slot uint64) uint64 { return slot - StartSlot(SlotToEpoch(slot)) } -// Allow for slots "from the future" within a certain tolerance. -const timeShiftTolerance = 10 // ms +// TimeShiftTolerance specifies the tolerance threshold for slots "from the future". +const TimeShiftTolerance = 500 * time.Millisecond // ms // VerifySlotTime validates the input slot is not from the future. -func VerifySlotTime(genesisTime uint64, slot uint64) error { - slotTime := genesisTime + slot*params.BeaconConfig().SecondsPerSlot - currentTime := uint64(roughtime.Now().Unix()) - if slotTime > currentTime+timeShiftTolerance { - return fmt.Errorf("could not process slot from the future, slot time %d > current time %d", slotTime, currentTime) +func VerifySlotTime(genesisTime uint64, slot uint64, timeTolerance time.Duration) error { + // denominate everything in milliseconds + slotTime := 1000 * (genesisTime + slot*params.BeaconConfig().SecondsPerSlot) + currentTime := 1000 * uint64(roughtime.Now().Unix()) + tolerance := uint64(timeTolerance.Milliseconds()) + if slotTime > currentTime+tolerance { + return fmt.Errorf("could not process slot from the future, slot time(ms) %d > current time(ms) %d", slotTime, currentTime) } return nil } diff --git a/beacon-chain/core/helpers/validators.go b/beacon-chain/core/helpers/validators.go index bdb8acd290..a6f76d8c2e 100644 --- a/beacon-chain/core/helpers/validators.go +++ b/beacon-chain/core/helpers/validators.go @@ -235,18 +235,16 @@ func ComputeProposerIndex(validators []*ethpb.Validator, activeIndices []uint64, // Domain returns the domain version for BLS private key to sign and verify. // // Spec pseudocode definition: -// def get_domain(state: BeaconState, -// domain_type: int, -// message_epoch: Epoch=None) -> int: +// def get_domain(state: BeaconState, domain_type: DomainType, epoch: Epoch=None) -> Domain: // """ // Return the signature domain (fork version concatenated with domain type) of a message. 
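A minimal sketch of calling the reworked VerifySlotTime from slot_epoch.go above, which now takes the clock-disparity tolerance as an argument instead of a hard-coded constant. The wrapper names and the two-second custom value are illustrative only.

package demo

import (
	"time"

	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
)

// checkSlotNotFromFuture uses the exported 500ms default tolerance.
func checkSlotNotFromFuture(genesisTime, slot uint64) error {
	return helpers.VerifySlotTime(genesisTime, slot, helpers.TimeShiftTolerance)
}

// checkSlotWithLooseTolerance shows a caller supplying its own bound.
func checkSlotWithLooseTolerance(genesisTime, slot uint64) error {
	return helpers.VerifySlotTime(genesisTime, slot, 2*time.Second)
}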
// """ -// epoch = get_current_epoch(state) if message_epoch is None else message_epoch +// epoch = get_current_epoch(state) if epoch is None else epoch // fork_version = state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version -// return bls_domain(domain_type, fork_version) -func Domain(fork *pb.Fork, epoch uint64, domainType [bls.DomainByteLength]byte) (uint64, error) { +// return compute_domain(domain_type, fork_version, state.genesis_validators_root) +func Domain(fork *pb.Fork, epoch uint64, domainType [bls.DomainByteLength]byte, genesisRoot []byte) ([]byte, error) { if fork == nil { - return 0, errors.New("nil fork or domain type") + return []byte{}, errors.New("nil fork or domain type") } var forkVersion []byte if epoch < fork.Epoch { @@ -255,11 +253,11 @@ func Domain(fork *pb.Fork, epoch uint64, domainType [bls.DomainByteLength]byte) forkVersion = fork.CurrentVersion } if len(forkVersion) != 4 { - return 0, errors.New("fork version length is not 4 byte") + return []byte{}, errors.New("fork version length is not 4 byte") } var forkVersionArray [4]byte copy(forkVersionArray[:], forkVersion[:4]) - return bls.Domain(domainType, forkVersionArray), nil + return ComputeDomain(domainType, forkVersionArray[:], genesisRoot) } // IsEligibleForActivationQueue checks if the validator is eligible to diff --git a/beacon-chain/core/helpers/validators_test.go b/beacon-chain/core/helpers/validators_test.go index 7c413f60b0..fe449c50d4 100644 --- a/beacon-chain/core/helpers/validators_test.go +++ b/beacon-chain/core/helpers/validators_test.go @@ -1,6 +1,7 @@ package helpers import ( + "bytes" "reflect" "testing" @@ -243,22 +244,22 @@ func TestDomain_OK(t *testing.T) { } tests := []struct { epoch uint64 - domainType uint64 - version uint64 + domainType [4]byte + result []byte }{ - {epoch: 1, domainType: 4, version: 144115188075855876}, - {epoch: 2, domainType: 4, version: 144115188075855876}, - {epoch: 2, domainType: 5, version: 144115188075855877}, - {epoch: 3, domainType: 4, version: 216172782113783812}, - {epoch: 3, domainType: 5, version: 216172782113783813}, + {epoch: 1, domainType: bytesutil.ToBytes4(bytesutil.Bytes4(4)), result: bytesutil.ToBytes(947067381421703172, 32)}, + {epoch: 2, domainType: bytesutil.ToBytes4(bytesutil.Bytes4(4)), result: bytesutil.ToBytes(947067381421703172, 32)}, + {epoch: 2, domainType: bytesutil.ToBytes4(bytesutil.Bytes4(5)), result: bytesutil.ToBytes(947067381421703173, 32)}, + {epoch: 3, domainType: bytesutil.ToBytes4(bytesutil.Bytes4(4)), result: bytesutil.ToBytes(9369798235163459588, 32)}, + {epoch: 3, domainType: bytesutil.ToBytes4(bytesutil.Bytes4(5)), result: bytesutil.ToBytes(9369798235163459589, 32)}, } for _, tt := range tests { - domain, err := Domain(state.Fork, tt.epoch, bytesutil.ToBytes4(bytesutil.Bytes4(tt.domainType))) + domain, err := Domain(state.Fork, tt.epoch, tt.domainType, nil) if err != nil { t.Fatal(err) } - if domain != tt.version { - t.Errorf("wanted domain version: %d, got: %d", tt.version, domain) + if !bytes.Equal(domain[:8], tt.result[:8]) { + t.Errorf("wanted domain version: %d, got: %d", tt.result, domain) } } } diff --git a/beacon-chain/core/state/BUILD.bazel b/beacon-chain/core/state/BUILD.bazel index fe1aa1a565..4deb3c76a5 100644 --- a/beacon-chain/core/state/BUILD.bazel +++ b/beacon-chain/core/state/BUILD.bazel @@ -57,6 +57,7 @@ go_test( "//beacon-chain/core/blocks:go_default_library", "//beacon-chain/core/helpers:go_default_library", "//beacon-chain/state:go_default_library", + 
"//beacon-chain/state/stateutil:go_default_library", "//proto/beacon/p2p/v1:go_default_library", "//shared/attestationutil:go_default_library", "//shared/bls:go_default_library", diff --git a/beacon-chain/core/state/benchmarks_test.go b/beacon-chain/core/state/benchmarks_test.go index 58c389610b..2a618850a4 100644 --- a/beacon-chain/core/state/benchmarks_test.go +++ b/beacon-chain/core/state/benchmarks_test.go @@ -27,9 +27,14 @@ func TestBenchmarkExecuteStateTransition(t *testing.T) { t.Fatal(err) } - if _, err := state.ExecuteStateTransition(context.Background(), beaconState, block); err != nil { + oldSlot := beaconState.Slot() + beaconState, err = state.ExecuteStateTransition(context.Background(), beaconState, block) + if err != nil { t.Fatalf("failed to process block, benchmarks will fail: %v", err) } + if oldSlot == beaconState.Slot() { + t.Fatal("Expected slots to be different") + } } func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) { diff --git a/beacon-chain/core/state/state.go b/beacon-chain/core/state/state.go index 33116c958b..7afeda0af1 100644 --- a/beacon-chain/core/state/state.go +++ b/beacon-chain/core/state/state.go @@ -12,6 +12,7 @@ import ( "github.com/prysmaticlabs/go-ssz" b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks" stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/trieutil" @@ -137,10 +138,16 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState *stateTrie.BeaconS slashings := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector) + genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(preState.Validators()) + if err != nil { + return nil, errors.Wrapf(err, "could not hash tree root genesis validators %v", err) + } + state := &pb.BeaconState{ // Misc fields. 
- Slot: 0, - GenesisTime: genesisTime, + Slot: 0, + GenesisTime: genesisTime, + GenesisValidatorsRoot: genesisValidatorsRoot[:], Fork: &pb.Fork{ PreviousVersion: params.BeaconConfig().GenesisForkVersion, diff --git a/beacon-chain/core/state/transition_test.go b/beacon-chain/core/state/transition_test.go index e73c1d4a61..e04dace734 100644 --- a/beacon-chain/core/state/transition_test.go +++ b/beacon-chain/core/state/transition_test.go @@ -15,6 +15,7 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/state" beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/attestationutil" "github.com/prysmaticlabs/prysm/shared/bls" @@ -75,7 +76,7 @@ func TestExecuteStateTransition_FullProcess(t *testing.T) { if err != nil { t.Fatal(err) } - parentRoot, err := ssz.HashTreeRoot(beaconState.LatestBlockHeader()) + parentRoot, err := stateutil.BlockHeaderRoot(beaconState.LatestBlockHeader()) if err != nil { t.Error(err) } @@ -93,8 +94,9 @@ func TestExecuteStateTransition_FullProcess(t *testing.T) { } block := ðpb.SignedBeaconBlock{ Block: ðpb.BeaconBlock{ - Slot: beaconState.Slot() + 1, - ParentRoot: parentRoot[:], + ProposerIndex: 74, + Slot: beaconState.Slot() + 1, + ParentRoot: parentRoot[:], Body: ðpb.BeaconBlockBody{ RandaoReveal: randaoReveal, Eth1Data: eth1Data, @@ -146,10 +148,6 @@ func TestProcessBlock_IncorrectProposerSlashing(t *testing.T) { } block.Block.Body.ProposerSlashings = []*ethpb.ProposerSlashing{slashing} - blockRoot, err := ssz.HashTreeRoot(block.Block) - if err != nil { - t.Fatal(err) - } if err := beaconState.SetSlot(beaconState.Slot() + 1); err != nil { t.Fatal(err) } @@ -160,11 +158,15 @@ func TestProcessBlock_IncorrectProposerSlashing(t *testing.T) { if err := beaconState.SetSlot(beaconState.Slot() - 1); err != nil { t.Fatal(err) } - domain, err := helpers.Domain(beaconState.Fork(), helpers.CurrentEpoch(beaconState), params.BeaconConfig().DomainBeaconProposer) + domain, err := helpers.Domain(beaconState.Fork(), helpers.CurrentEpoch(beaconState), params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } - sig := privKeys[proposerIdx].Sign(blockRoot[:], domain) + root, err := helpers.ComputeSigningRoot(block.Block, domain) + if err != nil { + t.Fatal(err) + } + sig := privKeys[proposerIdx].Sign(root[:]) block.Signature = sig.Marshal() beaconState, err = state.ProcessSlots(context.Background(), beaconState, 1) @@ -194,10 +196,6 @@ func TestProcessBlock_IncorrectProcessBlockAttestations(t *testing.T) { t.Fatal(err) } block.Block.Body.Attestations = []*ethpb.Attestation{att} - blockRoot, err := ssz.HashTreeRoot(block.Block) - if err != nil { - t.Fatal(err) - } if err := beaconState.SetSlot(beaconState.Slot() + 1); err != nil { t.Fatal(err) } @@ -208,11 +206,15 @@ func TestProcessBlock_IncorrectProcessBlockAttestations(t *testing.T) { if err := beaconState.SetSlot(beaconState.Slot() - 1); err != nil { t.Fatal(err) } - domain, err := helpers.Domain(beaconState.Fork(), helpers.CurrentEpoch(beaconState), params.BeaconConfig().DomainBeaconProposer) + domain, err := helpers.Domain(beaconState.Fork(), helpers.CurrentEpoch(beaconState), params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } - sig := privKeys[proposerIdx].Sign(blockRoot[:], domain) + 
root, err := helpers.ComputeSigningRoot(block.Block, domain) + if err != nil { + t.Fatal(err) + } + sig := privKeys[proposerIdx].Sign(root[:]) block.Signature = sig.Marshal() beaconState, err = state.ProcessSlots(context.Background(), beaconState, 1) @@ -232,16 +234,17 @@ func TestProcessBlock_IncorrectProcessExits(t *testing.T) { proposerSlashings := []*ethpb.ProposerSlashing{ { - ProposerIndex: 3, Header_1: ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 1, + ProposerIndex: 3, + Slot: 1, }, Signature: []byte("A"), }, Header_2: ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 1, + ProposerIndex: 3, + Slot: 1, }, Signature: []byte("B"), }, @@ -378,6 +381,7 @@ func TestProcessBlock_PassesProcessingConditions(t *testing.T) { beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, + beaconState.GenesisValidatorRoot(), ) if err != nil { t.Fatal(err) @@ -385,33 +389,34 @@ func TestProcessBlock_PassesProcessingConditions(t *testing.T) { header1 := ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 1, - StateRoot: []byte("A"), + ProposerIndex: proposerSlashIdx, + Slot: 1, + StateRoot: []byte("A"), }, } - signingRoot, err := ssz.HashTreeRoot(header1.Header) + root, err := helpers.ComputeSigningRoot(header1.Header, domain) if err != nil { - t.Errorf("Could not get signing root of beacon block header: %v", err) + t.Fatal(err) } - header1.Signature = privKeys[proposerSlashIdx].Sign(signingRoot[:], domain).Marshal()[:] + header1.Signature = privKeys[proposerSlashIdx].Sign(root[:]).Marshal()[:] header2 := ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 1, - StateRoot: []byte("B"), + ProposerIndex: proposerSlashIdx, + Slot: 1, + StateRoot: []byte("B"), }, } - signingRoot, err = ssz.HashTreeRoot(header2.Header) + root, err = helpers.ComputeSigningRoot(header2.Header, domain) if err != nil { - t.Errorf("Could not get signing root of beacon block header: %v", err) + t.Fatal(err) } - header2.Signature = privKeys[proposerSlashIdx].Sign(signingRoot[:], domain).Marshal()[:] + header2.Signature = privKeys[proposerSlashIdx].Sign(root[:]).Marshal()[:] proposerSlashings := []*ethpb.ProposerSlashing{ { - ProposerIndex: proposerSlashIdx, - Header_1: header1, - Header_2: header2, + Header_1: header1, + Header_2: header2, }, } validators := beaconState.Validators() @@ -427,16 +432,16 @@ func TestProcessBlock_PassesProcessingConditions(t *testing.T) { Target: ðpb.Checkpoint{Epoch: 0}}, AttestingIndices: []uint64{0, 1}, } - hashTreeRoot, err := ssz.HashTreeRoot(att1.Data) - if err != nil { - t.Error(err) - } - domain, err = helpers.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconAttester) + domain, err = helpers.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } - sig0 := privKeys[0].Sign(hashTreeRoot[:], domain) - sig1 := privKeys[1].Sign(hashTreeRoot[:], domain) + hashTreeRoot, err := helpers.ComputeSigningRoot(att1.Data, domain) + if err != nil { + t.Error(err) + } + sig0 := privKeys[0].Sign(hashTreeRoot[:]) + sig1 := privKeys[1].Sign(hashTreeRoot[:]) aggregateSig := bls.AggregateSignatures([]*bls.Signature{sig0, sig1}) att1.Signature = aggregateSig.Marshal()[:] @@ -447,12 +452,13 @@ func TestProcessBlock_PassesProcessingConditions(t *testing.T) { Target: ðpb.Checkpoint{Epoch: 0}}, AttestingIndices: []uint64{0, 1}, } - hashTreeRoot, err = ssz.HashTreeRoot(att2.Data) + + hashTreeRoot, err = 
helpers.ComputeSigningRoot(att2.Data, domain) if err != nil { t.Error(err) } - sig0 = privKeys[0].Sign(hashTreeRoot[:], domain) - sig1 = privKeys[1].Sign(hashTreeRoot[:], domain) + sig0 = privKeys[0].Sign(hashTreeRoot[:]) + sig1 = privKeys[1].Sign(hashTreeRoot[:]) aggregateSig = bls.AggregateSignatures([]*bls.Signature{sig0, sig1}) att2.Signature = aggregateSig.Marshal()[:] @@ -492,13 +498,13 @@ func TestProcessBlock_PassesProcessingConditions(t *testing.T) { if err != nil { t.Error(err) } - hashTreeRoot, err = ssz.HashTreeRoot(blockAtt.Data) + hashTreeRoot, err = helpers.ComputeSigningRoot(blockAtt.Data, domain) if err != nil { t.Error(err) } sigs := make([]*bls.Signature, len(attestingIndices)) for i, indice := range attestingIndices { - sig := privKeys[indice].Sign(hashTreeRoot[:], domain) + sig := privKeys[indice].Sign(hashTreeRoot[:]) sigs[i] = sig } blockAtt.Signature = bls.AggregateSignatures(sigs).Marshal()[:] @@ -509,17 +515,17 @@ func TestProcessBlock_PassesProcessingConditions(t *testing.T) { Epoch: 0, }, } - signingRoot, err = ssz.HashTreeRoot(exit.Exit) - if err != nil { - t.Errorf("Could not get signing root of beacon block header: %v", err) - } - domain, err = helpers.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainVoluntaryExit) + domain, err = helpers.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainVoluntaryExit, beaconState.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } - exit.Signature = privKeys[exit.Exit.ValidatorIndex].Sign(signingRoot[:], domain).Marshal()[:] + signingRoot, err := helpers.ComputeSigningRoot(exit.Exit, domain) + if err != nil { + t.Errorf("Could not get signing root of beacon block header: %v", err) + } + exit.Signature = privKeys[exit.Exit.ValidatorIndex].Sign(signingRoot[:]).Marshal()[:] - parentRoot, err := ssz.HashTreeRoot(beaconState.LatestBlockHeader()) + parentRoot, err := stateutil.BlockHeaderRoot(beaconState.LatestBlockHeader()) if err != nil { t.Fatal(err) } @@ -530,8 +536,9 @@ func TestProcessBlock_PassesProcessingConditions(t *testing.T) { } block := ðpb.SignedBeaconBlock{ Block: ðpb.BeaconBlock{ - ParentRoot: parentRoot[:], - Slot: beaconState.Slot(), + ParentRoot: parentRoot[:], + Slot: beaconState.Slot(), + ProposerIndex: 17, Body: ðpb.BeaconBlockBody{ RandaoReveal: randaoReveal, ProposerSlashings: proposerSlashings, @@ -557,12 +564,12 @@ func TestProcessBlock_PassesProcessingConditions(t *testing.T) { t.Fatalf("Expected block to pass processing conditions: %v", err) } - v, err := beaconState.ValidatorAtIndex(proposerSlashings[0].ProposerIndex) + v, err := beaconState.ValidatorAtIndex(proposerSlashings[0].Header_1.Header.ProposerIndex) if err != nil { t.Fatal(err) } if !v.Slashed { - t.Errorf("Expected validator at index %d to be slashed, received false", proposerSlashings[0].ProposerIndex) + t.Errorf("Expected validator at index %d to be slashed, received false", proposerSlashings[0].Header_1.Header.ProposerIndex) } v, err = beaconState.ValidatorAtIndex(1) if err != nil { @@ -661,16 +668,17 @@ func BenchmarkProcessBlk_65536Validators_FullBlock(b *testing.B) { // Set up proposer slashing object for block proposerSlashings := []*ethpb.ProposerSlashing{ { - ProposerIndex: 1, Header_1: ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 0, + ProposerIndex: 1, + Slot: 0, }, Signature: []byte("A"), }, Header_2: ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 0, + ProposerIndex: 1, + Slot: 0, }, Signature: []byte("B"), }, @@ -723,11 +731,19 @@ func 
BenchmarkProcessBlk_65536Validators_FullBlock(b *testing.B) { v[proposerIdx].PublicKey = priv.PublicKey().Marshal() buf := make([]byte, 32) binary.LittleEndian.PutUint64(buf, 0) - domain, err := helpers.Domain(s.Fork(), 0, params.BeaconConfig().DomainRandao) + domain, err := helpers.Domain(s.Fork(), 0, params.BeaconConfig().DomainRandao, s.GenesisValidatorRoot()) if err != nil { b.Fatal(err) } - epochSignature := priv.Sign(buf, domain) + ctr := &pb.SigningRoot{ + ObjectRoot: buf, + Domain: domain, + } + root, err = ssz.HashTreeRoot(ctr) + if err != nil { + b.Fatal(err) + } + epochSignature := priv.Sign(root[:]) buf = []byte{params.BeaconConfig().BLSWithdrawalPrefixByte} pubKey := []byte("A") @@ -826,17 +842,17 @@ func TestProcessBlk_AttsBasedOnValidatorCount(t *testing.T) { if err != nil { t.Error(err) } - domain, err := helpers.Domain(s.Fork(), 0, params.BeaconConfig().DomainBeaconAttester) + domain, err := helpers.Domain(s.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, s.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } sigs := make([]*bls.Signature, len(attestingIndices)) for i, indice := range attestingIndices { - hashTreeRoot, err := ssz.HashTreeRoot(att.Data) + hashTreeRoot, err := helpers.ComputeSigningRoot(att.Data, domain) if err != nil { t.Error(err) } - sig := privKeys[indice].Sign(hashTreeRoot[:], domain) + sig := privKeys[indice].Sign(hashTreeRoot[:]) sigs[i] = sig } att.Signature = bls.AggregateSignatures(sigs).Marshal()[:] @@ -847,14 +863,15 @@ func TestProcessBlk_AttsBasedOnValidatorCount(t *testing.T) { if err != nil { t.Fatal(err) } - parentRoot, err := ssz.HashTreeRoot(s.LatestBlockHeader()) + parentRoot, err := stateutil.BlockHeaderRoot(s.LatestBlockHeader()) if err != nil { t.Fatal(err) } blk := ðpb.SignedBeaconBlock{ Block: ðpb.BeaconBlock{ - Slot: s.Slot(), - ParentRoot: parentRoot[:], + ProposerIndex: 72, + Slot: s.Slot(), + ParentRoot: parentRoot[:], Body: ðpb.BeaconBlockBody{ Eth1Data: ðpb.Eth1Data{}, RandaoReveal: epochSignature, diff --git a/beacon-chain/db/kv/BUILD.bazel b/beacon-chain/db/kv/BUILD.bazel index 56ac6eef01..9a506b330d 100644 --- a/beacon-chain/db/kv/BUILD.bazel +++ b/beacon-chain/db/kv/BUILD.bazel @@ -43,6 +43,7 @@ go_library( "//shared/traceutil:go_default_library", "@com_github_dgraph_io_ristretto//:go_default_library", "@com_github_ethereum_go_ethereum//common:go_default_library", + "@com_github_ferranbt_fastssz//:go_default_library", "@com_github_gogo_protobuf//proto:go_default_library", "@com_github_golang_snappy//:go_default_library", "@com_github_pkg_errors//:go_default_library", @@ -78,7 +79,6 @@ go_test( deps = [ "//beacon-chain/cache:go_default_library", "//beacon-chain/db/filters:go_default_library", - "//beacon-chain/state:go_default_library", "//proto/beacon/p2p/v1:go_default_library", "//proto/testing:go_default_library", "//shared/bytesutil:go_default_library", @@ -89,5 +89,6 @@ go_test( "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", "@com_github_prysmaticlabs_go_bitfield//:go_default_library", "@com_github_prysmaticlabs_go_ssz//:go_default_library", + "@in_gopkg_d4l3k_messagediff_v1//:go_default_library", ], ) diff --git a/beacon-chain/db/kv/archive_test.go b/beacon-chain/db/kv/archive_test.go index 7f7ba45904..bc521d749b 100644 --- a/beacon-chain/db/kv/archive_test.go +++ b/beacon-chain/db/kv/archive_test.go @@ -38,22 +38,23 @@ func TestStore_ArchivedActiveValidatorChanges(t *testing.T) { }, ProposerSlashings: []*ethpb.ProposerSlashing{ { - ProposerIndex: 1212, Header_1: 
ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 10, - ParentRoot: someRoot[:], - StateRoot: someRoot[:], - BodyRoot: someRoot[:], + ProposerIndex: 1212, + Slot: 10, + ParentRoot: someRoot[:], + StateRoot: someRoot[:], + BodyRoot: someRoot[:], }, Signature: make([]byte, 96), }, Header_2: ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 10, - ParentRoot: someRoot[:], - StateRoot: someRoot[:], - BodyRoot: someRoot[:], + ProposerIndex: 1212, + Slot: 10, + ParentRoot: someRoot[:], + StateRoot: someRoot[:], + BodyRoot: someRoot[:], }, Signature: make([]byte, 96), }, diff --git a/beacon-chain/db/kv/backup_test.go b/beacon-chain/db/kv/backup_test.go index bcb6c93be2..f6c27e1aa7 100644 --- a/beacon-chain/db/kv/backup_test.go +++ b/beacon-chain/db/kv/backup_test.go @@ -8,8 +8,7 @@ import ( eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-ssz" - "github.com/prysmaticlabs/prysm/beacon-chain/state" - pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" + "github.com/prysmaticlabs/prysm/shared/testutil" ) func TestStore_Backup(t *testing.T) { @@ -26,7 +25,7 @@ func TestStore_Backup(t *testing.T) { if err != nil { t.Fatal(err) } - st, err := state.InitializeFromProto(&pb.BeaconState{}) + st := testutil.NewBeaconState() if err := db.SaveState(ctx, st, root); err != nil { t.Fatal(err) } diff --git a/beacon-chain/db/kv/blocks.go b/beacon-chain/db/kv/blocks.go index e356648ffd..16f7fca915 100644 --- a/beacon-chain/db/kv/blocks.go +++ b/beacon-chain/db/kv/blocks.go @@ -263,9 +263,12 @@ func (k *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot") defer span.End() return k.db.Update(func(tx *bolt.Tx) error { - if featureconfig.Get().NewStateMgmt { - if tx.Bucket(stateSummaryBucket).Get(blockRoot[:]) == nil && !k.stateSummaryCache.Has(blockRoot) { - return errors.New("no state summary found with head block root") + if !featureconfig.Get().DisableNewStateMgmt { + hasStateSummaryInCache := k.stateSummaryCache.Has(blockRoot) + hasStateSummaryInDB := tx.Bucket(stateSummaryBucket).Get(blockRoot[:]) != nil + hasStateInDB := tx.Bucket(stateBucket).Get(blockRoot[:]) != nil + if !(hasStateInDB || hasStateSummaryInDB || hasStateSummaryInCache) { + return errors.New("no state or state summary found with head block root") } } else { if tx.Bucket(stateBucket).Get(blockRoot[:]) == nil { diff --git a/beacon-chain/db/kv/check_historical_state.go b/beacon-chain/db/kv/check_historical_state.go index c27c8adfe9..d698bae59f 100644 --- a/beacon-chain/db/kv/check_historical_state.go +++ b/beacon-chain/db/kv/check_historical_state.go @@ -12,7 +12,7 @@ import ( var historicalStateDeletedKey = []byte("historical-states-deleted") func (kv *Store) ensureNewStateServiceCompatible(ctx context.Context) error { - if !featureconfig.Get().NewStateMgmt { + if featureconfig.Get().DisableNewStateMgmt { return kv.db.Update(func(tx *bolt.Tx) error { bkt := tx.Bucket(newStateServiceCompatibleBucket) return bkt.Put(historicalStateDeletedKey, []byte{0x01}) @@ -32,9 +32,9 @@ func (kv *Store) ensureNewStateServiceCompatible(ctx context.Context) error { regenHistoricalStatesConfirmed := false var err error if historicalStateDeleted { - actionText := "Looks like you stopped using --new-state-mgmt. To reuse it, the node will need " + - "to generate and save historical states. The process may take a while, - do you want to proceed? (Y/N)" - deniedText := "Historical states will not be generated. 
Please remove usage --new-state-mgmt" + actionText := "--disable-new-state-mgmt was used. To proceed without the flag, the db will need " + + "to generate and save historical states. This process may take a while, - do you want to proceed? (Y/N)" + deniedText := "Historical states will not be generated. Please continue use --disable-new-state-mgmt" regenHistoricalStatesConfirmed, err = cmd.ConfirmAction(actionText, deniedText) if err != nil { @@ -42,7 +42,7 @@ func (kv *Store) ensureNewStateServiceCompatible(ctx context.Context) error { } if !regenHistoricalStatesConfirmed { - return errors.New("exiting... please do not run with flag --new-state-mgmt") + return errors.New("exiting... please use --disable-new-state-mgmt") } if err := kv.regenHistoricalStates(ctx); err != nil { diff --git a/beacon-chain/db/kv/checkpoint.go b/beacon-chain/db/kv/checkpoint.go index 0b91a8d5ce..d5282364e9 100644 --- a/beacon-chain/db/kv/checkpoint.go +++ b/beacon-chain/db/kv/checkpoint.go @@ -12,7 +12,7 @@ import ( "go.opencensus.io/trace" ) -var errMissingStateForCheckpoint = errors.New("no state exists with checkpoint root") +var errMissingStateForCheckpoint = errors.New("missing state summary for finalized root") // JustifiedCheckpoint returns the latest justified checkpoint in beacon chain. func (k *Store) JustifiedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, error) { @@ -65,8 +65,11 @@ func (k *Store) SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.C } return k.db.Update(func(tx *bolt.Tx) error { bucket := tx.Bucket(checkpointBucket) - if featureconfig.Get().NewStateMgmt { - if tx.Bucket(stateSummaryBucket).Get(checkpoint.Root) == nil && !k.stateSummaryCache.Has(bytesutil.ToBytes32(checkpoint.Root)) { + if !featureconfig.Get().DisableNewStateMgmt { + hasStateSummaryInDB := tx.Bucket(stateSummaryBucket).Get(checkpoint.Root) != nil + hasStateSummaryInCache := k.stateSummaryCache.Has(bytesutil.ToBytes32(checkpoint.Root)) + hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil + if !(hasStateInDB || hasStateSummaryInDB || hasStateSummaryInCache) { return errors.New("missing state summary for finalized root") } } else { @@ -93,8 +96,11 @@ func (k *Store) SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.C } return k.db.Update(func(tx *bolt.Tx) error { bucket := tx.Bucket(checkpointBucket) - if featureconfig.Get().NewStateMgmt { - if tx.Bucket(stateSummaryBucket).Get(checkpoint.Root) == nil && !k.stateSummaryCache.Has(bytesutil.ToBytes32(checkpoint.Root)) { + if !featureconfig.Get().DisableNewStateMgmt { + hasStateSummaryInDB := tx.Bucket(stateSummaryBucket).Get(checkpoint.Root) != nil + hasStateSummaryInCache := k.stateSummaryCache.Has(bytesutil.ToBytes32(checkpoint.Root)) + hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil + if !(hasStateInDB || hasStateSummaryInDB || hasStateSummaryInCache) { return errors.New("missing state summary for finalized root") } } else { diff --git a/beacon-chain/db/kv/checkpoint_test.go b/beacon-chain/db/kv/checkpoint_test.go index 17371ff429..e4926f0291 100644 --- a/beacon-chain/db/kv/checkpoint_test.go +++ b/beacon-chain/db/kv/checkpoint_test.go @@ -2,14 +2,14 @@ package kv import ( "context" + "strings" "testing" "github.com/gogo/protobuf/proto" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-ssz" - "github.com/prysmaticlabs/prysm/beacon-chain/state" - pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bytesutil" + 
"github.com/prysmaticlabs/prysm/shared/testutil" ) func TestStore_JustifiedCheckpoint_CanSaveRetrieve(t *testing.T) { @@ -21,10 +21,11 @@ func TestStore_JustifiedCheckpoint_CanSaveRetrieve(t *testing.T) { Epoch: 10, Root: root[:], } - st, err := state.InitializeFromProto(&pb.BeaconState{Slot: 1}) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(1); err != nil { t.Fatal(err) } + if err := db.SaveState(ctx, st, root); err != nil { t.Fatal(err) } @@ -73,8 +74,8 @@ func TestStore_FinalizedCheckpoint_CanSaveRetrieve(t *testing.T) { if err := db.SaveBlock(ctx, blk); err != nil { t.Fatal(err) } - st, err := state.InitializeFromProto(&pb.BeaconState{Slot: 1}) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(1); err != nil { t.Fatal(err) } // a state is required to save checkpoint @@ -144,7 +145,7 @@ func TestStore_FinalizedCheckpoint_StateMustExist(t *testing.T) { Root: []byte{'B'}, } - if err := db.SaveFinalizedCheckpoint(ctx, cp); err != errMissingStateForCheckpoint { + if err := db.SaveFinalizedCheckpoint(ctx, cp); !strings.Contains(err.Error(), errMissingStateForCheckpoint.Error()) { t.Fatalf("wanted err %v, got %v", errMissingStateForCheckpoint, err) } } diff --git a/beacon-chain/db/kv/encoding.go b/beacon-chain/db/kv/encoding.go index 1e8e9985ac..4391e20abb 100644 --- a/beacon-chain/db/kv/encoding.go +++ b/beacon-chain/db/kv/encoding.go @@ -4,8 +4,11 @@ import ( "errors" "reflect" + fastssz "github.com/ferranbt/fastssz" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" + ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" ) func decode(data []byte, dst proto.Message) error { @@ -13,20 +16,49 @@ func decode(data []byte, dst proto.Message) error { if err != nil { return err } - if err := proto.Unmarshal(data, dst); err != nil { - return err + if isWhitelisted(dst) { + return dst.(fastssz.Unmarshaler).UnmarshalSSZ(data) } - return nil + return proto.Unmarshal(data, dst) } func encode(msg proto.Message) ([]byte, error) { if msg == nil || reflect.ValueOf(msg).IsNil() { return nil, errors.New("cannot encode nil message") } - enc, err := proto.Marshal(msg) - if err != nil { - return nil, err + var enc []byte + var err error + if isWhitelisted(msg) { + enc, err = msg.(fastssz.Marshaler).MarshalSSZ() + if err != nil { + return nil, err + } + } else { + enc, err = proto.Marshal(msg) + if err != nil { + return nil, err + } } - return snappy.Encode(nil, enc), nil } + +func isWhitelisted(obj interface{}) bool { + switch obj.(type) { + case *pb.BeaconState: + return true + case *ethpb.BeaconBlock: + return true + case *ethpb.Attestation: + return true + case *ethpb.Deposit: + return true + case *ethpb.AttesterSlashing: + return true + case *ethpb.ProposerSlashing: + return true + case *ethpb.VoluntaryExit: + return true + default: + return false + } +} diff --git a/beacon-chain/db/kv/finalized_block_roots_test.go b/beacon-chain/db/kv/finalized_block_roots_test.go index fb45048c43..4283b250f4 100644 --- a/beacon-chain/db/kv/finalized_block_roots_test.go +++ b/beacon-chain/db/kv/finalized_block_roots_test.go @@ -6,10 +6,9 @@ import ( ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-ssz" - "github.com/prysmaticlabs/prysm/beacon-chain/state" - pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/params" + "github.com/prysmaticlabs/prysm/shared/testutil" ) 
var genesisBlockRoot = bytesutil.ToBytes32([]byte{'G', 'E', 'N', 'E', 'S', 'I', 'S'}) @@ -39,10 +38,7 @@ func TestStore_IsFinalizedBlock(t *testing.T) { Root: root[:], } - st, err := state.InitializeFromProto(&pb.BeaconState{}) - if err != nil { - t.Fatal(err) - } + st := testutil.NewBeaconState() // a state is required to save checkpoint if err := db.SaveState(ctx, st, root); err != nil { t.Fatal(err) @@ -115,10 +111,7 @@ func TestStore_IsFinalized_ForkEdgeCase(t *testing.T) { Epoch: 1, } - st, err := state.InitializeFromProto(&pb.BeaconState{}) - if err != nil { - t.Fatal(err) - } + st := testutil.NewBeaconState() // A state is required to save checkpoint if err := db.SaveState(ctx, st, bytesutil.ToBytes32(checkpoint1.Root)); err != nil { t.Fatal(err) diff --git a/beacon-chain/db/kv/slashings_test.go b/beacon-chain/db/kv/slashings_test.go index 6cb6222fb6..9925297b49 100644 --- a/beacon-chain/db/kv/slashings_test.go +++ b/beacon-chain/db/kv/slashings_test.go @@ -14,7 +14,24 @@ func TestStore_ProposerSlashing_CRUD(t *testing.T) { defer teardownDB(t, db) ctx := context.Background() prop := ðpb.ProposerSlashing{ - ProposerIndex: 5, + Header_1: ðpb.SignedBeaconBlockHeader{ + Header: ðpb.BeaconBlockHeader{ + ProposerIndex: 5, + BodyRoot: make([]byte, 32), + ParentRoot: make([]byte, 32), + StateRoot: make([]byte, 32), + }, + Signature: make([]byte, 96), + }, + Header_2: ðpb.SignedBeaconBlockHeader{ + Header: ðpb.BeaconBlockHeader{ + ProposerIndex: 5, + BodyRoot: make([]byte, 32), + ParentRoot: make([]byte, 32), + StateRoot: make([]byte, 32), + }, + Signature: make([]byte, 96), + }, } slashingRoot, err := ssz.HashTreeRoot(prop) if err != nil { @@ -57,13 +74,31 @@ func TestStore_AttesterSlashing_CRUD(t *testing.T) { Data: ðpb.AttestationData{ BeaconBlockRoot: make([]byte, 32), Slot: 5, + Source: ðpb.Checkpoint{ + Epoch: 0, + Root: make([]byte, 32), + }, + Target: ðpb.Checkpoint{ + Epoch: 0, + Root: make([]byte, 32), + }, }, + Signature: make([]byte, 96), }, Attestation_2: ðpb.IndexedAttestation{ Data: ðpb.AttestationData{ BeaconBlockRoot: make([]byte, 32), Slot: 7, + Source: ðpb.Checkpoint{ + Epoch: 0, + Root: make([]byte, 32), + }, + Target: ðpb.Checkpoint{ + Epoch: 0, + Root: make([]byte, 32), + }, }, + Signature: make([]byte, 96), }, } slashingRoot, err := ssz.HashTreeRoot(att) diff --git a/beacon-chain/db/kv/state.go b/beacon-chain/db/kv/state.go index 9a9343d147..776b477ca3 100644 --- a/beacon-chain/db/kv/state.go +++ b/beacon-chain/db/kv/state.go @@ -11,7 +11,6 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/state" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bytesutil" - "github.com/prysmaticlabs/prysm/shared/featureconfig" bolt "go.etcd.io/bbolt" "go.opencensus.io/trace" ) @@ -194,15 +193,9 @@ func (k *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error { bkt = tx.Bucket(blocksBucket) headBlkRoot := bkt.Get(headBlockRootKey) - if featureconfig.Get().NewStateMgmt { - if tx.Bucket(stateSummaryBucket).Get(blockRoot[:]) == nil { - return errors.New("cannot delete state without state summary") - } - } else { - // Safe guard against deleting genesis, finalized, head state. - if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) { - return errors.New("cannot delete genesis, finalized, or head state") - } + // Safe guard against deleting genesis, finalized, head state. 
+ if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) { + return errors.New("cannot delete genesis, finalized, or head state") } slot, err := slotByBlockRoot(ctx, tx, blockRoot[:]) @@ -253,15 +246,9 @@ func (k *Store) DeleteStates(ctx context.Context, blockRoots [][32]byte) error { for blockRoot, _ := c.First(); blockRoot != nil; blockRoot, _ = c.Next() { if rootMap[bytesutil.ToBytes32(blockRoot)] { - if featureconfig.Get().NewStateMgmt { - if tx.Bucket(stateSummaryBucket).Get(blockRoot[:]) == nil { - return errors.New("cannot delete state without state summary") - } - } else { - // Safe guard against deleting genesis, finalized, head state. - if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) { - return errors.New("cannot delete genesis, finalized, or head state") - } + // Safe guard against deleting genesis, finalized, head state. + if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) { + return errors.New("cannot delete genesis, finalized, or head state") } slot, err := slotByBlockRoot(ctx, tx, blockRoot) @@ -296,47 +283,45 @@ func slotByBlockRoot(ctx context.Context, tx *bolt.Tx, blockRoot []byte) (uint64 ctx, span := trace.StartSpan(ctx, "BeaconDB.slotByBlockRoot") defer span.End() - if featureconfig.Get().NewStateMgmt { - bkt := tx.Bucket(stateSummaryBucket) - enc := bkt.Get(blockRoot) - if enc == nil { - return 0, errors.New("state summary enc can't be nil") - } - stateSummary := &pb.StateSummary{} - if err := decode(enc, stateSummary); err != nil { - return 0, err - } - return stateSummary.Slot, nil - } - - bkt := tx.Bucket(blocksBucket) + bkt := tx.Bucket(stateSummaryBucket) enc := bkt.Get(blockRoot) + if enc == nil { - // fallback and check the state. - bkt = tx.Bucket(stateBucket) - enc = bkt.Get(blockRoot) + // Fall back to check the block. + bkt := tx.Bucket(blocksBucket) + enc := bkt.Get(blockRoot) + if enc == nil { - return 0, errors.New("state enc can't be nil") + // Fallback and check the state. + bkt = tx.Bucket(stateBucket) + enc = bkt.Get(blockRoot) + if enc == nil { + return 0, errors.New("state enc can't be nil") + } + s, err := createState(enc) + if err != nil { + return 0, err + } + if s == nil { + return 0, errors.New("state can't be nil") + } + return s.Slot, nil } - s, err := createState(enc) + b := ðpb.SignedBeaconBlock{} + err := decode(enc, b) if err != nil { return 0, err } - if s == nil { - return 0, errors.New("state can't be nil") + if b.Block == nil { + return 0, errors.New("block can't be nil") } - return s.Slot, nil + return b.Block.Slot, nil } - - b := ðpb.SignedBeaconBlock{} - err := decode(enc, b) - if err != nil { + stateSummary := &pb.StateSummary{} + if err := decode(enc, stateSummary); err != nil { return 0, err } - if b.Block == nil { - return 0, errors.New("block can't be nil") - } - return b.Block.Slot, nil + return stateSummary.Slot, nil } // HighestSlotStates returns the states with the highest slot from the db. 
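The state_test.go changes that follow repeatedly swap raw InitializeFromProto calls for the shared test helper; a condensed sketch of that pattern (the slot and root values here are arbitrary):

package kv

import (
	"context"
	"reflect"
	"testing"

	"github.com/prysmaticlabs/prysm/shared/testutil"
)

// TestSaveRetrieveState_Sketch builds a fully populated state with the shared
// test helper, adjusts one field, saves it, and compares the retrieved inner
// state against the original.
func TestSaveRetrieveState_Sketch(t *testing.T) {
	db := setupDB(t)
	defer teardownDB(t, db)

	st := testutil.NewBeaconState()
	if err := st.SetSlot(42); err != nil {
		t.Fatal(err)
	}
	root := [32]byte{'R'}
	if err := db.SaveState(context.Background(), st, root); err != nil {
		t.Fatal(err)
	}
	saved, err := db.State(context.Background(), root)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(st.InnerStateUnsafe(), saved.InnerStateUnsafe()) {
		t.Error("retrieved state does not match the saved state")
	}
}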
diff --git a/beacon-chain/db/kv/state_test.go b/beacon-chain/db/kv/state_test.go index aec6bf24a9..7c3f894c70 100644 --- a/beacon-chain/db/kv/state_test.go +++ b/beacon-chain/db/kv/state_test.go @@ -8,24 +8,23 @@ import ( "github.com/gogo/protobuf/proto" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-ssz" - "github.com/prysmaticlabs/prysm/beacon-chain/state" - pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bytesutil" + "github.com/prysmaticlabs/prysm/shared/testutil" + "gopkg.in/d4l3k/messagediff.v1" ) func TestState_CanSaveRetrieve(t *testing.T) { db := setupDB(t) defer teardownDB(t, db) - s := &pb.BeaconState{Slot: 100} r := [32]byte{'A'} if db.HasState(context.Background(), r) { t.Fatal("Wanted false") } - st, err := state.InitializeFromProto(s) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(100); err != nil { t.Fatal(err) } @@ -42,8 +41,9 @@ func TestState_CanSaveRetrieve(t *testing.T) { t.Fatal(err) } - if !reflect.DeepEqual(st, savedS) { - t.Errorf("Did not retrieve saved state: %v != %v", s, savedS) + if !reflect.DeepEqual(st.InnerStateUnsafe(), savedS.InnerStateUnsafe()) { + diff, _ := messagediff.PrettyDiff(st.InnerStateUnsafe(), savedS.InnerStateUnsafe()) + t.Errorf("Did not retrieve saved state: %v", diff) } savedS, err = db.State(context.Background(), [32]byte{'B'}) @@ -60,11 +60,10 @@ func TestHeadState_CanSaveRetrieve(t *testing.T) { db := setupDB(t) defer teardownDB(t, db) - s := &pb.BeaconState{Slot: 100} headRoot := [32]byte{'A'} - st, err := state.InitializeFromProto(s) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(100); err != nil { t.Fatal(err) } @@ -81,7 +80,7 @@ func TestHeadState_CanSaveRetrieve(t *testing.T) { t.Fatal(err) } - if !reflect.DeepEqual(st, savedHeadS) { + if !reflect.DeepEqual(st.InnerStateUnsafe(), savedHeadS.InnerStateUnsafe()) { t.Error("did not retrieve saved state") } } @@ -90,11 +89,10 @@ func TestGenesisState_CanSaveRetrieve(t *testing.T) { db := setupDB(t) defer teardownDB(t, db) - s := &pb.BeaconState{Slot: 1} headRoot := [32]byte{'B'} - st, err := state.InitializeFromProto(s) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(1); err != nil { t.Fatal(err) } @@ -111,7 +109,7 @@ func TestGenesisState_CanSaveRetrieve(t *testing.T) { t.Fatal(err) } - if !reflect.DeepEqual(st, savedGenesisS) { + if !reflect.DeepEqual(st.InnerStateUnsafe(), savedGenesisS.InnerStateUnsafe()) { t.Error("did not retrieve saved state") } @@ -148,8 +146,8 @@ func TestStore_StatesBatchDelete(t *testing.T) { if err != nil { t.Fatal(err) } - st, err := state.InitializeFromProto(&pb.BeaconState{Slot: uint64(i)}) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(uint64(i)); err != nil { t.Fatal(err) } if err := db.SaveState(context.Background(), st, r); err != nil { @@ -191,9 +189,8 @@ func TestStore_DeleteGenesisState(t *testing.T) { if err := db.SaveGenesisBlockRoot(ctx, genesisBlockRoot); err != nil { t.Fatal(err) } - genesisState := &pb.BeaconState{Slot: 100} - st, err := state.InitializeFromProto(genesisState) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(100); err != nil { t.Fatal(err) } if err := db.SaveState(ctx, st, genesisBlockRoot); err != nil { @@ -230,8 +227,8 @@ func TestStore_DeleteFinalizedState(t *testing.T) { t.Fatal(err) } - finalizedState, err := state.InitializeFromProto(&pb.BeaconState{Slot: 100}) - if err != nil { + finalizedState := 
testutil.NewBeaconState() + if err := finalizedState.SetSlot(100); err != nil { t.Fatal(err) } if err := db.SaveState(ctx, finalizedState, finalizedBlockRoot); err != nil { @@ -243,6 +240,7 @@ func TestStore_DeleteFinalizedState(t *testing.T) { } wantedErr := "cannot delete genesis, finalized, or head state" if err := db.DeleteState(ctx, finalizedBlockRoot); err.Error() != wantedErr { + t.Log(err.Error()) t.Error("Did not receive wanted error") } } @@ -271,9 +269,8 @@ func TestStore_DeleteHeadState(t *testing.T) { if err != nil { t.Fatal(err) } - headState := &pb.BeaconState{Slot: 100} - st, err := state.InitializeFromProto(headState) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(100); err != nil { t.Fatal(err) } if err := db.SaveState(ctx, st, headBlockRoot); err != nil { @@ -292,7 +289,6 @@ func TestStore_SaveDeleteState_CanGetHighest(t *testing.T) { db := setupDB(t) defer teardownDB(t, db) - s0 := &pb.BeaconState{Slot: 1} b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1}} r, err := ssz.HashTreeRoot(b.Block) if err != nil { @@ -301,15 +297,15 @@ func TestStore_SaveDeleteState_CanGetHighest(t *testing.T) { if err := db.SaveBlock(context.Background(), b); err != nil { t.Fatal(err) } - st, err := state.InitializeFromProto(s0) - if err != nil { - t.Fatal(err) - } + st := testutil.NewBeaconState() if err := db.SaveState(context.Background(), st, r); err != nil { t.Fatal(err) } + if err := db.SaveGenesisBlockRoot(context.Background(), r); err != nil { + t.Error(err) + } + s0 := st.InnerStateUnsafe() - s1 := &pb.BeaconState{Slot: 999} b = ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 999}} r1, err := ssz.HashTreeRoot(b.Block) if err != nil { @@ -318,10 +314,11 @@ func TestStore_SaveDeleteState_CanGetHighest(t *testing.T) { if err := db.SaveBlock(context.Background(), b); err != nil { t.Fatal(err) } - st, err = state.InitializeFromProto(s1) - if err != nil { + st = testutil.NewBeaconState() + if err := st.SetSlot(999); err != nil { t.Fatal(err) } + s1 := st.InnerStateUnsafe() if err := db.SaveState(context.Background(), st, r1); err != nil { t.Fatal(err) } @@ -334,7 +331,6 @@ func TestStore_SaveDeleteState_CanGetHighest(t *testing.T) { t.Errorf("Did not retrieve saved state: %v != %v", highest, s1) } - s2 := &pb.BeaconState{Slot: 1000} b = ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1000}} r2, err := ssz.HashTreeRoot(b.Block) if err != nil { @@ -343,10 +339,11 @@ func TestStore_SaveDeleteState_CanGetHighest(t *testing.T) { if err := db.SaveBlock(context.Background(), b); err != nil { t.Fatal(err) } - st, err = state.InitializeFromProto(s2) - if err != nil { + st = testutil.NewBeaconState() + if err := st.SetSlot(1000); err != nil { t.Fatal(err) } + s2 := st.InnerStateUnsafe() if err := db.SaveState(context.Background(), st, r2); err != nil { t.Fatal(err) } @@ -377,8 +374,12 @@ func TestStore_SaveDeleteState_CanGetHighest(t *testing.T) { if err != nil { t.Fatal(err) } + if highest[0] == nil { + t.Fatal("returned nil state ") + } if !proto.Equal(highest[0].InnerStateUnsafe(), s0) { - t.Errorf("Did not retrieve saved state: %v != %v", highest, s1) + diff, _ := messagediff.PrettyDiff(highest[0].InnerStateUnsafe(), s0) + t.Errorf("Did not retrieve saved state: %v", diff) } } @@ -386,7 +387,6 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) { db := setupDB(t) defer teardownDB(t, db) - s0 := &pb.BeaconState{Slot: 1} b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1}} r, err := ssz.HashTreeRoot(b.Block) if err != nil { @@ 
-395,15 +395,15 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) { if err := db.SaveBlock(context.Background(), b); err != nil { t.Fatal(err) } - st, err := state.InitializeFromProto(s0) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(1); err != nil { t.Fatal(err) } + s0 := st.InnerStateUnsafe() if err := db.SaveState(context.Background(), st, r); err != nil { t.Fatal(err) } - s1 := &pb.BeaconState{Slot: 100} b = ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 100}} r1, err := ssz.HashTreeRoot(b.Block) if err != nil { @@ -412,10 +412,11 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) { if err := db.SaveBlock(context.Background(), b); err != nil { t.Fatal(err) } - st, err = state.InitializeFromProto(s1) - if err != nil { + st = testutil.NewBeaconState() + if err := st.SetSlot(100); err != nil { t.Fatal(err) } + s1 := st.InnerStateUnsafe() if err := db.SaveState(context.Background(), st, r1); err != nil { t.Fatal(err) } @@ -428,7 +429,6 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) { t.Errorf("Did not retrieve saved state: %v != %v", highest, s1) } - s2 := &pb.BeaconState{Slot: 1000} b = ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1000}} r2, err := ssz.HashTreeRoot(b.Block) if err != nil { @@ -437,10 +437,12 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) { if err := db.SaveBlock(context.Background(), b); err != nil { t.Fatal(err) } - st, err = state.InitializeFromProto(s2) - if err != nil { + st = testutil.NewBeaconState() + if err := st.SetSlot(1000); err != nil { t.Fatal(err) } + s2 := st.InnerStateUnsafe() + if err := db.SaveState(context.Background(), st, r2); err != nil { t.Fatal(err) } @@ -474,11 +476,7 @@ func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) { db := setupDB(t) defer teardownDB(t, db) - s := &pb.BeaconState{} - genesisState, err := state.InitializeFromProto(s) - if err != nil { - t.Fatal(err) - } + genesisState := testutil.NewBeaconState() genesisRoot := [32]byte{'a'} if err := db.SaveGenesisBlockRoot(context.Background(), genesisRoot); err != nil { t.Fatal(err) @@ -487,7 +485,6 @@ func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) { t.Fatal(err) } - s0 := &pb.BeaconState{Slot: 1} b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1}} r, err := ssz.HashTreeRoot(b.Block) if err != nil { @@ -496,8 +493,9 @@ func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) { if err := db.SaveBlock(context.Background(), b); err != nil { t.Fatal(err) } - st, err := state.InitializeFromProto(s0) - if err != nil { + + st := testutil.NewBeaconState() + if err := st.SetSlot(1); err != nil { t.Fatal(err) } if err := db.SaveState(context.Background(), st, r); err != nil { @@ -508,8 +506,8 @@ func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) { if err != nil { t.Fatal(err) } - if !proto.Equal(highest[0].InnerStateUnsafe(), s0) { - t.Errorf("Did not retrieve saved state: %v != %v", highest, s0) + if !proto.Equal(highest[0].InnerStateUnsafe(), st.InnerStateUnsafe()) { + t.Errorf("Did not retrieve saved state: %v != %v", highest, st.InnerStateUnsafe()) } highest, err = db.HighestSlotStatesBelow(context.Background(), 1) @@ -517,13 +515,13 @@ func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) { t.Fatal(err) } if !proto.Equal(highest[0].InnerStateUnsafe(), genesisState.InnerStateUnsafe()) { - t.Errorf("Did not retrieve saved state: %v != %v", highest, s0) + t.Errorf("Did not retrieve saved state: %v != %v", highest, 
genesisState.InnerStateUnsafe()) } highest, err = db.HighestSlotStatesBelow(context.Background(), 0) if err != nil { t.Fatal(err) } if !proto.Equal(highest[0].InnerStateUnsafe(), genesisState.InnerStateUnsafe()) { - t.Errorf("Did not retrieve saved state: %v != %v", highest, s0) + t.Errorf("Did not retrieve saved state: %v != %v", highest, genesisState.InnerStateUnsafe()) } } diff --git a/beacon-chain/flags/base.go b/beacon-chain/flags/base.go index 8f04d9d800..478493cee8 100644 --- a/beacon-chain/flags/base.go +++ b/beacon-chain/flags/base.go @@ -105,9 +105,9 @@ var ( Usage: "The slot durations of when an archived state gets saved in the DB.", Value: 128, } - // EnableDiscv5 enables running discv5. - EnableDiscv5 = &cli.BoolFlag{ - Name: "enable-discv5", - Usage: "Starts dv5 dht.", + // DisableDiscv5 disables running discv5. + DisableDiscv5 = &cli.BoolFlag{ + Name: "disable-discv5", + Usage: "Does not run the discoveryV5 dht.", } ) diff --git a/beacon-chain/flags/config.go b/beacon-chain/flags/config.go index f7332321a9..e65ff77482 100644 --- a/beacon-chain/flags/config.go +++ b/beacon-chain/flags/config.go @@ -14,7 +14,7 @@ type GlobalFlags struct { EnableArchivedBlocks bool EnableArchivedAttestations bool UnsafeSync bool - EnableDiscv5 bool + DisableDiscv5 bool MinimumSyncPeers int MaxPageSize int DeploymentBlock int @@ -54,8 +54,8 @@ func ConfigureGlobalFlags(ctx *cli.Context) { if ctx.Bool(UnsafeSync.Name) { cfg.UnsafeSync = true } - if ctx.Bool(EnableDiscv5.Name) { - cfg.EnableDiscv5 = true + if ctx.Bool(DisableDiscv5.Name) { + cfg.DisableDiscv5 = true } cfg.MaxPageSize = ctx.Int(RPCMaxPageSize.Name) cfg.DeploymentBlock = ctx.Int(ContractDeploymentBlock.Name) diff --git a/beacon-chain/interop-cold-start/service.go b/beacon-chain/interop-cold-start/service.go index 5600e5bc2e..0a3edfd9d8 100644 --- a/beacon-chain/interop-cold-start/service.go +++ b/beacon-chain/interop-cold-start/service.go @@ -166,6 +166,12 @@ func (s *Service) saveGenesisState(ctx context.Context, genesisState *stateTrie. 
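
The state_test.go rewrite above replaces hand-built &pb.BeaconState{...} protos with testutil.NewBeaconState(), which hands back a fully populated state object, and switches failure output from raw %v dumps to messagediff so a mismatch points at the exact differing fields. A minimal sketch of that save-and-compare idiom; the package name, test name and slot value are purely illustrative:

    package statetest

    import (
        "reflect"
        "testing"

        "github.com/prysmaticlabs/prysm/shared/testutil"
        "gopkg.in/d4l3k/messagediff.v1"
    )

    func TestSaveRetrievePattern(t *testing.T) {
        st := testutil.NewBeaconState() // fully populated state, no hand-rolled proto needed
        if err := st.SetSlot(100); err != nil {
            t.Fatal(err)
        }
        retrieved := st // in the real tests this comes back from db.State(...)
        if !reflect.DeepEqual(st.InnerStateUnsafe(), retrieved.InnerStateUnsafe()) {
            // messagediff prints only the differing fields instead of two full states.
            diff, _ := messagediff.PrettyDiff(st.InnerStateUnsafe(), retrieved.InnerStateUnsafe())
            t.Errorf("did not retrieve saved state: %v", diff)
        }
    }

The same pattern repeats throughout the file, which is why the pb and state imports could be dropped.
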
if err := s.beaconDB.SaveBlock(ctx, genesisBlk); err != nil { return errors.Wrap(err, "could not save genesis block") } + if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{ + Slot: 0, + Root: genesisBlkRoot[:], + }); err != nil { + return err + } if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil { return errors.Wrap(err, "could not save genesis state") } diff --git a/beacon-chain/main.go b/beacon-chain/main.go index 362405fe1c..b492509f13 100644 --- a/beacon-chain/main.go +++ b/beacon-chain/main.go @@ -38,7 +38,7 @@ var appFlags = []cli.Flag{ flags.ContractDeploymentBlock, flags.SetGCPercent, flags.UnsafeSync, - flags.EnableDiscv5, + flags.DisableDiscv5, flags.InteropMockEth1DataVotesFlag, flags.InteropGenesisStateFlag, flags.InteropNumValidatorsFlag, @@ -59,6 +59,7 @@ var appFlags = []cli.Flag{ cmd.P2PHostDNS, cmd.P2PMaxPeers, cmd.P2PPrivKey, + cmd.P2PMetadata, cmd.P2PWhitelist, cmd.P2PEncoding, cmd.DataDirFlag, diff --git a/beacon-chain/node/node.go b/beacon-chain/node/node.go index f382d046e1..4709b96996 100644 --- a/beacon-chain/node/node.go +++ b/beacon-chain/node/node.go @@ -298,13 +298,15 @@ func (b *BeaconNode) registerP2P(ctx *cli.Context) error { HostAddress: ctx.String(cmd.P2PHost.Name), HostDNS: ctx.String(cmd.P2PHostDNS.Name), PrivateKey: ctx.String(cmd.P2PPrivKey.Name), + MetaDataDir: ctx.String(cmd.P2PMetadata.Name), TCPPort: ctx.Uint(cmd.P2PTCPPort.Name), UDPPort: ctx.Uint(cmd.P2PUDPPort.Name), MaxPeers: ctx.Uint(cmd.P2PMaxPeers.Name), WhitelistCIDR: ctx.String(cmd.P2PWhitelist.Name), EnableUPnP: ctx.Bool(cmd.EnableUPnPFlag.Name), - EnableDiscv5: ctx.Bool(flags.EnableDiscv5.Name), + DisableDiscv5: ctx.Bool(flags.DisableDiscv5.Name), Encoding: ctx.String(cmd.P2PEncoding.Name), + StateNotifier: b, }) if err != nil { return err @@ -441,6 +443,7 @@ func (b *BeaconNode) registerSyncService(ctx *cli.Context) error { ExitPool: b.exitPool, SlashingPool: b.slashingsPool, StateSummaryCache: b.stateSummaryCache, + StateGen: b.stateGen, }) return b.services.RegisterService(rs) diff --git a/beacon-chain/operations/attestations/BUILD.bazel b/beacon-chain/operations/attestations/BUILD.bazel index 2b7a5c5e77..4abc1eb8d6 100644 --- a/beacon-chain/operations/attestations/BUILD.bazel +++ b/beacon-chain/operations/attestations/BUILD.bazel @@ -3,7 +3,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ - "aggregate.go", "log.go", "metrics.go", "pool.go", @@ -34,7 +33,6 @@ go_library( go_test( name = "go_default_test", srcs = [ - "aggregate_test.go", "pool_test.go", "prepare_forkchoice_test.go", "prune_expired_test.go", @@ -50,6 +48,5 @@ go_test( "@com_github_gogo_protobuf//proto:go_default_library", "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", "@com_github_prysmaticlabs_go_bitfield//:go_default_library", - "@in_gopkg_d4l3k_messagediff_v1//:go_default_library", ], ) diff --git a/beacon-chain/operations/attestations/aggregate.go b/beacon-chain/operations/attestations/aggregate.go deleted file mode 100644 index 5f37d59216..0000000000 --- a/beacon-chain/operations/attestations/aggregate.go +++ /dev/null @@ -1,79 +0,0 @@ -package attestations - -import ( - "context" - "time" - - ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-ssz" - "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" - "github.com/prysmaticlabs/prysm/shared/params" - "go.opencensus.io/trace" -) - -// Define time to aggregate the unaggregated 
attestations at 2 times per slot, this gives -// enough confidence all the unaggregated attestations will be aggregated as aggregator requests. -var timeToAggregate = time.Duration(params.BeaconConfig().SecondsPerSlot/2) * time.Second - -// This kicks off a routine to aggregate the unaggregated attestations from pool. -func (s *Service) aggregateRoutine() { - ticker := time.NewTicker(timeToAggregate) - ctx := context.TODO() - for { - select { - case <-s.ctx.Done(): - return - case <-ticker.C: - attsToBeAggregated := append(s.pool.UnaggregatedAttestations(), s.pool.AggregatedAttestations()...) - if err := s.aggregateAttestations(ctx, attsToBeAggregated); err != nil { - log.WithError(err).Error("Could not aggregate attestation") - } - - // Update metrics for aggregated and unaggregated attestations count. - s.updateMetrics() - } - } -} - -// This aggregates the input attestations via AggregateAttestations helper -// function. -func (s *Service) aggregateAttestations(ctx context.Context, attsToBeAggregated []*ethpb.Attestation) error { - ctx, span := trace.StartSpan(ctx, "Operations.attestations.aggregateAttestations") - defer span.End() - - attsByRoot := make(map[[32]byte][]*ethpb.Attestation) - - for _, att := range attsToBeAggregated { - attDataRoot, err := ssz.HashTreeRoot(att.Data) - if err != nil { - return err - } - attsByRoot[attDataRoot] = append(attsByRoot[attDataRoot], att) - } - - for _, atts := range attsByRoot { - for _, att := range atts { - if !helpers.IsAggregated(att) && len(atts) > 1 { - if err := s.pool.DeleteUnaggregatedAttestation(att); err != nil { - return err - } - } - } - } - - for _, atts := range attsByRoot { - aggregatedAtts, err := helpers.AggregateAttestations(atts) - if err != nil { - return err - } - for _, att := range aggregatedAtts { - if helpers.IsAggregated(att) { - if err := s.pool.SaveAggregatedAttestation(att); err != nil { - return err - } - } - } - } - - return nil -} diff --git a/beacon-chain/operations/attestations/aggregate_test.go b/beacon-chain/operations/attestations/aggregate_test.go deleted file mode 100644 index f008d05682..0000000000 --- a/beacon-chain/operations/attestations/aggregate_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package attestations - -import ( - "context" - "reflect" - "sort" - "testing" - - "github.com/gogo/protobuf/proto" - ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-bitfield" - "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" - "github.com/prysmaticlabs/prysm/shared/bls" - "gopkg.in/d4l3k/messagediff.v1" -) - -func TestAggregateAttestations_SingleAttestation(t *testing.T) { - s, err := NewService(context.Background(), &Config{Pool: NewPool()}) - if err != nil { - t.Fatal(err) - } - - sk := bls.RandKey() - sig := sk.Sign([]byte("dummy_test_data"), 0 /*domain*/) - - unaggregatedAtts := []*ethpb.Attestation{ - {Data: ðpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0b100001}, Signature: sig.Marshal()}, - } - - if err := s.aggregateAttestations(context.Background(), unaggregatedAtts); err != nil { - t.Fatal(err) - } - - if len(s.pool.AggregatedAttestations()) != 0 { - t.Error("Nothing should be aggregated") - } - - if len(s.pool.UnaggregatedAttestations()) != 0 { - t.Error("Unaggregated pool should be empty") - } -} - -func TestAggregateAttestations_MultipleAttestationsSameRoot(t *testing.T) { - s, err := NewService(context.Background(), &Config{Pool: NewPool()}) - if err != nil { - t.Fatal(err) - } - - sk := bls.RandKey() - sig := 
sk.Sign([]byte("dummy_test_data"), 0 /*domain*/) - - data := ðpb.AttestationData{ - Source: ðpb.Checkpoint{}, - Target: ðpb.Checkpoint{}, - } - attsToBeAggregated := []*ethpb.Attestation{ - {Data: data, AggregationBits: bitfield.Bitlist{0b110001}, Signature: sig.Marshal()}, - {Data: data, AggregationBits: bitfield.Bitlist{0b100010}, Signature: sig.Marshal()}, - {Data: data, AggregationBits: bitfield.Bitlist{0b101100}, Signature: sig.Marshal()}, - } - - if err := s.aggregateAttestations(context.Background(), attsToBeAggregated); err != nil { - t.Fatal(err) - } - - if len(s.pool.UnaggregatedAttestations()) != 0 { - t.Error("Nothing should be unaggregated") - } - - wanted, err := helpers.AggregateAttestations(attsToBeAggregated) - if err != nil { - t.Fatal(err) - } - got := s.pool.AggregatedAttestations() - if !reflect.DeepEqual(wanted, got) { - diff, _ := messagediff.PrettyDiff(got[0], wanted[0]) - t.Log(diff) - t.Error("Did not aggregate attestations") - } -} - -func TestAggregateAttestations_MultipleAttestationsDifferentRoots(t *testing.T) { - s, err := NewService(context.Background(), &Config{Pool: NewPool()}) - if err != nil { - t.Fatal(err) - } - mockRoot := [32]byte{} - d := ðpb.AttestationData{ - BeaconBlockRoot: mockRoot[:], - Source: ðpb.Checkpoint{Root: mockRoot[:]}, - Target: ðpb.Checkpoint{Root: mockRoot[:]}, - } - d1, ok := proto.Clone(d).(*ethpb.AttestationData) - if !ok { - t.Fatal("Entity is not of type *ethpb.AttestationData") - } - d1.Slot = 1 - d2, ok := proto.Clone(d).(*ethpb.AttestationData) - if !ok { - t.Fatal("Entity is not of type *ethpb.AttestationData") - } - d2.Slot = 2 - - sk := bls.RandKey() - sig := sk.Sign([]byte("dummy_test_data"), 0 /*domain*/) - - atts := []*ethpb.Attestation{ - {Data: d, AggregationBits: bitfield.Bitlist{0b100001}, Signature: sig.Marshal()}, - {Data: d, AggregationBits: bitfield.Bitlist{0b100010}, Signature: sig.Marshal()}, - {Data: d1, AggregationBits: bitfield.Bitlist{0b100001}, Signature: sig.Marshal()}, - {Data: d1, AggregationBits: bitfield.Bitlist{0b100110}, Signature: sig.Marshal()}, - {Data: d2, AggregationBits: bitfield.Bitlist{0b100100}, Signature: sig.Marshal()}, - } - - if err := s.aggregateAttestations(context.Background(), atts); err != nil { - t.Fatal(err) - } - - if len(s.pool.UnaggregatedAttestations()) != 0 { - t.Error("Unaggregated att pool did not clean up") - } - - received := s.pool.AggregatedAttestations() - sort.Slice(received, func(i, j int) bool { - return received[i].Data.Slot < received[j].Data.Slot - }) - att1, err := helpers.AggregateAttestations([]*ethpb.Attestation{atts[0], atts[1]}) - if err != nil { - t.Error(err) - } - att2, err := helpers.AggregateAttestations([]*ethpb.Attestation{atts[2], atts[3]}) - if err != nil { - t.Error(err) - } - wanted := append(att1, att2...) 
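
The deleted aggregate.go routine and its tests above are not a loss of functionality: the same group-then-aggregate flow moves into the attestation pool as AggregateUnaggregatedAttestations (kv/aggregated.go, further down in this diff). The precondition is unchanged, only attestations committing to identical AttestationData may be merged, so everything is first bucketed by the hash tree root of that data. A standalone sketch of that bucketing step, using the same ethereumapis and go-ssz packages the diff uses; the package and function names are illustrative:

    package aggregation

    import (
        ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
        "github.com/prysmaticlabs/go-ssz"
    )

    // groupByDataRoot buckets attestations by the hash tree root of their data.
    // Attestations in the same bucket vouch for the same block/checkpoint data
    // and are therefore safe to aggregate together.
    func groupByDataRoot(atts []*ethpb.Attestation) (map[[32]byte][]*ethpb.Attestation, error) {
        grouped := make(map[[32]byte][]*ethpb.Attestation, len(atts))
        for _, att := range atts {
            root, err := ssz.HashTreeRoot(att.Data)
            if err != nil {
                return nil, err
            }
            grouped[root] = append(grouped[root], att)
        }
        return grouped, nil
    }
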
- if !reflect.DeepEqual(wanted, received) { - t.Error("Did not aggregate attestations") - } -} diff --git a/beacon-chain/operations/attestations/kv/BUILD.bazel b/beacon-chain/operations/attestations/kv/BUILD.bazel index 8adab3f6fa..be98776d75 100644 --- a/beacon-chain/operations/attestations/kv/BUILD.bazel +++ b/beacon-chain/operations/attestations/kv/BUILD.bazel @@ -17,6 +17,7 @@ go_library( "//shared/hashutil:go_default_library", "@com_github_pkg_errors//:go_default_library", "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", + "@com_github_prysmaticlabs_go_ssz//:go_default_library", ], ) @@ -31,6 +32,7 @@ go_test( ], embed = [":go_default_library"], deps = [ + "//shared/bls:go_default_library", "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", "@com_github_prysmaticlabs_go_bitfield//:go_default_library", ], diff --git a/beacon-chain/operations/attestations/kv/aggregated.go b/beacon-chain/operations/attestations/kv/aggregated.go index f9a3b5a24d..eab810f7ed 100644 --- a/beacon-chain/operations/attestations/kv/aggregated.go +++ b/beacon-chain/operations/attestations/kv/aggregated.go @@ -3,10 +3,68 @@ package kv import ( "github.com/pkg/errors" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" + "github.com/prysmaticlabs/go-ssz" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" ) +// AggregateUnaggregatedAttestations aggregates the unaggregated attestations and save the +// newly aggregated attestations in the pool. +// It tracks the unaggregated attestations that weren't able to aggregate to prevent +// the deletion of unaggregated attestations in the pool. +func (p *AttCaches) AggregateUnaggregatedAttestations() error { + attsByDataRoot := make(map[[32]byte][]*ethpb.Attestation) + unaggregatedAtts := p.UnaggregatedAttestations() + for _, att := range unaggregatedAtts { + attDataRoot, err := ssz.HashTreeRoot(att.Data) + if err != nil { + return err + } + attsByDataRoot[attDataRoot] = append(attsByDataRoot[attDataRoot], att) + } + + // Aggregate unaggregated attestations from the pool and save them in the pool. + // Track the unaggregated attestations that aren't able to aggregate. + leftOverUnaggregatedAtt := make(map[[32]byte]bool) + for _, atts := range attsByDataRoot { + aggregatedAtts := make([]*ethpb.Attestation, 0, len(atts)) + processedAtts, err := helpers.AggregateAttestations(atts) + if err != nil { + return err + } + for _, att := range processedAtts { + if helpers.IsAggregated(att) { + aggregatedAtts = append(aggregatedAtts, att) + } else { + h, err := ssz.HashTreeRoot(att) + if err != nil { + return err + } + leftOverUnaggregatedAtt[h] = true + } + } + if err := p.SaveAggregatedAttestations(aggregatedAtts); err != nil { + return err + } + } + + // Remove the unaggregated attestations from the pool that were successfully aggregated. + for _, att := range unaggregatedAtts { + h, err := ssz.HashTreeRoot(att) + if err != nil { + return err + } + if leftOverUnaggregatedAtt[h] { + continue + } + if err := p.DeleteUnaggregatedAttestation(att); err != nil { + return err + } + } + + return nil +} + // SaveAggregatedAttestation saves an aggregated attestation in cache. 
func (p *AttCaches) SaveAggregatedAttestation(att *ethpb.Attestation) error { if att == nil || att.Data == nil { diff --git a/beacon-chain/operations/attestations/kv/aggregated_test.go b/beacon-chain/operations/attestations/kv/aggregated_test.go index 99bbf56c8c..32ef602093 100644 --- a/beacon-chain/operations/attestations/kv/aggregated_test.go +++ b/beacon-chain/operations/attestations/kv/aggregated_test.go @@ -3,21 +3,39 @@ package kv import ( "reflect" "sort" - "strings" "testing" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-bitfield" + "github.com/prysmaticlabs/prysm/shared/bls" ) -func TestKV_Aggregated_NotAggregated(t *testing.T) { +func TestKV_AggregateUnaggregatedAttestations(t *testing.T) { cache := NewAttCaches() + priv := bls.RandKey() + sig1 := priv.Sign([]byte{'a'}) + sig2 := priv.Sign([]byte{'b'}) + att1 := ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1001}, Signature: sig1.Marshal()} + att2 := ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1010}, Signature: sig1.Marshal()} + att3 := ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1100}, Signature: sig1.Marshal()} + att4 := ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1001}, Signature: sig2.Marshal()} + att5 := ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1001}, Signature: sig1.Marshal()} + att6 := ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1010}, Signature: sig1.Marshal()} + att7 := ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1100}, Signature: sig1.Marshal()} + att8 := ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1001}, Signature: sig2.Marshal()} + atts := []*ethpb.Attestation{att1, att2, att3, att4, att5, att6, att7, att8} + if err := cache.SaveUnaggregatedAttestations(atts); err != nil { + t.Fatal(err) + } + if err := cache.AggregateUnaggregatedAttestations(); err != nil { + t.Fatal(err) + } - att := ðpb.Attestation{AggregationBits: bitfield.Bitlist{0b11}, Data: ðpb.AttestationData{}} - - wanted := "attestation is not aggregated" - if err := cache.SaveAggregatedAttestation(att); !strings.Contains(err.Error(), wanted) { - t.Error("Did not received wanted error") + if len(cache.AggregatedAttestationsBySlotIndex(1, 0)) != 1 { + t.Fatal("Did not aggregate correctly") + } + if len(cache.AggregatedAttestationsBySlotIndex(2, 0)) != 1 { + t.Fatal("Did not aggregate correctly") } } diff --git a/beacon-chain/operations/attestations/pool.go b/beacon-chain/operations/attestations/pool.go index b03b989691..c8ffac08db 100644 --- a/beacon-chain/operations/attestations/pool.go +++ b/beacon-chain/operations/attestations/pool.go @@ -11,6 +11,7 @@ import ( // for aggregator actor. 
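
One detail worth noting in the new AggregateUnaggregatedAttestations above: attestations the helper cannot fold into an aggregate are remembered by their hash tree root and skipped in the delete pass, so lone votes stay in the unaggregated cache for later rounds. A sketch of that partitioning step; helpers.IsAggregated is approximated here by a direct bit count, which is an assumption about its definition, and the package and function names are illustrative:

    package aggregation

    import (
        ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
        "github.com/prysmaticlabs/go-ssz"
    )

    // splitAggregated separates the output of an aggregation pass into
    // attestations that were actually merged and the roots of those still
    // carrying a single vote, so the caller can keep the latter in the pool.
    func splitAggregated(processed []*ethpb.Attestation) ([]*ethpb.Attestation, map[[32]byte]bool, error) {
        aggregated := make([]*ethpb.Attestation, 0, len(processed))
        leftOver := make(map[[32]byte]bool)
        for _, att := range processed {
            if att.AggregationBits.Count() > 1 { // more than one attester bit set
                aggregated = append(aggregated, att)
                continue
            }
            root, err := ssz.HashTreeRoot(att)
            if err != nil {
                return nil, nil, err
            }
            leftOver[root] = true
        }
        return aggregated, leftOver, nil
    }
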
type Pool interface { // For Aggregated attestations + AggregateUnaggregatedAttestations() error SaveAggregatedAttestation(att *ethpb.Attestation) error SaveAggregatedAttestations(atts []*ethpb.Attestation) error AggregatedAttestations() []*ethpb.Attestation diff --git a/beacon-chain/operations/attestations/prepare_forkchoice.go b/beacon-chain/operations/attestations/prepare_forkchoice.go index 3e525c87de..1d69732ec5 100644 --- a/beacon-chain/operations/attestations/prepare_forkchoice.go +++ b/beacon-chain/operations/attestations/prepare_forkchoice.go @@ -46,8 +46,10 @@ func (s *Service) batchForkChoiceAtts(ctx context.Context) error { attsByDataRoot := make(map[[32]byte][]*ethpb.Attestation) - atts := append(s.pool.UnaggregatedAttestations(), s.pool.AggregatedAttestations()...) - atts = append(atts, s.pool.BlockAttestations()...) + if err := s.pool.AggregateUnaggregatedAttestations(); err != nil { + return err + } + atts := append(s.pool.AggregatedAttestations(), s.pool.BlockAttestations()...) atts = append(atts, s.pool.ForkchoiceAttestations()...) // Consolidate attestations by aggregating them by similar data root. diff --git a/beacon-chain/operations/attestations/prepare_forkchoice_test.go b/beacon-chain/operations/attestations/prepare_forkchoice_test.go index 5939842cc3..b89d781fac 100644 --- a/beacon-chain/operations/attestations/prepare_forkchoice_test.go +++ b/beacon-chain/operations/attestations/prepare_forkchoice_test.go @@ -20,7 +20,7 @@ func TestBatchAttestations_Multiple(t *testing.T) { } sk := bls.RandKey() - sig := sk.Sign([]byte("dummy_test_data"), 0 /*domain*/) + sig := sk.Sign([]byte("dummy_test_data")) var mockRoot [32]byte unaggregatedAtts := []*ethpb.Attestation{ @@ -98,21 +98,24 @@ func TestBatchAttestations_Multiple(t *testing.T) { t.Fatal(err) } - wanted, err := helpers.AggregateAttestations([]*ethpb.Attestation{unaggregatedAtts[0], aggregatedAtts[0], blockAtts[0]}) + wanted, err := helpers.AggregateAttestations([]*ethpb.Attestation{aggregatedAtts[0], blockAtts[0]}) if err != nil { t.Fatal(err) } - aggregated, err := helpers.AggregateAttestations([]*ethpb.Attestation{unaggregatedAtts[1], aggregatedAtts[1], blockAtts[1]}) + aggregated, err := helpers.AggregateAttestations([]*ethpb.Attestation{aggregatedAtts[1], blockAtts[1]}) if err != nil { t.Fatal(err) } wanted = append(wanted, aggregated...) - aggregated, err = helpers.AggregateAttestations([]*ethpb.Attestation{unaggregatedAtts[2], aggregatedAtts[2], blockAtts[2]}) + aggregated, err = helpers.AggregateAttestations([]*ethpb.Attestation{aggregatedAtts[2], blockAtts[2]}) if err != nil { t.Fatal(err) } wanted = append(wanted, aggregated...) 
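
A mechanical change running through these test updates is the BLS call shape: under the v0.11.x signing flow the domain is mixed into the signing root before the key ever sees the message, so SecretKey.Sign drops its separate domain argument. The new shape, used exactly as the tests above use it; the message bytes and attestation values are dummy data:

    package main

    import (
        "fmt"

        ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
        "github.com/prysmaticlabs/go-bitfield"
        "github.com/prysmaticlabs/prysm/shared/bls"
    )

    func main() {
        sk := bls.RandKey()
        sig := sk.Sign([]byte("dummy_test_data")) // previously: sk.Sign(msg, domain)
        att := &ethpb.Attestation{
            Data:            &ethpb.AttestationData{Slot: 1},
            AggregationBits: bitfield.Bitlist{0b1001},
            Signature:       sig.Marshal(),
        }
        fmt.Printf("signed attestation for slot %d\n", att.Data.Slot)
    }
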
+ if err := s.pool.AggregateUnaggregatedAttestations(); err != nil { + return + } received := s.pool.ForkchoiceAttestations() sort.Slice(received, func(i, j int) bool { @@ -134,7 +137,7 @@ func TestBatchAttestations_Single(t *testing.T) { } sk := bls.RandKey() - sig := sk.Sign([]byte("dummy_test_data"), 0 /*domain*/) + sig := sk.Sign([]byte("dummy_test_data")) mockRoot := [32]byte{} d := ðpb.AttestationData{ BeaconBlockRoot: mockRoot[:], @@ -194,7 +197,7 @@ func TestAggregateAndSaveForkChoiceAtts_Single(t *testing.T) { } sk := bls.RandKey() - sig := sk.Sign([]byte("dummy_test_data"), 0 /*domain*/) + sig := sk.Sign([]byte("dummy_test_data")) mockRoot := [32]byte{} d := ðpb.AttestationData{ BeaconBlockRoot: mockRoot[:], @@ -226,7 +229,7 @@ func TestAggregateAndSaveForkChoiceAtts_Multiple(t *testing.T) { } sk := bls.RandKey() - sig := sk.Sign([]byte("dummy_test_data"), 0 /*domain*/) + sig := sk.Sign([]byte("dummy_test_data")) mockRoot := [32]byte{} d := ðpb.AttestationData{ BeaconBlockRoot: mockRoot[:], diff --git a/beacon-chain/operations/attestations/service.go b/beacon-chain/operations/attestations/service.go index c17f8aa730..44b9afa7ef 100644 --- a/beacon-chain/operations/attestations/service.go +++ b/beacon-chain/operations/attestations/service.go @@ -43,7 +43,6 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) { // Start an attestation pool service's main event loop. func (s *Service) Start() { go s.prepareForkChoiceAtts() - go s.aggregateRoutine() go s.pruneAttsPool() } diff --git a/beacon-chain/operations/slashings/service.go b/beacon-chain/operations/slashings/service.go index b3a011ab80..fc15ed635b 100644 --- a/beacon-chain/operations/slashings/service.go +++ b/beacon-chain/operations/slashings/service.go @@ -150,7 +150,7 @@ func (p *Pool) InsertProposerSlashing( return errors.Wrap(err, "could not verify proposer slashing") } - idx := slashing.ProposerIndex + idx := slashing.Header_1.Header.ProposerIndex ok, err := p.validatorSlashingPreconditionCheck(state, idx) if err != nil { return err @@ -166,16 +166,17 @@ func (p *Pool) InsertProposerSlashing( // Check if the validator already exists in the list of slashings. // Use binary search to find the answer. found := sort.Search(len(p.pendingProposerSlashing), func(i int) bool { - return p.pendingProposerSlashing[i].ProposerIndex >= slashing.ProposerIndex + return p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex >= slashing.Header_1.Header.ProposerIndex }) - if found != len(p.pendingProposerSlashing) && p.pendingProposerSlashing[found].ProposerIndex == slashing.ProposerIndex { + if found != len(p.pendingProposerSlashing) && p.pendingProposerSlashing[found].Header_1.Header.ProposerIndex == + slashing.Header_1.Header.ProposerIndex { return errors.New("slashing object already exists in pending proposer slashings") } // Insert into pending list and sort again. 
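
The slashings pool edits above follow a v0.11 schema change: ProposerSlashing no longer carries a top-level ProposerIndex, the index lives on each signed header instead, so every sort, binary search and dedup in the pool now reads Header_1.Header.ProposerIndex. A sketch of building a slashing under the new shape and reading the index back the way the pool does; the helper name and index value are illustrative:

    package main

    import (
        "fmt"

        ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
    )

    // slashingForIndex builds a minimal proposer slashing under the v0.11 schema,
    // where both conflicting headers carry the proposer index.
    func slashingForIndex(idx uint64) *ethpb.ProposerSlashing {
        return &ethpb.ProposerSlashing{
            Header_1: &ethpb.SignedBeaconBlockHeader{Header: &ethpb.BeaconBlockHeader{ProposerIndex: idx}},
            Header_2: &ethpb.SignedBeaconBlockHeader{Header: &ethpb.BeaconBlockHeader{ProposerIndex: idx}},
        }
    }

    func main() {
        ps := slashingForIndex(7)
        // The pool keys all sorting and deduplication on this nested field.
        fmt.Println(ps.Header_1.Header.ProposerIndex)
    }
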
p.pendingProposerSlashing = append(p.pendingProposerSlashing, slashing) sort.Slice(p.pendingProposerSlashing, func(i, j int) bool { - return p.pendingProposerSlashing[i].ProposerIndex < p.pendingProposerSlashing[j].ProposerIndex + return p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex < p.pendingProposerSlashing[j].Header_1.Header.ProposerIndex }) return nil } @@ -206,12 +207,12 @@ func (p *Pool) MarkIncludedProposerSlashing(ps *ethpb.ProposerSlashing) { p.lock.Lock() defer p.lock.Unlock() i := sort.Search(len(p.pendingProposerSlashing), func(i int) bool { - return p.pendingProposerSlashing[i].ProposerIndex >= ps.ProposerIndex + return p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex >= ps.Header_1.Header.ProposerIndex }) - if i != len(p.pendingProposerSlashing) && p.pendingProposerSlashing[i].ProposerIndex == ps.ProposerIndex { + if i != len(p.pendingProposerSlashing) && p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex == ps.Header_1.Header.ProposerIndex { p.pendingProposerSlashing = append(p.pendingProposerSlashing[:i], p.pendingProposerSlashing[i+1:]...) } - p.included[ps.ProposerIndex] = true + p.included[ps.Header_1.Header.ProposerIndex] = true numProposerSlashingsIncluded.Inc() } diff --git a/beacon-chain/operations/slashings/service_proposer_test.go b/beacon-chain/operations/slashings/service_proposer_test.go index 090631f620..77729d3c5d 100644 --- a/beacon-chain/operations/slashings/service_proposer_test.go +++ b/beacon-chain/operations/slashings/service_proposer_test.go @@ -15,7 +15,12 @@ import ( func proposerSlashingForValIdx(valIdx uint64) *ethpb.ProposerSlashing { return ðpb.ProposerSlashing{ - ProposerIndex: valIdx, + Header_1: ðpb.SignedBeaconBlockHeader{ + Header: ðpb.BeaconBlockHeader{ProposerIndex: valIdx}, + }, + Header_2: ðpb.SignedBeaconBlockHeader{ + Header: ðpb.BeaconBlockHeader{ProposerIndex: valIdx}, + }, } } @@ -191,12 +196,12 @@ func TestPool_InsertProposerSlashing(t *testing.T) { t.Fatalf("Mismatched lengths of pending list. Got %d, wanted %d.", len(p.pendingProposerSlashing), len(tt.want)) } for i := range p.pendingAttesterSlashing { - if p.pendingProposerSlashing[i].ProposerIndex != tt.want[i].ProposerIndex { + if p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex != tt.want[i].Header_1.Header.ProposerIndex { t.Errorf( "Pending proposer to slash at index %d does not match expected. 
Got=%v wanted=%v", i, - p.pendingProposerSlashing[i].ProposerIndex, - tt.want[i].ProposerIndex, + p.pendingProposerSlashing[i].Header_1.Header.ProposerIndex, + tt.want[i].Header_1.Header.ProposerIndex, ) } if !proto.Equal(p.pendingProposerSlashing[i], tt.want[i]) { diff --git a/beacon-chain/p2p/BUILD.bazel b/beacon-chain/p2p/BUILD.bazel index cad6894d67..5b15a22051 100644 --- a/beacon-chain/p2p/BUILD.bazel +++ b/beacon-chain/p2p/BUILD.bazel @@ -9,6 +9,7 @@ go_library( "dial_relay_node.go", "discovery.go", "doc.go", + "fork.go", "gossip_topic_mappings.go", "handshake.go", "info.go", @@ -20,6 +21,7 @@ go_library( "rpc_topic_mappings.go", "sender.go", "service.go", + "subnets.go", "utils.go", "watch_peers.go", ], @@ -30,6 +32,9 @@ go_library( ], deps = [ "//beacon-chain/cache:go_default_library", + "//beacon-chain/core/feed:go_default_library", + "//beacon-chain/core/feed/state:go_default_library", + "//beacon-chain/core/helpers:go_default_library", "//beacon-chain/p2p/connmgr:go_default_library", "//beacon-chain/p2p/encoder:go_default_library", "//beacon-chain/p2p/peers:go_default_library", @@ -38,7 +43,9 @@ go_library( "//shared/featureconfig:go_default_library", "//shared/hashutil:go_default_library", "//shared/iputils:go_default_library", + "//shared/params:go_default_library", "//shared/runutil:go_default_library", + "//shared/sliceutil:go_default_library", "//shared/traceutil:go_default_library", "@com_github_btcsuite_btcd//btcec:go_default_library", "@com_github_dgraph_io_ristretto//:go_default_library", @@ -72,6 +79,7 @@ go_library( "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library", "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", "@com_github_prysmaticlabs_go_bitfield//:go_default_library", + "@com_github_prysmaticlabs_go_ssz//:go_default_library", "@com_github_sirupsen_logrus//:go_default_library", "@io_opencensus_go//trace:go_default_library", ], @@ -84,20 +92,29 @@ go_test( "broadcaster_test.go", "dial_relay_node_test.go", "discovery_test.go", + "fork_test.go", "gossip_topic_mappings_test.go", "options_test.go", "parameter_test.go", "sender_test.go", "service_test.go", + "subnets_test.go", ], embed = [":go_default_library"], flaky = True, tags = ["block-network"], deps = [ + "//beacon-chain/blockchain/testing:go_default_library", "//beacon-chain/cache:go_default_library", + "//beacon-chain/core/feed:go_default_library", + "//beacon-chain/core/feed/state:go_default_library", + "//beacon-chain/core/helpers:go_default_library", + "//beacon-chain/db/testing:go_default_library", "//beacon-chain/p2p/testing:go_default_library", + "//proto/beacon/p2p/v1:go_default_library", "//proto/testing:go_default_library", "//shared/iputils:go_default_library", + "//shared/params:go_default_library", "//shared/testutil:go_default_library", "@com_github_ethereum_go_ethereum//p2p/discover:go_default_library", "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library", @@ -114,6 +131,8 @@ go_test( "@com_github_multiformats_go_multiaddr//:go_default_library", "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", "@com_github_prysmaticlabs_go_bitfield//:go_default_library", + "@com_github_prysmaticlabs_go_ssz//:go_default_library", + "@com_github_sirupsen_logrus//:go_default_library", "@com_github_sirupsen_logrus//hooks/test:go_default_library", ], ) diff --git a/beacon-chain/p2p/broadcaster.go b/beacon-chain/p2p/broadcaster.go index 342a800552..8b0ae793c4 100644 --- a/beacon-chain/p2p/broadcaster.go +++ 
b/beacon-chain/p2p/broadcaster.go @@ -22,11 +22,15 @@ var ErrMessageNotMapped = errors.New("message type is not mapped to a PubSub top func (s *Service) Broadcast(ctx context.Context, msg proto.Message) error { ctx, span := trace.StartSpan(ctx, "p2p.Broadcast") defer span.End() + forkDigest, err := s.ForkDigest() + if err != nil { + return err + } var topic string switch msg.(type) { case *eth.Attestation: - topic = attestationToTopic(msg.(*eth.Attestation)) + topic = attestationToTopic(msg.(*eth.Attestation), forkDigest) default: var ok bool topic, ok = GossipTypeMapping[reflect.TypeOf(msg)] @@ -34,6 +38,7 @@ func (s *Service) Broadcast(ctx context.Context, msg proto.Message) error { traceutil.AnnotateError(span, ErrMessageNotMapped) return ErrMessageNotMapped } + topic = fmt.Sprintf(topic, forkDigest) } span.AddAttributes(trace.StringAttribute("topic", topic)) @@ -59,11 +64,11 @@ func (s *Service) Broadcast(ctx context.Context, msg proto.Message) error { return nil } -const attestationSubnetTopicFormat = "/eth2/committee_index%d_beacon_attestation" +const attestationSubnetTopicFormat = "/eth2/%x/committee_index%d_beacon_attestation" -func attestationToTopic(att *eth.Attestation) string { +func attestationToTopic(att *eth.Attestation, forkDigest [4]byte) string { if att == nil || att.Data == nil { return "" } - return fmt.Sprintf(attestationSubnetTopicFormat, att.Data.CommitteeIndex) + return fmt.Sprintf(attestationSubnetTopicFormat, forkDigest, att.Data.CommitteeIndex) } diff --git a/beacon-chain/p2p/broadcaster_test.go b/beacon-chain/p2p/broadcaster_test.go index 38298759a0..8d5ee3e2a0 100644 --- a/beacon-chain/p2p/broadcaster_test.go +++ b/beacon-chain/p2p/broadcaster_test.go @@ -2,6 +2,7 @@ package p2p import ( "context" + "fmt" "reflect" "sync" "testing" @@ -9,8 +10,8 @@ import ( "github.com/gogo/protobuf/proto" eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing" testpb "github.com/prysmaticlabs/prysm/proto/testing" + p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing" "github.com/prysmaticlabs/prysm/shared/testutil" ) @@ -34,11 +35,17 @@ func TestService_Broadcast(t *testing.T) { Bar: 55, } + topic := "/eth2/%x/testing" // Set a test gossip mapping for testpb.TestSimpleMessage. - GossipTypeMapping[reflect.TypeOf(msg)] = "/testing" + GossipTypeMapping[reflect.TypeOf(msg)] = topic + digest, err := p.ForkDigest() + if err != nil { + t.Fatal(err) + } + topic = fmt.Sprintf(topic, digest) // External peer subscribes to the topic. - topic := "/testing" + p.Encoding().ProtocolSuffix() + topic += p.Encoding().ProtocolSuffix() sub, err := p2.PubSub().Subscribe(topic) if err != nil { t.Fatal(err) @@ -49,24 +56,24 @@ func TestService_Broadcast(t *testing.T) { // Async listen for the pubsub, must be before the broadcast. var wg sync.WaitGroup wg.Add(1) - go func() { + go func(tt *testing.T) { defer wg.Done() ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() incomingMessage, err := sub.Next(ctx) if err != nil { - t.Fatal(err) + tt.Fatal(err) } result := &testpb.TestSimpleMessage{} if err := p.Encoding().Decode(incomingMessage.Data, result); err != nil { - t.Fatal(err) + tt.Fatal(err) } if !proto.Equal(result, msg) { - t.Errorf("Did not receive expected message, got %+v, wanted %+v", result, msg) + tt.Errorf("Did not receive expected message, got %+v, wanted %+v", result, msg) } - }() + }(t) // Broadcast to peers and wait. 
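
The broadcaster change above namespaces every gossip topic with the node's 4-byte fork digest, so peers on a different fork, or with a different genesis validators root, never share a topic in the first place. The attestation subnet format string gains a %x slot for that digest; a self-contained sketch of the resulting topic construction, where the non-zero digest value is only an example:

    package main

    import "fmt"

    const attestationSubnetTopicFormat = "/eth2/%x/committee_index%d_beacon_attestation"

    func attestationTopic(forkDigest [4]byte, committeeIndex uint64) string {
        return fmt.Sprintf(attestationSubnetTopicFormat, forkDigest, committeeIndex)
    }

    func main() {
        // A zero digest yields the "/eth2/00000000/..." topics asserted in the updated tests.
        fmt.Println(attestationTopic([4]byte{}, 11))
        // A different (hypothetical) digest scopes the same committee to another network.
        fmt.Println(attestationTopic([4]byte{0xe7, 0xa7, 0x5d, 0x5a}, 11))
    }
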
if err := p.Broadcast(context.Background(), msg); err != nil { @@ -99,7 +106,7 @@ func TestService_Attestation_Subnet(t *testing.T) { CommitteeIndex: 0, }, }, - topic: "/eth2/committee_index0_beacon_attestation", + topic: "/eth2/00000000/committee_index0_beacon_attestation", }, { att: ð.Attestation{ @@ -107,7 +114,7 @@ func TestService_Attestation_Subnet(t *testing.T) { CommitteeIndex: 11, }, }, - topic: "/eth2/committee_index11_beacon_attestation", + topic: "/eth2/00000000/committee_index11_beacon_attestation", }, { att: ð.Attestation{ @@ -115,7 +122,7 @@ func TestService_Attestation_Subnet(t *testing.T) { CommitteeIndex: 55, }, }, - topic: "/eth2/committee_index55_beacon_attestation", + topic: "/eth2/00000000/committee_index55_beacon_attestation", }, { att: ð.Attestation{}, @@ -126,7 +133,7 @@ func TestService_Attestation_Subnet(t *testing.T) { }, } for _, tt := range tests { - if res := attestationToTopic(tt.att); res != tt.topic { + if res := attestationToTopic(tt.att, [4]byte{} /* fork digest */); res != tt.topic { t.Errorf("Wrong topic, got %s wanted %s", res, tt.topic) } } diff --git a/beacon-chain/p2p/config.go b/beacon-chain/p2p/config.go index 6acbebb63a..c325b6e68f 100644 --- a/beacon-chain/p2p/config.go +++ b/beacon-chain/p2p/config.go @@ -1,11 +1,15 @@ package p2p +import ( + statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state" +) + // Config for the p2p service. These parameters are set from application level flags // to initialize the p2p service. type Config struct { NoDiscovery bool EnableUPnP bool - EnableDiscv5 bool + DisableDiscv5 bool StaticPeers []string BootstrapNodeAddr []string KademliaBootStrapAddr []string @@ -16,9 +20,11 @@ type Config struct { HostDNS string PrivateKey string DataDir string + MetaDataDir string TCPPort uint UDPPort uint MaxPeers uint WhitelistCIDR string Encoding string + StateNotifier statefeed.Notifier } diff --git a/beacon-chain/p2p/discovery.go b/beacon-chain/p2p/discovery.go index 2308625022..35a925f94c 100644 --- a/beacon-chain/p2p/discovery.go +++ b/beacon-chain/p2p/discovery.go @@ -14,12 +14,8 @@ import ( "github.com/libp2p/go-libp2p-core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" - "github.com/prysmaticlabs/go-bitfield" ) -const attestationSubnetCount = 64 -const attSubnetEnrKey = "attnets" - // Listener defines the discovery V5 network interface that is used // to communicate with other peers. 
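
Config gaining a StateNotifier (with the beacon node passing itself in as that notifier) is what lets the p2p service defer ENR construction until genesis time and the genesis validators root are known, since both feed into the fork digest. A sketch of the expected wait-for-genesis step, under the assumption that the service subscribes to the state feed and waits for the Initialized event the same way the static-peering test later in this diff drives it; the function name, channel handling and return shape are illustrative:

    package p2psketch

    import (
        "time"

        "github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
        statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
    )

    // waitForGenesis blocks until the chain service publishes the Initialized
    // event, then returns the genesis time and validators root the p2p service
    // needs before it can build its fork digest and ENR entries.
    func waitForGenesis(notifier statefeed.Notifier) (time.Time, []byte) {
        stateChannel := make(chan *feed.Event, 1)
        sub := notifier.StateFeed().Subscribe(stateChannel)
        defer sub.Unsubscribe()
        for event := range stateChannel {
            if event.Type != statefeed.Initialized {
                continue
            }
            if data, ok := event.Data.(*statefeed.InitializedData); ok {
                return data.StartTime, data.GenesisValidatorsRoot
            }
        }
        return time.Time{}, nil
    }
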
type Listener interface { @@ -34,10 +30,13 @@ type Listener interface { LocalNode() *enode.LocalNode } -func createListener(ipAddr net.IP, privKey *ecdsa.PrivateKey, cfg *Config) *discover.UDPv5 { +func (s *Service) createListener( + ipAddr net.IP, + privKey *ecdsa.PrivateKey, +) *discover.UDPv5 { udpAddr := &net.UDPAddr{ IP: ipAddr, - Port: int(cfg.UDPPort), + Port: int(s.cfg.UDPPort), } // assume ip is either ipv4 or ipv6 networkVersion := "" @@ -50,12 +49,17 @@ func createListener(ipAddr net.IP, privKey *ecdsa.PrivateKey, cfg *Config) *disc if err != nil { log.Fatal(err) } - localNode, err := createLocalNode(privKey, ipAddr, int(cfg.UDPPort), int(cfg.TCPPort)) + localNode, err := s.createLocalNode( + privKey, + ipAddr, + int(s.cfg.UDPPort), + int(s.cfg.TCPPort), + ) if err != nil { log.Fatal(err) } - if cfg.HostAddress != "" { - hostIP := net.ParseIP(cfg.HostAddress) + if s.cfg.HostAddress != "" { + hostIP := net.ParseIP(s.cfg.HostAddress) if hostIP.To4() == nil && hostIP.To16() == nil { log.Errorf("Invalid host address given: %s", hostIP.String()) } else { @@ -66,7 +70,7 @@ func createListener(ipAddr net.IP, privKey *ecdsa.PrivateKey, cfg *Config) *disc PrivateKey: privKey, } dv5Cfg.Bootnodes = []*enode.Node{} - for _, addr := range cfg.Discv5BootStrapAddr { + for _, addr := range s.cfg.Discv5BootStrapAddr { bootNode, err := enode.Parse(enode.ValidSchemes, addr) if err != nil { log.Fatal(err) @@ -81,7 +85,12 @@ func createListener(ipAddr net.IP, privKey *ecdsa.PrivateKey, cfg *Config) *disc return network } -func createLocalNode(privKey *ecdsa.PrivateKey, ipAddr net.IP, udpPort int, tcpPort int) (*enode.LocalNode, error) { +func (s *Service) createLocalNode( + privKey *ecdsa.PrivateKey, + ipAddr net.IP, + udpPort int, + tcpPort int, +) (*enode.LocalNode, error) { db, err := enode.OpenDB("") if err != nil { return nil, errors.Wrap(err, "could not open node's peer database") @@ -96,11 +105,18 @@ func createLocalNode(privKey *ecdsa.PrivateKey, ipAddr net.IP, udpPort int, tcpP localNode.SetFallbackIP(ipAddr) localNode.SetFallbackUDP(udpPort) + localNode, err = addForkEntry(localNode, s.genesisTime, s.genesisValidatorsRoot) + if err != nil { + return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr") + } return intializeAttSubnets(localNode), nil } -func startDiscoveryV5(addr net.IP, privKey *ecdsa.PrivateKey, cfg *Config) (*discover.UDPv5, error) { - listener := createListener(addr, privKey, cfg) +func (s *Service) startDiscoveryV5( + addr net.IP, + privKey *ecdsa.PrivateKey, +) (*discover.UDPv5, error) { + listener := s.createListener(addr, privKey) record := listener.Self() log.WithField("ENR", record.String()).Info("Started discovery v5") return listener, nil @@ -120,29 +136,6 @@ func startDHTDiscovery(host core.Host, bootstrapAddr string) error { return err } -func intializeAttSubnets(node *enode.LocalNode) *enode.LocalNode { - bitV := bitfield.NewBitvector64() - entry := enr.WithEntry(attSubnetEnrKey, bitV.Bytes()) - node.Set(entry) - return node -} - -func retrieveAttSubnets(record *enr.Record) ([]uint64, error) { - bitV := bitfield.NewBitvector64() - entry := enr.WithEntry(attSubnetEnrKey, &bitV) - err := record.Load(entry) - if err != nil { - return nil, err - } - committeeIdxs := []uint64{} - for i := uint64(0); i < 64; i++ { - if bitV.BitAt(i) { - committeeIdxs = append(committeeIdxs, i) - } - } - return committeeIdxs, nil -} - func parseBootStrapAddrs(addrs []string) (discv5Nodes []string, kadDHTNodes []string) { discv5Nodes, kadDHTNodes = 
parseGenericAddrs(addrs) if len(discv5Nodes) == 0 && len(kadDHTNodes) == 0 { diff --git a/beacon-chain/p2p/discovery_test.go b/beacon-chain/p2p/discovery_test.go index efbbf1d9e1..c828d251af 100644 --- a/beacon-chain/p2p/discovery_test.go +++ b/beacon-chain/p2p/discovery_test.go @@ -13,10 +13,11 @@ import ( "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" "github.com/libp2p/go-libp2p-core/host" - "github.com/prysmaticlabs/go-bitfield" - "github.com/prysmaticlabs/prysm/beacon-chain/cache" + mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" + "github.com/prysmaticlabs/prysm/beacon-chain/core/feed" + statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state" + testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" "github.com/prysmaticlabs/prysm/shared/iputils" "github.com/prysmaticlabs/prysm/shared/testutil" logTest "github.com/sirupsen/logrus/hooks/test" @@ -51,7 +52,10 @@ func createAddrAndPrivKey(t *testing.T) (net.IP, *ecdsa.PrivateKey) { func TestCreateListener(t *testing.T) { port := 1024 ipAddr, pkey := createAddrAndPrivKey(t) - listener := createListener(ipAddr, pkey, &Config{UDPPort: uint(port)}) + s := &Service{ + cfg: &Config{UDPPort: uint(port)}, + } + listener := s.createListener(ipAddr, pkey) defer listener.Close() if !listener.Self().IP().Equal(ipAddr) { @@ -73,26 +77,44 @@ func TestCreateListener(t *testing.T) { func TestStartDiscV5_DiscoverAllPeers(t *testing.T) { port := 2000 ipAddr, pkey := createAddrAndPrivKey(t) - bootListener := createListener(ipAddr, pkey, &Config{UDPPort: uint(port)}) + genesisTime := time.Now() + genesisValidatorsRoot := make([]byte, 32) + s := &Service{ + cfg: &Config{UDPPort: uint(port)}, + genesisTime: genesisTime, + genesisValidatorsRoot: genesisValidatorsRoot, + } + bootListener := s.createListener(ipAddr, pkey) defer bootListener.Close() bootNode := bootListener.Self() - cfg := &Config{ - Discv5BootStrapAddr: []string{bootNode.String()}, - Encoding: "ssz", - } var listeners []*discover.UDPv5 for i := 1; i <= 5; i++ { port = 3000 + i - cfg.UDPPort = uint(port) + cfg := &Config{ + Discv5BootStrapAddr: []string{bootNode.String()}, + Encoding: "ssz", + UDPPort: uint(port), + } ipAddr, pkey := createAddrAndPrivKey(t) - listener, err := startDiscoveryV5(ipAddr, pkey, cfg) + s = &Service{ + cfg: cfg, + genesisTime: genesisTime, + genesisValidatorsRoot: genesisValidatorsRoot, + } + listener, err := s.startDiscoveryV5(ipAddr, pkey) if err != nil { t.Errorf("Could not start discovery for node: %v", err) } listeners = append(listeners, listener) } + defer func() { + // Close down all peers. + for _, listener := range listeners { + listener.Close() + } + }() // Wait for the nodes to have their local routing tables to be populated with the other nodes time.Sleep(discoveryWaitTime) @@ -103,105 +125,13 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) { t.Errorf("The node's local table doesn't have the expected number of nodes. 
"+ "Expected more than or equal to %d but got %d", 4, len(nodes)) } - - // Close all ports - for _, listener := range listeners { - listener.Close() - } -} - -func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) { - port := 2000 - ipAddr, pkey := createAddrAndPrivKey(t) - bootListener := createListener(ipAddr, pkey, &Config{UDPPort: uint(port)}) - defer bootListener.Close() - - bootNode := bootListener.Self() - cfg := &Config{ - BootstrapNodeAddr: []string{bootNode.String()}, - Discv5BootStrapAddr: []string{bootNode.String()}, - Encoding: "ssz", - MaxPeers: 30, - } - // Use shorter period for testing. - currentPeriod := pollingPeriod - pollingPeriod = 1 * time.Second - defer func() { - pollingPeriod = currentPeriod - }() - - var listeners []*discover.UDPv5 - for i := 1; i <= 3; i++ { - port = 3000 + i - cfg.UDPPort = uint(port) - ipAddr, pkey := createAddrAndPrivKey(t) - listener, err := startDiscoveryV5(ipAddr, pkey, cfg) - if err != nil { - t.Errorf("Could not start discovery for node: %v", err) - } - bitV := bitfield.NewBitvector64() - bitV.SetBitAt(uint64(i), true) - - entry := enr.WithEntry(attSubnetEnrKey, &bitV) - listener.LocalNode().Set(entry) - listeners = append(listeners, listener) - } - - // Make one service on port 3001. - port = 4000 - cfg.UDPPort = uint(port) - s, err := NewService(cfg) - if err != nil { - t.Fatal(err) - } - s.Start() - defer func() { - if err := s.Stop(); err != nil { - t.Log(err) - } - }() - - // Wait for the nodes to have their local routing tables to be populated with the other nodes - time.Sleep(discoveryWaitTime) - - // look up 3 different subnets - exists, err := s.FindPeersWithSubnet(1) - if err != nil { - t.Fatal(err) - } - exists2, err := s.FindPeersWithSubnet(2) - if err != nil { - t.Fatal(err) - } - exists3, err := s.FindPeersWithSubnet(3) - if err != nil { - t.Fatal(err) - } - if !exists || !exists2 || !exists3 { - t.Fatal("Peer with subnet doesn't exist") - } - - // update ENR of a peer - testService := &Service{dv5Listener: listeners[0]} - cache.CommitteeIDs.AddIDs([]uint64{10}, 0) - testService.RefreshENR(0) - time.Sleep(2 * time.Second) - - exists, err = s.FindPeersWithSubnet(2) - if err != nil { - t.Fatal(err) - } - - if !exists { - t.Fatal("Peer with subnet doesn't exist") - } - } func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) { addr := net.ParseIP("invalidIP") _, pkey := createAddrAndPrivKey(t) - node, err := createLocalNode(pkey, addr, 0, 0) + s := &Service{} + node, err := s.createLocalNode(pkey, addr, 0, 0) if err != nil { t.Fatal(err) } @@ -214,7 +144,14 @@ func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) { func TestMultiAddrConversion_OK(t *testing.T) { hook := logTest.NewGlobal() ipAddr, pkey := createAddrAndPrivKey(t) - listener := createListener(ipAddr, pkey, &Config{}) + s := &Service{ + cfg: &Config{ + TCPPort: 0, + UDPPort: 0, + }, + } + listener := s.createListener(ipAddr, pkey) + defer listener.Close() _ = convertToMultiAddr([]*enode.Node{listener.Self()}) testutil.AssertLogsDoNotContain(t, hook, "Node doesn't have an ip4 address") @@ -223,8 +160,12 @@ func TestMultiAddrConversion_OK(t *testing.T) { } func TestStaticPeering_PeersAreAdded(t *testing.T) { - cfg := &Config{Encoding: "ssz", MaxPeers: 30} - port := 3000 + db := testDB.SetupDB(t) + defer testDB.TeardownDB(t, db) + cfg := &Config{ + Encoding: "ssz", MaxPeers: 30, + } + port := 6000 var staticPeers []string var hosts []host.Host // setup other nodes @@ -242,26 +183,37 @@ func TestStaticPeering_PeersAreAdded(t *testing.T) { } }() - 
cfg.TCPPort = 14001 - cfg.UDPPort = 14000 + cfg.TCPPort = 14500 + cfg.UDPPort = 14501 cfg.StaticPeers = staticPeers - + cfg.StateNotifier = &mock.MockStateNotifier{} s, err := NewService(cfg) if err != nil { t.Fatal(err) } - s.Start() - s.dv5Listener = &mockListener{} - defer func() { - if err := s.Stop(); err != nil { - t.Log(err) - } + exitRoutine := make(chan bool) + go func() { + s.Start() + <-exitRoutine }() - time.Sleep(100 * time.Millisecond) - + // Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed). + for sent := 0; sent == 0; { + sent = s.stateNotifier.StateFeed().Send(&feed.Event{ + Type: statefeed.Initialized, + Data: &statefeed.InitializedData{ + StartTime: time.Now(), + GenesisValidatorsRoot: make([]byte, 32), + }, + }) + } + time.Sleep(4 * time.Second) peers := s.host.Network().Peers() if len(peers) != 5 { t.Errorf("Not all peers added to peerstore, wanted %d but got %d", 5, len(peers)) } + if err := s.Stop(); err != nil { + t.Fatal(err) + } + exitRoutine <- true } diff --git a/beacon-chain/p2p/encoder/BUILD.bazel b/beacon-chain/p2p/encoder/BUILD.bazel index bd38107582..059d9148e1 100644 --- a/beacon-chain/p2p/encoder/BUILD.bazel +++ b/beacon-chain/p2p/encoder/BUILD.bazel @@ -16,6 +16,7 @@ go_library( "@com_github_gogo_protobuf//proto:go_default_library", "@com_github_golang_snappy//:go_default_library", "@com_github_prysmaticlabs_go_ssz//:go_default_library", + "@com_github_sirupsen_logrus//:go_default_library", ], ) diff --git a/beacon-chain/p2p/encoder/ssz.go b/beacon-chain/p2p/encoder/ssz.go index 2db27f558b..c928005913 100644 --- a/beacon-chain/p2p/encoder/ssz.go +++ b/beacon-chain/p2p/encoder/ssz.go @@ -1,12 +1,14 @@ package encoder import ( + "bytes" "fmt" "io" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prysmaticlabs/go-ssz" + "github.com/sirupsen/logrus" ) var _ = NetworkEncoding(&SszNetworkEncoder{}) @@ -21,14 +23,7 @@ type SszNetworkEncoder struct { } func (e SszNetworkEncoder) doEncode(msg interface{}) ([]byte, error) { - b, err := ssz.Marshal(msg) - if err != nil { - return nil, err - } - if e.UseSnappyCompression { - b = snappy.Encode(nil /*dst*/, b) - } - return b, nil + return ssz.Marshal(msg) } // Encode the proto message to the io.Writer. @@ -36,11 +31,13 @@ func (e SszNetworkEncoder) Encode(w io.Writer, msg interface{}) (int, error) { if msg == nil { return 0, nil } - b, err := e.doEncode(msg) if err != nil { return 0, err } + if e.UseSnappyCompression { + return writeSnappyBuffer(w, b) + } return w.Write(b) } @@ -54,7 +51,14 @@ func (e SszNetworkEncoder) EncodeWithLength(w io.Writer, msg interface{}) (int, if err != nil { return 0, err } - b = append(proto.EncodeVarint(uint64(len(b))), b...) + // write varint first + _, err = w.Write(proto.EncodeVarint(uint64(len(b)))) + if err != nil { + return 0, err + } + if e.UseSnappyCompression { + return writeSnappyBuffer(w, b) + } return w.Write(b) } @@ -71,21 +75,34 @@ func (e SszNetworkEncoder) EncodeWithMaxLength(w io.Writer, msg interface{}, max if uint64(len(b)) > maxSize { return 0, fmt.Errorf("size of encoded message is %d which is larger than the provided max limit of %d", len(b), maxSize) } - b = append(proto.EncodeVarint(uint64(len(b))), b...) 
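
The encoder rewrite above also changes the wire framing. Previously the ssz bytes were block-compressed with snappy.Encode and the varint length was prepended to the compressed buffer; now the varint of the uncompressed ssz length is written first and the payload is streamed through snappy's buffered writer, while the decode path (just below) sizes its read buffer with snappy.MaxEncodedLen. A round-trip sketch of the new framing, with helper names illustrative and varint parsing assumed to have happened already on the read side:

    package framing

    import (
        "io"

        "github.com/gogo/protobuf/proto"
        "github.com/golang/snappy"
    )

    // writeFrame writes the uncompressed payload length as a protobuf varint,
    // then streams the payload itself through a snappy buffered writer.
    func writeFrame(w io.Writer, payload []byte) error {
        if _, err := w.Write(proto.EncodeVarint(uint64(len(payload)))); err != nil {
            return err
        }
        bw := snappy.NewBufferedWriter(w)
        if _, err := bw.Write(payload); err != nil {
            return err
        }
        // Close flushes the final snappy frame to the underlying writer.
        return bw.Close()
    }

    // readFrame decompresses a payload whose uncompressed length (msgLen, already
    // parsed from the leading varint) is known, sizing the buffer with
    // snappy.MaxEncodedLen the way the new DecodeWithMaxLength does.
    func readFrame(r io.Reader, msgLen uint64) ([]byte, error) {
        sr := snappy.NewReader(r)
        buf := make([]byte, snappy.MaxEncodedLen(int(msgLen)))
        n, err := sr.Read(buf)
        if err != nil {
            return nil, err
        }
        return buf[:n], nil
    }
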
+ // write varint first + _, err = w.Write(proto.EncodeVarint(uint64(len(b)))) + if err != nil { + return 0, err + } + if e.UseSnappyCompression { + return writeSnappyBuffer(w, b) + } return w.Write(b) } +func (e SszNetworkEncoder) doDecode(b []byte, to interface{}) error { + return ssz.Unmarshal(b, to) +} + // Decode the bytes to the protobuf message provided. func (e SszNetworkEncoder) Decode(b []byte, to interface{}) error { if e.UseSnappyCompression { - var err error - b, err = snappy.Decode(nil /*dst*/, b) + newBuffer := bytes.NewBuffer(b) + r := snappy.NewReader(newBuffer) + newObj := make([]byte, len(b)) + numOfBytes, err := r.Read(newObj) if err != nil { return err } + return e.doDecode(newObj[:numOfBytes], to) } - - return ssz.Unmarshal(b, to) + return e.doDecode(b, to) } // DecodeWithLength the bytes from io.Reader to the protobuf message provided. @@ -103,15 +120,18 @@ func (e SszNetworkEncoder) DecodeWithMaxLength(r io.Reader, to interface{}, maxS if err != nil { return err } + if e.UseSnappyCompression { + r = snappy.NewReader(r) + } if msgLen > maxSize { return fmt.Errorf("size of decoded message is %d which is larger than the provided max limit of %d", msgLen, maxSize) } - b := make([]byte, msgLen) - _, err = r.Read(b) + b := make([]byte, e.MaxLength(int(msgLen))) + numOfBytes, err := r.Read(b) if err != nil { return err } - return e.Decode(b, to) + return e.doDecode(b[:numOfBytes], to) } // ProtocolSuffix returns the appropriate suffix for protocol IDs. @@ -121,3 +141,23 @@ func (e SszNetworkEncoder) ProtocolSuffix() string { } return "/ssz" } + +// MaxLength specifies the maximum possible length of an encoded +// chunk of data. +func (e SszNetworkEncoder) MaxLength(length int) int { + if e.UseSnappyCompression { + return snappy.MaxEncodedLen(length) + } + return length +} + +// Writes a bytes value through a snappy buffered writer. +func writeSnappyBuffer(w io.Writer, b []byte) (int, error) { + bufWriter := snappy.NewBufferedWriter(w) + defer func() { + if err := bufWriter.Close(); err != nil { + logrus.WithError(err).Error("Failed to close snappy buffered writer") + } + }() + return bufWriter.Write(b) +} diff --git a/beacon-chain/p2p/fork.go b/beacon-chain/p2p/fork.go new file mode 100644 index 0000000000..07c29ad9e7 --- /dev/null +++ b/beacon-chain/p2p/fork.go @@ -0,0 +1,147 @@ +package p2p + +import ( + "bytes" + "encoding/base64" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/pkg/errors" + "github.com/prysmaticlabs/go-ssz" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" + "github.com/prysmaticlabs/prysm/shared/params" + "github.com/sirupsen/logrus" +) + +// ENR key used for eth2-related fork data. +const eth2ENRKey = "eth2" + +// ForkDigest returns the current fork digest of +// the node. +func (s *Service) ForkDigest() ([4]byte, error) { + return createForkDigest(s.genesisTime, s.genesisValidatorsRoot) +} + +// Compares fork ENRs between an incoming peer's record and our node's +// local record values for current and next fork version/epoch. 
+func (s *Service) compareForkENR(record *enr.Record) error { + currentRecord := s.dv5Listener.LocalNode().Node().Record() + peerForkENR, err := retrieveForkEntry(record) + if err != nil { + return err + } + currentForkENR, err := retrieveForkEntry(currentRecord) + if err != nil { + return err + } + // Clients SHOULD connect to peers with current_fork_digest, next_fork_version, + // and next_fork_epoch that match local values. + if !bytes.Equal(peerForkENR.CurrentForkDigest, currentForkENR.CurrentForkDigest) { + return fmt.Errorf( + "fork digest of peer with ENR %v: %v, does not match local value: %v", + record, + peerForkENR.CurrentForkDigest, + currentForkENR.CurrentForkDigest, + ) + } + // Clients MAY connect to peers with the same current_fork_version but a + // different next_fork_version/next_fork_epoch. Unless ENRForkID is manually + // updated to matching prior to the earlier next_fork_epoch of the two clients, + // these type of connecting clients will be unable to successfully interact + // starting at the earlier next_fork_epoch. + buf := bytes.NewBuffer([]byte{}) + if err := record.EncodeRLP(buf); err != nil { + return errors.Wrap(err, "could not encode ENR record to bytes") + } + enrString := base64.URLEncoding.EncodeToString(buf.Bytes()) + if peerForkENR.NextForkEpoch != currentForkENR.NextForkEpoch { + log.WithFields(logrus.Fields{ + "peerNextForkEpoch": peerForkENR.NextForkEpoch, + "peerENR": enrString, + }).Debug("Peer matches fork digest but has different next fork epoch") + } + if !bytes.Equal(peerForkENR.NextForkVersion, currentForkENR.NextForkVersion) { + log.WithFields(logrus.Fields{ + "peerNextForkVersion": peerForkENR.NextForkVersion, + "peerENR": enrString, + }).Debug("Peer matches fork digest but has different next fork version") + } + return nil +} + +// Creates a fork digest from a genesis time and genesis +// validators root, utilizing the current slot to determine +// the active fork version in the node. +func createForkDigest( + genesisTime time.Time, + genesisValidatorsRoot []byte, +) ([4]byte, error) { + currentSlot := helpers.SlotsSince(genesisTime) + currentEpoch := helpers.SlotToEpoch(currentSlot) + + // We retrieve a list of scheduled forks by epoch. + // We loop through the keys in this map to determine the current + // fork version based on the current, time-based epoch number + // since the genesis time. + currentForkVersion := params.BeaconConfig().GenesisForkVersion + scheduledForks := params.BeaconConfig().ForkVersionSchedule + for epoch, forkVersion := range scheduledForks { + if epoch <= currentEpoch { + currentForkVersion = forkVersion + } + } + + digest, err := helpers.ComputeForkDigest(currentForkVersion, genesisValidatorsRoot) + if err != nil { + return [4]byte{}, err + } + return digest, nil +} + +// Adds a fork entry as an ENR record under the eth2EnrKey for +// the local node. The fork entry is an ssz-encoded enrForkID type +// which takes into account the current fork version from the current +// epoch to create a fork digest, the next fork version, +// and the next fork epoch. 
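createForkDigest above selects the fork version active at the current, time-derived epoch from params.BeaconConfig().ForkVersionSchedule and hashes it together with the genesis validators root via helpers.ComputeForkDigest. Below is a sketch of that selection with the schedule walked in epoch order; forkDigestAt is an illustrative name, not part of the change. Plain map iteration, as in the hunk above, is unordered in Go, which only starts to matter once more than one scheduled fork lies in the past.

```go
package p2p

import (
	"sort"

	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/shared/params"
)

// forkDigestAt picks the fork version active at currentEpoch from the
// configured schedule and derives the 4-byte digest from it and the
// genesis validators root.
func forkDigestAt(currentEpoch uint64, genesisValidatorsRoot []byte) ([4]byte, error) {
	version := params.BeaconConfig().GenesisForkVersion
	schedule := params.BeaconConfig().ForkVersionSchedule

	epochs := make([]uint64, 0, len(schedule))
	for e := range schedule {
		epochs = append(epochs, e)
	}
	sort.Slice(epochs, func(i, j int) bool { return epochs[i] < epochs[j] })

	// The latest scheduled fork at or before the current epoch wins.
	for _, e := range epochs {
		if e <= currentEpoch {
			version = schedule[e]
		}
	}
	return helpers.ComputeForkDigest(version, genesisValidatorsRoot)
}
```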
+func addForkEntry( + node *enode.LocalNode, + genesisTime time.Time, + genesisValidatorsRoot []byte, +) (*enode.LocalNode, error) { + digest, err := createForkDigest(genesisTime, genesisValidatorsRoot) + if err != nil { + return nil, err + } + nextForkEpoch := params.BeaconConfig().NextForkEpoch + enrForkID := &pb.ENRForkID{ + CurrentForkDigest: digest[:], + NextForkVersion: params.BeaconConfig().NextForkVersion, + NextForkEpoch: nextForkEpoch, + } + enc, err := ssz.Marshal(enrForkID) + if err != nil { + return nil, err + } + forkEntry := enr.WithEntry(eth2ENRKey, enc) + node.Set(forkEntry) + return node, nil +} + +// Retrieves an enrForkID from an ENR record by key lookup +// under the eth2EnrKey. +func retrieveForkEntry(record *enr.Record) (*pb.ENRForkID, error) { + sszEncodedForkEntry := make([]byte, 16) + entry := enr.WithEntry(eth2ENRKey, &sszEncodedForkEntry) + err := record.Load(entry) + if err != nil { + return nil, err + } + forkEntry := &pb.ENRForkID{} + if err := ssz.Unmarshal(sszEncodedForkEntry, forkEntry); err != nil { + return nil, err + } + return forkEntry, nil +} diff --git a/beacon-chain/p2p/fork_test.go b/beacon-chain/p2p/fork_test.go new file mode 100644 index 0000000000..0af93eea16 --- /dev/null +++ b/beacon-chain/p2p/fork_test.go @@ -0,0 +1,267 @@ +package p2p + +import ( + "bytes" + "math/rand" + "os" + "path" + "strconv" + "testing" + "time" + + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/prysmaticlabs/go-ssz" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" + testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" + "github.com/prysmaticlabs/prysm/shared/params" + "github.com/prysmaticlabs/prysm/shared/testutil" + "github.com/sirupsen/logrus" + logTest "github.com/sirupsen/logrus/hooks/test" +) + +func TestStartDiscv5_DifferentForkDigests(t *testing.T) { + db := testDB.SetupDB(t) + defer testDB.TeardownDB(t, db) + port := 2000 + ipAddr, pkey := createAddrAndPrivKey(t) + genesisTime := time.Now() + genesisValidatorsRoot := make([]byte, 32) + s := &Service{ + cfg: &Config{UDPPort: uint(port)}, + genesisTime: genesisTime, + genesisValidatorsRoot: genesisValidatorsRoot, + } + bootListener := s.createListener(ipAddr, pkey) + defer bootListener.Close() + + bootNode := bootListener.Self() + cfg := &Config{ + Discv5BootStrapAddr: []string{bootNode.String()}, + Encoding: "ssz", + UDPPort: uint(port), + } + + var listeners []*discover.UDPv5 + for i := 1; i <= 5; i++ { + port = 3000 + i + cfg.UDPPort = uint(port) + ipAddr, pkey := createAddrAndPrivKey(t) + + // We give every peer a different genesis validators root, which + // will cause each peer to have a different ForkDigest, preventing + // them from connecting according to our discovery rules for eth2. + root := make([]byte, 32) + copy(root, strconv.Itoa(port)) + s = &Service{ + cfg: cfg, + genesisTime: genesisTime, + genesisValidatorsRoot: root, + } + listener, err := s.startDiscoveryV5(ipAddr, pkey) + if err != nil { + t.Errorf("Could not start discovery for node: %v", err) + } + listeners = append(listeners, listener) + } + defer func() { + // Close down all peers. 
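retrieveForkEntry above loads the ssz-encoded ENRForkID stored under the eth2 key. Assuming the proto mirrors the spec container (a 4-byte fork digest, a 4-byte next fork version, and an 8-byte next fork epoch), the encoding is always 16 bytes, which is what the pre-sized buffer anticipates. A quick illustrative check of that assumption, reusing the same proto and ssz packages as the new file:

```go
package p2p

import (
	"testing"

	"github.com/prysmaticlabs/go-ssz"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)

// Illustrative test: the ssz encoding of ENRForkID should be a fixed
// 16 bytes (4 + 4 + 8), matching the buffer size used by retrieveForkEntry.
func TestENRForkIDEncodedSize(t *testing.T) {
	enc, err := ssz.Marshal(&pb.ENRForkID{
		CurrentForkDigest: make([]byte, 4),
		NextForkVersion:   make([]byte, 4),
		NextForkEpoch:     42,
	})
	if err != nil {
		t.Fatal(err)
	}
	if len(enc) != 16 {
		t.Fatalf("wanted a 16 byte encoding, got %d", len(enc))
	}
}
```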
+ for _, listener := range listeners { + listener.Close() + } + }() + + // Wait for the nodes to have their local routing tables to be populated with the other nodes + time.Sleep(discoveryWaitTime) + + lastListener := listeners[len(listeners)-1] + nodes := lastListener.Lookup(bootNode.ID()) + if len(nodes) < 4 { + t.Errorf("The node's local table doesn't have the expected number of nodes. "+ + "Expected more than or equal to %d but got %d", 4, len(nodes)) + } + + // Now, we start a new p2p service. It should have no peers aside from the + // bootnode given all nodes provided by discv5 will have different fork digests. + cfg.UDPPort = 14000 + cfg.TCPPort = 14001 + s, err := NewService(cfg) + if err != nil { + t.Fatal(err) + } + s.genesisTime = genesisTime + s.genesisValidatorsRoot = make([]byte, 32) + s.dv5Listener = lastListener + multiAddrs := s.processPeers(nodes) + + // We should not have valid peers if the fork digest mismatched. + if len(multiAddrs) != 0 { + t.Errorf("Expected 0 valid peers, got %d", len(multiAddrs)) + } + if err := s.Stop(); err != nil { + t.Fatal(err) + } +} + +func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) { + db := testDB.SetupDB(t) + defer testDB.TeardownDB(t, db) + hook := logTest.NewGlobal() + logrus.SetLevel(logrus.DebugLevel) + port := 2000 + ipAddr, pkey := createAddrAndPrivKey(t) + genesisTime := time.Now() + genesisValidatorsRoot := make([]byte, 32) + s := &Service{ + cfg: &Config{UDPPort: uint(port)}, + genesisTime: genesisTime, + genesisValidatorsRoot: genesisValidatorsRoot, + } + bootListener := s.createListener(ipAddr, pkey) + defer bootListener.Close() + + bootNode := bootListener.Self() + cfg := &Config{ + Discv5BootStrapAddr: []string{bootNode.String()}, + Encoding: "ssz", + UDPPort: uint(port), + } + + originalBeaconConfig := params.BeaconConfig() + + var listeners []*discover.UDPv5 + for i := 1; i <= 5; i++ { + port = 3000 + i + cfg.UDPPort = uint(port) + ipAddr, pkey := createAddrAndPrivKey(t) + + c := params.BeaconConfig() + nextForkEpoch := uint64(i) + c.NextForkEpoch = nextForkEpoch + params.OverrideBeaconConfig(c) + + // We give every peer a different genesis validators root, which + // will cause each peer to have a different ForkDigest, preventing + // them from connecting according to our discovery rules for eth2. + s = &Service{ + cfg: cfg, + genesisTime: genesisTime, + genesisValidatorsRoot: genesisValidatorsRoot, + } + listener, err := s.startDiscoveryV5(ipAddr, pkey) + if err != nil { + t.Errorf("Could not start discovery for node: %v", err) + } + listeners = append(listeners, listener) + } + defer func() { + // Close down all peers. + for _, listener := range listeners { + listener.Close() + } + }() + + // Wait for the nodes to have their local routing tables to be populated with the other nodes + time.Sleep(discoveryWaitTime) + + lastListener := listeners[len(listeners)-1] + nodes := lastListener.Lookup(bootNode.ID()) + if len(nodes) < 4 { + t.Errorf("The node's local table doesn't have the expected number of nodes. "+ + "Expected more than or equal to %d but got %d", 4, len(nodes)) + } + + // Now, we start a new p2p service. It should have no peers aside from the + // bootnode given all nodes provided by discv5 will have different fork digests. 
+ cfg.UDPPort = 14000 + cfg.TCPPort = 14001 + params.OverrideBeaconConfig(originalBeaconConfig) + s, err := NewService(cfg) + if err != nil { + t.Fatal(err) + } + + s.genesisTime = genesisTime + s.genesisValidatorsRoot = make([]byte, 32) + s.dv5Listener = lastListener + multiAddrs := s.processPeers(nodes) + if len(multiAddrs) == 0 { + t.Error("Expected to have valid peers, got 0") + } + + testutil.AssertLogsContain(t, hook, "Peer matches fork digest but has different next fork epoch") + if err := s.Stop(); err != nil { + t.Fatal(err) + } +} + +func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) { + c := params.BeaconConfig() + originalConfig := c + c.ForkVersionSchedule = map[uint64][]byte{ + 0: params.BeaconConfig().GenesisForkVersion, + 1: {0, 0, 0, 1}, + } + nextForkEpoch := uint64(1) + nextForkVersion := []byte{0, 0, 0, 1} + c.NextForkEpoch = nextForkEpoch + c.NextForkVersion = nextForkVersion + params.OverrideBeaconConfig(c) + defer params.OverrideBeaconConfig(originalConfig) + + genesisTime := time.Now() + genesisValidatorsRoot := make([]byte, 32) + digest, err := createForkDigest(genesisTime, make([]byte, 32)) + if err != nil { + t.Fatal(err) + } + enrForkID := &pb.ENRForkID{ + CurrentForkDigest: digest[:], + NextForkVersion: nextForkVersion, + NextForkEpoch: nextForkEpoch, + } + enc, err := ssz.Marshal(enrForkID) + if err != nil { + t.Fatal(err) + } + forkEntry := enr.WithEntry(eth2ENRKey, enc) + // In epoch 1 of current time, the fork version should be + // {0, 0, 0, 1} according to the configuration override above. + temp := testutil.TempDir() + randNum := rand.Int() + tempPath := path.Join(temp, strconv.Itoa(randNum)) + if err := os.Mkdir(tempPath, 0700); err != nil { + t.Fatal(err) + } + pkey, err := privKey(&Config{Encoding: "ssz", DataDir: tempPath}) + if err != nil { + t.Fatalf("Could not get private key: %v", err) + } + db, err := enode.OpenDB("") + if err != nil { + t.Fatal(err) + } + localNode := enode.NewLocalNode(db, pkey) + localNode.Set(forkEntry) + + want, err := helpers.ComputeForkDigest([]byte{0, 0, 0, 0}, genesisValidatorsRoot) + if err != nil { + t.Fatal(err) + } + + resp, err := retrieveForkEntry(localNode.Node().Record()) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(resp.CurrentForkDigest, want[:]) { + t.Errorf("Wanted fork digest: %v, received %v", want, resp.CurrentForkDigest) + } + if !bytes.Equal(resp.NextForkVersion[:], nextForkVersion) { + t.Errorf("Wanted next fork version: %v, received %v", nextForkVersion, resp.NextForkVersion) + } + if resp.NextForkEpoch != nextForkEpoch { + t.Errorf("Wanted next for epoch: %d, received: %d", nextForkEpoch, resp.NextForkEpoch) + } +} diff --git a/beacon-chain/p2p/gossip_topic_mappings.go b/beacon-chain/p2p/gossip_topic_mappings.go index 19a11c9d19..c39b192e3d 100644 --- a/beacon-chain/p2p/gossip_topic_mappings.go +++ b/beacon-chain/p2p/gossip_topic_mappings.go @@ -10,12 +10,12 @@ import ( // GossipTopicMappings represent the protocol ID to protobuf message type map for easy // lookup. 
var GossipTopicMappings = map[string]proto.Message{ - "/eth2/beacon_block": &pb.SignedBeaconBlock{}, - "/eth2/committee_index%d_beacon_attestation": &pb.Attestation{}, - "/eth2/voluntary_exit": &pb.SignedVoluntaryExit{}, - "/eth2/proposer_slashing": &pb.ProposerSlashing{}, - "/eth2/attester_slashing": &pb.AttesterSlashing{}, - "/eth2/beacon_aggregate_and_proof": &pb.AggregateAttestationAndProof{}, + "/eth2/%x/beacon_block": &pb.SignedBeaconBlock{}, + "/eth2/%x/committee_index%d_beacon_attestation": &pb.Attestation{}, + "/eth2/%x/voluntary_exit": &pb.SignedVoluntaryExit{}, + "/eth2/%x/proposer_slashing": &pb.ProposerSlashing{}, + "/eth2/%x/attester_slashing": &pb.AttesterSlashing{}, + "/eth2/%x/beacon_aggregate_and_proof": &pb.SignedAggregateAttestationAndProof{}, } // GossipTypeMapping is the inverse of GossipTopicMappings so that an arbitrary protobuf message diff --git a/beacon-chain/p2p/handshake.go b/beacon-chain/p2p/handshake.go index 4e35b53120..94b02822ad 100644 --- a/beacon-chain/p2p/handshake.go +++ b/beacon-chain/p2p/handshake.go @@ -26,7 +26,7 @@ func (s *Service) AddConnectionHandler(reqFunc func(ctx context.Context, id peer log.WithField("currentState", peerConnectionState).WithField("reason", "already active").Trace("Ignoring connection request") return } - s.peers.Add(conn.RemotePeer(), conn.RemoteMultiaddr(), conn.Stat().Direction, nil) + s.peers.Add(nil /* ENR */, conn.RemotePeer(), conn.RemoteMultiaddr(), conn.Stat().Direction) if len(s.peers.Active()) >= int(s.cfg.MaxPeers) { log.WithField("reason", "at peer limit").Trace("Ignoring connection request") if err := s.Disconnect(conn.RemotePeer()); err != nil { diff --git a/beacon-chain/p2p/info.go b/beacon-chain/p2p/info.go index 35b1fe0f9a..d97555b9bb 100644 --- a/beacon-chain/p2p/info.go +++ b/beacon-chain/p2p/info.go @@ -22,7 +22,7 @@ self=%s %v `, s.cfg.BootstrapNodeAddr, - selfAddresses(s.host), + s.selfAddresses(), len(s.host.Network().Peers()), formatPeers(s.host), // Must be last. Writes one entry per row. ); err != nil { @@ -37,10 +37,13 @@ self=%s } // selfAddresses formats the host data into dialable strings, comma separated. -func selfAddresses(h host.Host) string { +func (s *Service) selfAddresses() string { var addresses []string - for _, ma := range h.Addrs() { - addresses = append(addresses, ma.String()+"/p2p/"+h.ID().Pretty()) + if s.dv5Listener != nil { + addresses = append(addresses, s.dv5Listener.Self().String()) + } + for _, ma := range s.host.Addrs() { + addresses = append(addresses, ma.String()+"/p2p/"+s.host.ID().Pretty()) } return strings.Join(addresses, ",") } diff --git a/beacon-chain/p2p/interfaces.go b/beacon-chain/p2p/interfaces.go index 88fd333b2f..1ba65008ab 100644 --- a/beacon-chain/p2p/interfaces.go +++ b/beacon-chain/p2p/interfaces.go @@ -9,6 +9,7 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder" "github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" ) // P2P represents the full p2p interface composed of all of the sub-interfaces. @@ -21,6 +22,7 @@ type P2P interface { Sender ConnectionHandler PeersProvider + MetadataProvider } // Broadcaster broadcasts messages to peers over the p2p pubsub protocol. @@ -42,6 +44,7 @@ type ConnectionHandler interface { // EncodingProvider provides p2p network encoding. type EncodingProvider interface { Encoding() encoder.NetworkEncoding + ForkDigest() ([4]byte, error) } // PubSubProvider provides the p2p pubsub protocol. 
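The gossip topic templates above now carry a %x placeholder for the fork digest, and EncodingProvider gains ForkDigest() to supply it. The code that actually renders the templates lives in the sync package and is not part of this hunk, so the helper below is only an illustration of how the pieces are meant to compose:

```go
package p2p

import "fmt"

// fullGossipTopic (illustrative only) renders a digest-scoped gossip topic
// such as "/eth2/01020304/beacon_block/ssz_snappy" from one of the templates
// above. ProtocolSuffix() yields "/ssz_snappy" or "/ssz" depending on
// whether snappy compression is enabled; the attestation subnet template
// additionally interpolates a committee index.
func fullGossipTopic(template string, digest [4]byte, protocolSuffix string) string {
	return fmt.Sprintf(template, digest) + protocolSuffix
}
```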
@@ -55,14 +58,21 @@ type PeerManager interface { PeerID() peer.ID RefreshENR(epoch uint64) FindPeersWithSubnet(index uint64) (bool, error) + AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error) } // Sender abstracts the sending functionality from libp2p. type Sender interface { - Send(context.Context, interface{}, peer.ID) (network.Stream, error) + Send(context.Context, interface{}, string, peer.ID) (network.Stream, error) } // PeersProvider abstracts obtaining our current list of known peers status. type PeersProvider interface { Peers() *peers.Status } + +// MetadataProvider returns the metadata related information for the local peer. +type MetadataProvider interface { + Metadata() *pb.MetaData + MetadataSeq() uint64 +} diff --git a/beacon-chain/p2p/monitoring.go b/beacon-chain/p2p/monitoring.go index 5f39de7f22..d8d91d291b 100644 --- a/beacon-chain/p2p/monitoring.go +++ b/beacon-chain/p2p/monitoring.go @@ -6,11 +6,6 @@ import ( ) var ( - p2pTopicPeerCount = promauto.NewGaugeVec(prometheus.GaugeOpts{ - Name: "p2p_topic_peer_count", - Help: "The number of peers subscribed to a given topic.", - }, - []string{"topic"}) p2pPeerCount = promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "p2p_peer_count", Help: "The number of peers in a given state.", @@ -19,10 +14,6 @@ var ( ) func (s *Service) updateMetrics() { - for topic := range GossipTopicMappings { - topic += s.Encoding().ProtocolSuffix() - p2pTopicPeerCount.WithLabelValues(topic).Set(float64(len(s.pubsub.ListPeers(topic)))) - } p2pPeerCount.WithLabelValues("Connected").Set(float64(len(s.peers.Connected()))) p2pPeerCount.WithLabelValues("Disconnected").Set(float64(len(s.peers.Disconnected()))) p2pPeerCount.WithLabelValues("Connecting").Set(float64(len(s.peers.Connecting()))) diff --git a/beacon-chain/p2p/options.go b/beacon-chain/p2p/options.go index 14b263a836..ae7ac0f752 100644 --- a/beacon-chain/p2p/options.go +++ b/beacon-chain/p2p/options.go @@ -9,7 +9,6 @@ import ( "github.com/libp2p/go-libp2p" noise "github.com/libp2p/go-libp2p-noise" filter "github.com/libp2p/go-maddr-filter" - "github.com/multiformats/go-multiaddr" ma "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/beacon-chain/p2p/connmgr" @@ -42,7 +41,7 @@ func buildOptions(cfg *Config, ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt options = append(options, libp2p.AddrsFactory(withRelayAddrs(cfg.RelayNodeAddr))) } if cfg.HostAddress != "" { - options = append(options, libp2p.AddrsFactory(func(addrs []multiaddr.Multiaddr) []multiaddr.Multiaddr { + options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr { external, err := multiAddressBuilder(cfg.HostAddress, cfg.TCPPort) if err != nil { log.WithError(err).Error("Unable to create external multiaddress") @@ -53,8 +52,8 @@ func buildOptions(cfg *Config, ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt })) } if cfg.HostDNS != "" { - options = append(options, libp2p.AddrsFactory(func(addrs []multiaddr.Multiaddr) []multiaddr.Multiaddr { - external, err := multiaddr.NewMultiaddr(fmt.Sprintf("/dns4/%s/tcp/%d", cfg.HostDNS, cfg.TCPPort)) + options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr { + external, err := ma.NewMultiaddr(fmt.Sprintf("/dns4/%s/tcp/%d", cfg.HostDNS, cfg.TCPPort)) if err != nil { log.WithError(err).Error("Unable to create external multiaddress") } else { diff --git a/beacon-chain/p2p/peers/BUILD.bazel b/beacon-chain/p2p/peers/BUILD.bazel index 5ee6019858..e48ca45f9e 100644 --- 
a/beacon-chain/p2p/peers/BUILD.bazel +++ b/beacon-chain/p2p/peers/BUILD.bazel @@ -10,9 +10,12 @@ go_library( "//proto/beacon/p2p/v1:go_default_library", "//shared/bytesutil:go_default_library", "//shared/roughtime:go_default_library", + "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library", + "@com_github_gogo_protobuf//proto:go_default_library", "@com_github_libp2p_go_libp2p_core//network:go_default_library", "@com_github_libp2p_go_libp2p_core//peer:go_default_library", "@com_github_multiformats_go_multiaddr//:go_default_library", + "@com_github_prysmaticlabs_go_bitfield//:go_default_library", ], ) @@ -23,8 +26,10 @@ go_test( deps = [ "//proto/beacon/p2p/v1:go_default_library", "//shared/params:go_default_library", + "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library", "@com_github_libp2p_go_libp2p_core//network:go_default_library", "@com_github_libp2p_go_libp2p_peer//:go_default_library", "@com_github_multiformats_go_multiaddr//:go_default_library", + "@com_github_prysmaticlabs_go_bitfield//:go_default_library", ], ) diff --git a/beacon-chain/p2p/peers/status.go b/beacon-chain/p2p/peers/status.go index 6331df8535..5dbb3013b3 100644 --- a/beacon-chain/p2p/peers/status.go +++ b/beacon-chain/p2p/peers/status.go @@ -25,9 +25,12 @@ import ( "sync" "time" + "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/gogo/protobuf/proto" "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" ma "github.com/multiformats/go-multiaddr" + "github.com/prysmaticlabs/go-bitfield" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bytesutil" @@ -66,9 +69,10 @@ type peerStatus struct { direction network.Direction peerState PeerConnectionState chainState *pb.Status + enr *enr.Record + metaData *pb.MetaData chainStateLastUpdated time.Time badResponses int - committeeIndices []uint64 } // NewStatus creates a new status entity. @@ -86,7 +90,7 @@ func (p *Status) MaxBadResponses() int { // Add adds a peer. // If a peer already exists with this ID its address and direction are updated with the supplied data. -func (p *Status) Add(pid peer.ID, address ma.Multiaddr, direction network.Direction, indices []uint64) { +func (p *Status) Add(record *enr.Record, pid peer.ID, address ma.Multiaddr, direction network.Direction) { p.lock.Lock() defer p.lock.Unlock() @@ -94,19 +98,21 @@ func (p *Status) Add(pid peer.ID, address ma.Multiaddr, direction network.Direct // Peer already exists, just update its address info. status.address = address status.direction = direction - if indices != nil { - status.committeeIndices = indices + if record != nil { + status.enr = record } return } - - p.status[pid] = &peerStatus{ + status := &peerStatus{ address: address, direction: direction, // Peers start disconnected; state will be updated when the handshake process begins. - peerState: PeerDisconnected, - committeeIndices: indices, + peerState: PeerDisconnected, } + if record != nil { + status.enr = record + } + p.status[pid] = status } // Address returns the multiaddress of the given remote peer. @@ -133,6 +139,17 @@ func (p *Status) Direction(pid peer.ID) (network.Direction, error) { return network.DirUnknown, ErrPeerUnknown } +// ENR returns the enr for the corresponding peer id. 
+func (p *Status) ENR(pid peer.ID) (*enr.Record, error) { + p.lock.RLock() + defer p.lock.RUnlock() + + if status, ok := p.status[pid]; ok { + return status.enr, nil + } + return nil, ErrPeerUnknown +} + // SetChainState sets the chain state of the given remote peer. func (p *Status) SetChainState(pid peer.ID, chainState *pb.Status) { p.lock.Lock() @@ -165,16 +182,37 @@ func (p *Status) IsActive(pid peer.ID) bool { return ok && (status.peerState == PeerConnected || status.peerState == PeerConnecting) } +// SetMetadata sets the metadata of the given remote peer. +func (p *Status) SetMetadata(pid peer.ID, metaData *pb.MetaData) { + p.lock.Lock() + defer p.lock.Unlock() + + status := p.fetch(pid) + status.metaData = metaData +} + +// Metadata returns a copy of the metadata corresponding to the provided +// peer id. +func (p *Status) Metadata(pid peer.ID) (*pb.MetaData, error) { + p.lock.RLock() + defer p.lock.RUnlock() + + if status, ok := p.status[pid]; ok { + return proto.Clone(status.metaData).(*pb.MetaData), nil + } + return nil, ErrPeerUnknown +} + // CommitteeIndices retrieves the committee subnets the peer is subscribed to. func (p *Status) CommitteeIndices(pid peer.ID) ([]uint64, error) { p.lock.RLock() defer p.lock.RUnlock() if status, ok := p.status[pid]; ok { - if status.committeeIndices == nil { + if status.enr == nil || status.metaData == nil { return []uint64{}, nil } - return status.committeeIndices, nil + return retrieveIndicesFromBitfield(status.metaData.Attnets), nil } return nil, ErrPeerUnknown } @@ -189,10 +227,12 @@ func (p *Status) SubscribedToSubnet(index uint64) []peer.ID { for pid, status := range p.status { // look at active peers if status.peerState == PeerConnecting || status.peerState == PeerConnected && - status.committeeIndices != nil { - for _, idx := range status.committeeIndices { + status.metaData != nil { + indices := retrieveIndicesFromBitfield(status.metaData.Attnets) + for _, idx := range indices { if idx == index { peers = append(peers, pid) + break } } } @@ -455,3 +495,13 @@ func (p *Status) CurrentEpoch() uint64 { } return helpers.SlotToEpoch(highestSlot) } + +func retrieveIndicesFromBitfield(bitV bitfield.Bitvector64) []uint64 { + committeeIdxs := []uint64{} + for i := uint64(0); i < 64; i++ { + if bitV.BitAt(i) { + committeeIdxs = append(committeeIdxs, i) + } + } + return committeeIdxs +} diff --git a/beacon-chain/p2p/peers/status_test.go b/beacon-chain/p2p/peers/status_test.go index 9be3c95f89..9b81b147da 100644 --- a/beacon-chain/p2p/peers/status_test.go +++ b/beacon-chain/p2p/peers/status_test.go @@ -4,11 +4,14 @@ import ( "bytes" "crypto/rand" "fmt" + "reflect" "testing" + "github.com/ethereum/go-ethereum/p2p/enr" "github.com/libp2p/go-libp2p-core/network" peer "github.com/libp2p/go-libp2p-peer" ma "github.com/multiformats/go-multiaddr" + "github.com/prysmaticlabs/go-bitfield" "github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/params" @@ -38,7 +41,7 @@ func TestPeerExplicitAdd(t *testing.T) { t.Fatalf("Failed to create address: %v", err) } direction := network.DirInbound - p.Add(id, address, direction, []uint64{}) + p.Add(new(enr.Record), id, address, direction) resAddress, err := p.Address(id) if err != nil { @@ -62,7 +65,7 @@ func TestPeerExplicitAdd(t *testing.T) { t.Fatalf("Failed to create address: %v", err) } direction2 := network.DirOutbound - p.Add(id, address2, direction2, []uint64{}) + p.Add(new(enr.Record), id, address2, direction2) 
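With the peer-status changes above, committee subnets are no longer passed into Add; CommitteeIndices derives them from the attnets bitvector in the peer's metadata, and it returns an empty slice unless the peer has both an ENR record and metadata on file. An illustrative test of that guard, in the style of the surrounding status_test.go cases:

```go
package peers_test

import (
	"testing"

	"github.com/libp2p/go-libp2p-core/network"
	peer "github.com/libp2p/go-libp2p-peer"
	"github.com/prysmaticlabs/go-bitfield"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)

// Illustrative: metadata alone is not enough; without an ENR on record the
// peer reports no committee indices.
func TestCommitteeIndicesRequireENR(t *testing.T) {
	p := peers.NewStatus(2)
	id, err := peer.IDB58Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
	if err != nil {
		t.Fatal(err)
	}
	p.Add(nil /* no ENR */, id, nil, network.DirInbound)

	bitV := bitfield.NewBitvector64()
	bitV.SetBitAt(2, true)
	p.SetMetadata(id, &pb.MetaData{SeqNumber: 1, Attnets: bitV})

	indices, err := p.CommitteeIndices(id)
	if err != nil {
		t.Fatal(err)
	}
	if len(indices) != 0 {
		t.Errorf("expected no indices without an ENR, got %v", indices)
	}
}
```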
resAddress2, err := p.Address(id) if err != nil { @@ -81,6 +84,58 @@ func TestPeerExplicitAdd(t *testing.T) { } } +func TestPeerNoENR(t *testing.T) { + maxBadResponses := 2 + p := peers.NewStatus(maxBadResponses) + + id, err := peer.IDB58Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR") + if err != nil { + t.Fatalf("Failed to create ID: %v", err) + } + address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000") + if err != nil { + t.Fatalf("Failed to create address: %v", err) + } + direction := network.DirInbound + p.Add(nil, id, address, direction) + + retrievedENR, err := p.ENR(id) + if err != nil { + t.Fatalf("Could not retrieve chainstate: %v", err) + } + if retrievedENR != nil { + t.Error("Wanted a nil enr to be saved") + } +} + +func TestPeerNoOverwriteENR(t *testing.T) { + maxBadResponses := 2 + p := peers.NewStatus(maxBadResponses) + + id, err := peer.IDB58Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR") + if err != nil { + t.Fatalf("Failed to create ID: %v", err) + } + address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000") + if err != nil { + t.Fatalf("Failed to create address: %v", err) + } + direction := network.DirInbound + record := new(enr.Record) + record.Set(enr.WithEntry("test", []byte{'a'})) + p.Add(record, id, address, direction) + // try to overwrite + p.Add(nil, id, address, direction) + + retrievedENR, err := p.ENR(id) + if err != nil { + t.Fatalf("Could not retrieve chainstate: %v", err) + } + if retrievedENR == nil { + t.Error("Wanted a non-nil enr") + } +} + func TestErrUnknownPeer(t *testing.T) { maxBadResponses := 2 p := peers.NewStatus(maxBadResponses) @@ -121,6 +176,94 @@ func TestErrUnknownPeer(t *testing.T) { } } +func TestPeerCommitteeIndices(t *testing.T) { + maxBadResponses := 2 + p := peers.NewStatus(maxBadResponses) + + id, err := peer.IDB58Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR") + if err != nil { + t.Fatalf("Failed to create ID: %v", err) + } + address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000") + if err != nil { + t.Fatalf("Failed to create address: %v", err) + } + direction := network.DirInbound + record := new(enr.Record) + record.Set(enr.WithEntry("test", []byte{'a'})) + p.Add(record, id, address, direction) + bitV := bitfield.NewBitvector64() + for i := 0; i < 64; i++ { + if i == 2 || i == 8 || i == 9 { + bitV.SetBitAt(uint64(i), true) + } + } + p.SetMetadata(id, &pb.MetaData{ + SeqNumber: 2, + Attnets: bitV, + }) + + wantedIndices := []uint64{2, 8, 9} + + indices, err := p.CommitteeIndices(id) + if err != nil { + t.Fatalf("Could not retrieve committee indices: %v", err) + } + + if !reflect.DeepEqual(indices, wantedIndices) { + t.Errorf("Wanted indices of %v but got %v", wantedIndices, indices) + } +} + +func TestPeerSubscribedToSubnet(t *testing.T) { + maxBadResponses := 2 + p := peers.NewStatus(maxBadResponses) + + // Add some peers with different states + numPeers := 2 + for i := 0; i < numPeers; i++ { + addPeer(t, p, peers.PeerConnected) + } + expectedPeer := p.All()[1] + bitV := bitfield.NewBitvector64() + for i := 0; i < 64; i++ { + if i == 2 || i == 8 || i == 9 { + bitV.SetBitAt(uint64(i), true) + } + } + p.SetMetadata(expectedPeer, &pb.MetaData{ + SeqNumber: 2, + Attnets: bitV, + }) + numPeers = 3 + for i := 0; i < numPeers; i++ { + addPeer(t, p, peers.PeerDisconnected) + } + peers := p.SubscribedToSubnet(2) + if len(peers) != 1 { + t.Errorf("Expected num of peers to be %d but got %d", 1, len(peers)) + } + if peers[0] != expectedPeer { + t.Errorf("Expected 
peer of %s but got %s", expectedPeer, peers[0]) + } + + peers = p.SubscribedToSubnet(8) + if len(peers) != 1 { + t.Errorf("Expected num of peers to be %d but got %d", 1, len(peers)) + } + if peers[0] != expectedPeer { + t.Errorf("Expected peer of %s but got %s", expectedPeer, peers[0]) + } + + peers = p.SubscribedToSubnet(9) + if len(peers) != 1 { + t.Errorf("Expected num of peers to be %d but got %d", 1, len(peers)) + } + if peers[0] != expectedPeer { + t.Errorf("Expected peer of %s but got %s", expectedPeer, peers[0]) + } +} + func TestPeerImplicitAdd(t *testing.T) { maxBadResponses := 2 p := peers.NewStatus(maxBadResponses) @@ -156,7 +299,7 @@ func TestPeerChainState(t *testing.T) { t.Fatalf("Failed to create address: %v", err) } direction := network.DirInbound - p.Add(id, address, direction, []uint64{}) + p.Add(new(enr.Record), id, address, direction) oldChainStartLastUpdated, err := p.ChainStateLastUpdated(id) if err != nil { @@ -208,7 +351,7 @@ func TestPeerBadResponses(t *testing.T) { t.Fatalf("Failed to create address: %v", err) } direction := network.DirInbound - p.Add(id, address, direction, []uint64{}) + p.Add(new(enr.Record), id, address, direction) resBadResponses, err := p.BadResponses(id) if err != nil { @@ -258,6 +401,32 @@ func TestPeerBadResponses(t *testing.T) { } } +func TestAddMetaData(t *testing.T) { + maxBadResponses := 2 + p := peers.NewStatus(maxBadResponses) + + // Add some peers with different states + numPeers := 5 + for i := 0; i < numPeers; i++ { + addPeer(t, p, peers.PeerConnected) + } + newPeer := p.All()[2] + + newMetaData := &pb.MetaData{ + SeqNumber: 8, + Attnets: bitfield.NewBitvector64(), + } + p.SetMetadata(newPeer, newMetaData) + + md, err := p.Metadata(newPeer) + if err != nil { + t.Fatal(err) + } + if md.SeqNumber != newMetaData.SeqNumber { + t.Errorf("Wanted sequence number of %d but got %d", newMetaData.SeqNumber, md.SeqNumber) + } +} + func TestPeerConnectionStatuses(t *testing.T) { maxBadResponses := 2 p := peers.NewStatus(maxBadResponses) @@ -470,7 +639,7 @@ func TestBestFinalized_returnsMaxValue(t *testing.T) { p := peers.NewStatus(maxBadResponses) for i := 0; i <= maxPeers+100; i++ { - p.Add(peer.ID(i), nil, network.DirOutbound, []uint64{}) + p.Add(new(enr.Record), peer.ID(i), nil, network.DirOutbound) p.SetConnectionState(peer.ID(i), peers.PeerConnected) p.SetChainState(peer.ID(i), &pb.Status{ FinalizedEpoch: 10, @@ -520,7 +689,11 @@ func addPeer(t *testing.T, p *peers.Status, state peers.PeerConnectionState) pee if err != nil { t.Fatalf("Unexpected error: %v", err) } - p.Add(id, nil, network.DirUnknown, []uint64{}) + p.Add(new(enr.Record), id, nil, network.DirUnknown) p.SetConnectionState(id, state) + p.SetMetadata(id, &pb.MetaData{ + SeqNumber: 0, + Attnets: bitfield.NewBitvector64(), + }) return id } diff --git a/beacon-chain/p2p/rpc_topic_mappings.go b/beacon-chain/p2p/rpc_topic_mappings.go index 7cfa5f3d5b..e918998a65 100644 --- a/beacon-chain/p2p/rpc_topic_mappings.go +++ b/beacon-chain/p2p/rpc_topic_mappings.go @@ -1,27 +1,32 @@ package p2p import ( - "reflect" - p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" ) +const ( + // RPCStatusTopic defines the topic for the status rpc method. + RPCStatusTopic = "/eth2/beacon_chain/req/status/1" + // RPCGoodByeTopic defines the topic for the goodbye rpc method. + RPCGoodByeTopic = "/eth2/beacon_chain/req/goodbye/1" + // RPCBlocksByRangeTopic defines the topic for the blocks by range rpc method. 
+ RPCBlocksByRangeTopic = "/eth2/beacon_chain/req/beacon_blocks_by_range/1" + // RPCBlocksByRootTopic defines the topic for the blocks by root rpc method. + RPCBlocksByRootTopic = "/eth2/beacon_chain/req/beacon_blocks_by_root/1" + // RPCPingTopic defines the topic for the ping rpc method. + RPCPingTopic = "/eth2/beacon_chain/req/ping/1" + // RPCMetaDataTopic defines the topic for the metadata rpc method. + RPCMetaDataTopic = "/eth2/beacon_chain/req/metadata/1" +) + // RPCTopicMappings represent the protocol ID to protobuf message type map for easy // lookup. These mappings should be used for outbound sending only. Peers may respond // with a different message type as defined by the p2p protocol. var RPCTopicMappings = map[string]interface{}{ - "/eth2/beacon_chain/req/status/1": &p2ppb.Status{}, - "/eth2/beacon_chain/req/goodbye/1": new(uint64), - "/eth2/beacon_chain/req/beacon_blocks_by_range/1": &p2ppb.BeaconBlocksByRangeRequest{}, - "/eth2/beacon_chain/req/beacon_blocks_by_root/1": [][32]byte{}, -} - -// RPCTypeMapping is the inverse of RPCTopicMappings so that an arbitrary protobuf message -// can be mapped to a protocol ID string. -var RPCTypeMapping = make(map[reflect.Type]string) - -func init() { - for k, v := range RPCTopicMappings { - RPCTypeMapping[reflect.TypeOf(v)] = k - } + RPCStatusTopic: &p2ppb.Status{}, + RPCGoodByeTopic: new(uint64), + RPCBlocksByRangeTopic: &p2ppb.BeaconBlocksByRangeRequest{}, + RPCBlocksByRootTopic: [][32]byte{}, + RPCPingTopic: new(uint64), + RPCMetaDataTopic: new(interface{}), } diff --git a/beacon-chain/p2p/sender.go b/beacon-chain/p2p/sender.go index 8a7ea8e9f9..d24302056e 100644 --- a/beacon-chain/p2p/sender.go +++ b/beacon-chain/p2p/sender.go @@ -2,7 +2,6 @@ package p2p import ( "context" - "reflect" "time" "github.com/libp2p/go-libp2p-core/network" @@ -14,10 +13,10 @@ import ( // Send a message to a specific peer. The returned stream may be used for reading, but has been // closed for writing. -func (s *Service) Send(ctx context.Context, message interface{}, pid peer.ID) (network.Stream, error) { +func (s *Service) Send(ctx context.Context, message interface{}, baseTopic string, pid peer.ID) (network.Stream, error) { ctx, span := trace.StartSpan(ctx, "p2p.Send") defer span.End() - topic := RPCTypeMapping[reflect.TypeOf(message)] + s.Encoding().ProtocolSuffix() + topic := baseTopic + s.Encoding().ProtocolSuffix() span.AddAttributes(trace.StringAttribute("topic", topic)) // TTFB_TIME (5s) + RESP_TIMEOUT (10s). @@ -38,6 +37,11 @@ func (s *Service) Send(ctx context.Context, message interface{}, pid peer.ID) (n traceutil.AnnotateError(span, err) return nil, err } + // do not encode anything if we are sending a metadata request + if baseTopic == RPCMetaDataTopic { + return stream, nil + } + if _, err := s.Encoding().EncodeWithLength(stream, message); err != nil { traceutil.AnnotateError(span, err) return nil, err diff --git a/beacon-chain/p2p/sender_test.go b/beacon-chain/p2p/sender_test.go index 2a962ce9ed..9a7e4eb479 100644 --- a/beacon-chain/p2p/sender_test.go +++ b/beacon-chain/p2p/sender_test.go @@ -2,7 +2,6 @@ package p2p import ( "context" - "reflect" "sync" "testing" "time" @@ -29,29 +28,25 @@ func TestService_Send(t *testing.T) { Bar: 55, } - // Register testing topic. - RPCTypeMapping[reflect.TypeOf(msg)] = "/testing/1" - // Register external listener which will repeat the message back. 
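Send now takes the base RPC topic explicitly instead of inferring it from the message type, and RPCMetaDataTopic is special-cased: the stream is returned before anything is written, because a metadata request has no body. A hedged sketch of both calls; pingAndRequestMetadata is an illustrative name, and the real request/response handlers live in the sync package:

```go
package p2p

import (
	"context"

	"github.com/libp2p/go-libp2p-core/peer"
)

// pingAndRequestMetadata is an illustrative sketch only. It sends our current
// metadata sequence number on the ping topic, then opens a body-less metadata
// request; reading the responses is left to the real handlers.
func pingAndRequestMetadata(ctx context.Context, s *Service, pid peer.ID) error {
	seq := s.MetadataSeq()
	// Ping carries a single uint64, matching RPCTopicMappings[RPCPingTopic].
	if _, err := s.Send(ctx, &seq, RPCPingTopic, pid); err != nil {
		return err
	}
	// For RPCMetaDataTopic, Send returns the stream without encoding anything;
	// the caller only reads the remote peer's MetaData from it.
	_, err := s.Send(ctx, new(interface{}), RPCMetaDataTopic, pid)
	return err
}
```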
var wg sync.WaitGroup wg.Add(1) - go func() { - p2.SetStreamHandler("/testing/1/ssz", func(stream network.Stream) { - rcvd := &testpb.TestSimpleMessage{} - if err := svc.Encoding().DecodeWithLength(stream, rcvd); err != nil { - t.Fatal(err) - } - if _, err := svc.Encoding().EncodeWithLength(stream, rcvd); err != nil { - t.Fatal(err) - } - if err := stream.Close(); err != nil { - t.Error(err) - } - wg.Done() - }) - }() - stream, err := svc.Send(context.Background(), msg, p2.Host.ID()) + p2.SetStreamHandler("/testing/1/ssz", func(stream network.Stream) { + rcvd := &testpb.TestSimpleMessage{} + if err := svc.Encoding().DecodeWithLength(stream, rcvd); err != nil { + t.Fatal(err) + } + if _, err := svc.Encoding().EncodeWithLength(stream, rcvd); err != nil { + t.Fatal(err) + } + if err := stream.Close(); err != nil { + t.Error(err) + } + wg.Done() + }) + + stream, err := svc.Send(context.Background(), msg, "/testing/1", p2.Host.ID()) if err != nil { t.Fatal(err) } @@ -65,5 +60,4 @@ func TestService_Send(t *testing.T) { if !proto.Equal(rcvd, msg) { t.Errorf("Expected identical message to be received. got %v want %v", rcvd, msg) } - } diff --git a/beacon-chain/p2p/service.go b/beacon-chain/p2p/service.go index 0f3dfcba61..278b08416d 100644 --- a/beacon-chain/p2p/service.go +++ b/beacon-chain/p2p/service.go @@ -1,6 +1,7 @@ package p2p import ( + "bytes" "context" "crypto/ecdsa" "strconv" @@ -10,6 +11,7 @@ import ( "github.com/dgraph-io/ristretto" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/gogo/protobuf/proto" ds "github.com/ipfs/go-datastore" dsync "github.com/ipfs/go-datastore/sync" "github.com/libp2p/go-libp2p" @@ -25,10 +27,16 @@ import ( "github.com/pkg/errors" "github.com/prysmaticlabs/go-bitfield" "github.com/prysmaticlabs/prysm/beacon-chain/cache" + "github.com/prysmaticlabs/prysm/beacon-chain/core/feed" + statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder" "github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared" + "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/runutil" + "github.com/prysmaticlabs/prysm/shared/sliceutil" "github.com/sirupsen/logrus" ) @@ -37,6 +45,9 @@ var _ = shared.Service(&Service{}) // Check local table every 5 seconds for newly added peers. var pollingPeriod = 5 * time.Second +// Refresh rate of ENR set at twice per slot. +var refreshRate = time.Duration(params.BeaconConfig().SecondsPerSlot/2) * time.Second + // search limit for number of peers in discovery v5. const searchLimit = 100 @@ -47,18 +58,24 @@ const maxBadResponses = 3 // Service for managing peer to peer (p2p) networking. 
type Service struct { - ctx context.Context - cancel context.CancelFunc - started bool - cfg *Config - startupErr error - dv5Listener Listener - host host.Host - pubsub *pubsub.PubSub - exclusionList *ristretto.Cache - privKey *ecdsa.PrivateKey - dht *kaddht.IpfsDHT - peers *peers.Status + started bool + isPreGenesis bool + pingMethod func(ctx context.Context, id peer.ID) error + cancel context.CancelFunc + cfg *Config + peers *peers.Status + dht *kaddht.IpfsDHT + privKey *ecdsa.PrivateKey + exclusionList *ristretto.Cache + metaData *pb.MetaData + pubsub *pubsub.PubSub + dv5Listener Listener + startupErr error + stateNotifier statefeed.Notifier + ctx context.Context + host host.Host + genesisTime time.Time + genesisValidatorsRoot []byte } // NewService initializes a new p2p service compatible with shared.Service interface. No @@ -77,9 +94,11 @@ func NewService(cfg *Config) (*Service, error) { s := &Service{ ctx: ctx, + stateNotifier: cfg.StateNotifier, cancel: cancel, cfg: cfg, exclusionList: cache, + isPreGenesis: true, } dv5Nodes, kadDHTNodes := parseBootStrapAddrs(s.cfg.BootstrapNodeAddr) @@ -93,6 +112,11 @@ func NewService(cfg *Config) (*Service, error) { log.WithError(err).Error("Failed to generate p2p private key") return nil, err } + s.metaData, err = metaDataFromConfig(s.cfg) + if err != nil { + log.WithError(err).Error("Failed to create peer metadata") + return nil, err + } opts := buildOptions(s.cfg, ipAddr, s.privKey) h, err := libp2p.New(s.ctx, opts...) @@ -148,6 +172,11 @@ func (s *Service) Start() { return } + // Waits until the state is initialized via an event feed. + // Used for fork-related data when connecting peers. + s.awaitStateInitialized() + s.isPreGenesis = false + var peersToWatch []string if s.cfg.RelayNodeAddr != "" { peersToWatch = append(peersToWatch, s.cfg.RelayNodeAddr) @@ -161,9 +190,12 @@ func (s *Service) Start() { s.host.ConnManager().Protect(peer.ID, "relay") } - if (len(s.cfg.Discv5BootStrapAddr) != 0 && !s.cfg.NoDiscovery) || s.cfg.EnableDiscv5 { + if !s.cfg.NoDiscovery && !s.cfg.DisableDiscv5 { ipAddr := ipAddr() - listener, err := startDiscoveryV5(ipAddr, s.privKey, s.cfg) + listener, err := s.startDiscoveryV5( + ipAddr, + s.privKey, + ) if err != nil { log.WithError(err).Error("Failed to start discovery") s.startupErr = err @@ -221,6 +253,10 @@ func (s *Service) Start() { }) runutil.RunEvery(s.ctx, time.Hour, s.Peers().Decay) runutil.RunEvery(s.ctx, 10*time.Second, s.updateMetrics) + runutil.RunEvery(s.ctx, refreshRate, func() { + currentEpoch := helpers.SlotToEpoch(helpers.SlotsSince(s.genesisTime)) + s.RefreshENR(currentEpoch) + }) multiAddrs := s.host.Network().ListenAddresses() logIPAddr(s.host.ID(), multiAddrs...) @@ -251,9 +287,15 @@ func (s *Service) Stop() error { // Status of the p2p service. Will return an error if the service is considered unhealthy to // indicate that this node should not serve traffic until the issue has been resolved. func (s *Service) Status() error { + if s.isPreGenesis { + return nil + } if !s.started { return errors.New("not running") } + if s.startupErr != nil { + return s.startupErr + } return nil } @@ -301,6 +343,16 @@ func (s *Service) Peers() *peers.Status { return s.peers } +// Metadata returns a copy of the peer's metadata. +func (s *Service) Metadata() *pb.MetaData { + return proto.Clone(s.metaData).(*pb.MetaData) +} + +// MetadataSeq returns the metadata sequence number. 
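Start now blocks in awaitStateInitialized until an Initialized event arrives on the state feed configured via Config.StateNotifier, so the service knows the genesis time and genesis validators root before it dials anyone. That is why the updated tests publish the event in a loop: the feed's Send returns the number of subscribers that received the event, so looping until it is non-zero is a cheap way to wait for the service's subscription. The same pattern as a helper; notifyInitialized is an illustrative name, not part of the change:

```go
package p2p

import (
	"time"

	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
)

// notifyInitialized publishes the Initialized event until at least one
// subscriber (here, the p2p service blocked in awaitStateInitialized)
// has received it.
func notifyInitialized(notifier statefeed.Notifier, genesisTime time.Time, root []byte) {
	for sent := 0; sent == 0; {
		sent = notifier.StateFeed().Send(&feed.Event{
			Type: statefeed.Initialized,
			Data: &statefeed.InitializedData{
				StartTime:             genesisTime,
				GenesisValidatorsRoot: root,
			},
		})
	}
}
```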
+func (s *Service) MetadataSeq() uint64 { + return s.metaData.SeqNumber +} + // RefreshENR uses an epoch to refresh the enr entry for our node // with the tracked committee id's for the epoch, allowing our node // to be dynamically discoverable by others given our tracked committee id's. @@ -310,12 +362,28 @@ func (s *Service) RefreshENR(epoch uint64) { return } bitV := bitfield.NewBitvector64() - committees := cache.CommitteeIDs.GetIDs(epoch) + + var committees []uint64 + epochStartSlot := helpers.StartSlot(epoch) + for i := epochStartSlot; i < epochStartSlot+2*params.BeaconConfig().SlotsPerEpoch; i++ { + committees = append(committees, sliceutil.UnionUint64(cache.CommitteeIDs.GetAttesterCommitteeIDs(i), + cache.CommitteeIDs.GetAggregatorCommitteeIDs(i))...) + } for _, idx := range committees { bitV.SetBitAt(idx, true) } - entry := enr.WithEntry(attSubnetEnrKey, &bitV) - s.dv5Listener.LocalNode().Set(entry) + currentBitV, err := retrieveBitvector(s.dv5Listener.Self().Record()) + if err != nil { + log.Errorf("Could not retrieve bitfield: %v", err) + return + } + if bytes.Equal(bitV, currentBitV) { + // return early if bitfield hasn't changed + return + } + s.updateSubnetRecordWithMetadata(bitV) + // ping all peers to inform them of new metadata + s.pingPeers() } // FindPeersWithSubnet performs a network search for peers @@ -329,9 +397,17 @@ func (s *Service) FindPeersWithSubnet(index uint64) (bool, error) { if node.IP() == nil { continue } + // do not look for nodes with no tcp port set + if err := node.Record().Load(enr.WithEntry("tcp", new(enr.TCP))); err != nil { + if !enr.IsNotFound(err) { + log.WithError(err).Error("Could not retrieve tcp port") + } + continue + } subnets, err := retrieveAttSubnets(node.Record()) if err != nil { - return false, errors.Wrap(err, "could not retrieve subnets") + log.Errorf("could not retrieve subnets: %v", err) + continue } for _, comIdx := range subnets { if comIdx == index { @@ -351,7 +427,7 @@ func (s *Service) FindPeersWithSubnet(index uint64) (bool, error) { exists = true continue } - s.peers.Add(info.ID, multiAddr, network.DirUnknown, subnets) + s.peers.Add(node.Record(), info.ID, multiAddr, network.DirUnknown) if err := s.connectWithPeer(*info); err != nil { log.Errorf("Could not connect with peer %s: %v", info.String(), err) } @@ -362,6 +438,48 @@ func (s *Service) FindPeersWithSubnet(index uint64) (bool, error) { return exists, nil } +// AddPingMethod adds the metadata ping rpc method to the p2p service, so that it can +// be used to refresh ENR. +func (s *Service) AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error) { + s.pingMethod = reqFunc +} + +func (s *Service) pingPeers() { + if s.pingMethod == nil { + return + } + for _, pid := range s.peers.Connected() { + go func(id peer.ID) { + if err := s.pingMethod(s.ctx, id); err != nil { + log.WithField("peer", id).WithError(err).Error("Failed to ping peer") + } + }(pid) + } +} + +// Waits for the beacon state to be initialized, important +// for initializing the p2p service as p2p needs to be aware +// of genesis information for peering. 
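FindPeersWithSubnet, and processPeers further down, now skip discovered nodes that do not advertise a TCP port by attempting to load the standard tcp ENR entry and treating a not-found error as a silent skip. The same check factored into a predicate; hasTCPPort is an illustrative name, the PR inlines the check:

```go
package p2p

import (
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
)

// hasTCPPort reports whether a discovered node's record carries the standard
// "tcp" entry, i.e. whether the node is dialable over libp2p at all.
func hasTCPPort(node *enode.Node) bool {
	if err := node.Record().Load(enr.WithEntry("tcp", new(enr.TCP))); err != nil {
		if !enr.IsNotFound(err) {
			log.WithError(err).Error("Could not retrieve tcp port")
		}
		return false
	}
	return true
}
```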
+func (s *Service) awaitStateInitialized() { + stateChannel := make(chan *feed.Event, 1) + stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel) + defer stateSub.Unsubscribe() + for { + select { + case event := <-stateChannel: + if event.Type == statefeed.Initialized { + data, ok := event.Data.(*statefeed.InitializedData) + if !ok { + log.Fatalf("Received wrong data over state initialized feed: %v", data) + } + s.genesisTime = data.StartTime + s.genesisValidatorsRoot = data.GenesisValidatorsRoot + return + } + } + } +} + // listen for new nodes watches for new nodes in the network and adds them to the peerstore. func (s *Service) listenForNewNodes() { runutil.RunEvery(s.ctx, pollingPeriod, func() { @@ -414,6 +532,13 @@ func (s *Service) processPeers(nodes []*enode.Node) []ma.Multiaddr { if node.IP() == nil { continue } + // do not dial nodes with their tcp ports not set + if err := node.Record().Load(enr.WithEntry("tcp", new(enr.TCP))); err != nil { + if !enr.IsNotFound(err) { + log.WithError(err).Error("Could not retrieve tcp port") + } + continue + } multiAddr, err := convertToSingleMultiAddr(node) if err != nil { log.WithError(err).Error("Could not convert to multiAddr") @@ -430,13 +555,19 @@ func (s *Service) processPeers(nodes []*enode.Node) []ma.Multiaddr { if s.host.Network().Connectedness(peerData.ID) == network.Connected { continue } - indices, err := retrieveAttSubnets(node.Record()) - if err != nil { - log.WithError(err).Error("Could not retrieve attestation subnets") - continue + + nodeENR := node.Record() + // Decide whether or not to connect to peer that does not + // match the proper fork ENR data with our local node. + if s.genesisValidatorsRoot != nil { + if err := s.compareForkENR(nodeENR); err != nil { + log.WithError(err).Debug("Fork ENR mismatches between peer and local node") + continue + } } - // add peer to peer handler. - s.peers.Add(peerData.ID, multiAddr, network.DirUnknown, indices) + + // Add peer to peer handler. + s.peers.Add(nodeENR, peerData.ID, multiAddr, network.DirUnknown) multiAddrs = append(multiAddrs, multiAddr) } return multiAddrs @@ -449,6 +580,13 @@ func (s *Service) connectToBootnodes() error { if err != nil { return err } + // do not dial bootnodes with their tcp ports not set + if err := bootNode.Record().Load(enr.WithEntry("tcp", new(enr.TCP))); err != nil { + if !enr.IsNotFound(err) { + log.WithError(err).Error("Could not retrieve tcp port") + } + continue + } nodes = append(nodes, bootNode) } multiAddresses := convertToMultiAddr(nodes) @@ -470,6 +608,19 @@ func (s *Service) addKadDHTNodesToExclusionList(addr string) error { return nil } +// Updates the service's discv5 listener record's attestation subnet +// with a new value for a bitfield of subnets tracked. It also updates +// the node's metadata by increasing the sequence number and the +// subnets tracked by the node. 
+func (s *Service) updateSubnetRecordWithMetadata(bitV bitfield.Bitvector64) { + entry := enr.WithEntry(attSubnetEnrKey, &bitV) + s.dv5Listener.LocalNode().Set(entry) + s.metaData = &pb.MetaData{ + SeqNumber: s.metaData.SeqNumber + 1, + Attnets: bitV, + } +} + func logIPAddr(id peer.ID, addrs ...ma.Multiaddr) { var correctAddr ma.Multiaddr for _, addr := range addrs { diff --git a/beacon-chain/p2p/service_test.go b/beacon-chain/p2p/service_test.go index 7f08268c66..3b3f52fc3b 100644 --- a/beacon-chain/p2p/service_test.go +++ b/beacon-chain/p2p/service_test.go @@ -14,6 +14,10 @@ import ( "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/peer" multiaddr "github.com/multiformats/go-multiaddr" + mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" + "github.com/prysmaticlabs/prysm/beacon-chain/core/feed" + statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state" + testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" "github.com/prysmaticlabs/prysm/shared/testutil" logTest "github.com/sirupsen/logrus/hooks/test" ) @@ -56,17 +60,6 @@ func (mockListener) LocalNode() *enode.LocalNode { panic("implement me") } -func createPeer(t *testing.T, cfg *Config, port int) (Listener, host.Host) { - h, pkey, ipAddr := createHost(t, port) - cfg.UDPPort = uint(port) - cfg.TCPPort = uint(port) - listener, err := startDiscoveryV5(ipAddr, pkey, cfg) - if err != nil { - t.Errorf("Could not start discovery for node: %v", err) - } - return listener, h -} - func createHost(t *testing.T, port int) (host.Host, *ecdsa.PrivateKey, net.IP) { ipAddr, pkey := createAddrAndPrivKey(t) ipAddr = net.ParseIP("127.0.0.1") @@ -108,6 +101,8 @@ func TestService_Stop_DontPanicIfDv5ListenerIsNotInited(t *testing.T) { } func TestService_Start_OnlyStartsOnce(t *testing.T) { + db := testDB.SetupDB(t) + defer testDB.TeardownDB(t, db) hook := logTest.NewGlobal() cfg := &Config{ @@ -119,18 +114,33 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) { if err != nil { t.Fatal(err) } + s.stateNotifier = &mock.MockStateNotifier{} s.dv5Listener = &mockListener{} - defer func() { - if err := s.Stop(); err != nil { - t.Log(err) - } + exitRoutine := make(chan bool) + go func() { + s.Start() + <-exitRoutine }() - s.Start() + // Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed). + for sent := 0; sent == 0; { + sent = s.stateNotifier.StateFeed().Send(&feed.Event{ + Type: statefeed.Initialized, + Data: &statefeed.InitializedData{ + StartTime: time.Now(), + GenesisValidatorsRoot: make([]byte, 32), + }, + }) + } + time.Sleep(time.Second * 2) if s.started != true { t.Error("Expected service to be started") } s.Start() testutil.AssertLogsContain(t, hook, "Attempted to start p2p service when it was already started") + if err := s.Stop(); err != nil { + t.Fatal(err) + } + exitRoutine <- true } func TestService_Status_NotRunning(t *testing.T) { @@ -142,13 +152,22 @@ func TestService_Status_NotRunning(t *testing.T) { } func TestListenForNewNodes(t *testing.T) { - // setup bootnode + // Setup bootnode. 
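updateSubnetRecordWithMetadata bumps the metadata sequence number whenever the tracked subnets change, and RefreshENR follows up with pingPeers. The intended consumer of that number, per the eth2 networking spec rather than anything in this hunk, is the remote side: a ping advertising a higher sequence number than a peer has cached should prompt that peer to re-request our metadata over RPCMetaDataTopic and pick up the new attnets. A peer-side sketch of that comparison; every name below is illustrative, since the actual handler lives in the sync package:

```go
package p2p

import (
	"context"

	"github.com/libp2p/go-libp2p-core/peer"
)

// onPingReceived sketches the consumer side of the sequence number: if a
// remote peer advertises a seq newer than the metadata we have cached for it,
// re-request its metadata. Decoding the response and calling
// s.peers.SetMetadata(...) would follow in a real handler.
func onPingReceived(ctx context.Context, s *Service, pid peer.ID, remoteSeq uint64) error {
	if cached, err := s.peers.Metadata(pid); err == nil && cached.GetSeqNumber() >= remoteSeq {
		return nil // cached metadata is still current
	}
	stream, err := s.Send(ctx, new(interface{}), RPCMetaDataTopic, pid)
	if err != nil {
		return err
	}
	return stream.Close()
}
```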
+ db := testDB.SetupDB(t) + defer testDB.TeardownDB(t, db) cfg := &Config{} port := 2000 cfg.UDPPort = uint(port) _, pkey := createAddrAndPrivKey(t) ipAddr := net.ParseIP("127.0.0.1") - bootListener := createListener(ipAddr, pkey, cfg) + genesisTime := time.Now() + genesisValidatorsRoot := make([]byte, 32) + s := &Service{ + cfg: cfg, + genesisTime: genesisTime, + genesisValidatorsRoot: genesisValidatorsRoot, + } + bootListener := s.createListener(ipAddr, pkey) defer bootListener.Close() // Use shorter period for testing. @@ -160,20 +179,37 @@ func TestListenForNewNodes(t *testing.T) { bootNode := bootListener.Self() + var listeners []*discover.UDPv5 + var hosts []host.Host + // setup other nodes. cfg = &Config{ BootstrapNodeAddr: []string{bootNode.String()}, Discv5BootStrapAddr: []string{bootNode.String()}, Encoding: "ssz", MaxPeers: 30, } - var listeners []*discover.UDPv5 - var hosts []host.Host - // setup other nodes for i := 1; i <= 5; i++ { - listener, h := createPeer(t, cfg, port+i) - listeners = append(listeners, listener.(*discover.UDPv5)) + h, pkey, ipAddr := createHost(t, port+i) + cfg.UDPPort = uint(port + i) + cfg.TCPPort = uint(port + i) + s := &Service{ + cfg: cfg, + genesisTime: genesisTime, + genesisValidatorsRoot: genesisValidatorsRoot, + } + listener, err := s.startDiscoveryV5(ipAddr, pkey) + if err != nil { + t.Errorf("Could not start discovery for node: %v", err) + } + listeners = append(listeners, listener) hosts = append(hosts, h) } + defer func() { + // Close down all peers. + for _, listener := range listeners { + listener.Close() + } + }() // close peers upon exit of test defer func() { @@ -184,31 +220,38 @@ func TestListenForNewNodes(t *testing.T) { } }() - cfg.TCPPort = 14001 cfg.UDPPort = 14000 + cfg.TCPPort = 14001 s, err := NewService(cfg) if err != nil { t.Fatal(err) } - - s.Start() - defer func() { - if err := s.Stop(); err != nil { - t.Log(err) - } + s.stateNotifier = &mock.MockStateNotifier{} + exitRoutine := make(chan bool) + go func() { + s.Start() + <-exitRoutine }() - + // Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed). 
+ for sent := 0; sent == 0; { + sent = s.stateNotifier.StateFeed().Send(&feed.Event{ + Type: statefeed.Initialized, + Data: &statefeed.InitializedData{ + StartTime: genesisTime, + GenesisValidatorsRoot: genesisValidatorsRoot, + }, + }) + } time.Sleep(4 * time.Second) peers := s.host.Network().Peers() if len(peers) != 5 { t.Errorf("Not all peers added to peerstore, wanted %d but got %d", 5, len(peers)) } - - // close down all peers - for _, listener := range listeners { - listener.Close() + if err := s.Stop(); err != nil { + t.Fatal(err) } + exitRoutine <- true } func TestPeer_Disconnect(t *testing.T) { diff --git a/beacon-chain/p2p/subnets.go b/beacon-chain/p2p/subnets.go new file mode 100644 index 0000000000..b73eb3bcdb --- /dev/null +++ b/beacon-chain/p2p/subnets.go @@ -0,0 +1,41 @@ +package p2p + +import ( + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/prysmaticlabs/go-bitfield" +) + +const attestationSubnetCount = 64 +const attSubnetEnrKey = "attnets" + +func intializeAttSubnets(node *enode.LocalNode) *enode.LocalNode { + bitV := bitfield.NewBitvector64() + entry := enr.WithEntry(attSubnetEnrKey, bitV.Bytes()) + node.Set(entry) + return node +} + +func retrieveAttSubnets(record *enr.Record) ([]uint64, error) { + bitV, err := retrieveBitvector(record) + if err != nil { + return nil, err + } + committeeIdxs := []uint64{} + for i := uint64(0); i < attestationSubnetCount; i++ { + if bitV.BitAt(i) { + committeeIdxs = append(committeeIdxs, i) + } + } + return committeeIdxs, nil +} + +func retrieveBitvector(record *enr.Record) (bitfield.Bitvector64, error) { + bitV := bitfield.NewBitvector64() + entry := enr.WithEntry(attSubnetEnrKey, &bitV) + err := record.Load(entry) + if err != nil { + return nil, err + } + return bitV, nil +} diff --git a/beacon-chain/p2p/subnets_test.go b/beacon-chain/p2p/subnets_test.go new file mode 100644 index 0000000000..ca3236de72 --- /dev/null +++ b/beacon-chain/p2p/subnets_test.go @@ -0,0 +1,146 @@ +package p2p + +import ( + "testing" + "time" + + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/prysmaticlabs/go-bitfield" + mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" + "github.com/prysmaticlabs/prysm/beacon-chain/cache" + "github.com/prysmaticlabs/prysm/beacon-chain/core/feed" + statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state" + testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" +) + +func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) { + db := testDB.SetupDB(t) + defer testDB.TeardownDB(t, db) + port := 2000 + ipAddr, pkey := createAddrAndPrivKey(t) + genesisTime := time.Now() + genesisValidatorsRoot := make([]byte, 32) + s := &Service{ + cfg: &Config{UDPPort: uint(port)}, + genesisTime: genesisTime, + genesisValidatorsRoot: genesisValidatorsRoot, + } + bootListener := s.createListener(ipAddr, pkey) + defer bootListener.Close() + + bootNode := bootListener.Self() + // Use shorter period for testing. 
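subnets.go above stores the tracked attestation subnets as a 64-bit bitvector under the attnets ENR key and converts it back to indices with retrieveAttSubnets. An illustrative round-trip against a local node record; crypto.GenerateKey stands in for the repo's own privKey helper used elsewhere in these tests:

```go
package p2p

import (
	"testing"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/prysmaticlabs/go-bitfield"
)

// Illustrative round-trip: write an attnets bitvector into a local node's
// record and read the tracked subnet indices back out.
func TestAttnetsRoundTrip(t *testing.T) {
	db, err := enode.OpenDB("")
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	priv, err := crypto.GenerateKey()
	if err != nil {
		t.Fatal(err)
	}
	localNode := enode.NewLocalNode(db, priv)

	bitV := bitfield.NewBitvector64()
	bitV.SetBitAt(7, true)
	bitV.SetBitAt(42, true)
	localNode.Set(enr.WithEntry(attSubnetEnrKey, &bitV))

	subnets, err := retrieveAttSubnets(localNode.Node().Record())
	if err != nil {
		t.Fatal(err)
	}
	if len(subnets) != 2 || subnets[0] != 7 || subnets[1] != 42 {
		t.Errorf("unexpected subnets: %v", subnets)
	}
}
```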
+ currentPeriod := pollingPeriod + pollingPeriod = 1 * time.Second + defer func() { + pollingPeriod = currentPeriod + }() + + var listeners []*discover.UDPv5 + for i := 1; i <= 3; i++ { + port = 3000 + i + cfg := &Config{ + BootstrapNodeAddr: []string{bootNode.String()}, + Discv5BootStrapAddr: []string{bootNode.String()}, + Encoding: "ssz", + MaxPeers: 30, + UDPPort: uint(port), + } + ipAddr, pkey := createAddrAndPrivKey(t) + s = &Service{ + cfg: cfg, + genesisTime: genesisTime, + genesisValidatorsRoot: genesisValidatorsRoot, + } + listener, err := s.startDiscoveryV5(ipAddr, pkey) + if err != nil { + t.Errorf("Could not start discovery for node: %v", err) + } + bitV := bitfield.NewBitvector64() + bitV.SetBitAt(uint64(i), true) + + entry := enr.WithEntry(attSubnetEnrKey, &bitV) + listener.LocalNode().Set(entry) + listeners = append(listeners, listener) + } + defer func() { + // Close down all peers. + for _, listener := range listeners { + listener.Close() + } + }() + + // Make one service on port 4000. + port = 4000 + cfg := &Config{ + BootstrapNodeAddr: []string{bootNode.String()}, + Discv5BootStrapAddr: []string{bootNode.String()}, + Encoding: "ssz", + MaxPeers: 30, + UDPPort: uint(port), + } + cfg.StateNotifier = &mock.MockStateNotifier{} + s, err := NewService(cfg) + if err != nil { + t.Fatal(err) + } + exitRoutine := make(chan bool) + go func() { + s.Start() + <-exitRoutine + }() + // Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed). + for sent := 0; sent == 0; { + sent = s.stateNotifier.StateFeed().Send(&feed.Event{ + Type: statefeed.Initialized, + Data: &statefeed.InitializedData{ + StartTime: time.Now(), + GenesisValidatorsRoot: make([]byte, 32), + }, + }) + } + + // Wait for the nodes to have their local routing tables populated with the other nodes. + time.Sleep(2 * discoveryWaitTime) + + // Look up 3 different subnets. + exists, err := s.FindPeersWithSubnet(1) + if err != nil { + t.Fatal(err) + } + exists2, err := s.FindPeersWithSubnet(2) + if err != nil { + t.Fatal(err) + } + exists3, err := s.FindPeersWithSubnet(3) + if err != nil { + t.Fatal(err) + } + if !exists || !exists2 || !exists3 { + t.Fatal("Peer with subnet doesn't exist") + } + + // Update ENR of a peer.
+ testService := &Service{ + dv5Listener: listeners[0], + metaData: &pb.MetaData{}, + } + cache.CommitteeIDs.AddAttesterCommiteeID(0, 10) + testService.RefreshENR(0) + time.Sleep(2 * time.Second) + + exists, err = s.FindPeersWithSubnet(2) + if err != nil { + t.Fatal(err) + } + + if !exists { + t.Fatal("Peer with subnet doesn't exist") + } + if err := s.Stop(); err != nil { + t.Fatal(err) + } + exitRoutine <- true +} diff --git a/beacon-chain/p2p/testing/BUILD.bazel b/beacon-chain/p2p/testing/BUILD.bazel index f3f5e73416..41dc9a9443 100644 --- a/beacon-chain/p2p/testing/BUILD.bazel +++ b/beacon-chain/p2p/testing/BUILD.bazel @@ -14,6 +14,7 @@ go_library( "//beacon-chain/p2p/encoder:go_default_library", "//beacon-chain/p2p/peers:go_default_library", "//proto/beacon/p2p/v1:go_default_library", + "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library", "@com_github_gogo_protobuf//proto:go_default_library", "@com_github_libp2p_go_libp2p_blankhost//:go_default_library", "@com_github_libp2p_go_libp2p_core//:go_default_library", diff --git a/beacon-chain/p2p/testing/mock_peersprovider.go b/beacon-chain/p2p/testing/mock_peersprovider.go index f1f3c345ba..aa42c2a4d2 100644 --- a/beacon-chain/p2p/testing/mock_peersprovider.go +++ b/beacon-chain/p2p/testing/mock_peersprovider.go @@ -3,6 +3,7 @@ package testing import ( "sync" + "github.com/ethereum/go-ethereum/p2p/enr" "github.com/libp2p/go-libp2p-core/network" peer "github.com/libp2p/go-libp2p-peer" ma "github.com/multiformats/go-multiaddr" @@ -30,9 +31,9 @@ func (m *MockPeersProvider) Peers() *peers.Status { } ma0, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000") if err != nil { - log.WithError(err).Debug("Cannot create address") + log.WithError(err).Debug("Cannot decode") } - m.peers.Add(id0, ma0, network.DirInbound, []uint64{}) + m.peers.Add(new(enr.Record), id0, ma0, network.DirInbound) m.peers.SetConnectionState(id0, peers.PeerConnected) m.peers.SetChainState(id0, &pb.Status{FinalizedEpoch: uint64(10)}) id1, err := peer.IDB58Decode("16Uiu2HAm4HgJ9N1o222xK61o7LSgToYWoAy1wNTJRkh9gLZapVAy") @@ -41,9 +42,9 @@ func (m *MockPeersProvider) Peers() *peers.Status { } ma1, err := ma.NewMultiaddr("/ip4/52.23.23.253/tcp/30000/ipfs/QmfAgkmjiZNZhr2wFN9TwaRgHouMTBT6HELyzE5A3BT2wK/p2p-circuit") if err != nil { - log.WithError(err).Debug("Cannot create address") + log.WithError(err).Debug("Cannot decode") } - m.peers.Add(id1, ma1, network.DirOutbound, []uint64{}) + m.peers.Add(new(enr.Record), id1, ma1, network.DirOutbound) m.peers.SetConnectionState(id1, peers.PeerConnected) m.peers.SetChainState(id1, &pb.Status{FinalizedEpoch: uint64(11)}) } diff --git a/beacon-chain/p2p/testing/p2p.go b/beacon-chain/p2p/testing/p2p.go index 5dbcd872f0..28836785ab 100644 --- a/beacon-chain/p2p/testing/p2p.go +++ b/beacon-chain/p2p/testing/p2p.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/p2p/enr" "github.com/gogo/protobuf/proto" bhost "github.com/libp2p/go-libp2p-blankhost" core "github.com/libp2p/go-libp2p-core" @@ -29,6 +30,7 @@ var TopicMappings = map[reflect.Type]string{ reflect.TypeOf(new(uint64)): "/eth2/beacon_chain/req/goodbye/1", reflect.TypeOf(&pb.BeaconBlocksByRangeRequest{}): "/eth2/beacon_chain/req/beacon_blocks_by_range/1", reflect.TypeOf([][32]byte{}): "/eth2/beacon_chain/req/beacon_blocks_by_root/1", + reflect.TypeOf(new(uint64)): "/eth2/beacon_chain/req/ping/1/", } // TestP2P represents a p2p implementation that can be used for testing. 
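The new beacon-chain/p2p/subnets.go above advertises a node's attestation-subnet membership in its ENR as a 64-bit bitvector stored under the attnets key, and the subnet test flips exactly one bit per discovery listener so that FindPeersWithSubnet can locate peers by index. Below is a minimal, self-contained sketch of that encode/decode round trip using the same go-bitfield and enr packages; the main wrapper and the chosen subnet indices (3 and 17) are illustrative only, not part of the patch.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	// Mark membership in subnets 3 and 17 out of the 64 attestation subnets.
	bitV := bitfield.NewBitvector64()
	bitV.SetBitAt(3, true)
	bitV.SetBitAt(17, true)

	// Store the raw bytes under the "attnets" key (attSubnetEnrKey in the patch),
	// as intializeAttSubnets does for the local node record.
	var rec enr.Record
	rec.Set(enr.WithEntry("attnets", bitV.Bytes()))

	// Load the entry back and enumerate the set bits, mirroring retrieveAttSubnets.
	got := bitfield.NewBitvector64()
	if err := rec.Load(enr.WithEntry("attnets", &got)); err != nil {
		panic(err)
	}
	var subnets []uint64
	for i := uint64(0); i < 64; i++ {
		if got.BitAt(i) {
			subnets = append(subnets, i)
		}
	}
	fmt.Println(subnets) // [3 17]
}
```

Writing the entry as raw bytes with Set while loading it back into a Bitvector64 is the same asymmetry intializeAttSubnets and retrieveBitvector rely on: RLP treats both forms as a plain byte string.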
@@ -39,6 +41,7 @@ type TestP2P struct { BroadcastCalled bool DelaySend bool peers *peers.Status + LocalMetadata *pb.MetaData } // NewTestP2P initializes a new p2p test service. @@ -119,8 +122,14 @@ func (p *TestP2P) ReceivePubSub(topic string, msg proto.Message) { if _, err := p.Encoding().Encode(buf, msg); err != nil { p.t.Fatalf("Failed to encode message: %v", err) } + digest, err := p.ForkDigest() + if err != nil { + p.t.Fatal(err) + } + topic = fmt.Sprintf(topic, digest) + topic = topic + p.Encoding().ProtocolSuffix() - if err := ps.Publish(topic+p.Encoding().ProtocolSuffix(), buf.Bytes()); err != nil { + if err := ps.Publish(topic, buf.Bytes()); err != nil { p.t.Fatalf("Failed to publish message; %v", err) } } @@ -163,7 +172,7 @@ func (p *TestP2P) AddConnectionHandler(f func(ctx context.Context, id peer.ID) e ConnectedF: func(net network.Network, conn network.Conn) { // Must be handled in a goroutine as this callback cannot be blocking. go func() { - p.peers.Add(conn.RemotePeer(), conn.RemoteMultiaddr(), conn.Stat().Direction, []uint64{}) + p.peers.Add(new(enr.Record), conn.RemotePeer(), conn.RemoteMultiaddr(), conn.Stat().Direction) ctx := context.Background() p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnecting) @@ -198,8 +207,8 @@ func (p *TestP2P) AddDisconnectionHandler(f func(ctx context.Context, id peer.ID } // Send a message to a specific peer. -func (p *TestP2P) Send(ctx context.Context, msg interface{}, pid peer.ID) (network.Stream, error) { - protocol := TopicMappings[reflect.TypeOf(msg)] +func (p *TestP2P) Send(ctx context.Context, msg interface{}, topic string, pid peer.ID) (network.Stream, error) { + protocol := topic if protocol == "" { return nil, fmt.Errorf("protocol doesnt exist for proto message: %v", msg) } @@ -208,8 +217,10 @@ func (p *TestP2P) Send(ctx context.Context, msg interface{}, pid peer.ID) (netwo return nil, err } - if _, err := p.Encoding().EncodeWithLength(stream, msg); err != nil { - return nil, err + if topic != "/eth2/beacon_chain/req/metadata/1" { + if _, err := p.Encoding().EncodeWithLength(stream, msg); err != nil { + return nil, err + } } // Close stream for writing. @@ -243,3 +254,23 @@ func (p *TestP2P) FindPeersWithSubnet(index uint64) (bool, error) { func (p *TestP2P) RefreshENR(epoch uint64) { return } + +// ForkDigest mocks the p2p func. +func (p *TestP2P) ForkDigest() ([4]byte, error) { + return [4]byte{}, nil +} + +// Metadata mocks the peer's metadata. +func (p *TestP2P) Metadata() *pb.MetaData { + return proto.Clone(p.LocalMetadata).(*pb.MetaData) +} + +// MetadataSeq mocks metadata sequence number. +func (p *TestP2P) MetadataSeq() uint64 { + return p.LocalMetadata.SeqNumber +} + +// AddPingMethod mocks the p2p func. 
+func (p *TestP2P) AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error) { + // no-op +} diff --git a/beacon-chain/p2p/utils.go b/beacon-chain/p2p/utils.go index 742f071975..2d632170aa 100644 --- a/beacon-chain/p2p/utils.go +++ b/beacon-chain/p2p/utils.go @@ -12,10 +12,13 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/libp2p/go-libp2p-core/crypto" "github.com/pkg/errors" + "github.com/prysmaticlabs/go-bitfield" + pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/iputils" ) const keyPath = "network-keys" +const metaDataPath = "metaData" func convertFromInterfacePrivKey(privkey crypto.PrivKey) *ecdsa.PrivateKey { typeAssertedKey := (*ecdsa.PrivateKey)((*btcec.PrivateKey)(privkey.(*crypto.Secp256k1PrivateKey))) @@ -83,6 +86,44 @@ func retrievePrivKeyFromFile(path string) (*ecdsa.PrivateKey, error) { return convertFromInterfacePrivKey(unmarshalledKey), nil } +func metaDataFromConfig(cfg *Config) (*pbp2p.MetaData, error) { + defaultKeyPath := path.Join(cfg.DataDir, metaDataPath) + metaDataPath := cfg.MetaDataDir + + _, err := os.Stat(defaultKeyPath) + defaultMetadataExist := !os.IsNotExist(err) + if err != nil && defaultMetadataExist { + return nil, err + } + if metaDataPath == "" && !defaultMetadataExist { + metaData := &pbp2p.MetaData{ + SeqNumber: 0, + Attnets: bitfield.NewBitvector64(), + } + dst, err := metaData.Marshal() + if err != nil { + return nil, err + } + if err = ioutil.WriteFile(defaultKeyPath, dst, 0600); err != nil { + return nil, err + } + return metaData, nil + } + if defaultMetadataExist && metaDataPath == "" { + metaDataPath = defaultKeyPath + } + src, err := ioutil.ReadFile(metaDataPath) + if err != nil { + log.WithError(err).Error("Error reading metadata from file") + return nil, err + } + metaData := &pbp2p.MetaData{} + if err := metaData.Unmarshal(src); err != nil { + return nil, err + } + return metaData, nil +} + func ipAddr() net.IP { ip, err := iputils.ExternalIPv4() if err != nil { diff --git a/beacon-chain/powchain/block_reader.go b/beacon-chain/powchain/block_reader.go index 4a9caf013a..3f11b2d5d3 100644 --- a/beacon-chain/powchain/block_reader.go +++ b/beacon-chain/powchain/block_reader.go @@ -2,6 +2,7 @@ package powchain import ( "context" + "fmt" "math/big" "github.com/ethereum/go-ethereum/common" @@ -66,7 +67,7 @@ func (s *Service) BlockHashByHeight(ctx context.Context, height *big.Int) (commo span.AddAttributes(trace.BoolAttribute("blockCacheHit", false)) block, err := s.blockFetcher.BlockByNumber(ctx, height) if err != nil { - return [32]byte{}, errors.Wrap(err, "could not query block with given height") + return [32]byte{}, errors.Wrap(err, fmt.Sprintf("could not query block with height %d", height.Uint64())) } if err := s.blockCache.AddBlock(block); err != nil { return [32]byte{}, err @@ -88,7 +89,7 @@ func (s *Service) BlockTimeByHeight(ctx context.Context, height *big.Int) (uint6 span.AddAttributes(trace.BoolAttribute("blockCacheHit", false)) block, err := s.blockFetcher.BlockByNumber(ctx, height) if err != nil { - return 0, errors.Wrap(err, "could not query block with given height") + return 0, errors.Wrap(err, fmt.Sprintf("could not query block with height %d", height.Uint64())) } return block.Time(), nil } diff --git a/beacon-chain/powchain/deposit_test.go b/beacon-chain/powchain/deposit_test.go index 4c4eb0d9e7..d2e9c82696 100644 --- a/beacon-chain/powchain/deposit_test.go +++ b/beacon-chain/powchain/deposit_test.go @@ -199,10 +199,7 @@ func TestProcessDeposit_UnableToVerify(t 
*testing.T) { if err != nil { t.Fatal(err) } - - domain := bls.ComputeDomain(params.BeaconConfig().DomainDeposit) - - sig := keys[0].Sign([]byte{'F', 'A', 'K', 'E'}, domain) + sig := keys[0].Sign([]byte{'F', 'A', 'K', 'E'}) deposits[0].Data.Signature = sig.Marshal()[:] trie, _, err := testutil.DepositTrieFromDeposits(deposits) @@ -249,12 +246,16 @@ func TestProcessDeposit_IncompleteDeposit(t *testing.T) { sk := bls.RandKey() deposit.Data.PublicKey = sk.PublicKey().Marshal() - signedRoot, err := ssz.HashTreeRoot(deposit.Data) + d, err := helpers.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil) if err != nil { t.Fatal(err) } - domain := bls.ComputeDomain(params.BeaconConfig().DomainDeposit) - sig := sk.Sign(signedRoot[:], domain) + signedRoot, err := helpers.ComputeSigningRoot(deposit.Data, d) + if err != nil { + t.Fatal(err) + } + + sig := sk.Sign(signedRoot[:]) deposit.Data.Signature = sig.Marshal() trie, err := trieutil.NewTrie(int(params.BeaconConfig().DepositContractTreeDepth)) diff --git a/beacon-chain/powchain/log_processing_test.go b/beacon-chain/powchain/log_processing_test.go index 919d761770..066432b684 100644 --- a/beacon-chain/powchain/log_processing_test.go +++ b/beacon-chain/powchain/log_processing_test.go @@ -61,10 +61,12 @@ func TestProcessDepositLog_OK(t *testing.T) { } testAcc.Backend.Commit() + testutil.ResetCache() deposits, _, err := testutil.DeterministicDepositsAndKeys(1) if err != nil { t.Fatal(err) } + _, depositRoots, err := testutil.DeterministicDepositTrie(len(deposits)) if err != nil { t.Fatal(err) @@ -135,6 +137,7 @@ func TestProcessDepositLog_InsertsPendingDeposit(t *testing.T) { testAcc.Backend.Commit() + testutil.ResetCache() deposits, _, err := testutil.DeterministicDepositsAndKeys(1) if err != nil { t.Fatal(err) @@ -211,6 +214,7 @@ func TestUnpackDepositLogData_OK(t *testing.T) { t.Fatalf("Could not init from contract: %v", err) } + testutil.ResetCache() deposits, _, err := testutil.DeterministicDepositsAndKeys(1) if err != nil { t.Fatal(err) @@ -294,6 +298,7 @@ func TestProcessETH2GenesisLog_8DuplicatePubkeys(t *testing.T) { t.Fatal(err) } + testutil.ResetCache() deposits, _, err := testutil.DeterministicDepositsAndKeys(1) if err != nil { t.Fatal(err) @@ -379,11 +384,11 @@ func TestProcessETH2GenesisLog(t *testing.T) { t.Fatal(err) } + testutil.ResetCache() deposits, _, err := testutil.DeterministicDepositsAndKeys(uint64(depositsReqForChainStart)) if err != nil { t.Fatal(err) } - _, roots, err := testutil.DeterministicDepositTrie(len(deposits)) if err != nil { t.Fatal(err) @@ -601,6 +606,7 @@ func TestWeb3ServiceProcessDepositLog_RequestMissedDeposits(t *testing.T) { t.Fatal(err) } depositsWanted := 10 + testutil.ResetCache() deposits, _, err := testutil.DeterministicDepositsAndKeys(uint64(depositsWanted)) if err != nil { t.Fatal(err) @@ -700,11 +706,11 @@ func TestConsistentGenesisState(t *testing.T) { t.Fatal(err) } + testutil.ResetCache() deposits, _, err := testutil.DeterministicDepositsAndKeys(uint64(depositsReqForChainStart)) if err != nil { t.Fatal() } - _, roots, err := testutil.DeterministicDepositTrie(len(deposits)) if err != nil { t.Fatal(err) diff --git a/beacon-chain/rpc/beacon/BUILD.bazel b/beacon-chain/rpc/beacon/BUILD.bazel index 4597195f98..10da690b81 100644 --- a/beacon-chain/rpc/beacon/BUILD.bazel +++ b/beacon-chain/rpc/beacon/BUILD.bazel @@ -74,6 +74,7 @@ go_test( shard_count = 4, deps = [ "//beacon-chain/blockchain/testing:go_default_library", + "//beacon-chain/cache:go_default_library", 
"//beacon-chain/core/epoch/precompute:go_default_library", "//beacon-chain/core/feed:go_default_library", "//beacon-chain/core/feed/block:go_default_library", @@ -88,6 +89,7 @@ go_test( "//beacon-chain/p2p/testing:go_default_library", "//beacon-chain/rpc/testing:go_default_library", "//beacon-chain/state:go_default_library", + "//beacon-chain/state/stategen:go_default_library", "//proto/beacon/p2p/v1:go_default_library", "//shared/attestationutil:go_default_library", "//shared/bytesutil:go_default_library", diff --git a/beacon-chain/rpc/beacon/assignments.go b/beacon-chain/rpc/beacon/assignments.go index 9410953bde..144d93e351 100644 --- a/beacon-chain/rpc/beacon/assignments.go +++ b/beacon-chain/rpc/beacon/assignments.go @@ -100,9 +100,9 @@ func (bs *Server) ListValidatorAssignments( // initialize all committee related data. committeeAssignments := map[uint64]*helpers.CommitteeAssignmentContainer{} - proposerIndexToSlot := map[uint64]uint64{} + proposerIndexToSlots := make(map[uint64][]uint64) archivedInfo := &pb.ArchivedCommitteeInfo{} - archivedBalances := []uint64{} + archivedBalances := make([]uint64, 0) archivedAssignments := make(map[uint64]*ethpb.ValidatorAssignments_CommitteeAssignment) if shouldFetchFromArchive { @@ -120,7 +120,7 @@ func (bs *Server) ListValidatorAssignments( return nil, status.Errorf(codes.Internal, "Could not retrieve archived assignment for epoch %d: %v", requestedEpoch, err) } } else { - committeeAssignments, proposerIndexToSlot, err = helpers.CommitteeAssignments(headState, requestedEpoch) + committeeAssignments, proposerIndexToSlots, err = helpers.CommitteeAssignments(headState, requestedEpoch) if err != nil { return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err) } @@ -147,7 +147,7 @@ func (bs *Server) ListValidatorAssignments( BeaconCommittees: comAssignment.Committee, CommitteeIndex: comAssignment.CommitteeIndex, AttesterSlot: comAssignment.AttesterSlot, - ProposerSlot: proposerIndexToSlot[index], + ProposerSlots: proposerIndexToSlots[index], PublicKey: pubkey[:], } res = append(res, assign) @@ -173,7 +173,7 @@ func archivedValidatorCommittee( attesterSeed := bytesutil.ToBytes32(archivedInfo.AttesterSeed) startSlot := helpers.StartSlot(epoch) - proposerIndexToSlot := make(map[uint64]uint64) + proposerIndexToSlots := make(map[uint64][]uint64) activeVals := make([]*ethpb.Validator, len(archivedBalances)) for i, bal := range archivedBalances { activeVals[i] = ðpb.Validator{EffectiveBalance: bal} @@ -186,7 +186,7 @@ func archivedValidatorCommittee( if err != nil { return nil, errors.Wrapf(err, "could not check proposer at slot %d", slot) } - proposerIndexToSlot[i] = slot + proposerIndexToSlots[i] = append(proposerIndexToSlots[i], slot) } assignmentMap := make(map[uint64]*ethpb.ValidatorAssignments_CommitteeAssignment) @@ -208,7 +208,7 @@ func archivedValidatorCommittee( BeaconCommittees: committee, CommitteeIndex: i, AttesterSlot: slot, - ProposerSlot: proposerIndexToSlot[index], + ProposerSlots: proposerIndexToSlots[index], } } } diff --git a/beacon-chain/rpc/beacon/assignments_test.go b/beacon-chain/rpc/beacon/assignments_test.go index 34dc53cc30..11bb7e19f1 100644 --- a/beacon-chain/rpc/beacon/assignments_test.go +++ b/beacon-chain/rpc/beacon/assignments_test.go @@ -19,6 +19,7 @@ import ( stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/params" + "github.com/prysmaticlabs/prysm/shared/testutil" ) func 
TestServer_ListAssignments_CannotRequestFutureEpoch(t *testing.T) { @@ -140,7 +141,7 @@ func TestServer_ListAssignments_Pagination_DefaultPageSize_NoArchive(t *testing. defer dbTest.TeardownDB(t, db) ctx := context.Background() - count := 1000 + count := 500 validators := make([]*ethpb.Validator, 0, count) for i := 0; i < count; i++ { pubKey := make([]byte, params.BeaconConfig().BLSPubkeyLength) @@ -171,11 +172,8 @@ func TestServer_ListAssignments_Pagination_DefaultPageSize_NoArchive(t *testing. t.Fatal(err) } - s, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Validators: validators, - RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), - }) - if err != nil { + s := testutil.NewBeaconState() + if err := s.SetValidators(validators); err != nil { t.Fatal(err) } if err := db.SaveState(ctx, s, blockRoot); err != nil { @@ -211,24 +209,23 @@ func TestServer_ListAssignments_Pagination_DefaultPageSize_NoArchive(t *testing. if err != nil { t.Fatal(err) } + committeeAssignments, proposerIndexToSlots, err := helpers.CommitteeAssignments(s, 0) + if err != nil { + t.Fatal(err) + } for _, index := range activeIndices[0:params.BeaconConfig().DefaultPageSize] { - committee, committeeIndex, attesterSlot, proposerSlot, err := helpers.CommitteeAssignment(s, 0, index) - if err != nil { - t.Fatal(err) - } val, err := s.ValidatorAtIndex(index) if err != nil { t.Fatal(err) } wanted = append(wanted, ðpb.ValidatorAssignments_CommitteeAssignment{ - BeaconCommittees: committee, - CommitteeIndex: committeeIndex, - AttesterSlot: attesterSlot, - ProposerSlot: proposerSlot, + BeaconCommittees: committeeAssignments[index].Committee, + CommitteeIndex: committeeAssignments[index].CommitteeIndex, + AttesterSlot: committeeAssignments[index].AttesterSlot, + ProposerSlots: proposerIndexToSlots[index], PublicKey: val.PublicKey, }) } - if !reflect.DeepEqual(res.Assignments, wanted) { t.Error("Did not receive wanted assignments") } @@ -240,8 +237,8 @@ func TestServer_ListAssignments_Pagination_DefaultPageSize_FromArchive(t *testin defer dbTest.TeardownDB(t, db) ctx := context.Background() - count := 1000 - validators := make([]*ethpb.Validator, 0, count) + count := 500 + validators := make([]*ethpb.Validator, 0) balances := make([]uint64, count) for i := 0; i < count; i++ { pubKey := make([]byte, params.BeaconConfig().BLSPubkeyLength) @@ -270,12 +267,11 @@ func TestServer_ListAssignments_Pagination_DefaultPageSize_FromArchive(t *testin if err != nil { t.Fatal(err) } - s, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Validators: validators, - Balances: balances, - RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), - }) - if err != nil { + s := testutil.NewBeaconState() + if err := s.SetValidators(validators); err != nil { + t.Fatal(err) + } + if err := s.SetBalances(balances); err != nil { t.Fatal(err) } if err := db.SaveState(ctx, s, blockRoot); err != nil { @@ -326,23 +322,22 @@ func TestServer_ListAssignments_Pagination_DefaultPageSize_FromArchive(t *testin if err != nil { t.Fatal(err) } + committeeAssignments, proposerIndexToSlots, err := helpers.CommitteeAssignments(s, 0) + if err != nil { + t.Fatal(err) + } for _, index := range activeIndices[0:params.BeaconConfig().DefaultPageSize] { - committee, committeeIndex, attesterSlot, proposerSlot, err := helpers.CommitteeAssignment(s, 0, index) - if err != nil { - t.Fatal(err) - } val, err := s.ValidatorAtIndex(index) if err != nil { t.Fatal(err) } - assign := 
ðpb.ValidatorAssignments_CommitteeAssignment{ - BeaconCommittees: committee, - CommitteeIndex: committeeIndex, - AttesterSlot: attesterSlot, - ProposerSlot: proposerSlot, + wanted = append(wanted, ðpb.ValidatorAssignments_CommitteeAssignment{ + BeaconCommittees: committeeAssignments[index].Committee, + CommitteeIndex: committeeAssignments[index].CommitteeIndex, + AttesterSlot: committeeAssignments[index].AttesterSlot, + ProposerSlots: proposerIndexToSlots[index], PublicKey: val.PublicKey, - } - wanted = append(wanted, assign) + }) } res, err := bs.ListValidatorAssignments(context.Background(), ðpb.ListValidatorAssignmentsRequest{ @@ -377,11 +372,8 @@ func TestServer_ListAssignments_FilterPubkeysIndices_NoPagination(t *testing.T) if err != nil { t.Fatal(err) } - s, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Validators: validators, - RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), - }) - if err != nil { + s := testutil.NewBeaconState() + if err := s.SetValidators(validators); err != nil { t.Fatal(err) } if err := db.SaveState(ctx, s, blockRoot); err != nil { @@ -420,20 +412,20 @@ func TestServer_ListAssignments_FilterPubkeysIndices_NoPagination(t *testing.T) if err != nil { t.Fatal(err) } + committeeAssignments, proposerIndexToSlots, err := helpers.CommitteeAssignments(s, 0) + if err != nil { + t.Fatal(err) + } for _, index := range activeIndices[1:4] { - committee, committeeIndex, attesterSlot, proposerSlot, err := helpers.CommitteeAssignment(s, 0, index) - if err != nil { - t.Fatal(err) - } val, err := s.ValidatorAtIndex(index) if err != nil { t.Fatal(err) } wanted = append(wanted, ðpb.ValidatorAssignments_CommitteeAssignment{ - BeaconCommittees: committee, - CommitteeIndex: committeeIndex, - AttesterSlot: attesterSlot, - ProposerSlot: proposerSlot, + BeaconCommittees: committeeAssignments[index].Committee, + CommitteeIndex: committeeAssignments[index].CommitteeIndex, + AttesterSlot: committeeAssignments[index].AttesterSlot, + ProposerSlots: proposerIndexToSlots[index], PublicKey: val.PublicKey, }) } @@ -463,11 +455,8 @@ func TestServer_ListAssignments_CanFilterPubkeysIndices_WithPagination(t *testin if err != nil { t.Fatal(err) } - s, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Validators: validators, - RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), - }) - if err != nil { + s := testutil.NewBeaconState() + if err := s.SetValidators(validators); err != nil { t.Fatal(err) } if err := db.SaveState(ctx, s, blockRoot); err != nil { @@ -502,20 +491,20 @@ func TestServer_ListAssignments_CanFilterPubkeysIndices_WithPagination(t *testin if err != nil { t.Fatal(err) } + committeeAssignments, proposerIndexToSlots, err := helpers.CommitteeAssignments(s, 0) + if err != nil { + t.Fatal(err) + } for _, index := range activeIndices[3:5] { - committee, committeeIndex, attesterSlot, proposerSlot, err := helpers.CommitteeAssignment(s, 0, index) - if err != nil { - t.Fatal(err) - } val, err := s.ValidatorAtIndex(index) if err != nil { t.Fatal(err) } assignments = append(assignments, ðpb.ValidatorAssignments_CommitteeAssignment{ - BeaconCommittees: committee, - CommitteeIndex: committeeIndex, - AttesterSlot: attesterSlot, - ProposerSlot: proposerSlot, + BeaconCommittees: committeeAssignments[index].Committee, + CommitteeIndex: committeeAssignments[index].CommitteeIndex, + AttesterSlot: committeeAssignments[index].AttesterSlot, + ProposerSlots: proposerIndexToSlots[index], PublicKey: val.PublicKey, }) } @@ -537,21 
+526,20 @@ func TestServer_ListAssignments_CanFilterPubkeysIndices_WithPagination(t *testin if err != nil { t.Fatal(err) } - + cAssignments, proposerIndexToSlots, err := helpers.CommitteeAssignments(s, 0) + if err != nil { + t.Fatal(err) + } for _, index := range activeIndices[6:7] { - committee, committeeIndex, attesterSlot, proposerSlot, err := helpers.CommitteeAssignment(s, 0, index) - if err != nil { - t.Fatal(err) - } val, err := s.ValidatorAtIndex(index) if err != nil { t.Fatal(err) } assignments = append(assignments, ðpb.ValidatorAssignments_CommitteeAssignment{ - BeaconCommittees: committee, - CommitteeIndex: committeeIndex, - AttesterSlot: attesterSlot, - ProposerSlot: proposerSlot, + BeaconCommittees: cAssignments[index].Committee, + CommitteeIndex: cAssignments[index].CommitteeIndex, + AttesterSlot: cAssignments[index].AttesterSlot, + ProposerSlots: proposerIndexToSlots[index], PublicKey: val.PublicKey, }) } diff --git a/beacon-chain/rpc/beacon/attestations.go b/beacon-chain/rpc/beacon/attestations.go index cc42ef11ad..5ec0164c73 100644 --- a/beacon-chain/rpc/beacon/attestations.go +++ b/beacon-chain/rpc/beacon/attestations.go @@ -15,6 +15,7 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/db/filters" "github.com/prysmaticlabs/prysm/beacon-chain/flags" "github.com/prysmaticlabs/prysm/shared/attestationutil" + "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/pagination" "github.com/prysmaticlabs/prysm/shared/params" "github.com/sirupsen/logrus" @@ -102,7 +103,6 @@ func (bs *Server) ListIndexedAttestations( ) (*ethpb.ListIndexedAttestationsResponse, error) { blocks := make([]*ethpb.SignedBeaconBlock, 0) var err error - epoch := helpers.SlotToEpoch(bs.GenesisTimeFetcher.CurrentSlot()) switch q := req.QueryFilter.(type) { case *ethpb.ListIndexedAttestationsRequest_GenesisEpoch: blocks, err = bs.BeaconDB.Blocks(ctx, filters.NewFilter().SetStartEpoch(0).SetEndEpoch(0)) @@ -135,30 +135,40 @@ func (bs *Server) ListIndexedAttestations( }, nil } - committeesBySlot, _, err := bs.retrieveCommitteesForEpoch(ctx, epoch) - if err != nil { - return nil, status.Errorf( - codes.Internal, - "Could not retrieve committees for epoch %d: %v", - epoch, - err, - ) - } - // We use the retrieved committees for the epoch to convert all attestations // into indexed form effectively. indexedAtts := make([]*ethpb.IndexedAttestation, numAttestations, numAttestations) - startSlot := helpers.StartSlot(epoch) - endSlot := startSlot + params.BeaconConfig().SlotsPerEpoch - for i := 0; i < len(indexedAtts); i++ { + for i := 0; i < len(atts); i++ { att := atts[i] - // Out of range check, the attestation slot cannot be greater - // the last slot of the requested epoch or smaller than its start slot - // given committees are accessed as a map of slot -> commitees list, where there are - // SLOTS_PER_EPOCH keys in the map. 
- if att.Data.Slot < startSlot || att.Data.Slot > endSlot { - continue + epoch := helpers.SlotToEpoch(att.Data.Slot) + attState, err := bs.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(att.Data.BeaconBlockRoot)) + if err != nil { + return nil, status.Errorf( + codes.Internal, + "Could not retrieve state for attestation data block root %v: %v", + att.Data.BeaconBlockRoot, + err, + ) } + activeIndices, err := helpers.ActiveValidatorIndices(attState, epoch) + if err != nil { + return nil, status.Errorf( + codes.Internal, + "Could not retrieve active validator indices for epoch %d: %v", + epoch, + err, + ) + } + seed, err := helpers.Seed(attState, epoch, params.BeaconConfig().DomainBeaconAttester) + if err != nil { + return nil, status.Errorf( + codes.Internal, + "Could not seed for epoch %d: %v", + epoch, + err, + ) + } + committeesBySlot, err := computeCommittees(helpers.StartSlot(epoch), activeIndices, seed) committee := committeesBySlot[att.Data.Slot].Committees[att.Data.CommitteeIndex] idxAtt := attestationutil.ConvertToIndexed(ctx, atts[i], committee.ValidatorIndices) indexedAtts[i] = idxAtt diff --git a/beacon-chain/rpc/beacon/attestations_test.go b/beacon-chain/rpc/beacon/attestations_test.go index 0482645b63..f4ee65754f 100644 --- a/beacon-chain/rpc/beacon/attestations_test.go +++ b/beacon-chain/rpc/beacon/attestations_test.go @@ -17,6 +17,7 @@ import ( "github.com/prysmaticlabs/go-bitfield" "github.com/prysmaticlabs/go-ssz" mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" + "github.com/prysmaticlabs/prysm/beacon-chain/cache" "github.com/prysmaticlabs/prysm/beacon-chain/core/feed" "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" @@ -25,6 +26,7 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations" mockRPC "github.com/prysmaticlabs/prysm/beacon-chain/rpc/testing" stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/attestationutil" "github.com/prysmaticlabs/prysm/shared/bytesutil" @@ -536,6 +538,8 @@ func TestServer_ListIndexedAttestations_GenesisEpoch(t *testing.T) { helpers.ClearCache() ctx := context.Background() + params.OverrideBeaconConfig(params.MainnetConfig()) + defer params.OverrideBeaconConfig(params.MinimalSpecConfig()) count := params.BeaconConfig().SlotsPerEpoch atts := make([]*ethpb.Attestation, 0, count) for i := uint64(0); i < count; i++ { @@ -563,22 +567,14 @@ func TestServer_ListIndexedAttestations_GenesisEpoch(t *testing.T) { // We setup 128 validators. 
numValidators := 128 - headState := setupActiveValidators(t, db, numValidators) + state := setupActiveValidators(t, db, numValidators) - randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector) - for i := 0; i < len(randaoMixes); i++ { - randaoMixes[i] = make([]byte, 32) - } - if err := headState.SetRandaoMixes(randaoMixes); err != nil { - t.Fatal(err) - } - - activeIndices, err := helpers.ActiveValidatorIndices(headState, 0) + activeIndices, err := helpers.ActiveValidatorIndices(state, 0) if err != nil { t.Fatal(err) } epoch := uint64(0) - attesterSeed, err := helpers.Seed(headState, epoch, params.BeaconConfig().DomainBeaconAttester) + attesterSeed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester) if err != nil { t.Fatal(err) } @@ -586,29 +582,35 @@ func TestServer_ListIndexedAttestations_GenesisEpoch(t *testing.T) { if err != nil { t.Fatal(err) } - // Next up we convert the test attestations to indexed form: indexedAtts := make([]*ethpb.IndexedAttestation, len(atts), len(atts)) for i := 0; i < len(indexedAtts); i++ { att := atts[i] committee := committees[att.Data.Slot].Committees[att.Data.CommitteeIndex] idxAtt := attestationutil.ConvertToIndexed(ctx, atts[i], committee.ValidatorIndices) - if err != nil { - t.Fatalf("Could not convert attestation to indexed: %v", err) - } indexedAtts[i] = idxAtt } + summaryCache := cache.NewStateSummaryCache() bs := &Server{ BeaconDB: db, - HeadFetcher: &mock.ChainService{ - State: headState, - }, GenesisTimeFetcher: &mock.ChainService{ Genesis: time.Now(), }, + StateGen: stategen.New(db, summaryCache), } - + root := bytesutil.ToBytes32([]byte("root")) + if err := db.SaveState(ctx, state, root); err != nil { + t.Fatal(err) + } + stateRoot, err := state.HashTreeRoot(ctx) + if err != nil { + t.Fatal(err) + } + summaryCache.Put(root, &pbp2p.StateSummary{ + Slot: 0, + Root: stateRoot[:], + }) res, err := bs.ListIndexedAttestations(ctx, ðpb.ListIndexedAttestationsRequest{ QueryFilter: ðpb.ListIndexedAttestationsRequest_GenesisEpoch{ GenesisEpoch: true, @@ -666,24 +668,16 @@ func TestServer_ListIndexedAttestations_ArchivedEpoch(t *testing.T) { // We setup 128 validators. 
numValidators := 128 - headState := setupActiveValidators(t, db, numValidators) - - randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector) - for i := 0; i < len(randaoMixes); i++ { - randaoMixes[i] = make([]byte, 32) - } - if err := headState.SetRandaoMixes(randaoMixes); err != nil { - t.Fatal(err) - } - if err := headState.SetSlot(startSlot); err != nil { + state := setupActiveValidators(t, db, numValidators) + if err := state.SetSlot(startSlot); err != nil { t.Fatal(err) } - activeIndices, err := helpers.ActiveValidatorIndices(headState, epoch) + activeIndices, err := helpers.ActiveValidatorIndices(state, epoch) if err != nil { t.Fatal(err) } - attesterSeed, err := helpers.Seed(headState, epoch, params.BeaconConfig().DomainBeaconAttester) + attesterSeed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester) if err != nil { t.Fatal(err) } @@ -698,22 +692,18 @@ func TestServer_ListIndexedAttestations_ArchivedEpoch(t *testing.T) { att := atts[i] committee := committees[att.Data.Slot].Committees[att.Data.CommitteeIndex] idxAtt := attestationutil.ConvertToIndexed(ctx, atts[i], committee.ValidatorIndices) - if err != nil { - t.Fatalf("Could not convert attestation to indexed: %v", err) - } indexedAtts[i] = idxAtt } bs := &Server{ BeaconDB: db, - HeadFetcher: &mock.ChainService{ - State: headState, - }, GenesisTimeFetcher: &mock.ChainService{ Genesis: time.Now(), }, } - + if err := db.SaveState(ctx, state, bytesutil.ToBytes32([]byte("root"))); err != nil { + t.Fatal(err) + } res, err := bs.ListIndexedAttestations(ctx, ðpb.ListIndexedAttestationsRequest{ QueryFilter: ðpb.ListIndexedAttestationsRequest_Epoch{ Epoch: epoch, @@ -946,11 +936,11 @@ func TestServer_StreamIndexedAttestations_OK(t *testing.T) { }, }, } - encoded, err := ssz.Marshal(attExample.Data) + encoded, err := helpers.ComputeSigningRoot(attExample.Data, []byte{}) if err != nil { t.Fatal(err) } - sig := privKeys[j].Sign(encoded, 0 /*domain*/) + sig := privKeys[j].Sign(encoded[:]) attExample.Signature = sig.Marshal() var indexInCommittee uint64 @@ -990,9 +980,6 @@ func TestServer_StreamIndexedAttestations_OK(t *testing.T) { att := aggAtts[i] committee := committees[att.Data.Slot].Committees[att.Data.CommitteeIndex] idxAtt := attestationutil.ConvertToIndexed(ctx, att, committee.ValidatorIndices) - if err != nil { - t.Fatalf("Could not convert attestation to indexed: %v", err) - } indexedAtts[i] = idxAtt } diff --git a/beacon-chain/rpc/beacon/committees_test.go b/beacon-chain/rpc/beacon/committees_test.go index a1d3ad9521..d7fababdb6 100644 --- a/beacon-chain/rpc/beacon/committees_test.go +++ b/beacon-chain/rpc/beacon/committees_test.go @@ -17,6 +17,7 @@ import ( pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/roughtime" + "github.com/prysmaticlabs/prysm/shared/testutil" "gopkg.in/d4l3k/messagediff.v1" ) @@ -254,9 +255,12 @@ func setupActiveValidators(t *testing.T, db db.Database, count int) *stateTrie.B WithdrawalCredentials: make([]byte, 32), }) } - st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{Validators: validators, Balances: balances}) - if err != nil { - t.Fatal(err) + s := testutil.NewBeaconState() + if err := s.SetValidators(validators); err != nil { + return nil } - return st + if err := s.SetBalances(balances); err != nil { + return nil + } + return s } diff --git a/beacon-chain/rpc/beacon/slashings.go b/beacon-chain/rpc/beacon/slashings.go index 5326cf7b5d..682aefc479 
100644 --- a/beacon-chain/rpc/beacon/slashings.go +++ b/beacon-chain/rpc/beacon/slashings.go @@ -30,7 +30,7 @@ func (bs *Server) SubmitProposerSlashing( } } return ðpb.SubmitSlashingResponse{ - SlashedIndices: []uint64{req.ProposerIndex}, + SlashedIndices: []uint64{req.Header_1.Header.ProposerIndex}, }, nil } diff --git a/beacon-chain/rpc/beacon/validators_stream.go b/beacon-chain/rpc/beacon/validators_stream.go index da078d4580..9452864ba3 100644 --- a/beacon-chain/rpc/beacon/validators_stream.go +++ b/beacon-chain/rpc/beacon/validators_stream.go @@ -519,7 +519,8 @@ func (is *infostream) depositQueueTimestamp(eth1BlockNumber *big.Int) (uint64, e followTime := time.Duration(params.BeaconConfig().Eth1FollowDistance*params.BeaconConfig().GoerliBlockTime) * time.Second eth1UnixTime := time.Unix(int64(blockTimestamp), 0).Add(followTime) - votingPeriod := time.Duration(params.BeaconConfig().SlotsPerEth1VotingPeriod*params.BeaconConfig().SecondsPerSlot) * time.Second + period := params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().EpochsPerEth1VotingPeriod + votingPeriod := time.Duration(period*params.BeaconConfig().SecondsPerSlot) * time.Second activationTime := eth1UnixTime.Add(votingPeriod) eth2Genesis := time.Unix(int64(is.genesisTime), 0) diff --git a/beacon-chain/rpc/beacon/validators_test.go b/beacon-chain/rpc/beacon/validators_test.go index d4e7185c03..98b92a0da9 100644 --- a/beacon-chain/rpc/beacon/validators_test.go +++ b/beacon-chain/rpc/beacon/validators_test.go @@ -13,10 +13,7 @@ import ( "github.com/gogo/protobuf/proto" ptypes "github.com/gogo/protobuf/types" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-bitfield" "github.com/prysmaticlabs/go-ssz" - pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" - mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" @@ -24,7 +21,9 @@ import ( dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" "github.com/prysmaticlabs/prysm/beacon-chain/flags" stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" + pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/params" + "github.com/prysmaticlabs/prysm/shared/testutil" ) func init() { @@ -40,10 +39,8 @@ func TestServer_ListValidatorBalances_CannotRequestFutureEpoch(t *testing.T) { defer dbTest.TeardownDB(t, db) ctx := context.Background() - st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: 0, - }) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(0); err != nil { t.Fatal(err) } bs := &Server{ @@ -71,10 +68,8 @@ func TestServer_ListValidatorBalances_NoResults(t *testing.T) { defer dbTest.TeardownDB(t, db) ctx := context.Background() - st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: 0, - }) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(0); err != nil { t.Fatal(err) } bs := &Server{ @@ -125,12 +120,14 @@ func TestServer_ListValidatorBalances_DefaultResponse_NoArchive(t *testing.T) { Balance: params.BeaconConfig().MaxEffectiveBalance, } } - st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: 0, - Validators: validators, - Balances: balances, - }) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(0); err != nil { + t.Fatal(err) + } + if err := st.SetValidators(validators); err != nil { + t.Fatal(err) + } + if err 
:= st.SetBalances(balances); err != nil { t.Fatal(err) } bs := &Server{ @@ -189,12 +186,14 @@ func TestServer_ListValidatorBalances_DefaultResponse_FromArchive(t *testing.T) if err := db.SaveArchivedBalances(ctx, 50, oldBalances); err != nil { t.Fatal(err) } - st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: helpers.StartSlot(100 /* epoch 100 */), - Validators: validators, - Balances: balances, - }) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(helpers.StartSlot(100) /* epoch 100 */); err != nil { + t.Fatal(err) + } + if err := st.SetValidators(validators); err != nil { + t.Fatal(err) + } + if err := st.SetBalances(balances); err != nil { t.Fatal(err) } bs := &Server{ @@ -430,7 +429,7 @@ func TestServer_ListValidatorBalances_OutOfRange(t *testing.T) { req := ðpb.ListValidatorBalancesRequest{Indices: []uint64{uint64(1)}} wanted := "does not exist" - if _, err := bs.ListValidatorBalances(context.Background(), req); !strings.Contains(err.Error(), wanted) { + if _, err := bs.ListValidatorBalances(context.Background(), req); err == nil || !strings.Contains(err.Error(), wanted) { t.Errorf("Expected error %v, received %v", wanted, err) } } @@ -450,12 +449,14 @@ func TestServer_ListValidatorBalances_FromArchive(t *testing.T) { for i := 0; i < len(newerBalances); i++ { newerBalances[i] = balances[i] * 2 } - st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: params.BeaconConfig().SlotsPerEpoch * 3, - Validators: validators, - Balances: newerBalances, - }) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(params.BeaconConfig().SlotsPerEpoch * 3); err != nil { + t.Fatal(err) + } + if err := st.SetValidators(validators); err != nil { + t.Fatal(err) + } + if err := st.SetBalances(newerBalances); err != nil { t.Fatal(err) } bs := &Server{ @@ -499,12 +500,14 @@ func TestServer_ListValidatorBalances_FromArchive_NewValidatorNotFound(t *testin } newValidators, newBalances := setupValidators(t, db, 200) - st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: params.BeaconConfig().SlotsPerEpoch * 3, - Validators: newValidators, - Balances: newBalances, - }) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(params.BeaconConfig().SlotsPerEpoch * 3); err != nil { + t.Fatal(err) + } + if err := st.SetValidators(newValidators); err != nil { + t.Fatal(err) + } + if err := st.SetBalances(newBalances); err != nil { t.Fatal(err) } bs := &Server{ @@ -518,7 +521,7 @@ func TestServer_ListValidatorBalances_FromArchive_NewValidatorNotFound(t *testin QueryFilter: ðpb.ListValidatorBalancesRequest_Epoch{Epoch: 0}, Indices: []uint64{1, 150, 161}, } - if _, err := bs.ListValidatorBalances(context.Background(), req); !strings.Contains(err.Error(), "does not exist") { + if _, err := bs.ListValidatorBalances(context.Background(), req); err == nil || !strings.Contains(err.Error(), "does not exist") { t.Errorf("Wanted out of range error for including newer validators in the arguments, received %v", err) } } @@ -528,10 +531,8 @@ func TestServer_ListValidators_CannotRequestFutureEpoch(t *testing.T) { defer dbTest.TeardownDB(t, db) ctx := context.Background() - st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: 0, - }) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(0); err != nil { t.Fatal(err) } bs := &Server{ @@ -559,10 +560,8 @@ func TestServer_ListValidators_NoResults(t *testing.T) { defer dbTest.TeardownDB(t, db) ctx := context.Background() - st, err := 
stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: 0, - }) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(0); err != nil { t.Fatal(err) } @@ -628,11 +627,11 @@ func TestServer_ListValidators_OnlyActiveValidators(t *testing.T) { } } } - st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Validators: validators, - Balances: balances, - }) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetValidators(validators); err != nil { + t.Fatal(err) + } + if err := st.SetBalances(balances); err != nil { t.Fatal(err) } @@ -909,7 +908,7 @@ func TestServer_ListValidators_PaginationOutOfRange(t *testing.T) { req := ðpb.ListValidatorsRequest{PageToken: strconv.Itoa(1), PageSize: 100} wanted := fmt.Sprintf("page start %d >= list %d", req.PageSize, len(validators)) - if _, err := bs.ListValidators(context.Background(), req); !strings.Contains(err.Error(), wanted) { + if _, err := bs.ListValidators(context.Background(), req); err == nil || !strings.Contains(err.Error(), wanted) { t.Errorf("Expected error %v, received %v", wanted, err) } } @@ -920,7 +919,7 @@ func TestServer_ListValidators_ExceedsMaxPageSize(t *testing.T) { wanted := fmt.Sprintf("Requested page size %d can not be greater than max size %d", exceedsMax, flags.Get().MaxPageSize) req := ðpb.ListValidatorsRequest{PageToken: strconv.Itoa(0), PageSize: exceedsMax} - if _, err := bs.ListValidators(context.Background(), req); !strings.Contains(err.Error(), wanted) { + if _, err := bs.ListValidators(context.Background(), req); err == nil || !strings.Contains(err.Error(), wanted) { t.Errorf("Expected error %v, received %v", wanted, err) } } @@ -987,11 +986,11 @@ func TestServer_ListValidators_FromOldEpoch(t *testing.T) { } } - st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: helpers.StartSlot(30), - Validators: validators, - }) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(helpers.StartSlot(30)); err != nil { + t.Fatal(err) + } + if err := st.SetValidators(validators); err != nil { t.Fatal(err) } bs := &Server{ @@ -1041,10 +1040,8 @@ func TestServer_GetValidator(t *testing.T) { } } - st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Validators: validators, - }) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetValidators(validators); err != nil { t.Fatal(err) } @@ -1129,10 +1126,8 @@ func TestServer_GetValidatorActiveSetChanges_CannotRequestFutureEpoch(t *testing defer dbTest.TeardownDB(t, db) ctx := context.Background() - st, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: 0, - }) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(0); err != nil { t.Fatal(err) } bs := &Server{ @@ -1158,11 +1153,11 @@ func TestServer_GetValidatorActiveSetChanges_CannotRequestFutureEpoch(t *testing func TestServer_GetValidatorActiveSetChanges(t *testing.T) { ctx := context.Background() validators := make([]*ethpb.Validator, 8) - headState, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: 0, - Validators: validators, - }) - if err != nil { + headState := testutil.NewBeaconState() + if err := headState.SetSlot(0); err != nil { + t.Fatal(err) + } + if err := headState.SetValidators(validators); err != nil { t.Fatal(err) } for i := 0; i < len(validators); i++ { @@ -1252,11 +1247,11 @@ func TestServer_GetValidatorActiveSetChanges_FromArchive(t *testing.T) { defer dbTest.TeardownDB(t, db) ctx := context.Background() validators := make([]*ethpb.Validator, 8) - headState, err := 
stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: helpers.StartSlot(100), - Validators: validators, - }) - if err != nil { + headState := testutil.NewBeaconState() + if err := headState.SetSlot(helpers.StartSlot(100)); err != nil { + t.Fatal(err) + } + if err := headState.SetValidators(validators); err != nil { t.Fatal(err) } activatedIndices := make([]uint64, 0) @@ -1437,13 +1432,12 @@ func TestServer_GetValidatorQueue_ExitedValidatorLeavesQueue(t *testing.T) { PublicKey: []byte("2"), }, } - headState, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Validators: validators, - FinalizedCheckpoint: ðpb.Checkpoint{ - Epoch: 0, - }, - }) - if err != nil { + + headState := testutil.NewBeaconState() + if err := headState.SetValidators(validators); err != nil { + t.Fatal(err) + } + if err := headState.SetFinalizedCheckpoint(ðpb.Checkpoint{Epoch: 0}); err != nil { t.Fatal(err) } bs := &Server{ @@ -1557,10 +1551,8 @@ func TestServer_GetValidatorParticipation_CannotRequestCurrentEpoch(t *testing.T defer dbTest.TeardownDB(t, db) ctx := context.Background() - headState, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: helpers.StartSlot(2), - }) - if err != nil { + headState := testutil.NewBeaconState() + if err := headState.SetSlot(helpers.StartSlot(2)); err != nil { t.Fatal(err) } bs := &Server{ @@ -1588,10 +1580,8 @@ func TestServer_GetValidatorParticipation_CannotRequestFutureEpoch(t *testing.T) defer dbTest.TeardownDB(t, db) ctx := context.Background() - headState, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: 0, - }) - if err != nil { + headState := testutil.NewBeaconState() + if err := headState.SetSlot(0); err != nil { t.Fatal(err) } bs := &Server{ @@ -1628,13 +1618,11 @@ func TestServer_GetValidatorParticipation_FromArchive(t *testing.T) { t.Fatal(err) } - headState, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: helpers.StartSlot(epoch + 1), - FinalizedCheckpoint: ðpb.Checkpoint{ - Epoch: epoch + 1, - }, - }) - if err != nil { + headState := testutil.NewBeaconState() + if err := headState.SetSlot(helpers.StartSlot(epoch + 1)); err != nil { + t.Fatal(err) + } + if err := headState.SetFinalizedCheckpoint(ðpb.Checkpoint{Epoch: epoch + 1}); err != nil { t.Fatal(err) } bs := &Server{ @@ -1691,14 +1679,11 @@ func TestServer_GetValidatorParticipation_FromArchive_FinalizedEpoch(t *testing. if err := db.SaveArchivedValidatorParticipation(ctx, epoch, part); err != nil { t.Fatal(err) } - headState, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: helpers.StartSlot(epoch + 10), - FinalizedCheckpoint: ðpb.Checkpoint{ - // We say there have been 5 epochs since finality. 
- Epoch: epoch + 5, - }, - }) - if err != nil { + headState := testutil.NewBeaconState() + if err := headState.SetSlot(helpers.StartSlot(epoch + 10)); err != nil { + t.Fatal(err) + } + if err := headState.SetFinalizedCheckpoint(ðpb.Checkpoint{Epoch: epoch + 5}); err != nil { t.Fatal(err) } @@ -1748,19 +1733,17 @@ func TestServer_GetValidatorParticipation_PrevEpoch(t *testing.T) { } atts := []*pbp2p.PendingAttestation{{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{}}}} - headState, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: epoch*params.BeaconConfig().SlotsPerEpoch + 1, - Validators: validators, - Balances: balances, - BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot), - Slashings: []uint64{0, 1e9, 1e9}, - RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), - CurrentEpochAttestations: atts, - FinalizedCheckpoint: ðpb.Checkpoint{}, - JustificationBits: bitfield.Bitvector4{0x00}, - CurrentJustifiedCheckpoint: ðpb.Checkpoint{}, - }) - if err != nil { + headState := testutil.NewBeaconState() + if err := headState.SetSlot(epoch*params.BeaconConfig().SlotsPerEpoch + 1); err != nil { + t.Fatal(err) + } + if err := headState.SetValidators(validators); err != nil { + t.Fatal(err) + } + if err := headState.SetBalances(balances); err != nil { + t.Fatal(err) + } + if err := headState.SetCurrentEpochAttestations(atts); err != nil { t.Fatal(err) } @@ -1812,24 +1795,22 @@ func TestServer_GetValidatorParticipation_DoesntExist(t *testing.T) { } atts := []*pbp2p.PendingAttestation{{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{}}}} - s, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: epoch*params.BeaconConfig().SlotsPerEpoch + 1, - Validators: validators, - Balances: balances, - BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot), - Slashings: []uint64{0, 1e9, 1e9}, - RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), - CurrentEpochAttestations: atts, - FinalizedCheckpoint: ðpb.Checkpoint{}, - JustificationBits: bitfield.Bitvector4{0x00}, - CurrentJustifiedCheckpoint: ðpb.Checkpoint{}, - }) - if err != nil { + headState := testutil.NewBeaconState() + if err := headState.SetSlot(epoch*params.BeaconConfig().SlotsPerEpoch + 1); err != nil { + t.Fatal(err) + } + if err := headState.SetValidators(validators); err != nil { + t.Fatal(err) + } + if err := headState.SetBalances(balances); err != nil { + t.Fatal(err) + } + if err := headState.SetCurrentEpochAttestations(atts); err != nil { t.Fatal(err) } m := &mock.ChainService{ - State: s, + State: headState, } bs := &Server{ BeaconDB: db, @@ -1904,17 +1885,17 @@ func BenchmarkListValidatorBalances_FromArchive(b *testing.B) { if err := db.SaveArchivedBalances(ctx, 50, oldBalances); err != nil { b.Fatal(err) } - s, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: helpers.StartSlot(100 /* epoch 100 */), - Validators: validators, - }) - if err != nil { + headState := testutil.NewBeaconState() + if err := headState.SetSlot(helpers.StartSlot(100 /* epoch 100 */)); err != nil { + b.Fatal(err) + } + if err := headState.SetValidators(validators); err != nil { b.Fatal(err) } bs := &Server{ BeaconDB: db, HeadFetcher: &mock.ChainService{ - State: s, + State: headState, }, } @@ -1953,12 +1934,11 @@ func setupValidators(t testing.TB, db db.Database, count int) ([]*ethpb.Validato if err != nil { t.Fatal(err) } - s, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Validators: validators, - Balances: balances, - 
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), - }) - if err != nil { + s := testutil.NewBeaconState() + if err := s.SetValidators(validators); err != nil { + t.Fatal(err) + } + if err := s.SetBalances(balances); err != nil { t.Fatal(err) } if err := db.SaveState( diff --git a/beacon-chain/rpc/service.go b/beacon-chain/rpc/service.go index 3981e5566e..e499f86d11 100644 --- a/beacon-chain/rpc/service.go +++ b/beacon-chain/rpc/service.go @@ -261,6 +261,7 @@ func (s *Service) Start() { Broadcaster: s.p2p, ReceivedAttestationsBuffer: make(chan *ethpb.Attestation, 100), CollectedAttestationsBuffer: make(chan []*ethpb.Attestation, 100), + StateGen: s.stateGen, } ethpb.RegisterNodeServer(s.grpcServer, nodeServer) ethpb.RegisterBeaconChainServer(s.grpcServer, beaconChainServer) diff --git a/beacon-chain/rpc/validator/BUILD.bazel b/beacon-chain/rpc/validator/BUILD.bazel index 99ab0f13cb..6f80649e3c 100644 --- a/beacon-chain/rpc/validator/BUILD.bazel +++ b/beacon-chain/rpc/validator/BUILD.bazel @@ -41,8 +41,6 @@ go_library( "//shared/featureconfig:go_default_library", "//shared/hashutil:go_default_library", "//shared/params:go_default_library", - "//shared/roughtime:go_default_library", - "//shared/slotutil:go_default_library", "//shared/traceutil:go_default_library", "//shared/trieutil:go_default_library", "@com_github_gogo_protobuf//types:go_default_library", @@ -86,6 +84,7 @@ go_test( "//beacon-chain/powchain/testing:go_default_library", "//beacon-chain/rpc/testing:go_default_library", "//beacon-chain/state:go_default_library", + "//beacon-chain/state/stategen:go_default_library", "//beacon-chain/sync/initial-sync/testing:go_default_library", "//proto/beacon/db:go_default_library", "//proto/beacon/p2p/v1:go_default_library", diff --git a/beacon-chain/rpc/validator/aggregator.go b/beacon-chain/rpc/validator/aggregator.go index d50cb646f6..f9813fc399 100644 --- a/beacon-chain/rpc/validator/aggregator.go +++ b/beacon-chain/rpc/validator/aggregator.go @@ -12,10 +12,11 @@ import ( "google.golang.org/grpc/status" ) -// SubmitAggregateAndProof is called by a validator when its assigned to be an aggregator. -// The beacon node will broadcast aggregated attestation and proof on the aggregator's behavior. -func (as *Server) SubmitAggregateAndProof(ctx context.Context, req *ethpb.AggregationRequest) (*ethpb.AggregationResponse, error) { - ctx, span := trace.StartSpan(ctx, "AggregatorServer.SubmitAggregation") +// SubmitAggregateSelectionProof is called by a validator when its assigned to be an aggregator. +// The aggregator submits the selection proof to obtain the aggregated attestation +// object to sign over. +func (as *Server) SubmitAggregateSelectionProof(ctx context.Context, req *ethpb.AggregateSelectionRequest) (*ethpb.AggregateSelectionResponse, error) { + ctx, span := trace.StartSpan(ctx, "AggregatorServer.SubmitAggregateSelectionProof") defer span.End() span.AddAttributes(trace.Int64Attribute("slot", int64(req.Slot))) @@ -56,30 +57,47 @@ func (as *Server) SubmitAggregateAndProof(ctx context.Context, req *ethpb.Aggreg return nil, status.Errorf(codes.InvalidArgument, "Validator is not an aggregator") } - // Retrieve the unaggregated attestation from pool. 
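In the rewritten handler that follows, the beacon node no longer broadcasts on the validator's behalf: it first aggregates the unaggregated pool, then returns the single aggregate with the most participation bits for the validator to sign, and broadcasting moves to the new SubmitSignedAggregateSelectionProof. A rough sketch of that best-aggregate selection is shown here; the helper name pickBestAggregate and the demo bitlists are illustrative, not part of the patch.

```go
package main

import (
	"fmt"

	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/go-bitfield"
)

// pickBestAggregate mirrors the selection loop added below: it returns the
// aggregate with the most set aggregation bits, or nil when the pool is empty
// (the RPC maps that case to an Internal "No aggregated attestation" error).
func pickBestAggregate(aggregatedAtts []*ethpb.Attestation) *ethpb.Attestation {
	if len(aggregatedAtts) == 0 {
		return nil
	}
	best := aggregatedAtts[0]
	for _, att := range aggregatedAtts[1:] {
		// More set bits means more validators are covered by this aggregate.
		if att.AggregationBits.Count() > best.AggregationBits.Count() {
			best = att
		}
	}
	return best
}

func main() {
	sparse := bitfield.NewBitlist(8)
	sparse.SetBitAt(1, true)

	dense := bitfield.NewBitlist(8)
	dense.SetBitAt(1, true)
	dense.SetBitAt(2, true)
	dense.SetBitAt(5, true)

	atts := []*ethpb.Attestation{
		{AggregationBits: sparse},
		{AggregationBits: dense},
	}
	fmt.Println(pickBestAggregate(atts).AggregationBits.Count()) // 3
}
```

Returning the densest aggregate maximizes the number of attesters covered by the signed aggregate-and-proof the validator later submits through SubmitSignedAggregateSelectionProof.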
+	if err := as.AttPool.AggregateUnaggregatedAttestations(); err != nil {
+		return nil, status.Errorf(codes.Internal, "Could not aggregate unaggregated attestations")
+	}
 	aggregatedAtts := as.AttPool.AggregatedAttestationsBySlotIndex(req.Slot, req.CommitteeIndex)
-	for _, aggregatedAtt := range aggregatedAtts {
-		if ctx.Err() != nil {
-			return nil, ctx.Err()
-		}
-		if helpers.IsAggregated(aggregatedAtt) {
-			if err := as.P2P.Broadcast(ctx, &ethpb.AggregateAttestationAndProof{
-				AggregatorIndex: validatorIndex,
-				SelectionProof:  req.SlotSignature,
-				Aggregate:       aggregatedAtt,
-			}); err != nil {
-				return nil, status.Errorf(codes.Internal, "Could not broadcast aggregated attestation: %v", err)
-			}
-
-			log.WithFields(logrus.Fields{
-				"slot":            req.Slot,
-				"committeeIndex":  req.CommitteeIndex,
-				"validatorIndex":  validatorIndex,
-				"aggregatedCount": aggregatedAtt.AggregationBits.Count(),
-			}).Debug("Broadcasting aggregated attestation and proof")
+	// Select the best aggregated attestation (i.e., the one with the most aggregated bits).
+	if len(aggregatedAtts) == 0 {
+		return nil, status.Error(codes.Internal, "No aggregated attestation in beacon node")
+	}
+	best := aggregatedAtts[0]
+	for _, aggregatedAtt := range aggregatedAtts[1:] {
+		if aggregatedAtt.AggregationBits.Count() > best.AggregationBits.Count() {
+			best = aggregatedAtt
 		}
 	}
 
-	return &ethpb.AggregationResponse{}, nil
+	a := &ethpb.AggregateAttestationAndProof{
+		Aggregate:       best,
+		SelectionProof:  req.SlotSignature,
+		AggregatorIndex: validatorIndex,
+	}
+	return &ethpb.AggregateSelectionResponse{AggregateAndProof: a}, nil
+}
+
+// SubmitSignedAggregateSelectionProof is called by a validator to broadcast a signed
+// aggregate attestation and proof object.
+func (as *Server) SubmitSignedAggregateSelectionProof(ctx context.Context, req *ethpb.SignedAggregateSubmitRequest) (*ethpb.SignedAggregateSubmitResponse, error) {
+	if req.SignedAggregateAndProof == nil {
+		return nil, status.Error(codes.InvalidArgument, "Signed aggregate request can't be nil")
+	}
+
+	if err := as.P2P.Broadcast(ctx, req.SignedAggregateAndProof); err != nil {
+		return nil, status.Errorf(codes.Internal, "Could not broadcast signed aggregated attestation: %v", err)
+	}
+
+	log.WithFields(logrus.Fields{
+		"slot":            req.SignedAggregateAndProof.Message.Aggregate.Data.Slot,
+		"committeeIndex":  req.SignedAggregateAndProof.Message.Aggregate.Data.CommitteeIndex,
+		"validatorIndex":  req.SignedAggregateAndProof.Message.AggregatorIndex,
+		"aggregatedCount": req.SignedAggregateAndProof.Message.Aggregate.AggregationBits.Count(),
+	}).Debug("Broadcasting aggregated attestation and proof")
+
+	return &ethpb.SignedAggregateSubmitResponse{}, nil
 }
diff --git a/beacon-chain/rpc/validator/aggregator_test.go b/beacon-chain/rpc/validator/aggregator_test.go
index 2a21b4bce8..de99175f1e 100644
--- a/beacon-chain/rpc/validator/aggregator_test.go
+++ b/beacon-chain/rpc/validator/aggregator_test.go
@@ -8,7 +8,6 @@ import (
 
 	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
 	"github.com/prysmaticlabs/go-bitfield"
-	"github.com/prysmaticlabs/go-ssz"
 	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
 	dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
@@ -41,9 +40,9 @@ func TestSubmitAggregateAndProof_Syncing(t *testing.T) {
 		BeaconDB: db,
 	}
 
-	req := &ethpb.AggregationRequest{CommitteeIndex: 1}
+	req := &ethpb.AggregateSelectionRequest{CommitteeIndex: 1}
 	wanted := "Syncing to latest head, not ready to respond"
-	if _, err :=
aggregatorServer.SubmitAggregateAndProof(ctx, req); !strings.Contains(err.Error(), wanted) { + if _, err := aggregatorServer.SubmitAggregateSelectionProof(ctx, req); err == nil || !strings.Contains(err.Error(), wanted) { t.Error("Did not receive wanted error") } } @@ -67,15 +66,15 @@ func TestSubmitAggregateAndProof_CantFindValidatorIndex(t *testing.T) { } priv := bls.RandKey() - sig := priv.Sign([]byte{'A'}, 0) - req := ðpb.AggregationRequest{CommitteeIndex: 1, SlotSignature: sig.Marshal(), PublicKey: pubKey(3)} + sig := priv.Sign([]byte{'A'}) + req := ðpb.AggregateSelectionRequest{CommitteeIndex: 1, SlotSignature: sig.Marshal(), PublicKey: pubKey(3)} wanted := "Could not locate validator index in DB" - if _, err := server.SubmitAggregateAndProof(ctx, req); !strings.Contains(err.Error(), wanted) { - t.Errorf("Did not receive wanted error: expected %v, received %v", wanted, err.Error()) + if _, err := server.SubmitAggregateSelectionProof(ctx, req); err == nil || !strings.Contains(err.Error(), wanted) { + t.Errorf("Did not get wanted error") } } -func TestSubmitAggregateAndProof_IsAggregator(t *testing.T) { +func TestSubmitAggregateAndProof_IsAggregatorAndNoAtts(t *testing.T) { db := dbutil.SetupDB(t) defer dbutil.TeardownDB(t, db) ctx := context.Background() @@ -83,8 +82,8 @@ func TestSubmitAggregateAndProof_IsAggregator(t *testing.T) { s, err := beaconstate.InitializeFromProto(&pbp2p.BeaconState{ RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), Validators: []*ethpb.Validator{ - ðpb.Validator{PublicKey: pubKey(0)}, - ðpb.Validator{PublicKey: pubKey(1)}, + {PublicKey: pubKey(0)}, + {PublicKey: pubKey(1)}, }, }) if err != nil { @@ -99,16 +98,16 @@ func TestSubmitAggregateAndProof_IsAggregator(t *testing.T) { } priv := bls.RandKey() - sig := priv.Sign([]byte{'A'}, 0) + sig := priv.Sign([]byte{'A'}) v, err := s.ValidatorAtIndex(1) if err != nil { t.Fatal(err) } pubKey := v.PublicKey - req := ðpb.AggregationRequest{CommitteeIndex: 1, SlotSignature: sig.Marshal(), PublicKey: pubKey} + req := ðpb.AggregateSelectionRequest{CommitteeIndex: 1, SlotSignature: sig.Marshal(), PublicKey: pubKey} - if _, err := server.SubmitAggregateAndProof(ctx, req); err != nil { - t.Fatal(err) + if _, err := server.SubmitAggregateSelectionProof(ctx, req); err == nil || !strings.Contains(err.Error(), "No aggregated attestation in beacon node") { + t.Error("Did not get wanted error") } } @@ -128,7 +127,7 @@ func TestSubmitAggregateAndProof_AggregateOk(t *testing.T) { if err != nil { t.Fatal(err) } - att1, err := generateAtt(beaconState, 1, privKeys) + att1, err := generateAtt(beaconState, 2, privKeys) if err != nil { t.Fatal(err) } @@ -147,22 +146,22 @@ func TestSubmitAggregateAndProof_AggregateOk(t *testing.T) { } priv := bls.RandKey() - sig := priv.Sign([]byte{'B'}, 0) + sig := priv.Sign([]byte{'B'}) v, err := beaconState.ValidatorAtIndex(1) if err != nil { t.Fatal(err) } pubKey := v.PublicKey - req := ðpb.AggregationRequest{CommitteeIndex: 1, SlotSignature: sig.Marshal(), PublicKey: pubKey} + req := ðpb.AggregateSelectionRequest{CommitteeIndex: 1, SlotSignature: sig.Marshal(), PublicKey: pubKey} - if err := aggregatorServer.AttPool.SaveUnaggregatedAttestation(att0); err != nil { + if err := aggregatorServer.AttPool.SaveAggregatedAttestation(att0); err != nil { t.Fatal(err) } - if err := aggregatorServer.AttPool.SaveUnaggregatedAttestation(att1); err != nil { + if err := aggregatorServer.AttPool.SaveAggregatedAttestation(att1); err != nil { t.Fatal(err) } - if _, err := 
aggregatorServer.SubmitAggregateAndProof(ctx, req); err != nil { + if _, err := aggregatorServer.SubmitAggregateSelectionProof(ctx, req); err != nil { t.Fatal(err) } @@ -187,13 +186,8 @@ func TestSubmitAggregateAndProof_AggregateNotOk(t *testing.T) { defer dbutil.TeardownDB(t, db) ctx := context.Background() - beaconState, privKeys := testutil.DeterministicGenesisState(t, 32) - att0, err := generateAtt(beaconState, 0, privKeys) - if err != nil { - t.Fatal(err) - } - err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay) - if err != nil { + beaconState, _ := testutil.DeterministicGenesisState(t, 32) + if err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay); err != nil { t.Fatal(err) } @@ -206,20 +200,16 @@ func TestSubmitAggregateAndProof_AggregateNotOk(t *testing.T) { } priv := bls.RandKey() - sig := priv.Sign([]byte{'B'}, 0) + sig := priv.Sign([]byte{'B'}) v, err := beaconState.ValidatorAtIndex(1) if err != nil { t.Fatal(err) } pubKey := v.PublicKey - req := ðpb.AggregationRequest{CommitteeIndex: 1, SlotSignature: sig.Marshal(), PublicKey: pubKey} + req := ðpb.AggregateSelectionRequest{CommitteeIndex: 1, SlotSignature: sig.Marshal(), PublicKey: pubKey} - if err := aggregatorServer.AttPool.SaveUnaggregatedAttestation(att0); err != nil { - t.Fatal(err) - } - - if _, err := aggregatorServer.SubmitAggregateAndProof(ctx, req); err != nil { - t.Fatal(err) + if _, err := aggregatorServer.SubmitAggregateSelectionProof(ctx, req); !strings.Contains(err.Error(), "No aggregated attestation in beacon node") { + t.Error("Did not get wanted error") } aggregatedAtts := aggregatorServer.AttPool.AggregatedAttestations() @@ -231,6 +221,7 @@ func TestSubmitAggregateAndProof_AggregateNotOk(t *testing.T) { func generateAtt(state *beaconstate.BeaconState, index uint64, privKeys []*bls.SecretKey) (*ethpb.Attestation, error) { aggBits := bitfield.NewBitlist(4) aggBits.SetBitAt(index, true) + aggBits.SetBitAt(index+1, true) att := ðpb.Attestation{ Data: ðpb.AttestationData{ CommitteeIndex: 1, @@ -244,7 +235,7 @@ func generateAtt(state *beaconstate.BeaconState, index uint64, privKeys []*bls.S return nil, err } attestingIndices := attestationutil.AttestingIndices(att.AggregationBits, committee) - domain, err := helpers.Domain(state.Fork(), 0, params.BeaconConfig().DomainBeaconAttester) + domain, err := helpers.Domain(state.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, params.BeaconConfig().ZeroHash[:]) if err != nil { return nil, err } @@ -254,11 +245,11 @@ func generateAtt(state *beaconstate.BeaconState, index uint64, privKeys []*bls.S att.Signature = zeroSig[:] for i, indice := range attestingIndices { - hashTreeRoot, err := ssz.HashTreeRoot(att.Data) + hashTreeRoot, err := helpers.ComputeSigningRoot(att.Data, domain) if err != nil { return nil, err } - sig := privKeys[indice].Sign(hashTreeRoot[:], domain) + sig := privKeys[indice].Sign(hashTreeRoot[:]) sigs[i] = sig } diff --git a/beacon-chain/rpc/validator/assignments.go b/beacon-chain/rpc/validator/assignments.go index d4cdf71bf7..b06f90b7f4 100644 --- a/beacon-chain/rpc/validator/assignments.go +++ b/beacon-chain/rpc/validator/assignments.go @@ -4,11 +4,9 @@ import ( "context" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/prysm/beacon-chain/cache" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/state" "github.com/prysmaticlabs/prysm/shared/bytesutil" - 
"github.com/prysmaticlabs/prysm/shared/featureconfig" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -36,7 +34,7 @@ func (vs *Server) GetDuties(ctx context.Context, req *ethpb.DutiesRequest) (*eth return nil, status.Errorf(codes.Internal, "Could not process slots up to %d: %v", epochStartSlot, err) } } - committeeAssignments, proposerIndexToSlot, err := helpers.CommitteeAssignments(s, req.Epoch) + committeeAssignments, proposerIndexToSlots, err := helpers.CommitteeAssignments(s, req.Epoch) if err != nil { return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err) } @@ -67,7 +65,7 @@ func (vs *Server) GetDuties(ctx context.Context, req *ethpb.DutiesRequest) (*eth assignment.ValidatorIndex = idx assignment.PublicKey = pubKey assignment.AttesterSlot = ca.AttesterSlot - assignment.ProposerSlot = proposerIndexToSlot[idx] + assignment.ProposerSlots = proposerIndexToSlots[idx] assignment.CommitteeIndex = ca.CommitteeIndex committeeIDs = append(committeeIDs, ca.CommitteeIndex) } @@ -85,11 +83,6 @@ func (vs *Server) GetDuties(ctx context.Context, req *ethpb.DutiesRequest) (*eth } - if featureconfig.Get().EnableDynamicCommitteeSubnets { - cache.CommitteeIDs.AddIDs(committeeIDs, req.Epoch) - cache.CommitteeIDs.AddIDs(nextCommitteeIDs, req.Epoch+1) - } - return ðpb.DutiesResponse{ Duties: validatorAssignments, }, nil diff --git a/beacon-chain/rpc/validator/assignments_test.go b/beacon-chain/rpc/validator/assignments_test.go index 544616fad1..cd6c754399 100644 --- a/beacon-chain/rpc/validator/assignments_test.go +++ b/beacon-chain/rpc/validator/assignments_test.go @@ -112,9 +112,9 @@ func TestGetDuties_OK(t *testing.T) { if err != nil { t.Fatal(err) } - state, err := state.GenesisBeaconState(deposits, 0, eth1Data) + bs, err := state.GenesisBeaconState(deposits, 0, eth1Data) if err != nil { - t.Fatalf("Could not setup genesis state: %v", err) + t.Fatalf("Could not setup genesis bs: %v", err) } genesisRoot, err := ssz.HashTreeRoot(genesis.Block) if err != nil { @@ -135,7 +135,7 @@ func TestGetDuties_OK(t *testing.T) { vs := &Server{ BeaconDB: db, - HeadFetcher: &mockChain.ChainService{State: state, Root: genesisRoot[:]}, + HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]}, SyncChecker: &mockSync.Sync{IsSyncing: false}, } @@ -148,9 +148,9 @@ func TestGetDuties_OK(t *testing.T) { if err != nil { t.Fatalf("Could not call epoch committee assignment %v", err) } - if res.Duties[0].AttesterSlot > state.Slot()+params.BeaconConfig().SlotsPerEpoch { + if res.Duties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch { t.Errorf("Assigned slot %d can't be higher than %d", - res.Duties[0].AttesterSlot, state.Slot()+params.BeaconConfig().SlotsPerEpoch) + res.Duties[0].AttesterSlot, bs.Slot()+params.BeaconConfig().SlotsPerEpoch) } // Test the last validator in registry. @@ -163,9 +163,9 @@ func TestGetDuties_OK(t *testing.T) { if err != nil { t.Fatalf("Could not call epoch committee assignment %v", err) } - if res.Duties[0].AttesterSlot > state.Slot()+params.BeaconConfig().SlotsPerEpoch { + if res.Duties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch { t.Errorf("Assigned slot %d can't be higher than %d", - res.Duties[0].AttesterSlot, state.Slot()+params.BeaconConfig().SlotsPerEpoch) + res.Duties[0].AttesterSlot, bs.Slot()+params.BeaconConfig().SlotsPerEpoch) } // We request for duties for all validators. 
@@ -245,6 +245,7 @@ func TestGetDuties_MultipleKeys_OK(t *testing.T) {
 	genesis := blk.NewGenesisBlock([]byte{})
 	depChainStart := uint64(64)
+	testutil.ResetCache()
 	deposits, _, err := testutil.DeterministicDepositsAndKeys(depChainStart)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -253,9 +254,9 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	state, err := state.GenesisBeaconState(deposits, 0, eth1Data)
+	bs, err := state.GenesisBeaconState(deposits, 0, eth1Data)
 	if err != nil {
-		t.Fatalf("Could not setup genesis state: %v", err)
+		t.Fatalf("Could not setup genesis bs: %v", err)
 	}
 	genesisRoot, err := ssz.HashTreeRoot(genesis.Block)
 	if err != nil {
@@ -271,7 +272,7 @@
 
 	vs := &Server{
 		BeaconDB:    db,
-		HeadFetcher: &mockChain.ChainService{State: state, Root: genesisRoot[:]},
+		HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
 		SyncChecker: &mockSync.Sync{IsSyncing: false},
 	}
 
@@ -291,11 +292,11 @@
 	if len(res.Duties) != 2 {
 		t.Errorf("expected 2 assignments but got %d", len(res.Duties))
 	}
-	if res.Duties[0].AttesterSlot != 4 {
-		t.Errorf("Expected res.Duties[0].AttesterSlot == 4, got %d", res.Duties[0].AttesterSlot)
+	if res.Duties[0].AttesterSlot != 2 {
+		t.Errorf("Expected res.Duties[0].AttesterSlot == 2, got %d", res.Duties[0].AttesterSlot)
 	}
-	if res.Duties[1].AttesterSlot != 3 {
-		t.Errorf("Expected res.Duties[1].AttesterSlot == 3, got %d", res.Duties[0].AttesterSlot)
+	if res.Duties[1].AttesterSlot != 1 {
+		t.Errorf("Expected res.Duties[1].AttesterSlot == 1, got %d", res.Duties[1].AttesterSlot)
 	}
 }
 
@@ -304,7 +305,7 @@ func TestGetDuties_SyncNotReady(t *testing.T) {
 		SyncChecker: &mockSync.Sync{IsSyncing: true},
 	}
 	_, err := vs.GetDuties(context.Background(), &ethpb.DutiesRequest{})
-	if strings.Contains(err.Error(), "syncing to latest head") {
+	if err == nil || strings.Contains(err.Error(), "syncing to latest head") {
 		t.Error("Did not get wanted error")
 	}
 }
@@ -323,9 +324,9 @@ func BenchmarkCommitteeAssignment(b *testing.B) {
 	if err != nil {
 		b.Fatal(err)
 	}
-	state, err := state.GenesisBeaconState(deposits, 0, eth1Data)
+	bs, err := state.GenesisBeaconState(deposits, 0, eth1Data)
 	if err != nil {
-		b.Fatalf("Could not setup genesis state: %v", err)
+		b.Fatalf("Could not setup genesis bs: %v", err)
 	}
 	genesisRoot, err := ssz.HashTreeRoot(genesis.Block)
 	if err != nil {
@@ -341,7 +342,7 @@
 
 	vs := &Server{
 		BeaconDB:    db,
-		HeadFetcher: &mockChain.ChainService{State: state, Root: genesisRoot[:]},
+		HeadFetcher: &mockChain.ChainService{State: bs, Root: genesisRoot[:]},
 		SyncChecker: &mockSync.Sync{IsSyncing: false},
 	}
 
diff --git a/beacon-chain/rpc/validator/attester.go b/beacon-chain/rpc/validator/attester.go
index d5f626e920..7e692ebad6 100644
--- a/beacon-chain/rpc/validator/attester.go
+++ b/beacon-chain/rpc/validator/attester.go
@@ -2,15 +2,13 @@ package validator
 
 import (
 	"context"
-	"errors"
-	"time"
 
+	ptypes "github.com/gogo/protobuf/types"
 	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
 	"github.com/prysmaticlabs/go-ssz"
 	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation"
-	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
 	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
 	stateTrie
"github.com/prysmaticlabs/prysm/beacon-chain/state" @@ -18,9 +16,6 @@ import ( "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/featureconfig" "github.com/prysmaticlabs/prysm/shared/params" - "github.com/prysmaticlabs/prysm/shared/roughtime" - "github.com/prysmaticlabs/prysm/shared/slotutil" - "github.com/prysmaticlabs/prysm/shared/traceutil" "go.opencensus.io/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -38,12 +33,6 @@ func (vs *Server) GetAttestationData(ctx context.Context, req *ethpb.Attestation trace.Int64Attribute("committeeIndex", int64(req.CommitteeIndex)), ) - // If attestation committee subnets are enabled, we track the committee - // index into a cache. - if featureconfig.Get().EnableDynamicCommitteeSubnets { - cache.CommitteeIDs.AddIDs([]uint64{req.CommitteeIndex}, helpers.SlotToEpoch(req.Slot)) - } - if vs.SyncChecker.Syncing() { return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond") } @@ -53,10 +42,6 @@ func (vs *Server) GetAttestationData(ctx context.Context, req *ethpb.Attestation return nil, status.Error(codes.InvalidArgument, msgInvalidAttestationRequest) } - // Attester will either wait until there's a valid block from the expected block proposer of for the assigned input slot - // or one third of the slot has transpired. Whichever comes first. - vs.waitToOneThird(ctx, req.Slot) - res, err := vs.AttestationCache.Get(ctx, req) if err != nil { return nil, status.Errorf(codes.Internal, "Could not retrieve data from attestation cache: %v", err) @@ -97,7 +82,7 @@ func (vs *Server) GetAttestationData(ctx context.Context, req *ethpb.Attestation // processed, we walk up the chain until state.Slot <= req.Slot to prevent producing an // attestation that violates processing constraints. fetchState := vs.BeaconDB.State - if featureconfig.Get().NewStateMgmt { + if !featureconfig.Get().DisableNewStateMgmt { fetchState = vs.StateGen.StateByRoot } for headState.Slot() > req.Slot { @@ -161,12 +146,6 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation return nil, status.Error(codes.InvalidArgument, "Incorrect attestation signature") } - // If attestation committee subnets are enabled, we track the committee - // index into a cache. - if featureconfig.Get().EnableDynamicCommitteeSubnets { - cache.CommitteeIDs.AddIDs([]uint64{att.Data.CommitteeIndex}, helpers.SlotToEpoch(att.Data.Slot)) - } - root, err := ssz.HashTreeRoot(att.Data) if err != nil { return nil, status.Errorf(codes.Internal, "Could not tree hash attestation: %v", err) @@ -200,46 +179,18 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation }, nil } -// waitToOneThird waits until one-third of the way through the slot -// or the head slot equals to the input slot. -func (vs *Server) waitToOneThird(ctx context.Context, slot uint64) { - ctx, span := trace.StartSpan(ctx, "validator.waitToOneThird") - defer span.End() - - // Don't need to wait if current slot is greater than requested slot. - if slot < vs.GenesisTimeFetcher.CurrentSlot() { - return +// SubscribeCommitteeSubnets subscribes to the committee ID subnet given subscribe request. 
+func (vs *Server) SubscribeCommitteeSubnets(ctx context.Context, req *ethpb.CommitteeSubnetsSubscribeRequest) (*ptypes.Empty, error) { + if len(req.Slots) != len(req.CommitteeIds) && len(req.CommitteeIds) != len(req.IsAggregator) { + return nil, status.Error(codes.InvalidArgument, "request fields are not the same length") } - // Set time out to be at start slot time + one-third of slot duration. - slotStartTime := slotutil.SlotStartTime(uint64(vs.GenesisTimeFetcher.GenesisTime().Unix()), slot) - slotOneThirdTime := slotStartTime.Unix() + int64(params.BeaconConfig().SecondsPerSlot/3) - waitDuration := slotOneThirdTime - roughtime.Now().Unix() - timeOut := time.After(time.Duration(waitDuration) * time.Second) - - stateChannel := make(chan *feed.Event, 1) - stateSub := vs.StateNotifier.StateFeed().Subscribe(stateChannel) - defer stateSub.Unsubscribe() - - for { - select { - case event := <-stateChannel: - // Node processed a block, check if the processed block is the same as input slot. - if event.Type == statefeed.BlockProcessed { - d, ok := event.Data.(*statefeed.BlockProcessedData) - if !ok { - err := errors.New("event feed is not type *statefeed.BlockProcessedData") - traceutil.AnnotateError(span, err) - log.Error(err) - continue - } - if slot == d.Slot { - return - } - } - - case <-timeOut: - return + for i := 0; i < len(req.Slots); i++ { + cache.CommitteeIDs.AddAttesterCommiteeID(req.Slots[i], req.CommitteeIds[i]) + if req.IsAggregator[i] { + cache.CommitteeIDs.AddAggregatorCommiteeID(req.Slots[i], req.CommitteeIds[i]) } } + + return &ptypes.Empty{}, nil } diff --git a/beacon-chain/rpc/validator/attester_test.go b/beacon-chain/rpc/validator/attester_test.go index 1b84ac76a0..fb45fefffb 100644 --- a/beacon-chain/rpc/validator/attester_test.go +++ b/beacon-chain/rpc/validator/attester_test.go @@ -17,11 +17,13 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations" mockp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing" beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing" pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bls" "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/roughtime" + "github.com/prysmaticlabs/prysm/shared/testutil" "google.golang.org/grpc/status" ) @@ -57,7 +59,7 @@ func TestProposeAttestation_OK(t *testing.T) { t.Fatal(err) } - validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount/16) + validators := make([]*ethpb.Validator, 64) for i := 0; i < len(validators); i++ { validators[i] = ðpb.Validator{ ExitEpoch: params.BeaconConfig().FarFutureEpoch, @@ -65,12 +67,11 @@ func TestProposeAttestation_OK(t *testing.T) { } } - state, err := beaconstate.InitializeFromProto(&pbp2p.BeaconState{ - Slot: params.BeaconConfig().SlotsPerEpoch + 1, - Validators: validators, - RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), - }) - if err != nil { + state := testutil.NewBeaconState() + if err := state.SetSlot(params.BeaconConfig().SlotsPerEpoch + 1); err != nil { + t.Fatal(err) + } + if err := state.SetValidators(validators); err != nil { t.Fatal(err) } @@ -82,7 +83,7 @@ func TestProposeAttestation_OK(t *testing.T) { } sk := bls.RandKey() - sig := sk.Sign([]byte("dummy_test_data"), 0 /*domain*/) + sig := sk.Sign([]byte("dummy_test_data")) req := 
ðpb.Attestation{ Signature: sig.Marshal(), Data: ðpb.AttestationData{ @@ -116,7 +117,7 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) { }, } wanted := "Incorrect attestation signature" - if _, err := attesterServer.ProposeAttestation(context.Background(), req); !strings.Contains(err.Error(), wanted) { + if _, err := attesterServer.ProposeAttestation(context.Background(), req); err == nil || !strings.Contains(err.Error(), wanted) { t.Errorf("Did not get wanted error") } } @@ -148,35 +149,44 @@ func TestGetAttestationData_OK(t *testing.T) { t.Fatalf("Could not get signing root for target block: %v", err) } slot := 3*params.BeaconConfig().SlotsPerEpoch + 1 - beaconState := &pbp2p.BeaconState{ - Slot: slot, - BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot), - CurrentJustifiedCheckpoint: ðpb.Checkpoint{ - Epoch: 2, - Root: justifiedRoot[:], - }, + beaconState := testutil.NewBeaconState() + if err := beaconState.SetSlot(slot); err != nil { + t.Fatal(err) } - beaconState.BlockRoots[1] = blockRoot[:] - beaconState.BlockRoots[1*params.BeaconConfig().SlotsPerEpoch] = targetRoot[:] - beaconState.BlockRoots[2*params.BeaconConfig().SlotsPerEpoch] = justifiedRoot[:] - s, err := beaconstate.InitializeFromProto(beaconState) - if err != nil { + if err := beaconState.SetCurrentJustifiedCheckpoint(ðpb.Checkpoint{ + Epoch: 2, + Root: justifiedRoot[:], + }); err != nil { + t.Fatal(err) + } + + blockRoots := beaconState.BlockRoots() + blockRoots[1] = blockRoot[:] + blockRoots[1*params.BeaconConfig().SlotsPerEpoch] = targetRoot[:] + blockRoots[2*params.BeaconConfig().SlotsPerEpoch] = justifiedRoot[:] + if err := beaconState.SetBlockRoots(blockRoots); err != nil { t.Fatal(err) } chainService := &mock.ChainService{ Genesis: time.Now(), } attesterServer := &Server{ - BeaconDB: db, - P2P: &mockp2p.MockBroadcaster{}, - SyncChecker: &mockSync.Sync{IsSyncing: false}, - AttestationCache: cache.NewAttestationCache(), - HeadFetcher: &mock.ChainService{State: s, Root: blockRoot[:]}, - FinalizationFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint}, - GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*int64(slot*params.BeaconConfig().SecondsPerSlot)) * time.Second)}, - StateNotifier: chainService.StateNotifier(), + BeaconDB: db, + P2P: &mockp2p.MockBroadcaster{}, + SyncChecker: &mockSync.Sync{IsSyncing: false}, + AttestationCache: cache.NewAttestationCache(), + HeadFetcher: &mock.ChainService{ + State: beaconState, Root: blockRoot[:], + }, + FinalizationFetcher: &mock.ChainService{ + CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint(), + }, + GenesisTimeFetcher: &mock.ChainService{ + Genesis: time.Now().Add(time.Duration(-1*int64(slot*params.BeaconConfig().SecondsPerSlot)) * time.Second), + }, + StateNotifier: chainService.StateNotifier(), } - if err := db.SaveState(ctx, s, blockRoot); err != nil { + if err := db.SaveState(ctx, beaconState, blockRoot); err != nil { t.Fatal(err) } if err := db.SaveBlock(ctx, ðpb.SignedBeaconBlock{Block: block}); err != nil { @@ -218,7 +228,7 @@ func TestGetAttestationData_SyncNotReady(t *testing.T) { SyncChecker: &mockSync.Sync{IsSyncing: true}, } _, err := as.GetAttestationData(context.Background(), ðpb.AttestationDataRequest{}) - if strings.Contains(err.Error(), "syncing to latest head") { + if err == nil || strings.Contains(err.Error(), "syncing to latest head") { t.Error("Did not get wanted error") } } @@ -265,35 +275,40 @@ func 
TestAttestationDataAtSlot_HandlesFarAwayJustifiedEpoch(t *testing.T) { t.Fatalf("Could not hash justified block: %v", err) } slot := uint64(10000) - beaconState := &pbp2p.BeaconState{ - Slot: slot, - BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot), - CurrentJustifiedCheckpoint: ðpb.Checkpoint{ - Epoch: helpers.SlotToEpoch(1500), - Root: justifiedBlockRoot[:], - }, + + beaconState := testutil.NewBeaconState() + if err := beaconState.SetSlot(slot); err != nil { + t.Fatal(err) } - beaconState.BlockRoots[1] = blockRoot[:] - beaconState.BlockRoots[1*params.BeaconConfig().SlotsPerEpoch] = epochBoundaryRoot[:] - beaconState.BlockRoots[2*params.BeaconConfig().SlotsPerEpoch] = justifiedBlockRoot[:] - s, err := beaconstate.InitializeFromProto(beaconState) - if err != nil { + if err := beaconState.SetCurrentJustifiedCheckpoint(ðpb.Checkpoint{ + Epoch: helpers.SlotToEpoch(1500), + Root: justifiedBlockRoot[:], + }); err != nil { + t.Fatal(err) + } + blockRoots := beaconState.BlockRoots() + blockRoots[1] = blockRoot[:] + blockRoots[1*params.BeaconConfig().SlotsPerEpoch] = epochBoundaryRoot[:] + blockRoots[2*params.BeaconConfig().SlotsPerEpoch] = justifiedBlockRoot[:] + if err := beaconState.SetBlockRoots(blockRoots); err != nil { t.Fatal(err) } chainService := &mock.ChainService{ Genesis: time.Now(), } attesterServer := &Server{ - BeaconDB: db, - P2P: &mockp2p.MockBroadcaster{}, - AttestationCache: cache.NewAttestationCache(), - HeadFetcher: &mock.ChainService{State: s, Root: blockRoot[:]}, - FinalizationFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint}, - SyncChecker: &mockSync.Sync{IsSyncing: false}, - GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*int64(slot*params.BeaconConfig().SecondsPerSlot)) * time.Second)}, - StateNotifier: chainService.StateNotifier(), + BeaconDB: db, + P2P: &mockp2p.MockBroadcaster{}, + AttestationCache: cache.NewAttestationCache(), + HeadFetcher: &mock.ChainService{State: beaconState, Root: blockRoot[:]}, + FinalizationFetcher: &mock.ChainService{ + CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint(), + }, + SyncChecker: &mockSync.Sync{IsSyncing: false}, + GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*int64(slot*params.BeaconConfig().SecondsPerSlot)) * time.Second)}, + StateNotifier: chainService.StateNotifier(), } - if err := db.SaveState(ctx, s, blockRoot); err != nil { + if err := db.SaveState(ctx, beaconState, blockRoot); err != nil { t.Fatal(err) } if err := db.SaveBlock(ctx, ðpb.SignedBeaconBlock{Block: block}); err != nil { @@ -391,56 +406,6 @@ func TestAttestationDataSlot_handlesInProgressRequest(t *testing.T) { wg.Wait() } -func TestWaitForSlotOneThird_WaitedCorrectly(t *testing.T) { - currentTime := uint64(roughtime.Now().Unix()) - numOfSlots := uint64(4) - genesisTime := currentTime - (numOfSlots * params.BeaconConfig().SecondsPerSlot) - - chainService := &mock.ChainService{ - Genesis: time.Now(), - } - server := &Server{ - AttestationCache: cache.NewAttestationCache(), - HeadFetcher: &mock.ChainService{}, - SyncChecker: &mockSync.Sync{IsSyncing: false}, - GenesisTimeFetcher: &mock.ChainService{Genesis: time.Unix(int64(genesisTime), 0)}, - StateNotifier: chainService.StateNotifier(), - } - - timeToSleep := params.BeaconConfig().SecondsPerSlot / 3 - oneThird := currentTime + timeToSleep - server.waitToOneThird(context.Background(), numOfSlots) - - currentTime = uint64(roughtime.Now().Unix()) - if currentTime != 
oneThird { - t.Errorf("Wanted %d time for slot one third but got %d", oneThird, currentTime) - } -} - -func TestWaitForSlotOneThird_HeadIsHereNoWait(t *testing.T) { - currentTime := uint64(roughtime.Now().Unix()) - numOfSlots := uint64(4) - genesisTime := currentTime - (numOfSlots * params.BeaconConfig().SecondsPerSlot) - - s := &pbp2p.BeaconState{Slot: 2} - state, err := beaconstate.InitializeFromProto(s) - if err != nil { - t.Fatal(err) - } - server := &Server{ - AttestationCache: cache.NewAttestationCache(), - HeadFetcher: &mock.ChainService{State: state}, - SyncChecker: &mockSync.Sync{IsSyncing: false}, - GenesisTimeFetcher: &mock.ChainService{Genesis: time.Unix(int64(genesisTime), 0)}, - } - - server.waitToOneThird(context.Background(), s.Slot) - - if currentTime != uint64(time.Now().Unix()) { - t.Errorf("Wanted %d time for slot one third but got %d", uint64(time.Now().Unix()), currentTime) - } -} - func TestServer_GetAttestationData_InvalidRequestSlot(t *testing.T) { ctx := context.Background() @@ -498,32 +463,40 @@ func TestServer_GetAttestationData_HeadStateSlotGreaterThanRequestSlot(t *testin if err != nil { t.Fatalf("Could not get signing root for target block: %v", err) } - beaconState := &pbp2p.BeaconState{ - Slot: slot, - GenesisTime: uint64(time.Now().Unix() - int64((slot * params.BeaconConfig().SecondsPerSlot))), - BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot), - LatestBlockHeader: ðpb.BeaconBlockHeader{ - ParentRoot: blockRoot2[:], - }, - CurrentJustifiedCheckpoint: ðpb.Checkpoint{ - Epoch: 2, - Root: justifiedRoot[:], - }, - } - beaconState.BlockRoots[1] = blockRoot[:] - beaconState.BlockRoots[1*params.BeaconConfig().SlotsPerEpoch] = targetRoot[:] - beaconState.BlockRoots[2*params.BeaconConfig().SlotsPerEpoch] = justifiedRoot[:] - s, err := beaconstate.InitializeFromProto(beaconState) - if err != nil { + + beaconState := testutil.NewBeaconState() + if err := beaconState.SetSlot(slot); err != nil { t.Fatal(err) } - beaconState2 := s.CloneInnerState() - beaconState2.Slot-- - s2, err := beaconstate.InitializeFromProto(beaconState2) - if err != nil { + if err := beaconState.SetGenesisTime(uint64(time.Now().Unix() - int64(slot*params.BeaconConfig().SecondsPerSlot))); err != nil { t.Fatal(err) } - if err := db.SaveState(ctx, s2, blockRoot2); err != nil { + if err := beaconState.SetLatestBlockHeader(ðpb.BeaconBlockHeader{ + ParentRoot: blockRoot2[:], + StateRoot: make([]byte, 32), + BodyRoot: make([]byte, 32), + }); err != nil { + t.Fatal(err) + } + if err := beaconState.SetCurrentJustifiedCheckpoint(ðpb.Checkpoint{ + Epoch: 2, + Root: justifiedRoot[:], + }); err != nil { + t.Fatal(err) + } + blockRoots := beaconState.BlockRoots() + blockRoots[1] = blockRoot[:] + blockRoots[1*params.BeaconConfig().SlotsPerEpoch] = targetRoot[:] + blockRoots[2*params.BeaconConfig().SlotsPerEpoch] = justifiedRoot[:] + if err := beaconState.SetBlockRoots(blockRoots); err != nil { + t.Fatal(err) + } + + beaconState2 := beaconState.Copy() + if err := beaconState2.SetSlot(beaconState2.Slot() - 1); err != nil { + t.Fatal(err) + } + if err := db.SaveState(ctx, beaconState2, blockRoot2); err != nil { t.Fatal(err) } chainService := &mock.ChainService{ @@ -534,12 +507,13 @@ func TestServer_GetAttestationData_HeadStateSlotGreaterThanRequestSlot(t *testin P2P: &mockp2p.MockBroadcaster{}, SyncChecker: &mockSync.Sync{IsSyncing: false}, AttestationCache: cache.NewAttestationCache(), - HeadFetcher: &mock.ChainService{State: s, Root: blockRoot[:]}, - FinalizationFetcher: 
&mock.ChainService{CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint}, + HeadFetcher: &mock.ChainService{State: beaconState, Root: blockRoot[:]}, + FinalizationFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint()}, GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*int64(slot*params.BeaconConfig().SecondsPerSlot)) * time.Second)}, StateNotifier: chainService.StateNotifier(), + StateGen: stategen.New(db, cache.NewStateSummaryCache()), } - if err := db.SaveState(ctx, s, blockRoot); err != nil { + if err := db.SaveState(ctx, beaconState, blockRoot); err != nil { t.Fatal(err) } if err := db.SaveBlock(ctx, ðpb.SignedBeaconBlock{Block: block}); err != nil { @@ -603,35 +577,42 @@ func TestGetAttestationData_SucceedsInFirstEpoch(t *testing.T) { if err != nil { t.Fatalf("Could not get signing root for target block: %v", err) } - beaconState := &pbp2p.BeaconState{ - Slot: slot, - BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot), - CurrentJustifiedCheckpoint: ðpb.Checkpoint{ - Epoch: 0, - Root: justifiedRoot[:], - }, + + beaconState := testutil.NewBeaconState() + if err := beaconState.SetSlot(slot); err != nil { + t.Fatal(err) } - beaconState.BlockRoots[1] = blockRoot[:] - beaconState.BlockRoots[1*params.BeaconConfig().SlotsPerEpoch] = targetRoot[:] - beaconState.BlockRoots[2*params.BeaconConfig().SlotsPerEpoch] = justifiedRoot[:] - s, err := beaconstate.InitializeFromProto(beaconState) - if err != nil { + if err := beaconState.SetCurrentJustifiedCheckpoint(ðpb.Checkpoint{ + Epoch: 0, + Root: justifiedRoot[:], + }); err != nil { + t.Fatal(err) + } + blockRoots := beaconState.BlockRoots() + blockRoots[1] = blockRoot[:] + blockRoots[1*params.BeaconConfig().SlotsPerEpoch] = targetRoot[:] + blockRoots[2*params.BeaconConfig().SlotsPerEpoch] = justifiedRoot[:] + if err := beaconState.SetBlockRoots(blockRoots); err != nil { t.Fatal(err) } chainService := &mock.ChainService{ Genesis: time.Now(), } attesterServer := &Server{ - BeaconDB: db, - P2P: &mockp2p.MockBroadcaster{}, - SyncChecker: &mockSync.Sync{IsSyncing: false}, - AttestationCache: cache.NewAttestationCache(), - HeadFetcher: &mock.ChainService{State: s, Root: blockRoot[:]}, - FinalizationFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint}, - GenesisTimeFetcher: &mock.ChainService{Genesis: roughtime.Now().Add(time.Duration(-1*int64(slot*params.BeaconConfig().SecondsPerSlot)) * time.Second)}, - StateNotifier: chainService.StateNotifier(), + BeaconDB: db, + P2P: &mockp2p.MockBroadcaster{}, + SyncChecker: &mockSync.Sync{IsSyncing: false}, + AttestationCache: cache.NewAttestationCache(), + HeadFetcher: &mock.ChainService{ + State: beaconState, Root: blockRoot[:], + }, + FinalizationFetcher: &mock.ChainService{ + CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint(), + }, + GenesisTimeFetcher: &mock.ChainService{Genesis: roughtime.Now().Add(time.Duration(-1*int64(slot*params.BeaconConfig().SecondsPerSlot)) * time.Second)}, + StateNotifier: chainService.StateNotifier(), } - if err := db.SaveState(ctx, s, blockRoot); err != nil { + if err := db.SaveState(ctx, beaconState, blockRoot); err != nil { t.Fatal(err) } if err := db.SaveBlock(ctx, ðpb.SignedBeaconBlock{Block: block}); err != nil { diff --git a/beacon-chain/rpc/validator/exit.go b/beacon-chain/rpc/validator/exit.go index 71da619cdd..58544252e2 100644 --- a/beacon-chain/rpc/validator/exit.go +++ 
b/beacon-chain/rpc/validator/exit.go @@ -28,7 +28,7 @@ func (vs *Server) ProposeExit(ctx context.Context, req *ethpb.SignedVoluntaryExi if err != nil { return nil, status.Error(codes.InvalidArgument, "validator index exceeds validator set length") } - if err := blocks.VerifyExit(val, helpers.StartSlot(req.Exit.Epoch), s.Fork(), req); err != nil { + if err := blocks.VerifyExit(val, helpers.StartSlot(req.Exit.Epoch), s.Fork(), req, s.GenesisValidatorRoot()); err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } diff --git a/beacon-chain/rpc/validator/exit_test.go b/beacon-chain/rpc/validator/exit_test.go index cacae319b2..25cdc45beb 100644 --- a/beacon-chain/rpc/validator/exit_test.go +++ b/beacon-chain/rpc/validator/exit_test.go @@ -11,6 +11,7 @@ import ( blk "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks" "github.com/prysmaticlabs/prysm/beacon-chain/core/feed" opfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/state" dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" "github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits" @@ -24,7 +25,8 @@ func TestSub(t *testing.T) { db := dbutil.SetupDB(t) defer dbutil.TeardownDB(t, db) ctx := context.Background() - deposits, _, err := testutil.DeterministicDepositsAndKeys(params.BeaconConfig().MinGenesisActiveValidatorCount) + testutil.ResetCache() + deposits, keys, err := testutil.DeterministicDepositsAndKeys(params.BeaconConfig().MinGenesisActiveValidatorCount) if err != nil { t.Fatal(err) } @@ -68,8 +70,16 @@ func TestSub(t *testing.T) { Epoch: epoch, ValidatorIndex: validatorIndex, }, - Signature: []byte{0xb3, 0xe1, 0x9d, 0xc6, 0x7c, 0x78, 0x6c, 0xcf, 0x33, 0x1d, 0xb9, 0x6f, 0x59, 0x64, 0x44, 0xe1, 0x29, 0xd0, 0x87, 0x03, 0x26, 0x6e, 0x49, 0x1c, 0x05, 0xae, 0x16, 0x7b, 0x04, 0x0f, 0x3f, 0xf8, 0x82, 0x77, 0x60, 0xfc, 0xcf, 0x2f, 0x59, 0xc7, 0x40, 0x0b, 0x2c, 0xa9, 0x23, 0x8a, 0x6c, 0x8d, 0x01, 0x21, 0x5e, 0xa8, 0xac, 0x36, 0x70, 0x31, 0xb0, 0xe1, 0xa8, 0xb8, 0x8f, 0x93, 0x8c, 0x1c, 0xa2, 0x86, 0xe7, 0x22, 0x00, 0x6a, 0x7d, 0x36, 0xc0, 0x2b, 0x86, 0x2c, 0xf5, 0xf9, 0x10, 0xb9, 0xf2, 0xbd, 0x5e, 0xa6, 0x5f, 0x12, 0x86, 0x43, 0x20, 0x4d, 0xa2, 0x9d, 0x8b, 0xe6, 0x6f, 0x09}, } + domain, err := helpers.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainVoluntaryExit, beaconState.GenesisValidatorRoot()) + if err != nil { + t.Fatal(err) + } + sigRoot, err := helpers.ComputeSigningRoot(req.Exit, domain) + if err != nil { + t.Fatalf("Could not compute signing root: %v", err) + } + req.Signature = keys[0].Sign(sigRoot[:]).Marshal() _, err = server.ProposeExit(context.Background(), req) if err != nil { diff --git a/beacon-chain/rpc/validator/proposer.go b/beacon-chain/rpc/validator/proposer.go index 69c7dddccb..0cb8e275ab 100644 --- a/beacon-chain/rpc/validator/proposer.go +++ b/beacon-chain/rpc/validator/proposer.go @@ -84,10 +84,20 @@ func (vs *Server) GetBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb return nil, status.Errorf(codes.Internal, "Could not get head state %v", err) } + // Calculate new proposer index. 
+ if err := head.SetSlot(req.Slot); err != nil { + return nil, status.Errorf(codes.Internal, "Could not set slot to calculate proposer index: %v", err) + } + idx, err := helpers.BeaconProposerIndex(head) + if err != nil { + return nil, status.Errorf(codes.Internal, "Could not calculate proposer index %v", err) + } + blk := ðpb.BeaconBlock{ - Slot: req.Slot, - ParentRoot: parentRoot[:], - StateRoot: stateRoot, + Slot: req.Slot, + ParentRoot: parentRoot[:], + StateRoot: stateRoot, + ProposerIndex: idx, Body: ðpb.BeaconBlockBody{ Eth1Data: eth1Data, Deposits: deposits, @@ -154,7 +164,7 @@ func (vs *Server) eth1Data(ctx context.Context, slot uint64) (*ethpb.Eth1Data, e eth1DataNotification = false eth1VotingPeriodStartTime, _ := vs.Eth1InfoFetcher.Eth2GenesisPowchainInfo() - eth1VotingPeriodStartTime += (slot - (slot % params.BeaconConfig().SlotsPerEth1VotingPeriod)) * params.BeaconConfig().SecondsPerSlot + eth1VotingPeriodStartTime += (slot - (slot % (params.BeaconConfig().EpochsPerEth1VotingPeriod * params.BeaconConfig().SlotsPerEpoch))) * params.BeaconConfig().SecondsPerSlot // Look up most recent block up to timestamp blockNumber, err := vs.Eth1BlockFetcher.BlockNumberByTimestamp(ctx, eth1VotingPeriodStartTime) @@ -183,7 +193,7 @@ func (vs *Server) mockETH1DataVote(ctx context.Context, slot uint64) (*ethpb.Eth // DepositCount = state.eth1_deposit_index, // BlockHash = hash(hash(current_epoch + slot_in_voting_period)), // ) - slotInVotingPeriod := slot % params.BeaconConfig().SlotsPerEth1VotingPeriod + slotInVotingPeriod := slot % (params.BeaconConfig().EpochsPerEth1VotingPeriod * params.BeaconConfig().SlotsPerEpoch) headState, err := vs.HeadFetcher.HeadState(ctx) if err != nil { return nil, err @@ -226,7 +236,7 @@ func (vs *Server) randomETH1DataVote(ctx context.Context) (*ethpb.Eth1Data, erro func (vs *Server) computeStateRoot(ctx context.Context, block *ethpb.SignedBeaconBlock) ([]byte, error) { var beaconState *stateTrie.BeaconState var err error - if featureconfig.Get().NewStateMgmt { + if !featureconfig.Get().DisableNewStateMgmt { beaconState, err = vs.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(block.Block.ParentRoot)) if err != nil { return nil, errors.Wrap(err, "could not retrieve beacon state") diff --git a/beacon-chain/rpc/validator/proposer_test.go b/beacon-chain/rpc/validator/proposer_test.go index 50c0d4204f..fe2856b011 100644 --- a/beacon-chain/rpc/validator/proposer_test.go +++ b/beacon-chain/rpc/validator/proposer_test.go @@ -13,6 +13,7 @@ import ( "github.com/prysmaticlabs/go-bitfield" "github.com/prysmaticlabs/go-ssz" mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" + "github.com/prysmaticlabs/prysm/beacon-chain/cache" "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache" b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" @@ -22,6 +23,7 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits" mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing" beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing" dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db" pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" @@ -44,7 +46,10 @@ func TestGetBlock_OK(t *testing.T) { defer dbutil.TeardownDB(t, db) ctx := context.Background() - beaconState, privKeys := 
testutil.DeterministicGenesisState(t, params.BeaconConfig().MinGenesisActiveValidatorCount) + testutil.ResetCache() + params.OverrideBeaconConfig(params.MainnetConfig()) + defer params.OverrideBeaconConfig(params.MinimalSpecConfig()) + beaconState, privKeys := testutil.DeterministicGenesisState(t, 64) stateRoot, err := beaconState.HashTreeRoot(ctx) if err != nil { @@ -79,6 +84,7 @@ func TestGetBlock_OK(t *testing.T) { AttPool: attestations.NewPool(), SlashingsPool: slashings.NewPool(), ExitPool: voluntaryexits.NewPool(), + StateGen: stategen.New(db, cache.NewStateSummaryCache()), } randaoReveal, err := testutil.RandaoReveal(beaconState, 0, privKeys) @@ -149,6 +155,8 @@ func TestGetBlock_AddsUnaggregatedAtts(t *testing.T) { defer dbutil.TeardownDB(t, db) ctx := context.Background() + params.OverrideBeaconConfig(params.MainnetConfig()) + defer params.OverrideBeaconConfig(params.MinimalSpecConfig()) beaconState, privKeys := testutil.DeterministicGenesisState(t, params.BeaconConfig().MinGenesisActiveValidatorCount) stateRoot, err := beaconState.HashTreeRoot(ctx) @@ -184,6 +192,7 @@ func TestGetBlock_AddsUnaggregatedAtts(t *testing.T) { SlashingsPool: slashings.NewPool(), AttPool: attestations.NewPool(), ExitPool: voluntaryexits.NewPool(), + StateGen: stategen.New(db, cache.NewStateSummaryCache()), } // Generate a bunch of random attestations at slot. These would be considered double votes, but @@ -205,7 +214,7 @@ func TestGetBlock_AddsUnaggregatedAtts(t *testing.T) { // Generate some more random attestations with a larger spread so that we can capture at least // one unaggregated attestation. - if atts, err := testutil.GenerateAttestations(beaconState, privKeys, 8, 1, true); err != nil { + if atts, err := testutil.GenerateAttestations(beaconState, privKeys, 300, 1, true); err != nil { t.Fatal(err) } else { found := false @@ -270,13 +279,15 @@ func TestProposeBlock_OK(t *testing.T) { db := dbutil.SetupDB(t) defer dbutil.TeardownDB(t, db) ctx := context.Background() + params.OverrideBeaconConfig(params.MainnetConfig()) + defer params.OverrideBeaconConfig(params.MinimalSpecConfig()) genesis := b.NewGenesisBlock([]byte{}) if err := db.SaveBlock(context.Background(), genesis); err != nil { t.Fatalf("Could not save genesis block: %v", err) } - numDeposits := params.BeaconConfig().MinGenesisActiveValidatorCount + numDeposits := uint64(64) beaconState, _ := testutil.DeterministicGenesisState(t, numDeposits) genesisRoot, err := ssz.HashTreeRoot(genesis.Block) @@ -317,6 +328,8 @@ func TestComputeStateRoot_OK(t *testing.T) { defer dbutil.TeardownDB(t, db) ctx := context.Background() + params.OverrideBeaconConfig(params.MainnetConfig()) + defer params.OverrideBeaconConfig(params.MinimalSpecConfig()) beaconState, privKeys := testutil.DeterministicGenesisState(t, 100) stateRoot, err := beaconState.HashTreeRoot(ctx) @@ -345,12 +358,14 @@ func TestComputeStateRoot_OK(t *testing.T) { ChainStartFetcher: &mockPOW.POWChain{}, Eth1InfoFetcher: &mockPOW.POWChain{}, Eth1BlockFetcher: &mockPOW.POWChain{}, + StateGen: stategen.New(db, cache.NewStateSummaryCache()), } req := ðpb.SignedBeaconBlock{ Block: ðpb.BeaconBlock{ - ParentRoot: parentRoot[:], - Slot: 1, + ProposerIndex: 41, + ParentRoot: parentRoot[:], + Slot: 1, Body: ðpb.BeaconBlockBody{ RandaoReveal: nil, ProposerSlashings: nil, @@ -374,16 +389,16 @@ func TestComputeStateRoot_OK(t *testing.T) { t.Fatal(err) } req.Block.Body.RandaoReveal = randaoReveal[:] - signingRoot, err := ssz.HashTreeRoot(req.Block) - if err != nil { - t.Error(err) - } currentEpoch := 
helpers.CurrentEpoch(beaconState) - domain, err := helpers.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer) + domain, err := helpers.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } - blockSig := privKeys[proposerIdx].Sign(signingRoot[:], domain).Marshal() + signingRoot, err := helpers.ComputeSigningRoot(req.Block, domain) + if err != nil { + t.Error(err) + } + blockSig := privKeys[proposerIdx].Sign(signingRoot[:]).Marshal() req.Signature = blockSig[:] _, err = proposerServer.computeStateRoot(context.Background(), req) @@ -414,7 +429,8 @@ func TestPendingDeposits_Eth1DataVoteOK(t *testing.T) { BlockHash: blockHash, DepositCount: 3, } - for i := 0; i <= int(params.BeaconConfig().SlotsPerEth1VotingPeriod/2); i++ { + period := params.BeaconConfig().EpochsPerEth1VotingPeriod * params.BeaconConfig().SlotsPerEpoch + for i := 0; i <= int(period/2); i++ { votes = append(votes, vote) } @@ -644,7 +660,8 @@ func TestPendingDeposits_FollowsCorrectEth1Block(t *testing.T) { BlockHash: []byte("0x1"), DepositCount: 7, } - for i := 0; i <= int(params.BeaconConfig().SlotsPerEth1VotingPeriod/2); i++ { + period := params.BeaconConfig().EpochsPerEth1VotingPeriod * params.BeaconConfig().SlotsPerEpoch + for i := 0; i <= int(period/2); i++ { votes = append(votes, vote) } @@ -1114,7 +1131,7 @@ func TestEth1Data_EmptyVotesFetchBlockHashFailure(t *testing.T) { HeadFetcher: &mock.ChainService{State: beaconState}, } want := "could not fetch ETH1_FOLLOW_DISTANCE ancestor" - if _, err := proposerServer.eth1Data(context.Background(), beaconState.Slot()+1); !strings.Contains(err.Error(), want) { + if _, err := proposerServer.eth1Data(context.Background(), beaconState.Slot()+1); err == nil || !strings.Contains(err.Error(), want) { t.Errorf("Expected error %v, received %v", want, err) } } @@ -1233,10 +1250,8 @@ func TestEth1Data_MockEnabled(t *testing.T) { // BlockHash = hash(hash(current_epoch + slot_in_voting_period)), // ) ctx := context.Background() - headState, err := beaconstate.InitializeFromProto(&pbp2p.BeaconState{ - Eth1DepositIndex: 64, - }) - if err != nil { + headState := testutil.NewBeaconState() + if err := headState.SetEth1DepositIndex(64); err != nil { t.Fatal(err) } ps := &Server{ @@ -1256,7 +1271,8 @@ func TestEth1Data_MockEnabled(t *testing.T) { if err != nil { t.Fatal(err) } - wantedSlot := 100 % params.BeaconConfig().SlotsPerEth1VotingPeriod + period := params.BeaconConfig().EpochsPerEth1VotingPeriod * params.BeaconConfig().SlotsPerEpoch + wantedSlot := 100 % period currentEpoch := helpers.SlotToEpoch(100) enc, err := ssz.Marshal(currentEpoch + wantedSlot) if err != nil { @@ -1279,13 +1295,18 @@ func TestFilterAttestation_OK(t *testing.T) { defer dbutil.TeardownDB(t, db) ctx := context.Background() + params.OverrideBeaconConfig(params.MainnetConfig()) + defer params.OverrideBeaconConfig(params.MinimalSpecConfig()) genesis := b.NewGenesisBlock([]byte{}) if err := db.SaveBlock(context.Background(), genesis); err != nil { t.Fatalf("Could not save genesis block: %v", err) } - numDeposits := params.BeaconConfig().MinGenesisActiveValidatorCount + numDeposits := uint64(64) state, privKeys := testutil.DeterministicGenesisState(t, numDeposits) + if err := state.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]); err != nil { + t.Fatal(err) + } genesisRoot, err := ssz.HashTreeRoot(genesis.Block) if err != nil { @@ -1320,7 +1341,7 @@ func TestFilterAttestation_OK(t 
*testing.T) { } for i := 0; i < len(atts); i++ { - aggBits := bitfield.NewBitlist(4) + aggBits := bitfield.NewBitlist(2) aggBits.SetBitAt(0, true) atts[i] = ðpb.Attestation{Data: ðpb.AttestationData{ CommitteeIndex: uint64(i), @@ -1336,7 +1357,7 @@ func TestFilterAttestation_OK(t *testing.T) { if err != nil { t.Error(err) } - domain, err := helpers.Domain(state.Fork(), 0, params.BeaconConfig().DomainBeaconAttester) + domain, err := helpers.Domain(state.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, params.BeaconConfig().ZeroHash[:]) if err != nil { t.Fatal(err) } @@ -1345,11 +1366,11 @@ func TestFilterAttestation_OK(t *testing.T) { atts[i].Signature = zeroSig[:] for i, indice := range attestingIndices { - hashTreeRoot, err := ssz.HashTreeRoot(atts[i].Data) + hashTreeRoot, err := helpers.ComputeSigningRoot(atts[i].Data, domain) if err != nil { t.Fatal(err) } - sig := privKeys[indice].Sign(hashTreeRoot[:], domain) + sig := privKeys[indice].Sign(hashTreeRoot[:]) sigs[i] = sig } atts[i].Signature = bls.AggregateSignatures(sigs).Marshal()[:] @@ -1565,7 +1586,7 @@ func TestDeleteAttsInPool_Aggregated(t *testing.T) { AttPool: attestations.NewPool(), } - sig := bls.RandKey().Sign([]byte("foo"), 0).Marshal() + sig := bls.RandKey().Sign([]byte("foo")).Marshal() aggregatedAtts := []*ethpb.Attestation{{AggregationBits: bitfield.Bitlist{0b10101}, Signature: sig}, {AggregationBits: bitfield.Bitlist{0b11010}, Signature: sig}} unaggregatedAtts := []*ethpb.Attestation{{AggregationBits: bitfield.Bitlist{0b1001}, Signature: sig}, {AggregationBits: bitfield.Bitlist{0b0001}, Signature: sig}} diff --git a/beacon-chain/rpc/validator/server.go b/beacon-chain/rpc/validator/server.go index 2ed7b40aeb..648662c615 100644 --- a/beacon-chain/rpc/validator/server.go +++ b/beacon-chain/rpc/validator/server.go @@ -129,7 +129,11 @@ func (vs *Server) ValidatorIndex(ctx context.Context, req *ethpb.ValidatorIndexR // DomainData fetches the current domain version information from the beacon state. func (vs *Server) DomainData(ctx context.Context, request *ethpb.DomainRequest) (*ethpb.DomainResponse, error) { fork := vs.ForkFetcher.CurrentFork() - dv, err := helpers.Domain(fork, request.Epoch, bytesutil.ToBytes4(request.Domain)) + s, err := vs.HeadFetcher.HeadState(ctx) + if err != nil { + return nil, err + } + dv, err := helpers.Domain(fork, request.Epoch, bytesutil.ToBytes4(request.Domain), s.GenesisValidatorRoot()) if err != nil { return nil, err } @@ -191,3 +195,8 @@ func (vs *Server) WaitForChainStart(req *ptypes.Empty, stream ethpb.BeaconNodeVa } } } + +// WaitForSynced is to be implemented. 
+func (vs *Server) WaitForSynced(_ *ptypes.Empty, stream ethpb.BeaconNodeValidator_WaitForSyncedServer) error { + return status.Error(codes.Unimplemented, "not implemented") +} diff --git a/beacon-chain/rpc/validator/server_test.go b/beacon-chain/rpc/validator/server_test.go index 5def734aea..8277d00b98 100644 --- a/beacon-chain/rpc/validator/server_test.go +++ b/beacon-chain/rpc/validator/server_test.go @@ -16,6 +16,7 @@ import ( blk "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks" "github.com/prysmaticlabs/prysm/beacon-chain/core/feed" statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing" internal "github.com/prysmaticlabs/prysm/beacon-chain/rpc/testing" @@ -39,19 +40,14 @@ func TestValidatorIndex_OK(t *testing.T) { db := dbutil.SetupDB(t) defer dbutil.TeardownDB(t, db) ctx := context.Background() - st, err := stateTrie.InitializeFromProtoUnsafe(&pbp2p.BeaconState{}) - if err != nil { - t.Fatal(err) - } + st := testutil.NewBeaconState() if err := db.SaveState(ctx, st.Copy(), [32]byte{}); err != nil { t.Fatal(err) } pubKey := pubKey(1) - err = st.SetValidators([]*ethpb.Validator{ - ðpb.Validator{PublicKey: pubKey}, - }) + err := st.SetValidators([]*ethpb.Validator{{PublicKey: pubKey}}) if err != nil { t.Fatal(err) } @@ -114,7 +110,7 @@ func TestWaitForActivation_ContextClosed(t *testing.T) { exitRoutine := make(chan bool) go func(tt *testing.T) { want := "context canceled" - if err := vs.WaitForActivation(req, mockChainStream); !strings.Contains(err.Error(), want) { + if err := vs.WaitForActivation(req, mockChainStream); err == nil || !strings.Contains(err.Error(), want) { tt.Errorf("Could not call RPC method: %v", err) } <-exitRoutine @@ -156,12 +152,15 @@ func TestWaitForActivation_ValidatorOriginallyExists(t *testing.T) { PublicKey: pubKey1, WithdrawalCredentials: []byte("hey"), } - signingRoot, err := ssz.HashTreeRoot(depData) + domain, err := helpers.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil) + if err != nil { + t.Fatal(err) + } + signingRoot, err := helpers.ComputeSigningRoot(depData, domain) if err != nil { t.Error(err) } - domain := bls.ComputeDomain(params.BeaconConfig().DomainDeposit) - depData.Signature = priv1.Sign(signingRoot[:], domain).Marshal()[:] + depData.Signature = priv1.Sign(signingRoot[:]).Marshal()[:] deposit := ðpb.Deposit{ Data: depData, @@ -240,7 +239,7 @@ func TestWaitForChainStart_ContextClosed(t *testing.T) { defer ctrl.Finish() mockStream := mockRPC.NewMockBeaconNodeValidator_WaitForChainStartServer(ctrl) go func(tt *testing.T) { - if err := Server.WaitForChainStart(&ptypes.Empty{}, mockStream); !strings.Contains(err.Error(), "Context canceled") { + if err := Server.WaitForChainStart(&ptypes.Empty{}, mockStream); err == nil || !strings.Contains(err.Error(), "Context canceled") { tt.Errorf("Could not call RPC method: %v", err) } <-exitRoutine @@ -254,8 +253,8 @@ func TestWaitForChainStart_AlreadyStarted(t *testing.T) { defer dbutil.TeardownDB(t, db) ctx := context.Background() headBlockRoot := [32]byte{0x01, 0x02} - trie, err := stateTrie.InitializeFromProtoUnsafe(&pbp2p.BeaconState{Slot: 3}) - if err != nil { + trie := testutil.NewBeaconState() + if err := trie.SetSlot(3); err != nil { t.Fatal(err) } if err := db.SaveState(ctx, trie, headBlockRoot); err != nil { diff --git a/beacon-chain/rpc/validator/status.go 
b/beacon-chain/rpc/validator/status.go index 19b54dbcea..bff8e7237f 100644 --- a/beacon-chain/rpc/validator/status.go +++ b/beacon-chain/rpc/validator/status.go @@ -197,8 +197,8 @@ func (vs *Server) depositBlockSlot(ctx context.Context, eth1BlockNumBigInt *big. } followTime := time.Duration(params.BeaconConfig().Eth1FollowDistance*params.BeaconConfig().GoerliBlockTime) * time.Second eth1UnixTime := time.Unix(int64(blockTimeStamp), 0).Add(followTime) - - votingPeriod := time.Duration(params.BeaconConfig().SlotsPerEth1VotingPeriod*params.BeaconConfig().SecondsPerSlot) * time.Second + period := params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().EpochsPerEth1VotingPeriod + votingPeriod := time.Duration(period*params.BeaconConfig().SecondsPerSlot) * time.Second timeToInclusion := eth1UnixTime.Add(votingPeriod) eth2Genesis := time.Unix(int64(beaconState.GenesisTime()), 0) diff --git a/beacon-chain/rpc/validator/status_test.go b/beacon-chain/rpc/validator/status_test.go index 5a25ecad0d..ddb199bb4b 100644 --- a/beacon-chain/rpc/validator/status_test.go +++ b/beacon-chain/rpc/validator/status_test.go @@ -88,18 +88,18 @@ func TestValidatorStatus_Pending(t *testing.T) { t.Fatalf("Could not get signing root %v", err) } // Pending active because activation epoch is still defaulted at far future slot. - state, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Validators: []*ethpb.Validator{ - { - ActivationEpoch: params.BeaconConfig().FarFutureEpoch, - ExitEpoch: params.BeaconConfig().FarFutureEpoch, - WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch, - PublicKey: pubKey, - }, + state := testutil.NewBeaconState() + if err := state.SetSlot(5000); err != nil { + t.Fatal(err) + } + if err := state.SetValidators([]*ethpb.Validator{ + { + ActivationEpoch: params.BeaconConfig().FarFutureEpoch, + ExitEpoch: params.BeaconConfig().FarFutureEpoch, + WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch, + PublicKey: pubKey, }, - Slot: 5000, - }) - if err != nil { + }); err != nil { t.Fatal(err) } if err := db.SaveState(ctx, state, genesisRoot); err != nil { @@ -398,7 +398,9 @@ func TestValidatorStatus_Exited(t *testing.T) { if err != nil { t.Fatalf("Could not get signing root %v", err) } - numDeposits := params.BeaconConfig().MinGenesisActiveValidatorCount + params.OverrideBeaconConfig(params.MainnetConfig()) + defer params.OverrideBeaconConfig(params.MinimalSpecConfig()) + numDeposits := uint64(64) beaconState, _ := testutil.DeterministicGenesisState(t, numDeposits) if err := db.SaveState(ctx, beaconState, genesisRoot); err != nil { t.Fatal(err) @@ -406,14 +408,14 @@ func TestValidatorStatus_Exited(t *testing.T) { if err := db.SaveHeadBlockRoot(ctx, genesisRoot); err != nil { t.Fatalf("Could not save genesis state: %v", err) } - state, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{ - Slot: slot, - Validators: []*ethpb.Validator{{ - PublicKey: pubKey, - WithdrawableEpoch: epoch + 1}, - }, - }) - if err != nil { + state := testutil.NewBeaconState() + if err := state.SetSlot(slot); err != nil { + t.Fatal(err) + } + if err := state.SetValidators([]*ethpb.Validator{{ + PublicKey: pubKey, + WithdrawableEpoch: epoch + 1}, + }); err != nil { t.Fatal(err) } depData := ðpb.Deposit_Data{ @@ -592,48 +594,49 @@ func TestValidatorStatus_CorrectActivationQueue(t *testing.T) { } currentSlot := uint64(5000) // Pending active because activation epoch is still defaulted at far future slot. 
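// Illustrative sketch (not from this changeset): the signing flow the hunks above migrate
// to. helpers.Domain now takes the genesis validators root as a fourth argument, objects
// are signed over helpers.ComputeSigningRoot(object, domain), and a BLS secret key signs
// only the message bytes. The package name, function name and parameter types below are
// assumptions for illustration; the helper calls mirror the ones used in this diff.
package example

import (
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/shared/bls"
	"github.com/prysmaticlabs/prysm/shared/params"
)

// signAttestationData signs attestation data under the attester domain at epoch 0.
func signAttestationData(st *stateTrie.BeaconState, data *ethpb.AttestationData, key *bls.SecretKey) ([]byte, error) {
	// The domain is now bound to a specific chain via the genesis validators root.
	domain, err := helpers.Domain(st.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
	if err != nil {
		return nil, err
	}
	// Sign over the signing root (object root combined with the domain), not the bare hash tree root.
	root, err := helpers.ComputeSigningRoot(data, domain)
	if err != nil {
		return nil, err
	}
	// Sign no longer takes a separate domain argument.
	return key.Sign(root[:]).Marshal(), nil
}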
- state, err := stateTrie.InitializeFromProtoUnsafe(&pbp2p.BeaconState{ - Validators: []*ethpb.Validator{ - { - ActivationEpoch: 0, - PublicKey: pubKey(0), - ExitEpoch: params.BeaconConfig().FarFutureEpoch, - WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch, - }, - { - ActivationEpoch: 0, - PublicKey: pubKey(1), - ExitEpoch: params.BeaconConfig().FarFutureEpoch, - WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch, - }, - { - ActivationEpoch: 0, - PublicKey: pubKey(2), - ExitEpoch: params.BeaconConfig().FarFutureEpoch, - WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch, - }, - { - ActivationEpoch: 0, - PublicKey: pubKey(3), - ExitEpoch: params.BeaconConfig().FarFutureEpoch, - WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch, - }, - { - ActivationEpoch: currentSlot/params.BeaconConfig().SlotsPerEpoch + 1, - PublicKey: pbKey, - ExitEpoch: params.BeaconConfig().FarFutureEpoch, - WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch, - }, - { - ActivationEpoch: currentSlot/params.BeaconConfig().SlotsPerEpoch + 4, - PublicKey: pubKey(5), - ExitEpoch: params.BeaconConfig().FarFutureEpoch, - WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch, - }, + validators := []*ethpb.Validator{ + { + ActivationEpoch: 0, + PublicKey: pubKey(0), + ExitEpoch: params.BeaconConfig().FarFutureEpoch, + WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch, }, - Slot: currentSlot, - }) - if err != nil { + { + ActivationEpoch: 0, + PublicKey: pubKey(1), + ExitEpoch: params.BeaconConfig().FarFutureEpoch, + WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch, + }, + { + ActivationEpoch: 0, + PublicKey: pubKey(2), + ExitEpoch: params.BeaconConfig().FarFutureEpoch, + WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch, + }, + { + ActivationEpoch: 0, + PublicKey: pubKey(3), + ExitEpoch: params.BeaconConfig().FarFutureEpoch, + WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch, + }, + { + ActivationEpoch: currentSlot/params.BeaconConfig().SlotsPerEpoch + 1, + PublicKey: pbKey, + ExitEpoch: params.BeaconConfig().FarFutureEpoch, + WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch, + }, + { + ActivationEpoch: currentSlot/params.BeaconConfig().SlotsPerEpoch + 4, + PublicKey: pubKey(5), + ExitEpoch: params.BeaconConfig().FarFutureEpoch, + WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch, + }, + } + state := testutil.NewBeaconState() + if err := state.SetValidators(validators); err != nil { + t.Fatal(err) + } + if err := state.SetSlot(currentSlot); err != nil { t.Fatal(err) } if err := db.SaveState(ctx, state, genesisRoot); err != nil { diff --git a/beacon-chain/state/cloners.go b/beacon-chain/state/cloners.go index 57b349f2d1..88957290cc 100644 --- a/beacon-chain/state/cloners.go +++ b/beacon-chain/state/cloners.go @@ -86,10 +86,11 @@ func CopyBeaconBlock(block *ethpb.BeaconBlock) *ethpb.BeaconBlock { return nil } return ðpb.BeaconBlock{ - Slot: block.Slot, - ParentRoot: bytesutil.SafeCopyBytes(block.ParentRoot), - StateRoot: bytesutil.SafeCopyBytes(block.StateRoot), - Body: CopyBeaconBlockBody(block.Body), + Slot: block.Slot, + ProposerIndex: block.ProposerIndex, + ParentRoot: bytesutil.SafeCopyBytes(block.ParentRoot), + StateRoot: bytesutil.SafeCopyBytes(block.StateRoot), + Body: CopyBeaconBlockBody(block.Body), } } @@ -128,9 +129,8 @@ func CopyProposerSlashing(slashing *ethpb.ProposerSlashing) *ethpb.ProposerSlash return nil } return ðpb.ProposerSlashing{ - ProposerIndex: slashing.ProposerIndex, - Header_1: CopySignedBeaconBlockHeader(slashing.Header_1), - 
Header_2: CopySignedBeaconBlockHeader(slashing.Header_2), + Header_1: CopySignedBeaconBlockHeader(slashing.Header_1), + Header_2: CopySignedBeaconBlockHeader(slashing.Header_2), } } @@ -154,10 +154,11 @@ func CopyBeaconBlockHeader(header *ethpb.BeaconBlockHeader) *ethpb.BeaconBlockHe stateRoot := bytesutil.SafeCopyBytes(header.StateRoot) bodyRoot := bytesutil.SafeCopyBytes(header.BodyRoot) return ðpb.BeaconBlockHeader{ - Slot: header.Slot, - ParentRoot: parentRoot[:], - StateRoot: stateRoot[:], - BodyRoot: bodyRoot[:], + Slot: header.Slot, + ProposerIndex: header.ProposerIndex, + ParentRoot: parentRoot[:], + StateRoot: stateRoot[:], + BodyRoot: bodyRoot[:], } } diff --git a/beacon-chain/state/field_trie_test.go b/beacon-chain/state/field_trie_test.go index 2fd82bbd10..b0f3abbd01 100644 --- a/beacon-chain/state/field_trie_test.go +++ b/beacon-chain/state/field_trie_test.go @@ -31,7 +31,7 @@ func TestFieldTrie_NewTrie(t *testing.T) { func TestFieldTrie_RecomputeTrie(t *testing.T) { newState, _ := testutil.DeterministicGenesisState(t, 32) // 10 represents the enum value of validators - trie, err := state.NewFieldTrie(10, newState.Validators(), params.BeaconConfig().ValidatorRegistryLimit) + trie, err := state.NewFieldTrie(11, newState.Validators(), params.BeaconConfig().ValidatorRegistryLimit) if err != nil { t.Fatal(err) } @@ -75,7 +75,7 @@ func TestFieldTrie_RecomputeTrie(t *testing.T) { func TestFieldTrie_CopyTrieImmutable(t *testing.T) { newState, _ := testutil.DeterministicGenesisState(t, 32) // 12 represents the enum value of randao mixes. - trie, err := state.NewFieldTrie(12, newState.RandaoMixes(), params.BeaconConfig().EpochsPerHistoricalVector) + trie, err := state.NewFieldTrie(13, newState.RandaoMixes(), params.BeaconConfig().EpochsPerHistoricalVector) if err != nil { t.Fatal(err) } diff --git a/beacon-chain/state/getters.go b/beacon-chain/state/getters.go index cbf215292c..361d4e2f75 100644 --- a/beacon-chain/state/getters.go +++ b/beacon-chain/state/getters.go @@ -10,6 +10,7 @@ import ( pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/memorypool" + "github.com/prysmaticlabs/prysm/shared/params" ) // EffectiveBalance returns the effective balance of the @@ -94,6 +95,7 @@ func (b *BeaconState) CloneInnerState() *pbp2p.BeaconState { } return &pbp2p.BeaconState{ GenesisTime: b.GenesisTime(), + GenesisValidatorsRoot: b.GenesisValidatorRoot(), Slot: b.Slot(), Fork: b.Fork(), LatestBlockHeader: b.LatestBlockHeader(), @@ -130,6 +132,21 @@ func (b *BeaconState) GenesisTime() uint64 { return b.state.GenesisTime } +// GenesisValidatorRoot of the beacon state. +func (b *BeaconState) GenesisValidatorRoot() []byte { + if !b.HasInnerState() { + return nil + } + + if b.state.GenesisValidatorsRoot == nil { + return params.BeaconConfig().ZeroHash[:] + } + + root := make([]byte, 32) + copy(root, b.state.GenesisValidatorsRoot) + return root +} + // GenesisUnixTime returns the genesis time as time.Time. 
func (b *BeaconState) GenesisUnixTime() time.Time { if !b.HasInnerState() { @@ -185,7 +202,8 @@ func (b *BeaconState) LatestBlockHeader() *ethpb.BeaconBlockHeader { defer b.lock.RUnlock() hdr := ðpb.BeaconBlockHeader{ - Slot: b.state.LatestBlockHeader.Slot, + Slot: b.state.LatestBlockHeader.Slot, + ProposerIndex: b.state.LatestBlockHeader.ProposerIndex, } parentRoot := make([]byte, len(b.state.LatestBlockHeader.ParentRoot)) diff --git a/beacon-chain/state/setters.go b/beacon-chain/state/setters.go index 6b95e901ad..113b98e1ff 100644 --- a/beacon-chain/state/setters.go +++ b/beacon-chain/state/setters.go @@ -22,6 +22,16 @@ func (b *BeaconState) SetGenesisTime(val uint64) error { return nil } +// SetGenesisValidatorRoot for the beacon state. +func (b *BeaconState) SetGenesisValidatorRoot(val []byte) error { + b.lock.Lock() + defer b.lock.Unlock() + + b.state.GenesisValidatorsRoot = val + b.markFieldAsDirty(genesisValidatorRoot) + return nil +} + // SetSlot for the beacon state. func (b *BeaconState) SetSlot(val uint64) error { if !b.HasInnerState() { diff --git a/beacon-chain/state/state_trie.go b/beacon-chain/state/state_trie.go index aaa2c89b24..1e3329579c 100644 --- a/beacon-chain/state/state_trie.go +++ b/beacon-chain/state/state_trie.go @@ -30,15 +30,15 @@ func InitializeFromProto(st *pbp2p.BeaconState) (*BeaconState, error) { func InitializeFromProtoUnsafe(st *pbp2p.BeaconState) (*BeaconState, error) { b := &BeaconState{ state: st, - dirtyFields: make(map[fieldIndex]interface{}, 20), - dirtyIndices: make(map[fieldIndex][]uint64, 20), - stateFieldLeaves: make(map[fieldIndex]*FieldTrie, 20), + dirtyFields: make(map[fieldIndex]interface{}, 21), + dirtyIndices: make(map[fieldIndex][]uint64, 21), + stateFieldLeaves: make(map[fieldIndex]*FieldTrie, 21), sharedFieldReferences: make(map[fieldIndex]*reference, 10), - rebuildTrie: make(map[fieldIndex]bool, 20), + rebuildTrie: make(map[fieldIndex]bool, 21), valIdxMap: coreutils.ValidatorIndexMap(st.Validators), } - for i := 0; i < 20; i++ { + for i := 0; i < 21; i++ { b.dirtyFields[fieldIndex(i)] = true b.rebuildTrie[fieldIndex(i)] = true b.dirtyIndices[fieldIndex(i)] = []uint64{} @@ -101,12 +101,13 @@ func (b *BeaconState) Copy() *BeaconState { PreviousJustifiedCheckpoint: b.PreviousJustifiedCheckpoint(), CurrentJustifiedCheckpoint: b.CurrentJustifiedCheckpoint(), FinalizedCheckpoint: b.FinalizedCheckpoint(), + GenesisValidatorsRoot: b.GenesisValidatorRoot(), }, - dirtyFields: make(map[fieldIndex]interface{}, 20), - dirtyIndices: make(map[fieldIndex][]uint64, 20), - rebuildTrie: make(map[fieldIndex]bool, 20), + dirtyFields: make(map[fieldIndex]interface{}, 21), + dirtyIndices: make(map[fieldIndex][]uint64, 21), + rebuildTrie: make(map[fieldIndex]bool, 21), sharedFieldReferences: make(map[fieldIndex]*reference, 10), - stateFieldLeaves: make(map[fieldIndex]*FieldTrie, 20), + stateFieldLeaves: make(map[fieldIndex]*FieldTrie, 21), // Copy on write validator index map. 
valIdxMap: b.valIdxMap, @@ -192,7 +193,7 @@ func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) { } layers := merkleize(fieldRoots) b.merkleLayers = layers - b.dirtyFields = make(map[fieldIndex]interface{}) + b.dirtyFields = make(map[fieldIndex]interface{}, 21) } for field := range b.dirtyFields { @@ -243,6 +244,8 @@ func (b *BeaconState) rootSelector(field fieldIndex) ([32]byte, error) { switch field { case genesisTime: return stateutil.Uint64Root(b.state.GenesisTime), nil + case genesisValidatorRoot: + return bytesutil.ToBytes32(b.state.GenesisValidatorsRoot), nil case slot: return stateutil.Uint64Root(b.state.Slot), nil case eth1DepositIndex: @@ -286,7 +289,7 @@ func (b *BeaconState) rootSelector(field fieldIndex) ([32]byte, error) { case eth1DataVotes: if featureconfig.Get().EnableFieldTrie { if b.rebuildTrie[field] { - err := b.resetFieldTrie(field, b.state.Eth1DataVotes, params.BeaconConfig().SlotsPerEth1VotingPeriod) + err := b.resetFieldTrie(field, b.state.Eth1DataVotes, params.BeaconConfig().EpochsPerEth1VotingPeriod*params.BeaconConfig().SlotsPerEpoch) if err != nil { return [32]byte{}, err } diff --git a/beacon-chain/state/stategen/BUILD.bazel b/beacon-chain/state/stategen/BUILD.bazel index 8cfd418311..d53513a44c 100644 --- a/beacon-chain/state/stategen/BUILD.bazel +++ b/beacon-chain/state/stategen/BUILD.bazel @@ -51,7 +51,6 @@ go_test( "//beacon-chain/core/blocks:go_default_library", "//beacon-chain/db:go_default_library", "//beacon-chain/db/testing:go_default_library", - "//beacon-chain/state:go_default_library", "//proto/beacon/p2p/v1:go_default_library", "//shared/bytesutil:go_default_library", "//shared/params:go_default_library", diff --git a/beacon-chain/state/stategen/getter.go b/beacon-chain/state/stategen/getter.go index e48df555a0..afd20089e8 100644 --- a/beacon-chain/state/stategen/getter.go +++ b/beacon-chain/state/stategen/getter.go @@ -50,8 +50,8 @@ func (s *State) StateBySlot(ctx context.Context, slot uint64) (*state.BeaconStat return s.loadHotStateBySlot(ctx, slot) } -// StateSummaryExists returns true if the corresponding state of the input block either -// exists in the DB or it can be generated by state gen. +// StateSummaryExists returns true if the corresponding state summary of the input block root either +// exists in the DB or in the cache. 
func (s *State) StateSummaryExists(ctx context.Context, blockRoot [32]byte) bool { return s.beaconDB.HasStateSummary(ctx, blockRoot) || s.stateSummaryCache.Has(blockRoot) } diff --git a/beacon-chain/state/stategen/replay_test.go b/beacon-chain/state/stategen/replay_test.go index fed19de17e..88f61fae28 100644 --- a/beacon-chain/state/stategen/replay_test.go +++ b/beacon-chain/state/stategen/replay_test.go @@ -12,7 +12,6 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks" "github.com/prysmaticlabs/prysm/beacon-chain/db" testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" - stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/params" @@ -509,10 +508,11 @@ func TestLastSavedState_CanGet(t *testing.T) { if err != nil { t.Fatal(err) } - st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{Slot: s.splitInfo.slot + 10}) - if err != nil { + st := testutil.NewBeaconState() + if err := st.SetSlot(s.splitInfo.slot + 10); err != nil { t.Fatal(err) } + if err := s.beaconDB.SaveState(ctx, st, b2Root); err != nil { t.Fatal(err) } @@ -600,10 +600,8 @@ func tree1(db db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.BeaconBlock if err != nil { return nil, nil, err } - st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{}) - if err != nil { - return nil, nil, err - } + st := testutil.NewBeaconState() + for _, b := range []*ethpb.BeaconBlock{b0, b1, b2, b3, b4, b5, b6, b7, b8} { if err := db.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: b}); err != nil { return nil, nil, err @@ -657,10 +655,7 @@ func tree2(db db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.BeaconBlock if err != nil { return nil, nil, err } - st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{}) - if err != nil { - return nil, nil, err - } + st := testutil.NewBeaconState() for _, b := range []*ethpb.BeaconBlock{b0, b1, b21, b22, b23, b24, b3} { if err := db.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: b}); err != nil { @@ -710,10 +705,7 @@ func tree3(db db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.BeaconBlock if err != nil { return nil, nil, err } - st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{}) - if err != nil { - return nil, nil, err - } + st := testutil.NewBeaconState() for _, b := range []*ethpb.BeaconBlock{b0, b1, b21, b22, b23, b24} { if err := db.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: b}); err != nil { @@ -759,10 +751,7 @@ func tree4(db db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.BeaconBlock if err != nil { return nil, nil, err } - st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{}) - if err != nil { - return nil, nil, err - } + st := testutil.NewBeaconState() for _, b := range []*ethpb.BeaconBlock{b0, b21, b22, b23, b24} { if err := db.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: b}); err != nil { diff --git a/beacon-chain/state/stategen/service.go b/beacon-chain/state/stategen/service.go index 5852826911..315be021b7 100644 --- a/beacon-chain/state/stategen/service.go +++ b/beacon-chain/state/stategen/service.go @@ -5,7 +5,6 @@ import ( "sync" "github.com/prysmaticlabs/prysm/beacon-chain/cache" - "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/db" "github.com/prysmaticlabs/prysm/beacon-chain/state" 
"github.com/prysmaticlabs/prysm/shared/params" @@ -61,12 +60,6 @@ func (s *State) Resume(ctx context.Context) (*state.BeaconState, error) { s.splitInfo = &splitSlotAndRoot{slot: lastArchivedState.Slot(), root: lastArchivedRoot} - // In case the finalized state slot was skipped. - slot := lastArchivedState.Slot() - if !helpers.IsEpochStart(slot) { - slot = helpers.StartSlot(helpers.SlotToEpoch(slot) + 1) - } - return lastArchivedState, nil } diff --git a/beacon-chain/state/stateutil/BUILD.bazel b/beacon-chain/state/stateutil/BUILD.bazel index 772a6863e8..544fa1e572 100644 --- a/beacon-chain/state/stateutil/BUILD.bazel +++ b/beacon-chain/state/stateutil/BUILD.bazel @@ -17,6 +17,7 @@ go_library( visibility = [ "//beacon-chain:__subpackages__", "//proto/testing:__subpackages__", + "//shared/testutil:__subpackages__", ], deps = [ "//proto/beacon/p2p/v1:go_default_library", diff --git a/beacon-chain/state/stateutil/blocks.go b/beacon-chain/state/stateutil/blocks.go index 194f360086..b51cde1e12 100644 --- a/beacon-chain/state/stateutil/blocks.go +++ b/beacon-chain/state/stateutil/blocks.go @@ -17,18 +17,22 @@ import ( // a BeaconBlockHeader struct according to the eth2 // Simple Serialize specification. func BlockHeaderRoot(header *ethpb.BeaconBlockHeader) ([32]byte, error) { - fieldRoots := make([][]byte, 4) + fieldRoots := make([][]byte, 5) if header != nil { headerSlotBuf := make([]byte, 8) binary.LittleEndian.PutUint64(headerSlotBuf, header.Slot) headerSlotRoot := bytesutil.ToBytes32(headerSlotBuf) fieldRoots[0] = headerSlotRoot[:] + proposerIdxBuf := make([]byte, 8) + binary.LittleEndian.PutUint64(proposerIdxBuf, header.ProposerIndex) + proposerIndexRoot := bytesutil.ToBytes32(proposerIdxBuf) + fieldRoots[1] = proposerIndexRoot[:] parentRoot := bytesutil.ToBytes32(header.ParentRoot) - fieldRoots[1] = parentRoot[:] + fieldRoots[2] = parentRoot[:] stateRoot := bytesutil.ToBytes32(header.StateRoot) - fieldRoots[2] = stateRoot[:] + fieldRoots[3] = stateRoot[:] bodyRoot := bytesutil.ToBytes32(header.BodyRoot) - fieldRoots[3] = bodyRoot[:] + fieldRoots[4] = bodyRoot[:] } return bitwiseMerkleize(hashutil.CustomSHA256Hasher(), fieldRoots, uint64(len(fieldRoots)), uint64(len(fieldRoots))) } @@ -38,21 +42,25 @@ func BlockRoot(blk *ethpb.BeaconBlock) ([32]byte, error) { if !featureconfig.Get().EnableBlockHTR { return ssz.HashTreeRoot(blk) } - fieldRoots := make([][32]byte, 4) + fieldRoots := make([][32]byte, 5) if blk != nil { headerSlotBuf := make([]byte, 8) binary.LittleEndian.PutUint64(headerSlotBuf, blk.Slot) headerSlotRoot := bytesutil.ToBytes32(headerSlotBuf) fieldRoots[0] = headerSlotRoot + proposerIdxBuf := make([]byte, 8) + binary.LittleEndian.PutUint64(proposerIdxBuf, blk.ProposerIndex) + proposerIndexRoot := bytesutil.ToBytes32(proposerIdxBuf) + fieldRoots[1] = proposerIndexRoot parentRoot := bytesutil.ToBytes32(blk.ParentRoot) - fieldRoots[1] = parentRoot + fieldRoots[2] = parentRoot stateRoot := bytesutil.ToBytes32(blk.StateRoot) - fieldRoots[2] = stateRoot + fieldRoots[3] = stateRoot bodyRoot, err := BlockBodyRoot(blk.Body) if err != nil { return [32]byte{}, err } - fieldRoots[3] = bodyRoot + fieldRoots[4] = bodyRoot } return bitwiseMerkleizeArrays(hashutil.CustomSHA256Hasher(), fieldRoots, uint64(len(fieldRoots)), uint64(len(fieldRoots))) } @@ -182,7 +190,12 @@ func Eth1DataVotesRoot(eth1DataVotes []*ethpb.Eth1Data) ([32]byte, error) { if err != nil { return [32]byte{}, errors.Wrap(err, "could not chunk eth1 votes roots") } - eth1VotesRootsRoot, err := bitwiseMerkleize(hasher, 
eth1Chunks, uint64(len(eth1Chunks)), params.BeaconConfig().SlotsPerEth1VotingPeriod) + eth1VotesRootsRoot, err := bitwiseMerkleize( + hasher, + eth1Chunks, + uint64(len(eth1Chunks)), + params.BeaconConfig().EpochsPerEth1VotingPeriod*params.BeaconConfig().SlotsPerEpoch, + ) if err != nil { return [32]byte{}, errors.Wrap(err, "could not compute eth1data votes merkleization") } diff --git a/beacon-chain/state/stateutil/blocks_test.go b/beacon-chain/state/stateutil/blocks_test.go index 01341f5409..72bacf4671 100644 --- a/beacon-chain/state/stateutil/blocks_test.go +++ b/beacon-chain/state/stateutil/blocks_test.go @@ -5,10 +5,14 @@ import ( "github.com/prysmaticlabs/go-ssz" "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" + "github.com/prysmaticlabs/prysm/shared/featureconfig" "github.com/prysmaticlabs/prysm/shared/testutil" ) func TestBlockRoot(t *testing.T) { + f := featureconfig.Get() + f.EnableBlockHTR = true + featureconfig.Init(f) genState, keys := testutil.DeterministicGenesisState(t, 100) blk, err := testutil.GenerateFullBlock(genState, keys, testutil.DefaultBlockGenConfig(), 10) if err != nil { diff --git a/beacon-chain/state/stateutil/state_root.go b/beacon-chain/state/stateutil/state_root.go index 7a3ab01488..db41f8817e 100644 --- a/beacon-chain/state/stateutil/state_root.go +++ b/beacon-chain/state/stateutil/state_root.go @@ -82,138 +82,143 @@ func (h *stateRootHasher) computeFieldRoots(state *pb.BeaconState) ([][]byte, er return nil, errors.New("nil state") } hasher := hashutil.CustomSHA256Hasher() - // There are 20 fields in the beacon state. - fieldRoots := make([][]byte, 20) + // There are 21 fields in the beacon state. + fieldRoots := make([][]byte, 21) // Genesis time root. genesisRoot := Uint64Root(state.GenesisTime) fieldRoots[0] = genesisRoot[:] + // Genesis validator root. + r := [32]byte{} + copy(r[:], state.GenesisValidatorsRoot) + fieldRoots[1] = r[:] + // Slot root. slotRoot := Uint64Root(state.Slot) - fieldRoots[1] = slotRoot[:] + fieldRoots[2] = slotRoot[:] // Fork data structure root. forkHashTreeRoot, err := ForkRoot(state.Fork) if err != nil { return nil, errors.Wrap(err, "could not compute fork merkleization") } - fieldRoots[2] = forkHashTreeRoot[:] + fieldRoots[3] = forkHashTreeRoot[:] // BeaconBlockHeader data structure root. headerHashTreeRoot, err := BlockHeaderRoot(state.LatestBlockHeader) if err != nil { return nil, errors.Wrap(err, "could not compute block header merkleization") } - fieldRoots[3] = headerHashTreeRoot[:] + fieldRoots[4] = headerHashTreeRoot[:] // BlockRoots array root. blockRootsRoot, err := h.arraysRoot(state.BlockRoots, params.BeaconConfig().SlotsPerHistoricalRoot, "BlockRoots") if err != nil { return nil, errors.Wrap(err, "could not compute block roots merkleization") } - fieldRoots[4] = blockRootsRoot[:] + fieldRoots[5] = blockRootsRoot[:] // StateRoots array root. stateRootsRoot, err := h.arraysRoot(state.StateRoots, params.BeaconConfig().SlotsPerHistoricalRoot, "StateRoots") if err != nil { return nil, errors.Wrap(err, "could not compute state roots merkleization") } - fieldRoots[5] = stateRootsRoot[:] + fieldRoots[6] = stateRootsRoot[:] // HistoricalRoots slice root. historicalRootsRt, err := HistoricalRootsRoot(state.HistoricalRoots) if err != nil { return nil, errors.Wrap(err, "could not compute historical roots merkleization") } - fieldRoots[6] = historicalRootsRt[:] + fieldRoots[7] = historicalRootsRt[:] // Eth1Data data structure root. 
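// Illustrative sketch (not from this changeset): the Eth1 voting period is now derived
// from EpochsPerEth1VotingPeriod * SlotsPerEpoch instead of the removed
// SlotsPerEth1VotingPeriod constant; the number of slots is unchanged. Assuming mainnet
// v0.11.1 values (32 epochs, 32 slots per epoch, 12-second slots), that is 1024 slots,
// or about 3.4 hours. The function name below is a placeholder.
package example

import (
	"time"

	"github.com/prysmaticlabs/prysm/shared/params"
)

// eth1VotingPeriodDuration recomputes the voting period from the renamed config fields.
func eth1VotingPeriodDuration() time.Duration {
	cfg := params.BeaconConfig()
	slots := cfg.EpochsPerEth1VotingPeriod * cfg.SlotsPerEpoch // e.g. 32 * 32 = 1024 on mainnet
	return time.Duration(slots*cfg.SecondsPerSlot) * time.Second
}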
eth1HashTreeRoot, err := Eth1Root(hasher, state.Eth1Data) if err != nil { return nil, errors.Wrap(err, "could not compute eth1data merkleization") } - fieldRoots[7] = eth1HashTreeRoot[:] + fieldRoots[8] = eth1HashTreeRoot[:] // Eth1DataVotes slice root. eth1VotesRoot, err := Eth1DataVotesRoot(state.Eth1DataVotes) if err != nil { return nil, errors.Wrap(err, "could not compute eth1data votes merkleization") } - fieldRoots[8] = eth1VotesRoot[:] + fieldRoots[9] = eth1VotesRoot[:] // Eth1DepositIndex root. eth1DepositIndexBuf := make([]byte, 8) binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex) eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf) - fieldRoots[9] = eth1DepositBuf[:] + fieldRoots[10] = eth1DepositBuf[:] // Validators slice root. validatorsRoot, err := h.validatorRegistryRoot(state.Validators) if err != nil { return nil, errors.Wrap(err, "could not compute validator registry merkleization") } - fieldRoots[10] = validatorsRoot[:] + fieldRoots[11] = validatorsRoot[:] // Balances slice root. balancesRoot, err := ValidatorBalancesRoot(state.Balances) if err != nil { return nil, errors.Wrap(err, "could not compute validator balances merkleization") } - fieldRoots[11] = balancesRoot[:] + fieldRoots[12] = balancesRoot[:] // RandaoMixes array root. randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, params.BeaconConfig().EpochsPerHistoricalVector, "RandaoMixes") if err != nil { return nil, errors.Wrap(err, "could not compute randao roots merkleization") } - fieldRoots[12] = randaoRootsRoot[:] + fieldRoots[13] = randaoRootsRoot[:] // Slashings array root. slashingsRootsRoot, err := SlashingsRoot(state.Slashings) if err != nil { return nil, errors.Wrap(err, "could not compute slashings merkleization") } - fieldRoots[13] = slashingsRootsRoot[:] + fieldRoots[14] = slashingsRootsRoot[:] // PreviousEpochAttestations slice root. prevAttsRoot, err := h.epochAttestationsRoot(state.PreviousEpochAttestations) if err != nil { return nil, errors.Wrap(err, "could not compute previous epoch attestations merkleization") } - fieldRoots[14] = prevAttsRoot[:] + fieldRoots[15] = prevAttsRoot[:] // CurrentEpochAttestations slice root. currAttsRoot, err := h.epochAttestationsRoot(state.CurrentEpochAttestations) if err != nil { return nil, errors.Wrap(err, "could not compute previous epoch attestations merkleization") } - fieldRoots[15] = currAttsRoot[:] + fieldRoots[16] = currAttsRoot[:] // JustificationBits root. justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits) - fieldRoots[16] = justifiedBitsRoot[:] + fieldRoots[17] = justifiedBitsRoot[:] // PreviousJustifiedCheckpoint data structure root. prevCheckRoot, err := CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint) if err != nil { return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization") } - fieldRoots[17] = prevCheckRoot[:] + fieldRoots[18] = prevCheckRoot[:] // CurrentJustifiedCheckpoint data structure root. currJustRoot, err := CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint) if err != nil { return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization") } - fieldRoots[18] = currJustRoot[:] + fieldRoots[19] = currJustRoot[:] // FinalizedCheckpoint data structure root. 
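// Illustrative sketch (not from this changeset): GenesisValidatorsRoot becomes the second
// top-level beacon-state field, so the state now has 21 fields and every root index after
// genesisTime shifts up by one, which is why the hard-coded 20s become 21 and the
// fieldRoots slots above are renumbered. The first entries of the enum (mirroring the
// beacon-chain/state/types.go change in this diff) look like this:
type fieldIndex int

const (
	genesisTime fieldIndex = iota // 0
	genesisValidatorRoot          // 1, new in this change
	slot                          // 2
	fork                          // 3
	latestBlockHeader             // 4
	// ...remaining fields keep their previous relative order, each shifted up by one.
)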
finalRoot, err := CheckpointRoot(hasher, state.FinalizedCheckpoint) if err != nil { return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization") } - fieldRoots[19] = finalRoot[:] + fieldRoots[20] = finalRoot[:] return fieldRoots, nil } diff --git a/beacon-chain/state/stateutil/state_root_test.go b/beacon-chain/state/stateutil/state_root_test.go index 272fb760a2..6eaf0f538c 100644 --- a/beacon-chain/state/stateutil/state_root_test.go +++ b/beacon-chain/state/stateutil/state_root_test.go @@ -23,7 +23,7 @@ func init() { } func TestState_FieldCount(t *testing.T) { - count := 20 + count := 21 typ := reflect.TypeOf(pb.BeaconState{}) numFields := 0 for i := 0; i < typ.NumField(); i++ { diff --git a/beacon-chain/state/types.go b/beacon-chain/state/types.go index 1cc1f5bd09..cf13a22275 100644 --- a/beacon-chain/state/types.go +++ b/beacon-chain/state/types.go @@ -35,6 +35,7 @@ type dataType int // of the beacon state. const ( genesisTime fieldIndex = iota + genesisValidatorRoot slot fork latestBlockHeader diff --git a/beacon-chain/sync/BUILD.bazel b/beacon-chain/sync/BUILD.bazel index bc54e2363f..ca6884522a 100644 --- a/beacon-chain/sync/BUILD.bazel +++ b/beacon-chain/sync/BUILD.bazel @@ -16,6 +16,8 @@ go_library( "rpc_beacon_blocks_by_root.go", "rpc_chunked_response.go", "rpc_goodbye.go", + "rpc_metadata.go", + "rpc_ping.go", "rpc_status.go", "service.go", "subscriber.go", @@ -51,12 +53,14 @@ go_library( "//beacon-chain/p2p:go_default_library", "//beacon-chain/p2p/encoder:go_default_library", "//beacon-chain/state:go_default_library", + "//beacon-chain/state/stategen:go_default_library", "//proto/beacon/p2p/v1:go_default_library", "//shared:go_default_library", "//shared/attestationutil:go_default_library", "//shared/bls:go_default_library", "//shared/bytesutil:go_default_library", "//shared/featureconfig:go_default_library", + "//shared/hashutil:go_default_library", "//shared/messagehandler:go_default_library", "//shared/params:go_default_library", "//shared/roughtime:go_default_library", @@ -65,6 +69,7 @@ go_library( "//shared/slotutil:go_default_library", "//shared/traceutil:go_default_library", "@com_github_gogo_protobuf//proto:go_default_library", + "@com_github_hashicorp_golang_lru//:go_default_library", "@com_github_kevinms_leakybucket_go//:go_default_library", "@com_github_libp2p_go_libp2p_core//:go_default_library", "@com_github_libp2p_go_libp2p_core//network:go_default_library", @@ -91,8 +96,11 @@ go_test( "rpc_beacon_blocks_by_range_test.go", "rpc_beacon_blocks_by_root_test.go", "rpc_goodbye_test.go", + "rpc_metadata_test.go", + "rpc_ping_test.go", "rpc_status_test.go", "rpc_test.go", + "service_test.go", "subscriber_beacon_aggregate_proof_test.go", "subscriber_beacon_blocks_test.go", "subscriber_committee_index_beacon_attestation_test.go", @@ -121,16 +129,20 @@ go_test( "//beacon-chain/p2p/peers:go_default_library", "//beacon-chain/p2p/testing:go_default_library", "//beacon-chain/state:go_default_library", + "//beacon-chain/state/stategen:go_default_library", "//beacon-chain/sync/initial-sync/testing:go_default_library", "//proto/beacon/p2p/v1:go_default_library", "//proto/testing:go_default_library", "//shared/attestationutil:go_default_library", "//shared/bls:go_default_library", "//shared/bytesutil:go_default_library", + "//shared/featureconfig:go_default_library", "//shared/params:go_default_library", "//shared/roughtime:go_default_library", "//shared/testutil:go_default_library", + "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library", 
"@com_github_gogo_protobuf//proto:go_default_library", + "@com_github_hashicorp_golang_lru//:go_default_library", "@com_github_kevinms_leakybucket_go//:go_default_library", "@com_github_libp2p_go_libp2p_core//:go_default_library", "@com_github_libp2p_go_libp2p_core//network:go_default_library", diff --git a/beacon-chain/sync/decode_pubsub.go b/beacon-chain/sync/decode_pubsub.go index bde115cc7c..922cf3091c 100644 --- a/beacon-chain/sync/decode_pubsub.go +++ b/beacon-chain/sync/decode_pubsub.go @@ -16,6 +16,7 @@ func (r *Service) decodePubsubMessage(msg *pubsub.Message) (proto.Message, error } topic := msg.TopicIDs[0] topic = strings.TrimSuffix(topic, r.p2p.Encoding().ProtocolSuffix()) + topic = r.replaceForkDigest(topic) base, ok := p2p.GossipTopicMappings[topic] if !ok { return nil, fmt.Errorf("no message mapped for topic %s", topic) @@ -26,3 +27,10 @@ func (r *Service) decodePubsubMessage(msg *pubsub.Message) (proto.Message, error } return m, nil } + +// Replaces our fork digest with the formatter. +func (r *Service) replaceForkDigest(topic string) string { + subStrings := strings.Split(topic, "/") + subStrings[2] = "%x" + return strings.Join(subStrings, "/") +} diff --git a/beacon-chain/sync/error.go b/beacon-chain/sync/error.go index 1621788660..8a6f4d8729 100644 --- a/beacon-chain/sync/error.go +++ b/beacon-chain/sync/error.go @@ -11,7 +11,7 @@ import ( const genericError = "internal service error" const rateLimitedError = "rate limited" -var errWrongForkVersion = errors.New("wrong fork version") +var errWrongForkDigestVersion = errors.New("wrong fork digest version") var errInvalidEpoch = errors.New("invalid epoch") var responseCodeSuccess = byte(0x00) diff --git a/beacon-chain/sync/initial-sync-old/BUILD.bazel b/beacon-chain/sync/initial-sync-old/BUILD.bazel index adcceb588b..575e758bcf 100644 --- a/beacon-chain/sync/initial-sync-old/BUILD.bazel +++ b/beacon-chain/sync/initial-sync-old/BUILD.bazel @@ -48,13 +48,14 @@ go_test( "//beacon-chain/db/testing:go_default_library", "//beacon-chain/p2p/peers:go_default_library", "//beacon-chain/p2p/testing:go_default_library", - "//beacon-chain/state:go_default_library", "//beacon-chain/sync:go_default_library", "//proto/beacon/p2p/v1:go_default_library", "//shared/hashutil:go_default_library", "//shared/params:go_default_library", "//shared/roughtime:go_default_library", "//shared/sliceutil:go_default_library", + "//shared/testutil:go_default_library", + "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library", "@com_github_kevinms_leakybucket_go//:go_default_library", "@com_github_libp2p_go_libp2p_core//network:go_default_library", "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", diff --git a/beacon-chain/sync/initial-sync-old/round_robin.go b/beacon-chain/sync/initial-sync-old/round_robin.go index e16bb71fe5..2bb51968b1 100644 --- a/beacon-chain/sync/initial-sync-old/round_robin.go +++ b/beacon-chain/sync/initial-sync-old/round_robin.go @@ -18,6 +18,7 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/state" "github.com/prysmaticlabs/prysm/beacon-chain/flags" + "github.com/prysmaticlabs/prysm/beacon-chain/p2p" prysmsync "github.com/prysmaticlabs/prysm/beacon-chain/sync" p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bytesutil" @@ -55,7 +56,7 @@ func (s *Service) roundRobinSync(genesis time.Time) error { highestFinalizedSlot := helpers.StartSlot(s.highestFinalizedEpoch() + 1) // Step 1 - 
Sync to end of finalized epoch. for s.chain.HeadSlot() < highestFinalizedSlot { - root, finalizedEpoch, peers := s.p2p.Peers().BestFinalized(params.BeaconConfig().MaxPeersToSync, helpers.SlotToEpoch(s.chain.HeadSlot())) + _, finalizedEpoch, peers := s.p2p.Peers().BestFinalized(params.BeaconConfig().MaxPeersToSync, helpers.SlotToEpoch(s.chain.HeadSlot())) if len(peers) == 0 { log.Warn("No peers; waiting for reconnect") time.Sleep(refreshTime) @@ -118,10 +119,9 @@ func (s *Service) roundRobinSync(genesis time.Time) error { count = 1 } req := &p2ppb.BeaconBlocksByRangeRequest{ - HeadBlockRoot: root, - StartSlot: start, - Count: count, - Step: step, + StartSlot: start, + Count: count, + Step: step, } go func(i int, pid peer.ID) { @@ -254,20 +254,19 @@ func (s *Service) roundRobinSync(genesis time.Time) error { // mitigation. We are already convinced that we are on the correct finalized chain. Any blocks // we receive there after must build on the finalized chain or be considered invalid during // fork choice resolution / block processing. - root, _, pids := s.p2p.Peers().BestFinalized(1 /* maxPeers */, s.highestFinalizedEpoch()) + _, _, pids := s.p2p.Peers().BestFinalized(1 /* maxPeers */, s.highestFinalizedEpoch()) for len(pids) == 0 { log.Info("Waiting for a suitable peer before syncing to the head of the chain") time.Sleep(refreshTime) - root, _, pids = s.p2p.Peers().BestFinalized(1 /* maxPeers */, s.highestFinalizedEpoch()) + _, _, pids = s.p2p.Peers().BestFinalized(1 /* maxPeers */, s.highestFinalizedEpoch()) } best := pids[0] for head := helpers.SlotsSince(genesis); s.chain.HeadSlot() < head; { req := &p2ppb.BeaconBlocksByRangeRequest{ - HeadBlockRoot: root, - StartSlot: s.chain.HeadSlot() + 1, - Count: mathutil.Min(helpers.SlotsSince(genesis)-s.chain.HeadSlot()+1, 256), - Step: 1, + StartSlot: s.chain.HeadSlot() + 1, + Count: mathutil.Min(helpers.SlotsSince(genesis)-s.chain.HeadSlot()+1, 256), + Step: 1, } log.WithField("req", req).WithField("peer", best.Pretty()).Debug( @@ -306,9 +305,8 @@ func (s *Service) requestBlocks(ctx context.Context, req *p2ppb.BeaconBlocksByRa "start": req.StartSlot, "count": req.Count, "step": req.Step, - "head": fmt.Sprintf("%#x", req.HeadBlockRoot), }).Debug("Requesting blocks") - stream, err := s.p2p.Send(ctx, req, pid) + stream, err := s.p2p.Send(ctx, req, p2p.RPCBlocksByRangeTopic, pid) if err != nil { return nil, errors.Wrap(err, "failed to send request to peer") } diff --git a/beacon-chain/sync/initial-sync-old/round_robin_test.go b/beacon-chain/sync/initial-sync-old/round_robin_test.go index 92d05dd546..0665ad1e9e 100644 --- a/beacon-chain/sync/initial-sync-old/round_robin_test.go +++ b/beacon-chain/sync/initial-sync-old/round_robin_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/p2p/enr" "github.com/kevinms/leakybucket-go" "github.com/libp2p/go-libp2p-core/network" eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" @@ -17,13 +18,13 @@ import ( dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" "github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers" p2pt "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing" - stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" beaconsync "github.com/prysmaticlabs/prysm/beacon-chain/sync" p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/hashutil" "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/roughtime" "github.com/prysmaticlabs/prysm/shared/sliceutil" + 
"github.com/prysmaticlabs/prysm/shared/testutil" "github.com/sirupsen/logrus" ) @@ -54,7 +55,7 @@ func TestConstants(t *testing.T) { } func TestRoundRobinSync(t *testing.T) { - + t.Skip("Test is filled with races and is part of legacy code, pending deletion") tests := []struct { name string currentSlot uint64 @@ -150,38 +151,6 @@ func TestRoundRobinSync(t *testing.T) { }, }, }, - - // TODO(3147): Handle multiple failures. - //{ - // name: "Multiple peers with multiple failures", - // currentSlot: 320, // 10 epochs - // expectedBlockSlots: makeSequence(1, 320), - // peers: []*peerData{ - // { - // blocks: makeSequence(1, 320), - // finalizedEpoch: 4, - // headSlot: 320, - // }, - // { - // blocks: makeSequence(1, 320), - // finalizedEpoch: 4, - // headSlot: 320, - // failureSlots: makeSequence(1, 320), - // }, - // { - // blocks: makeSequence(1, 320), - // finalizedEpoch: 4, - // headSlot: 320, - // failureSlots: makeSequence(1, 320), - // }, - // { - // blocks: makeSequence(1, 320), - // finalizedEpoch: 4, - // headSlot: 320, - // failureSlots: makeSequence(1, 320), - // }, - // }, - //}, { name: "Multiple peers with different finalized epoch", currentSlot: 320, // 10 epochs @@ -266,10 +235,7 @@ func TestRoundRobinSync(t *testing.T) { if err := beaconDB.SaveGenesisBlockRoot(context.Background(), gRoot); err != nil { t.Fatal(err) } - st, err := stateTrie.InitializeFromProto(&p2ppb.BeaconState{}) - if err != nil { - t.Fatal(err) - } + st := testutil.NewBeaconState() if err := beaconDB.SaveState(context.Background(), st, gRoot); err != nil { t.Fatal(err) } @@ -389,14 +355,14 @@ func connectPeers(t *testing.T, host *p2pt.TestP2P, data []*peerData, peerStatus peer.Connect(host) - peerStatus.Add(peer.PeerID(), nil, network.DirOutbound, []uint64{}) + peerStatus.Add(new(enr.Record), peer.PeerID(), nil, network.DirOutbound) peerStatus.SetConnectionState(peer.PeerID(), peers.PeerConnected) peerStatus.SetChainState(peer.PeerID(), &p2ppb.Status{ - HeadForkVersion: params.BeaconConfig().GenesisForkVersion, - FinalizedRoot: []byte(fmt.Sprintf("finalized_root %d", datum.finalizedEpoch)), - FinalizedEpoch: datum.finalizedEpoch, - HeadRoot: []byte("head_root"), - HeadSlot: datum.headSlot, + ForkDigest: params.BeaconConfig().GenesisForkVersion, + FinalizedRoot: []byte(fmt.Sprintf("finalized_root %d", datum.finalizedEpoch)), + FinalizedEpoch: datum.finalizedEpoch, + HeadRoot: []byte("head_root"), + HeadSlot: datum.headSlot, }) } } diff --git a/beacon-chain/sync/initial-sync/BUILD.bazel b/beacon-chain/sync/initial-sync/BUILD.bazel index 0a20ac78b5..991796a6dc 100644 --- a/beacon-chain/sync/initial-sync/BUILD.bazel +++ b/beacon-chain/sync/initial-sync/BUILD.bazel @@ -68,6 +68,7 @@ go_test( "//shared/roughtime:go_default_library", "//shared/sliceutil:go_default_library", "//shared/testutil:go_default_library", + "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library", "@com_github_kevinms_leakybucket_go//:go_default_library", "@com_github_libp2p_go_libp2p_core//network:go_default_library", "@com_github_libp2p_go_libp2p_core//peer:go_default_library", diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher.go b/beacon-chain/sync/initial-sync/blocks_fetcher.go index dcaad9888c..cc08429c9e 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher.go @@ -3,7 +3,6 @@ package initialsync import ( "bytes" "context" - "fmt" "io" "math" "math/rand" @@ -328,10 +327,9 @@ func (f *blocksFetcher) requestBeaconBlocksByRange( } req := 
&p2ppb.BeaconBlocksByRangeRequest{ - HeadBlockRoot: root, - StartSlot: start, - Count: count, - Step: step, + StartSlot: start, + Count: count, + Step: step, } resp, respErr := f.requestBlocks(ctx, req, pid) @@ -376,10 +374,9 @@ func (f *blocksFetcher) requestBlocks( "start": req.StartSlot, "count": req.Count, "step": req.Step, - "head": fmt.Sprintf("%#x", req.HeadBlockRoot), }).Debug("Requesting blocks") f.Unlock() - stream, err := f.p2p.Send(ctx, req, pid) + stream, err := f.p2p.Send(ctx, req, p2p.RPCBlocksByRangeTopic, pid) if err != nil { return nil, err } @@ -474,7 +471,7 @@ func (f *blocksFetcher) selectPeers(peers []peer.ID) []peer.ID { // nonSkippedSlotAfter checks slots after the given one in an attempt to find non-empty future slot. func (f *blocksFetcher) nonSkippedSlotAfter(ctx context.Context, slot uint64) (uint64, error) { headEpoch := helpers.SlotToEpoch(f.headFetcher.HeadSlot()) - root, epoch, peers := f.p2p.Peers().BestFinalized(params.BeaconConfig().MaxPeersToSync, headEpoch) + _, epoch, peers := f.p2p.Peers().BestFinalized(params.BeaconConfig().MaxPeersToSync, headEpoch) if len(peers) == 0 { return 0, errNoPeersAvailable } @@ -489,10 +486,9 @@ func (f *blocksFetcher) nonSkippedSlotAfter(ctx context.Context, slot uint64) (u for slot <= helpers.StartSlot(epoch+1) { req := &p2ppb.BeaconBlocksByRangeRequest{ - HeadBlockRoot: root, - StartSlot: slot + 1, - Count: blockBatchSize, - Step: 1, + StartSlot: slot + 1, + Count: blockBatchSize, + Step: 1, } blocks, err := f.requestBlocks(ctx, req, nextPID()) diff --git a/beacon-chain/sync/initial-sync/round_robin.go b/beacon-chain/sync/initial-sync/round_robin.go index 23ae4a49d6..89e12375da 100644 --- a/beacon-chain/sync/initial-sync/round_robin.go +++ b/beacon-chain/sync/initial-sync/round_robin.go @@ -14,6 +14,7 @@ import ( blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/state" + "github.com/prysmaticlabs/prysm/beacon-chain/p2p" prysmsync "github.com/prysmaticlabs/prysm/beacon-chain/sync" p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bytesutil" @@ -78,20 +79,19 @@ func (s *Service) roundRobinSync(genesis time.Time) error { // mitigation. We are already convinced that we are on the correct finalized chain. Any blocks // we receive there after must build on the finalized chain or be considered invalid during // fork choice resolution / block processing. 
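// Illustrative sketch (not from this changeset): BeaconBlocksByRangeRequest no longer
// carries a head block root, and p2p.Send now takes the RPC topic explicitly, which is
// why the hunks in this file drop HeadBlockRoot and pass p2p.RPCBlocksByRangeTopic. The
// function name, the sender interface and the Count value below are assumptions for
// illustration; the request fields and the Send call mirror this diff.
package example

import (
	"context"

	"github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
	p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)

// requestRange asks a single peer for a contiguous batch of blocks starting at a slot.
func requestRange(ctx context.Context, sender p2p.P2P, pid peer.ID, start uint64) (network.Stream, error) {
	req := &p2ppb.BeaconBlocksByRangeRequest{
		StartSlot: start,
		Count:     64, // placeholder batch size
		Step:      1,
	}
	// The topic is passed explicitly instead of being derived from the request type alone.
	return sender.Send(ctx, req, p2p.RPCBlocksByRangeTopic, pid)
}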
- root, _, pids := s.p2p.Peers().BestFinalized(1 /* maxPeers */, s.highestFinalizedEpoch()) + _, _, pids := s.p2p.Peers().BestFinalized(1 /* maxPeers */, s.highestFinalizedEpoch()) for len(pids) == 0 { log.Info("Waiting for a suitable peer before syncing to the head of the chain") time.Sleep(refreshTime) - root, _, pids = s.p2p.Peers().BestFinalized(1 /* maxPeers */, s.highestFinalizedEpoch()) + _, _, pids = s.p2p.Peers().BestFinalized(1 /* maxPeers */, s.highestFinalizedEpoch()) } best := pids[0] for head := helpers.SlotsSince(genesis); s.chain.HeadSlot() < head; { req := &p2ppb.BeaconBlocksByRangeRequest{ - HeadBlockRoot: root, - StartSlot: s.chain.HeadSlot() + 1, - Count: mathutil.Min(helpers.SlotsSince(genesis)-s.chain.HeadSlot()+1, allowedBlocksPerSecond), - Step: 1, + StartSlot: s.chain.HeadSlot() + 1, + Count: mathutil.Min(helpers.SlotsSince(genesis)-s.chain.HeadSlot()+1, allowedBlocksPerSecond), + Step: 1, } log.WithField("req", req).WithField("peer", best.Pretty()).Debug( @@ -131,9 +131,8 @@ func (s *Service) requestBlocks(ctx context.Context, req *p2ppb.BeaconBlocksByRa "start": req.StartSlot, "count": req.Count, "step": req.Step, - "head": fmt.Sprintf("%#x", req.HeadBlockRoot), }).Debug("Requesting blocks") - stream, err := s.p2p.Send(ctx, req, pid) + stream, err := s.p2p.Send(ctx, req, p2p.RPCBlocksByRangeTopic, pid) if err != nil { return nil, errors.Wrap(err, "failed to send request to peer") } diff --git a/beacon-chain/sync/initial-sync/round_robin_test.go b/beacon-chain/sync/initial-sync/round_robin_test.go index 2accbe3145..d6709d5d36 100644 --- a/beacon-chain/sync/initial-sync/round_robin_test.go +++ b/beacon-chain/sync/initial-sync/round_robin_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/p2p/enr" "github.com/kevinms/leakybucket-go" "github.com/libp2p/go-libp2p-core/network" eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" @@ -397,14 +398,14 @@ func connectPeers(t *testing.T, host *p2pt.TestP2P, data []*peerData, peerStatus peer.Connect(host) - peerStatus.Add(peer.PeerID(), nil, network.DirOutbound, []uint64{}) + peerStatus.Add(new(enr.Record), peer.PeerID(), nil, network.DirOutbound) peerStatus.SetConnectionState(peer.PeerID(), peers.PeerConnected) peerStatus.SetChainState(peer.PeerID(), &p2ppb.Status{ - HeadForkVersion: params.BeaconConfig().GenesisForkVersion, - FinalizedRoot: []byte(fmt.Sprintf("finalized_root %d", datum.finalizedEpoch)), - FinalizedEpoch: datum.finalizedEpoch, - HeadRoot: []byte("head_root"), - HeadSlot: datum.headSlot, + ForkDigest: params.BeaconConfig().GenesisForkVersion, + FinalizedRoot: []byte(fmt.Sprintf("finalized_root %d", datum.finalizedEpoch)), + FinalizedEpoch: datum.finalizedEpoch, + HeadRoot: []byte("head_root"), + HeadSlot: datum.headSlot, }) } } diff --git a/beacon-chain/sync/metrics.go b/beacon-chain/sync/metrics.go index 95a2e1e655..ab6fee1d68 100644 --- a/beacon-chain/sync/metrics.go +++ b/beacon-chain/sync/metrics.go @@ -1,11 +1,23 @@ package sync import ( + "fmt" + "reflect" + "strings" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + pb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" + "github.com/prysmaticlabs/prysm/beacon-chain/p2p" ) var ( + topicPeerCount = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "p2p_topic_peer_count", + Help: "The number of peers subscribed to a given topic.", + }, []string{"topic"}, + ) messageReceivedCounter = promauto.NewCounterVec( prometheus.CounterOpts{ Name: 
"p2p_message_received_total", @@ -58,3 +70,32 @@ var ( }, ) ) + +func (r *Service) updateMetrics() { + // We update the dynamic subnet topics. + digest, err := r.p2p.ForkDigest() + if err != nil { + log.WithError(err).Errorf("Could not compute fork digest") + } + indices := r.aggregatorCommitteeIndices(r.chain.CurrentSlot()) + attTopic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.Attestation{})] + attTopic += r.p2p.Encoding().ProtocolSuffix() + for _, committeeIdx := range indices { + formattedTopic := fmt.Sprintf(attTopic, digest, committeeIdx) + topicPeerCount.WithLabelValues(formattedTopic).Set(float64(len(r.p2p.PubSub().ListPeers(formattedTopic)))) + } + // We update all other gossip topics. + for topic := range p2p.GossipTopicMappings { + // We already updated attestation subnet topics. + if strings.Contains(topic, "committee_index") { + continue + } + topic += r.p2p.Encoding().ProtocolSuffix() + if !strings.Contains(topic, "%x") { + topicPeerCount.WithLabelValues(topic).Set(float64(len(r.p2p.PubSub().ListPeers(topic)))) + continue + } + formattedTopic := fmt.Sprintf(topic, digest) + topicPeerCount.WithLabelValues(formattedTopic).Set(float64(len(r.p2p.PubSub().ListPeers(formattedTopic)))) + } +} diff --git a/beacon-chain/sync/pending_attestations_queue.go b/beacon-chain/sync/pending_attestations_queue.go index 0f57cbfc5d..28727535c1 100644 --- a/beacon-chain/sync/pending_attestations_queue.go +++ b/beacon-chain/sync/pending_attestations_queue.go @@ -63,23 +63,24 @@ func (s *Service) processPendingAtts(ctx context.Context) error { attestations := s.blkRootToPendingAtts[bRoot] s.pendingAttsLock.RUnlock() // Has the pending attestation's missing block arrived and the node processed block yet? - hasStateSummary := featureconfig.Get().NewStateMgmt && s.db.HasStateSummary(ctx, bRoot) || s.stateSummaryCache.Has(bRoot) + hasStateSummary := !featureconfig.Get().DisableNewStateMgmt && s.db.HasStateSummary(ctx, bRoot) || s.stateSummaryCache.Has(bRoot) if s.db.HasBlock(ctx, bRoot) && (s.db.HasState(ctx, bRoot) || hasStateSummary) { numberOfBlocksRecoveredFromAtt.Inc() - for _, att := range attestations { + for _, signedAtt := range attestations { + att := signedAtt.Message // The pending attestations can arrive in both aggregated and unaggregated forms, // each from has distinct validation steps. if helpers.IsAggregated(att.Aggregate) { // Save the pending aggregated attestation to the pool if it passes the aggregated // validation steps. - if s.validateBlockInAttestation(ctx, att) && s.validateAggregatedAtt(ctx, att) { + if s.validateBlockInAttestation(ctx, signedAtt) && s.validateAggregatedAtt(ctx, signedAtt) { if err := s.attPool.SaveAggregatedAttestation(att.Aggregate); err != nil { return err } numberOfAttsRecovered.Inc() - // Broadcasting the attestation again once a node is able to process it. - if err := s.p2p.Broadcast(ctx, att); err != nil { + // Broadcasting the signed attestation again once a node is able to process it. + if err := s.p2p.Broadcast(ctx, signedAtt); err != nil { log.WithError(err).Error("Failed to broadcast") } } @@ -94,8 +95,8 @@ func (s *Service) processPendingAtts(ctx context.Context) error { } numberOfAttsRecovered.Inc() - // Broadcasting the attestation again once a node is able to process it. - if err := s.p2p.Broadcast(ctx, att); err != nil { + // Broadcasting the signed attestation again once a node is able to process it. 
+ if err := s.p2p.Broadcast(ctx, signedAtt); err != nil { log.WithError(err).Error("Failed to broadcast") } } @@ -113,7 +114,7 @@ func (s *Service) processPendingAtts(ctx context.Context) error { // Pending attestation's missing block has not arrived yet. log.WithFields(logrus.Fields{ "currentSlot": s.chain.CurrentSlot(), - "attSlot": attestations[0].Aggregate.Data.Slot, + "attSlot": attestations[0].Message.Aggregate.Data.Slot, "attCount": len(attestations), "blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])), }).Debug("Requesting block for pending attestation") @@ -124,7 +125,7 @@ func (s *Service) processPendingAtts(ctx context.Context) error { return nil } pid := pids[rand.Int()%len(pids)] - targetSlot := helpers.SlotToEpoch(attestations[0].Aggregate.Data.Target.Epoch) + targetSlot := helpers.SlotToEpoch(attestations[0].Message.Aggregate.Data.Target.Epoch) for _, p := range pids { cs, err := s.p2p.Peers().ChainState(p) if err != nil { @@ -149,14 +150,14 @@ func (s *Service) processPendingAtts(ctx context.Context) error { // This defines how pending attestations is saved in the map. The key is the // root of the missing block. The value is the list of pending attestations // that voted for that block root. -func (s *Service) savePendingAtt(att *ethpb.AggregateAttestationAndProof) { - root := bytesutil.ToBytes32(att.Aggregate.Data.BeaconBlockRoot) +func (s *Service) savePendingAtt(att *ethpb.SignedAggregateAttestationAndProof) { + root := bytesutil.ToBytes32(att.Message.Aggregate.Data.BeaconBlockRoot) s.pendingAttsLock.Lock() defer s.pendingAttsLock.Unlock() _, ok := s.blkRootToPendingAtts[root] if !ok { - s.blkRootToPendingAtts[root] = []*ethpb.AggregateAttestationAndProof{att} + s.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{att} return } @@ -176,7 +177,7 @@ func (s *Service) validatePendingAtts(ctx context.Context, slot uint64) { for bRoot, atts := range s.blkRootToPendingAtts { for i := len(atts) - 1; i >= 0; i-- { - if slot >= atts[i].Aggregate.Data.Slot+params.BeaconConfig().SlotsPerEpoch { + if slot >= atts[i].Message.Aggregate.Data.Slot+params.BeaconConfig().SlotsPerEpoch { // Remove the pending attestation from the list in place. atts = append(atts[:i], atts[i+1:]...) 
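// Illustrative sketch (not from this changeset): pending aggregates are now stored and
// broadcast as the signed wrapper type, so callers build a SignedAggregateAttestationAndProof
// around the proof and its signature before handing it to savePendingAtt. The helper name
// and parameters below are placeholders; the message fields mirror the ones in this diff.
package example

import (
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
)

// newSignedAggregate wraps an aggregate attestation, its selection proof and the
// aggregator's signature into the type that blkRootToPendingAtts now stores.
func newSignedAggregate(att *ethpb.Attestation, selectionProof, sig []byte, aggregatorIndex uint64) *ethpb.SignedAggregateAttestationAndProof {
	return &ethpb.SignedAggregateAttestationAndProof{
		Message: &ethpb.AggregateAttestationAndProof{
			AggregatorIndex: aggregatorIndex,
			Aggregate:       att,
			SelectionProof:  selectionProof,
		},
		Signature: sig,
	}
}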
numberOfAttsNotRecovered.Inc() diff --git a/beacon-chain/sync/pending_attestations_queue_test.go b/beacon-chain/sync/pending_attestations_queue_test.go index a311d921f5..7e82732e16 100644 --- a/beacon-chain/sync/pending_attestations_queue_test.go +++ b/beacon-chain/sync/pending_attestations_queue_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/p2p/enr" "github.com/libp2p/go-libp2p-core/network" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-bitfield" @@ -17,7 +18,6 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations" "github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers" p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing" - beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/attestationutil" "github.com/prysmaticlabs/prysm/shared/bls" @@ -38,7 +38,7 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) { if len(p1.Host.Network().Peers()) != 1 { t.Error("Expected peers to be connected") } - p1.Peers().Add(p2.PeerID(), nil, network.DirOutbound, []uint64{}) + p1.Peers().Add(new(enr.Record), p2.PeerID(), nil, network.DirOutbound) p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected) p1.Peers().SetChainState(p2.PeerID(), &pb.Status{}) @@ -46,12 +46,12 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) { p2p: p1, db: db, chain: &mock.ChainService{Genesis: roughtime.Now()}, - blkRootToPendingAtts: make(map[[32]byte][]*ethpb.AggregateAttestationAndProof), + blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof), stateSummaryCache: cache.NewStateSummaryCache(), } a := ðpb.AggregateAttestationAndProof{Aggregate: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{}}}} - r.blkRootToPendingAtts[[32]byte{'A'}] = []*ethpb.AggregateAttestationAndProof{a} + r.blkRootToPendingAtts[[32]byte{'A'}] = []*ethpb.SignedAggregateAttestationAndProof{{Message: a}} if err := r.processPendingAtts(context.Background()); err != nil { t.Fatal(err) } @@ -69,14 +69,14 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) { p2p: p1, db: db, chain: &mock.ChainService{Genesis: roughtime.Now()}, - blkRootToPendingAtts: make(map[[32]byte][]*ethpb.AggregateAttestationAndProof), + blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof), attPool: attestations.NewPool(), stateSummaryCache: cache.NewStateSummaryCache(), } a := ðpb.AggregateAttestationAndProof{ Aggregate: ðpb.Attestation{ - Signature: bls.RandKey().Sign([]byte("foo"), 0).Marshal(), + Signature: bls.RandKey().Sign([]byte("foo")).Marshal(), AggregationBits: bitfield.Bitlist{0x02}, Data: ðpb.AttestationData{ Target: ðpb.Checkpoint{}}}} @@ -86,10 +86,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) { if err != nil { t.Fatal(err) } - s, err := beaconstate.InitializeFromProto(&pb.BeaconState{}) - if err != nil { - t.Fatal(err) - } + s := testutil.NewBeaconState() if err := r.db.SaveBlock(context.Background(), b); err != nil { t.Fatal(err) } @@ -97,7 +94,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) { t.Fatal(err) } - r.blkRootToPendingAtts[r32] = []*ethpb.AggregateAttestationAndProof{a} + r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: a}} if err := r.processPendingAtts(context.Background()); err != nil { t.Fatal(err) } @@ -121,6 
+118,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) { defer dbtest.TeardownDB(t, db) p1 := p2ptest.NewTestP2P(t) validators := uint64(256) + testutil.ResetCache() beaconState, privKeys := testutil.DeterministicGenesisState(t, validators) sb := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}} @@ -152,32 +150,40 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) { if err != nil { t.Error(err) } - hashTreeRoot, err := ssz.HashTreeRoot(att.Data) - if err != nil { - t.Error(err) - } - domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester) + domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } + hashTreeRoot, err := helpers.ComputeSigningRoot(att.Data, domain) + if err != nil { + t.Error(err) + } sigs := make([]*bls.Signature, len(attestingIndices)) for i, indice := range attestingIndices { - sig := privKeys[indice].Sign(hashTreeRoot[:], domain) + sig := privKeys[indice].Sign(hashTreeRoot[:]) sigs[i] = sig } att.Signature = bls.AggregateSignatures(sigs).Marshal()[:] - slotRoot, err := ssz.HashTreeRoot(att.Data.Slot) + slotRoot, err := helpers.ComputeSigningRoot(att.Data.Slot, domain) if err != nil { t.Fatal(err) } - - sig := privKeys[154].Sign(slotRoot[:], domain) + sig := privKeys[33].Sign(slotRoot[:]) aggregateAndProof := &ethpb.AggregateAttestationAndProof{ SelectionProof: sig.Marshal(), Aggregate: att, - AggregatorIndex: 154, + AggregatorIndex: 33, } + domain, err = helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainAggregateAndProof, beaconState.GenesisValidatorRoot()) + if err != nil { + t.Fatal(err) + } + signingRoot, err := helpers.ComputeSigningRoot(aggregateAndProof, domain) + if err != nil { + t.Error(err) + } + aggreSig := privKeys[33].Sign(signingRoot[:]).Marshal() if err := beaconState.SetGenesisTime(uint64(time.Now().Unix())); err != nil { t.Fatal(err) @@ -191,7 +197,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) { FinalizedCheckPoint: &ethpb.Checkpoint{ Epoch: 0, }}, - blkRootToPendingAtts: make(map[[32]byte][]*ethpb.AggregateAttestationAndProof), + blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof), attPool: attestations.NewPool(), stateSummaryCache: cache.NewStateSummaryCache(), } @@ -204,21 +210,18 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) { if err := r.db.SaveBlock(context.Background(), sb); err != nil { t.Fatal(err) } - s, err := beaconstate.InitializeFromProto(&pb.BeaconState{}) - if err != nil { - t.Fatal(err) - } + s := testutil.NewBeaconState() if err := r.db.SaveState(context.Background(), s, r32); err != nil { t.Fatal(err) } - r.blkRootToPendingAtts[r32] = []*ethpb.AggregateAttestationAndProof{aggregateAndProof} + r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}} if err := r.processPendingAtts(context.Background()); err != nil { t.Fatal(err) } if len(r.attPool.AggregatedAttestations()) != 1 { - t.Error("Did not save aggregated att") + t.Fatal("Did not save aggregated att") } if !reflect.DeepEqual(r.attPool.AggregatedAttestations()[0], att) { t.Error("Incorrect saved att") @@ -235,7 +238,7 @@ func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) { defer dbtest.TeardownDB(t, db) s := &Service{ - blkRootToPendingAtts: make(map[[32]byte][]*ethpb.AggregateAttestationAndProof), + blkRootToPendingAtts:
make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof), } // 100 Attestations per block root. @@ -244,15 +247,18 @@ func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) { r3 := [32]byte{'C'} for i := 0; i < 100; i++ { - s.savePendingAtt(&ethpb.AggregateAttestationAndProof{ - Aggregate: &ethpb.Attestation{ - Data: &ethpb.AttestationData{Slot: uint64(i), BeaconBlockRoot: r1[:]}}}) - s.savePendingAtt(&ethpb.AggregateAttestationAndProof{ - Aggregate: &ethpb.Attestation{ - Data: &ethpb.AttestationData{Slot: uint64(i), BeaconBlockRoot: r2[:]}}}) - s.savePendingAtt(&ethpb.AggregateAttestationAndProof{ - Aggregate: &ethpb.Attestation{ - Data: &ethpb.AttestationData{Slot: uint64(i), BeaconBlockRoot: r3[:]}}}) + s.savePendingAtt(&ethpb.SignedAggregateAttestationAndProof{ + Message: &ethpb.AggregateAttestationAndProof{ + Aggregate: &ethpb.Attestation{ + Data: &ethpb.AttestationData{Slot: uint64(i), BeaconBlockRoot: r1[:]}}}}) + s.savePendingAtt(&ethpb.SignedAggregateAttestationAndProof{ + Message: &ethpb.AggregateAttestationAndProof{ + Aggregate: &ethpb.Attestation{ + Data: &ethpb.AttestationData{Slot: uint64(i), BeaconBlockRoot: r2[:]}}}}) + s.savePendingAtt(&ethpb.SignedAggregateAttestationAndProof{ + Message: &ethpb.AggregateAttestationAndProof{ + Aggregate: &ethpb.Attestation{ + Data: &ethpb.AttestationData{Slot: uint64(i), BeaconBlockRoot: r3[:]}}}}) } if len(s.blkRootToPendingAtts[r1]) != 100 { diff --git a/beacon-chain/sync/pending_blocks_queue_test.go b/beacon-chain/sync/pending_blocks_queue_test.go index 7f69fda9d1..a1fd76b355 100644 --- a/beacon-chain/sync/pending_blocks_queue_test.go +++ b/beacon-chain/sync/pending_blocks_queue_test.go @@ -5,6 +5,7 @@ import ( "sync" "testing" + "github.com/ethereum/go-ethereum/p2p/enr" "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/protocol" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" @@ -122,9 +123,9 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks2(t *testing.T) { if code == 0 { t.Error("Expected a non-zero code") } - if errMsg != errWrongForkVersion.Error() { - t.Logf("Received error string len %d, wanted error string len %d", len(errMsg), len(errWrongForkVersion.Error())) - t.Errorf("Received unexpected message response in the stream: %s. Wanted %s.", errMsg, errWrongForkVersion.Error()) + if errMsg != errWrongForkDigestVersion.Error() { + t.Logf("Received error string len %d, wanted error string len %d", len(errMsg), len(errWrongForkDigestVersion.Error())) + t.Errorf("Received unexpected message response in the stream: %s.
Wanted %s.", errMsg, errWrongForkDigestVersion.Error()) } }) @@ -138,7 +139,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks2(t *testing.T) { }, slotToPendingBlocks: make(map[uint64]*ethpb.SignedBeaconBlock), seenPendingBlocks: make(map[[32]byte]bool), } - p1.Peers().Add(p2.PeerID(), nil, network.DirOutbound, []uint64{}) + p1.Peers().Add(new(enr.Record), p2.PeerID(), nil, network.DirOutbound) p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected) p1.Peers().SetChainState(p2.PeerID(), &pb.Status{}) @@ -251,7 +252,7 @@ func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) { }, slotToPendingBlocks: make(map[uint64]*ethpb.SignedBeaconBlock), seenPendingBlocks: make(map[[32]byte]bool), } - p1.Peers().Add(p1.PeerID(), nil, network.DirOutbound, []uint64{}) + p1.Peers().Add(new(enr.Record), p1.PeerID(), nil, network.DirOutbound) p1.Peers().SetConnectionState(p1.PeerID(), peers.PeerConnected) p1.Peers().SetChainState(p1.PeerID(), &pb.Status{}) diff --git a/beacon-chain/sync/rpc.go b/beacon-chain/sync/rpc.go index 8ab08fbbfe..7fa3b5fb9e 100644 --- a/beacon-chain/sync/rpc.go +++ b/beacon-chain/sync/rpc.go @@ -3,10 +3,12 @@ package sync import ( "context" "reflect" + "strings" "time" libp2pcore "github.com/libp2p/go-libp2p-core" "github.com/libp2p/go-libp2p-core/network" + "github.com/prysmaticlabs/prysm/beacon-chain/p2p" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/roughtime" "github.com/prysmaticlabs/prysm/shared/traceutil" @@ -31,25 +33,35 @@ type rpcHandler func(context.Context, interface{}, libp2pcore.Stream) error // registerRPCHandlers for p2p RPC. func (r *Service) registerRPCHandlers() { r.registerRPC( - "/eth2/beacon_chain/req/status/1", + p2p.RPCStatusTopic, &pb.Status{}, r.statusRPCHandler, ) r.registerRPC( - "/eth2/beacon_chain/req/goodbye/1", + p2p.RPCGoodByeTopic, new(uint64), r.goodbyeRPCHandler, ) r.registerRPC( - "/eth2/beacon_chain/req/beacon_blocks_by_range/1", + p2p.RPCBlocksByRangeTopic, &pb.BeaconBlocksByRangeRequest{}, r.beaconBlocksByRangeRPCHandler, ) r.registerRPC( - "/eth2/beacon_chain/req/beacon_blocks_by_root/1", + p2p.RPCBlocksByRootTopic, [][32]byte{}, r.beaconBlocksRootRPCHandler, ) + r.registerRPC( + p2p.RPCPingTopic, + new(uint64), + r.pingHandler, + ) + r.registerRPC( + p2p.RPCMetaDataTopic, + new(interface{}), + r.metaDataHandler, + ) } // registerRPC for a given topic with an expected protobuf message type. @@ -78,6 +90,19 @@ func (r *Service) registerRPC(topic string, base interface{}, handle rpcHandler) // Increment message received counter. messageReceivedCounter.WithLabelValues(topic).Inc() + // since metadata requests do not have any data in the payload, we + // do not decode anything. + if strings.Contains(topic, p2p.RPCMetaDataTopic) { + if err := handle(ctx, new(interface{}), stream); err != nil { + messageFailedProcessingCounter.WithLabelValues(topic).Inc() + if err != errWrongForkDigestVersion { + log.WithError(err).Warn("Failed to handle p2p RPC") + } + traceutil.AnnotateError(span, err) + } + return + } + // Given we have an input argument that can be pointer or [][32]byte, this gives us // a way to check for its reflect.Kind and based on the result, we can decode // accordingly. 
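registerRPCHandlers now pulls its topic strings from the p2p package constants (RPCStatusTopic, RPCGoodByeTopic, RPCPingTopic, RPCMetaDataTopic, and so on), and registerRPC special-cases the metadata topic because a metadata request carries no body to decode. A stripped-down sketch of that dispatch decision; the stream, codec and handler types here are hypothetical stand-ins for the real libp2p and encoder types, and the topic string is only illustrative:

package main

import (
    "context"
    "fmt"
    "strings"
)

// Hypothetical, simplified stand-ins for the real stream, codec and handler types.
type stream interface{}
type rpcHandler func(ctx context.Context, msg interface{}, s stream) error

const rpcMetaDataTopic = "/eth2/beacon_chain/req/metadata/1" // illustrative value

// dispatch mirrors the decision added to registerRPC: metadata requests carry
// no payload, so the handler is invoked without decoding anything from the wire.
func dispatch(ctx context.Context, topic string, decode func(s stream, into interface{}) error, base interface{}, handle rpcHandler, s stream) error {
    if strings.Contains(topic, rpcMetaDataTopic) {
        return handle(ctx, new(interface{}), s)
    }
    if err := decode(s, base); err != nil {
        return err
    }
    return handle(ctx, base, s)
}

func main() {
    handle := func(ctx context.Context, msg interface{}, s stream) error {
        fmt.Printf("handled %T\n", msg)
        return nil
    }
    // The metadata topic is served even though no decoder is supplied.
    _ = dispatch(context.Background(), rpcMetaDataTopic+"/ssz", nil, nil, handle, nil)
}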
@@ -91,7 +116,7 @@ func (r *Service) registerRPC(topic string, base interface{}, handle rpcHandler) } if err := handle(ctx, msg.Interface(), stream); err != nil { messageFailedProcessingCounter.WithLabelValues(topic).Inc() - if err != errWrongForkVersion { + if err != errWrongForkDigestVersion { log.WithError(err).Warn("Failed to handle p2p RPC") } traceutil.AnnotateError(span, err) @@ -105,7 +130,7 @@ func (r *Service) registerRPC(topic string, base interface{}, handle rpcHandler) } if err := handle(ctx, msg.Elem().Interface(), stream); err != nil { messageFailedProcessingCounter.WithLabelValues(topic).Inc() - if err != errWrongForkVersion { + if err != errWrongForkDigestVersion { log.WithError(err).Warn("Failed to handle p2p RPC") } traceutil.AnnotateError(span, err) diff --git a/beacon-chain/sync/rpc_beacon_blocks_by_root.go b/beacon-chain/sync/rpc_beacon_blocks_by_root.go index 77a98a53d5..295e4b3d70 100644 --- a/beacon-chain/sync/rpc_beacon_blocks_by_root.go +++ b/beacon-chain/sync/rpc_beacon_blocks_by_root.go @@ -9,6 +9,7 @@ import ( "github.com/libp2p/go-libp2p-core/peer" "github.com/pkg/errors" "github.com/prysmaticlabs/go-ssz" + "github.com/prysmaticlabs/prysm/beacon-chain/p2p" ) // sendRecentBeaconBlocksRequest sends a recent beacon blocks request to a peer to get @@ -17,7 +18,7 @@ func (r *Service) sendRecentBeaconBlocksRequest(ctx context.Context, blockRoots ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() - stream, err := r.p2p.Send(ctx, blockRoots, id) + stream, err := r.p2p.Send(ctx, blockRoots, p2p.RPCBlocksByRootTopic, id) if err != nil { return err } diff --git a/beacon-chain/sync/rpc_metadata.go b/beacon-chain/sync/rpc_metadata.go new file mode 100644 index 0000000000..40c46d16d5 --- /dev/null +++ b/beacon-chain/sync/rpc_metadata.go @@ -0,0 +1,62 @@ +package sync + +import ( + "context" + "time" + + libp2pcore "github.com/libp2p/go-libp2p-core" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/beacon-chain/p2p" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" +) + +// metaDataHandler reads the incoming metadata rpc request from the peer. +func (r *Service) metaDataHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error { + defer func() { + if err := stream.Close(); err != nil { + log.WithError(err).Error("Failed to close stream") + } + }() + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + setRPCStreamDeadlines(stream) + + if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil { + return err + } + _, err := r.p2p.Encoding().EncodeWithLength(stream, r.p2p.Metadata()) + return err +} + +func (r *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (*pb.MetaData, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + stream, err := r.p2p.Send(ctx, new(interface{}), p2p.RPCMetaDataTopic, id) + if err != nil { + return nil, err + } + // we close the stream outside of `send` because + // metadata requests send no payload, so closing the + // stream early leads it to a reset. 
+ defer func() { + if err := stream.Close(); err != nil { + log.WithError(err).Error("Failed to close stream") + } + }() + code, errMsg, err := ReadStatusCode(stream, r.p2p.Encoding()) + if err != nil { + return nil, err + } + if code != 0 { + r.p2p.Peers().IncrementBadResponses(stream.Conn().RemotePeer()) + return nil, errors.New(errMsg) + } + msg := new(pb.MetaData) + if err := r.p2p.Encoding().DecodeWithLength(stream, msg); err != nil { + r.p2p.Peers().IncrementBadResponses(stream.Conn().RemotePeer()) + return nil, err + } + return msg, nil +} diff --git a/beacon-chain/sync/rpc_metadata_test.go b/beacon-chain/sync/rpc_metadata_test.go new file mode 100644 index 0000000000..6f4eb3eb3d --- /dev/null +++ b/beacon-chain/sync/rpc_metadata_test.go @@ -0,0 +1,133 @@ +package sync + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/protocol" + "github.com/prysmaticlabs/go-ssz" + db "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" + "github.com/prysmaticlabs/prysm/beacon-chain/p2p" + p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" + "github.com/prysmaticlabs/prysm/shared/testutil" +) + +func TestMetaDataRPCHandler_ReceivesMetadata(t *testing.T) { + p1 := p2ptest.NewTestP2P(t) + p2 := p2ptest.NewTestP2P(t) + p1.Connect(p2) + if len(p1.Host.Network().Peers()) != 1 { + t.Error("Expected peers to be connected") + } + bitfield := [8]byte{'A', 'B'} + p1.LocalMetadata = &pb.MetaData{ + SeqNumber: 2, + Attnets: bitfield[:], + } + + // Set up a head state in the database with data we expect. + d := db.SetupDB(t) + defer db.TeardownDB(t, d) + + r := &Service{ + db: d, + p2p: p1, + } + + // Setup streams + pcl := protocol.ID("/testing") + var wg sync.WaitGroup + wg.Add(1) + p2.Host.SetStreamHandler(pcl, func(stream network.Stream) { + defer wg.Done() + expectSuccess(t, r, stream) + out := new(pb.MetaData) + if err := r.p2p.Encoding().DecodeWithLength(stream, out); err != nil { + t.Fatal(err) + } + if !ssz.DeepEqual(p1.LocalMetadata, out) { + t.Fatalf("Metadata unequal, received %v but wanted %v", out, p1.LocalMetadata) + } + }) + stream1, err := p1.Host.NewStream(context.Background(), p2.Host.ID(), pcl) + if err != nil { + t.Fatal(err) + } + + err = r.metaDataHandler(context.Background(), new(interface{}), stream1) + if err != nil { + t.Errorf("Unxpected error: %v", err) + } + + if testutil.WaitTimeout(&wg, 1*time.Second) { + t.Fatal("Did not receive stream within 1 sec") + } + + conns := p1.Host.Network().ConnsToPeer(p2.Host.ID()) + if len(conns) == 0 { + t.Error("Peer is disconnected despite receiving a valid ping") + } +} + +func TestMetadataRPCHandler_SendsMetadata(t *testing.T) { + p1 := p2ptest.NewTestP2P(t) + p2 := p2ptest.NewTestP2P(t) + p1.Connect(p2) + if len(p1.Host.Network().Peers()) != 1 { + t.Error("Expected peers to be connected") + } + bitfield := [8]byte{'A', 'B'} + p2.LocalMetadata = &pb.MetaData{ + SeqNumber: 2, + Attnets: bitfield[:], + } + + // Set up a head state in the database with data we expect. 
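metaDataHandler and sendMetaDataRequest follow the same framing as the other RPC methods: the responder writes a single response-code byte (0 for success) followed by the length-prefixed, encoded message, and the requester reads the code (and an error string on failure) before decoding the body. A rough sketch of that exchange over an in-memory pipe, with a plain uvarint length prefix standing in for the real p2p encoder:

package main

import (
    "encoding/binary"
    "fmt"
    "io"
)

const responseCodeSuccess = 0x00

// writeLengthPrefixed writes a uvarint length followed by the payload.
// (Illustrative framing only; the real codec is the p2p encoder.)
func writeLengthPrefixed(w io.Writer, payload []byte) error {
    var lenBuf [binary.MaxVarintLen64]byte
    n := binary.PutUvarint(lenBuf[:], uint64(len(payload)))
    if _, err := w.Write(lenBuf[:n]); err != nil {
        return err
    }
    _, err := w.Write(payload)
    return err
}

func readLengthPrefixed(r io.Reader) ([]byte, error) {
    size, err := binary.ReadUvarint(&byteReader{r: r})
    if err != nil {
        return nil, err
    }
    buf := make([]byte, size)
    _, err = io.ReadFull(r, buf)
    return buf, err
}

type byteReader struct{ r io.Reader }

func (b *byteReader) ReadByte() (byte, error) {
    var one [1]byte
    _, err := io.ReadFull(b.r, one[:])
    return one[0], err
}

func main() {
    pr, pw := io.Pipe()

    // Responder: success code first, then the encoded metadata.
    go func() {
        defer pw.Close()
        pw.Write([]byte{responseCodeSuccess})
        writeLengthPrefixed(pw, []byte(`{"seq":2}`))
    }()

    // Requester: read the code before attempting to decode the body.
    var code [1]byte
    if _, err := io.ReadFull(pr, code[:]); err != nil || code[0] != responseCodeSuccess {
        fmt.Println("bad response:", err, code[0])
        return
    }
    body, err := readLengthPrefixed(pr)
    fmt.Println(string(body), err)
}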
+ d := db.SetupDB(t) + defer db.TeardownDB(t, d) + + r := &Service{ + db: d, + p2p: p1, + } + + r2 := &Service{ + db: d, + p2p: p2, + } + + // Setup streams + pcl := protocol.ID(p2p.RPCMetaDataTopic + r.p2p.Encoding().ProtocolSuffix()) + var wg sync.WaitGroup + wg.Add(1) + p2.Host.SetStreamHandler(pcl, func(stream network.Stream) { + defer wg.Done() + + err := r2.metaDataHandler(context.Background(), new(interface{}), stream) + if err != nil { + t.Fatal(err) + } + }) + + metadata, err := r.sendMetaDataRequest(context.Background(), p2.Host.ID()) + if err != nil { + t.Errorf("Unxpected error: %v", err) + } + + if !ssz.DeepEqual(metadata, p2.LocalMetadata) { + t.Fatalf("Metadata unequal, received %v but wanted %v", metadata, p2.LocalMetadata) + } + + if testutil.WaitTimeout(&wg, 1*time.Second) { + t.Fatal("Did not receive stream within 1 sec") + } + + conns := p1.Host.Network().ConnsToPeer(p2.Host.ID()) + if len(conns) == 0 { + t.Error("Peer is disconnected despite receiving a valid ping") + } +} diff --git a/beacon-chain/sync/rpc_ping.go b/beacon-chain/sync/rpc_ping.go new file mode 100644 index 0000000000..d4d99b45b1 --- /dev/null +++ b/beacon-chain/sync/rpc_ping.go @@ -0,0 +1,107 @@ +package sync + +import ( + "context" + "errors" + "fmt" + "time" + + libp2pcore "github.com/libp2p/go-libp2p-core" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/prysmaticlabs/prysm/beacon-chain/p2p" +) + +// pingHandler reads the incoming ping rpc message from the peer. +func (r *Service) pingHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error { + defer func() { + if err := stream.Close(); err != nil { + log.WithError(err).Error("Failed to close stream") + } + }() + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + setRPCStreamDeadlines(stream) + + m, ok := msg.(*uint64) + if !ok { + return fmt.Errorf("wrong message type for ping, got %T, wanted *uint64", msg) + } + valid, err := r.validateSequenceNum(*m, stream.Conn().RemotePeer()) + if err != nil { + return err + } + if !valid { + // send metadata request in a new routine and stream. 
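pingHandler replies with the node's own sequence number and, when the peer's advertised number does not match the cached metadata, refreshes that metadata on a separate goroutine so the ping response itself is not held up. A rough sketch of that fire-and-forget refresh; the function values and the WaitGroup are stand-ins added here to keep the example deterministic, the real handler simply launches the goroutine:

package main

import (
    "fmt"
    "sync"
    "time"
)

// respondToPing replies with our own sequence number and, if the peer's view
// looks stale, refreshes its metadata asynchronously, mirroring the goroutine
// in pingHandler. requestMetadata and storeMetadata stand in for
// sendMetaDataRequest and Peers().SetMetadata.
func respondToPing(
    peerSeq uint64,
    cachedPeerSeq uint64,
    ownSeq uint64,
    requestMetadata func() (uint64, error),
    storeMetadata func(seq uint64),
    wg *sync.WaitGroup,
) uint64 {
    if peerSeq != cachedPeerSeq {
        wg.Add(1)
        go func() {
            defer wg.Done()
            seq, err := requestMetadata()
            if err != nil {
                fmt.Println("failed to refresh metadata:", err)
                return
            }
            storeMetadata(seq)
        }()
    }
    return ownSeq // this is what gets written back on the stream
}

func main() {
    var wg sync.WaitGroup
    got := respondToPing(
        3, // the peer says its metadata is at seq 3
        2, // but we last cached seq 2, so refresh in the background
        7, // our own sequence number
        func() (uint64, error) { time.Sleep(10 * time.Millisecond); return 3, nil },
        func(seq uint64) { fmt.Println("cached peer metadata at seq", seq) },
        &wg,
    )
    fmt.Println("replied with own seq", got)
    wg.Wait()
}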
+ go func() { + md, err := r.sendMetaDataRequest(ctx, stream.Conn().RemotePeer()) + if err != nil { + log.WithField("peer", stream.Conn().RemotePeer()).WithError(err).Error("Failed to send metadata request") + return + } + // update metadata if there is no error + r.p2p.Peers().SetMetadata(stream.Conn().RemotePeer(), md) + }() + } + if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil { + return err + } + _, err = r.p2p.Encoding().EncodeWithLength(stream, r.p2p.MetadataSeq()) + return err +} + +func (r *Service) sendPingRequest(ctx context.Context, id peer.ID) error { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + metadataSeq := r.p2p.MetadataSeq() + stream, err := r.p2p.Send(ctx, &metadataSeq, p2p.RPCPingTopic, id) + if err != nil { + return err + } + + code, errMsg, err := ReadStatusCode(stream, r.p2p.Encoding()) + if err != nil { + return err + } + + if code != 0 { + r.p2p.Peers().IncrementBadResponses(stream.Conn().RemotePeer()) + return errors.New(errMsg) + } + msg := new(uint64) + if err := r.p2p.Encoding().DecodeWithLength(stream, msg); err != nil { + r.p2p.Peers().IncrementBadResponses(stream.Conn().RemotePeer()) + return err + } + valid, err := r.validateSequenceNum(*msg, stream.Conn().RemotePeer()) + if err != nil { + r.p2p.Peers().IncrementBadResponses(stream.Conn().RemotePeer()) + return err + } + if valid { + return nil + } + md, err := r.sendMetaDataRequest(ctx, stream.Conn().RemotePeer()) + if err != nil { + // do not increment bad responses, as its + // already done in the request method. + return err + } + r.p2p.Peers().SetMetadata(stream.Conn().RemotePeer(), md) + return nil +} + +// validates the peer's sequence number. +func (r *Service) validateSequenceNum(seq uint64, id peer.ID) (bool, error) { + md, err := r.p2p.Peers().Metadata(id) + if err != nil { + return false, err + } + if md == nil { + return false, nil + } + if md.SeqNumber != seq { + return false, nil + } + return true, nil +} diff --git a/beacon-chain/sync/rpc_ping_test.go b/beacon-chain/sync/rpc_ping_test.go new file mode 100644 index 0000000000..f3882888a2 --- /dev/null +++ b/beacon-chain/sync/rpc_ping_test.go @@ -0,0 +1,151 @@ +package sync + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/protocol" + db "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" + p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" + "github.com/prysmaticlabs/prysm/shared/testutil" +) + +func TestPingRPCHandler_ReceivesPing(t *testing.T) { + p1 := p2ptest.NewTestP2P(t) + p2 := p2ptest.NewTestP2P(t) + p1.Connect(p2) + if len(p1.Host.Network().Peers()) != 1 { + t.Error("Expected peers to be connected") + } + p1.LocalMetadata = &pb.MetaData{ + SeqNumber: 2, + Attnets: []byte{'A', 'B'}, + } + + p2.LocalMetadata = &pb.MetaData{ + SeqNumber: 2, + Attnets: []byte{'C', 'D'}, + } + + // Set up a head state in the database with data we expect. 
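validateSequenceNum above is the heart of both sides of the ping exchange: the sequence number a peer advertises is compared against the metadata we last cached for it, and a mismatch (or missing entry) means our view of its attestation subnets is stale, so a metadata request should follow. A compact sketch of that comparison, with a plain map standing in for the peer store:

package main

import "fmt"

// metaData mirrors the two fields used by the ping handshake.
type metaData struct {
    SeqNumber uint64
    Attnets   []byte
}

// peerStore is a stand-in for r.p2p.Peers(); it caches the last metadata
// each peer advertised.
type peerStore map[string]*metaData

// seqIsCurrent reports whether the sequence number a peer just sent matches
// what we have cached for it. A false result means our view is stale and a
// metadata request should be issued to refresh it.
func (ps peerStore) seqIsCurrent(peerID string, seq uint64) bool {
    md, ok := ps[peerID]
    if !ok || md == nil {
        return false
    }
    return md.SeqNumber == seq
}

func main() {
    ps := peerStore{"peerA": {SeqNumber: 2, Attnets: []byte{'A', 'B'}}}

    fmt.Println(ps.seqIsCurrent("peerA", 2)) // true: cached metadata is current
    fmt.Println(ps.seqIsCurrent("peerA", 3)) // false: peer changed subnets, refresh metadata
    fmt.Println(ps.seqIsCurrent("peerB", 1)) // false: unknown peer, request metadata first
}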
+ d := db.SetupDB(t) + defer db.TeardownDB(t, d) + + r := &Service{ + db: d, + p2p: p1, + } + + p1.Peers().Add(new(enr.Record), p2.Host.ID(), p2.Host.Addrs()[0], network.DirUnknown) + p1.Peers().SetMetadata(p2.Host.ID(), p2.LocalMetadata) + + // Setup streams + pcl := protocol.ID("/testing") + var wg sync.WaitGroup + wg.Add(1) + p2.Host.SetStreamHandler(pcl, func(stream network.Stream) { + defer wg.Done() + expectSuccess(t, r, stream) + out := new(uint64) + if err := r.p2p.Encoding().DecodeWithLength(stream, out); err != nil { + t.Fatal(err) + } + if *out != 2 { + t.Fatalf("Wanted 2 but got %d as our sequence number", *out) + } + }) + stream1, err := p1.Host.NewStream(context.Background(), p2.Host.ID(), pcl) + if err != nil { + t.Fatal(err) + } + seqNumber := uint64(1) + + err = r.pingHandler(context.Background(), &seqNumber, stream1) + if err != nil { + t.Errorf("Unxpected error: %v", err) + } + + if testutil.WaitTimeout(&wg, 1*time.Second) { + t.Fatal("Did not receive stream within 1 sec") + } + + conns := p1.Host.Network().ConnsToPeer(p2.Host.ID()) + if len(conns) == 0 { + t.Error("Peer is disconnected despite receiving a valid ping") + } +} + +func TestPingRPCHandler_SendsPing(t *testing.T) { + p1 := p2ptest.NewTestP2P(t) + p2 := p2ptest.NewTestP2P(t) + p1.Connect(p2) + if len(p1.Host.Network().Peers()) != 1 { + t.Error("Expected peers to be connected") + } + p1.LocalMetadata = &pb.MetaData{ + SeqNumber: 2, + Attnets: []byte{'A', 'B'}, + } + + p2.LocalMetadata = &pb.MetaData{ + SeqNumber: 2, + Attnets: []byte{'C', 'D'}, + } + + // Set up a head state in the database with data we expect. + d := db.SetupDB(t) + defer db.TeardownDB(t, d) + + r := &Service{ + db: d, + p2p: p1, + } + + p1.Peers().Add(new(enr.Record), p2.Host.ID(), p2.Host.Addrs()[0], network.DirUnknown) + p1.Peers().SetMetadata(p2.Host.ID(), p2.LocalMetadata) + + p2.Peers().Add(new(enr.Record), p1.Host.ID(), p1.Host.Addrs()[0], network.DirUnknown) + p2.Peers().SetMetadata(p1.Host.ID(), p1.LocalMetadata) + + r2 := &Service{ + db: d, + p2p: p2, + } + // Setup streams + pcl := protocol.ID("/eth2/beacon_chain/req/ping/1/ssz") + var wg sync.WaitGroup + wg.Add(1) + p2.Host.SetStreamHandler(pcl, func(stream network.Stream) { + defer wg.Done() + out := new(uint64) + if err := r2.p2p.Encoding().DecodeWithLength(stream, out); err != nil { + t.Fatal(err) + } + if *out != 2 { + t.Fatalf("Wanted 2 but got %d as our sequence number", *out) + } + err := r2.pingHandler(context.Background(), out, stream) + if err != nil { + t.Fatal(err) + } + }) + + err := r.sendPingRequest(context.Background(), p2.Host.ID()) + if err != nil { + t.Errorf("Unxpected error: %v", err) + } + + if testutil.WaitTimeout(&wg, 1*time.Second) { + t.Fatal("Did not receive stream within 1 sec") + } + + conns := p1.Host.Network().ConnsToPeer(p2.Host.ID()) + if len(conns) == 0 { + t.Error("Peer is disconnected despite receiving a valid ping") + } +} diff --git a/beacon-chain/sync/rpc_status.go b/beacon-chain/sync/rpc_status.go index 9f2c6d9331..e31141a080 100644 --- a/beacon-chain/sync/rpc_status.go +++ b/beacon-chain/sync/rpc_status.go @@ -10,6 +10,7 @@ import ( "github.com/libp2p/go-libp2p-core/peer" "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" + "github.com/prysmaticlabs/prysm/beacon-chain/p2p" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/roughtime" @@ -32,8 +33,8 @@ func (r *Service) maintainPeerStatuses() { return } if 
roughtime.Now().After(lastUpdated.Add(interval)) { - if err := r.sendRPCStatusRequest(r.ctx, id); err != nil { - log.WithField("peer", id).WithError(err).Error("Failed to request peer status") + if err := r.reValidatePeer(r.ctx, id); err != nil { + log.WithField("peer", id).WithError(err).Error("Failed to reValidate peer") } } }(pid) @@ -77,14 +78,18 @@ func (r *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error { return err } - resp := &pb.Status{ - HeadForkVersion: r.chain.CurrentFork().CurrentVersion, - FinalizedRoot: r.chain.FinalizedCheckpt().Root, - FinalizedEpoch: r.chain.FinalizedCheckpt().Epoch, - HeadRoot: headRoot, - HeadSlot: r.chain.HeadSlot(), + forkDigest, err := r.p2p.ForkDigest() + if err != nil { + return err } - stream, err := r.p2p.Send(ctx, resp, id) + resp := &pb.Status{ + ForkDigest: forkDigest[:], + FinalizedRoot: r.chain.FinalizedCheckpt().Root, + FinalizedEpoch: r.chain.FinalizedCheckpt().Epoch, + HeadRoot: headRoot, + HeadSlot: r.chain.HeadSlot(), + } + stream, err := r.p2p.Send(ctx, resp, p2p.RPCStatusTopic, id) if err != nil { return err } @@ -112,6 +117,16 @@ func (r *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error { return err } +func (r *Service) reValidatePeer(ctx context.Context, id peer.ID) error { + if err := r.sendRPCStatusRequest(ctx, id); err != nil { + return err + } + if err := r.sendPingRequest(ctx, id); err != nil { + return err + } + return nil +} + func (r *Service) removeDisconnectedPeerStatus(ctx context.Context, pid peer.ID) error { return nil } @@ -164,12 +179,16 @@ func (r *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream return err } + forkDigest, err := r.p2p.ForkDigest() + if err != nil { + return err + } resp := &pb.Status{ - HeadForkVersion: r.chain.CurrentFork().CurrentVersion, - FinalizedRoot: r.chain.FinalizedCheckpt().Root, - FinalizedEpoch: r.chain.FinalizedCheckpt().Epoch, - HeadRoot: headRoot, - HeadSlot: r.chain.HeadSlot(), + ForkDigest: forkDigest[:], + FinalizedRoot: r.chain.FinalizedCheckpt().Root, + FinalizedEpoch: r.chain.FinalizedCheckpt().Epoch, + HeadRoot: headRoot, + HeadSlot: r.chain.HeadSlot(), } if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil { @@ -181,8 +200,12 @@ func (r *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream } func (r *Service) validateStatusMessage(msg *pb.Status, stream network.Stream) error { - if !bytes.Equal(params.BeaconConfig().GenesisForkVersion, msg.HeadForkVersion) { - return errWrongForkVersion + forkDigest, err := r.p2p.ForkDigest() + if err != nil { + return err + } + if !bytes.Equal(forkDigest[:], msg.ForkDigest) { + return errWrongForkDigestVersion } genesis := r.chain.GenesisTime() maxEpoch := slotutil.EpochsSinceGenesis(genesis) diff --git a/beacon-chain/sync/rpc_status_test.go b/beacon-chain/sync/rpc_status_test.go index fb8588665d..737eaeb630 100644 --- a/beacon-chain/sync/rpc_status_test.go +++ b/beacon-chain/sync/rpc_status_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/p2p/enr" "github.com/gogo/protobuf/proto" "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/protocol" @@ -48,9 +49,9 @@ func TestHelloRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) { if code == 0 { t.Error("Expected a non-zero code") } - if errMsg != errWrongForkVersion.Error() { - t.Logf("Received error string len %d, wanted error string len %d", len(errMsg), len(errWrongForkVersion.Error())) - t.Errorf("Received unexpected message 
response in the stream: %s. Wanted %s.", errMsg, errWrongForkVersion.Error()) + if errMsg != errWrongForkDigestVersion.Error() { + t.Logf("Received error string len %d, wanted error string len %d", len(errMsg), len(errWrongForkDigestVersion.Error())) + t.Errorf("Received unexpected message response in the stream: %s. Wanted %s.", errMsg, errWrongForkDigestVersion.Error()) } }) @@ -59,9 +60,9 @@ func TestHelloRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) { t.Fatal(err) } - err = r.statusRPCHandler(context.Background(), &pb.Status{HeadForkVersion: []byte("fake")}, stream1) - if err != errWrongForkVersion { - t.Errorf("Expected error %v, got %v", errWrongForkVersion, err) + err = r.statusRPCHandler(context.Background(), &pb.Status{ForkDigest: []byte("fake")}, stream1) + if err != errWrongForkDigestVersion { + t.Errorf("Expected error %v, got %v", errWrongForkDigestVersion, err) } if testutil.WaitTimeout(&wg, 1*time.Second) { @@ -130,11 +131,11 @@ func TestHelloRPCHandler_ReturnsHelloMessage(t *testing.T) { t.Fatal(err) } expected := &pb.Status{ - HeadForkVersion: params.BeaconConfig().GenesisForkVersion, - HeadSlot: genesisState.Slot(), - HeadRoot: headRoot[:], - FinalizedEpoch: 5, - FinalizedRoot: finalizedRoot[:], + ForkDigest: params.BeaconConfig().GenesisForkVersion, + HeadSlot: genesisState.Slot(), + HeadRoot: headRoot[:], + FinalizedEpoch: 5, + FinalizedRoot: finalizedRoot[:], } if !proto.Equal(out, expected) { t.Errorf("Did not receive expected message. Got %+v wanted %+v", out, expected) @@ -145,7 +146,7 @@ func TestHelloRPCHandler_ReturnsHelloMessage(t *testing.T) { t.Fatal(err) } - err = r.statusRPCHandler(context.Background(), &pb.Status{HeadForkVersion: params.BeaconConfig().GenesisForkVersion}, stream1) + err = r.statusRPCHandler(context.Background(), &pb.Status{ForkDigest: params.BeaconConfig().GenesisForkVersion}, stream1) if err != nil { t.Errorf("Unxpected error: %v", err) } @@ -161,6 +162,16 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) { p1 := p2ptest.NewTestP2P(t) p2 := p2ptest.NewTestP2P(t) + p1.LocalMetadata = &pb.MetaData{ + SeqNumber: 2, + Attnets: []byte{'A', 'B'}, + } + + p2.LocalMetadata = &pb.MetaData{ + SeqNumber: 2, + Attnets: []byte{'C', 'D'}, + } + st, err := stateTrie.InitializeFromProto(&pb.BeaconState{ Slot: 5, }) @@ -179,6 +190,9 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) { }, ctx: context.Background(), } + r2 := &Service{ + p2p: p2, + } r.Start() @@ -194,7 +208,7 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) { } log.WithField("status", out).Warn("received status") - resp := &pb.Status{HeadSlot: 100, HeadForkVersion: params.BeaconConfig().GenesisForkVersion} + resp := &pb.Status{HeadSlot: 100, ForkDigest: params.BeaconConfig().GenesisForkVersion} if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil { t.Fatal(err) @@ -209,14 +223,44 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) { } }) + pcl = protocol.ID("/eth2/beacon_chain/req/ping/1/ssz") + var wg2 sync.WaitGroup + wg2.Add(1) + p2.Host.SetStreamHandler(pcl, func(stream network.Stream) { + defer wg2.Done() + out := new(uint64) + if err := r.p2p.Encoding().DecodeWithLength(stream, out); err != nil { + t.Fatal(err) + } + if *out != 2 { + t.Fatalf("Wanted 2 but got %d as our sequence number", *out) + } + err := r2.pingHandler(context.Background(), out, stream) + if err != nil { + t.Fatal(err) + } + if err := stream.Close(); err != nil { + t.Fatal(err) + } + }) + numInactive1 := len(p1.Peers().Inactive()) numActive1 := len(p1.Peers().Active()) 
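With the v0.11.x handshake the status message carries a 4-byte fork digest instead of the raw fork version, and validateStatusMessage now rejects peers whose digest differs, returning errWrongForkDigestVersion, which is what the tests above exercise. A small sketch of the comparison itself; deriving the digest from the fork version and genesis validators root is left to the p2p layer's ForkDigest helper, and the digest value below is made up:

package main

import (
    "bytes"
    "errors"
    "fmt"
)

var errWrongForkDigestVersion = errors.New("wrong fork digest version")

// statusMsg carries only the field relevant to this check.
type statusMsg struct {
    ForkDigest []byte
}

// validateForkDigest mirrors the shape of validateStatusMessage's first check:
// the peer's digest must match ours byte for byte.
func validateForkDigest(local [4]byte, msg *statusMsg) error {
    if !bytes.Equal(local[:], msg.ForkDigest) {
        return errWrongForkDigestVersion
    }
    return nil
}

func main() {
    local := [4]byte{0x01, 0x02, 0x03, 0x04} // whatever ForkDigest() computed for this network
    fmt.Println(validateForkDigest(local, &statusMsg{ForkDigest: []byte{0x01, 0x02, 0x03, 0x04}})) // <nil>
    fmt.Println(validateForkDigest(local, &statusMsg{ForkDigest: []byte("fake")}))                 // wrong fork digest version
}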
p1.Connect(p2) + p1.Peers().Add(new(enr.Record), p2.Host.ID(), p2.Host.Addrs()[0], network.DirUnknown) + p1.Peers().SetMetadata(p2.Host.ID(), p2.LocalMetadata) + + p2.Peers().Add(new(enr.Record), p1.Host.ID(), p1.Host.Addrs()[0], network.DirUnknown) + p2.Peers().SetMetadata(p1.Host.ID(), p1.LocalMetadata) + if testutil.WaitTimeout(&wg, 1*time.Second) { t.Fatal("Did not receive stream within 1 sec") } + if testutil.WaitTimeout(&wg2, 1*time.Second) { + t.Fatal("Did not receive stream within 1 sec") + } // Wait for stream buffer to be read. time.Sleep(200 * time.Millisecond) @@ -302,11 +346,11 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) { t.Fatal(err) } expected := &pb.Status{ - HeadForkVersion: params.BeaconConfig().GenesisForkVersion, - HeadSlot: genesisState.Slot(), - HeadRoot: headRoot[:], - FinalizedEpoch: 5, - FinalizedRoot: finalizedRoot[:], + ForkDigest: params.BeaconConfig().GenesisForkVersion, + HeadSlot: genesisState.Slot(), + HeadRoot: headRoot[:], + FinalizedEpoch: 5, + FinalizedRoot: finalizedRoot[:], } if !proto.Equal(out, expected) { t.Errorf("Did not receive expected message. Got %+v wanted %+v", out, expected) @@ -380,11 +424,11 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) { t.Fatal(err) } expected := &pb.Status{ - HeadForkVersion: []byte{1, 1, 1, 1}, - HeadSlot: genesisState.Slot(), - HeadRoot: headRoot[:], - FinalizedEpoch: 5, - FinalizedRoot: finalizedRoot[:], + ForkDigest: []byte{1, 1, 1, 1}, + HeadSlot: genesisState.Slot(), + HeadRoot: headRoot[:], + FinalizedEpoch: 5, + FinalizedRoot: finalizedRoot[:], } if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil { log.WithError(err).Error("Failed to write to stream") diff --git a/beacon-chain/sync/service.go b/beacon-chain/sync/service.go index 5162349ab1..af1de1211b 100644 --- a/beacon-chain/sync/service.go +++ b/beacon-chain/sync/service.go @@ -5,6 +5,7 @@ import ( "sync" "time" + lru "github.com/hashicorp/golang-lru" "github.com/kevinms/leakybucket-go" "github.com/pkg/errors" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" @@ -19,8 +20,8 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings" "github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits" "github.com/prysmaticlabs/prysm/beacon-chain/p2p" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/shared" - "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/runutil" ) @@ -28,9 +29,11 @@ var _ = shared.Service(&Service{}) const allowedBlocksPerSecond = 32.0 const allowedBlocksBurst = 10 * allowedBlocksPerSecond - -// refresh enr every quarter of an epoch -var refreshRate = (params.BeaconConfig().SecondsPerSlot * params.BeaconConfig().SlotsPerEpoch) / 4 +const seenBlockSize = 1000 +const seenAttSize = 10000 +const seenExitSize = 100 +const seenAttesterSlashingSize = 100 +const seenProposerSlashingSize = 100 // Config to set up the regular sync service. 
type Config struct { @@ -45,6 +48,7 @@ type Config struct { BlockNotifier blockfeed.Notifier AttestationNotifier operation.Notifier StateSummaryCache *cache.StateSummaryCache + StateGen *stategen.State } // This defines the interface for interacting with block chain service @@ -73,15 +77,16 @@ func NewRegularSync(cfg *Config) *Service { attestationNotifier: cfg.AttestationNotifier, slotToPendingBlocks: make(map[uint64]*ethpb.SignedBeaconBlock), seenPendingBlocks: make(map[[32]byte]bool), - blkRootToPendingAtts: make(map[[32]byte][]*ethpb.AggregateAttestationAndProof), + blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof), stateNotifier: cfg.StateNotifier, blockNotifier: cfg.BlockNotifier, stateSummaryCache: cfg.StateSummaryCache, + stateGen: cfg.StateGen, blocksRateLimiter: leakybucket.NewCollector(allowedBlocksPerSecond, allowedBlocksBurst, false /* deleteEmptyBuckets */), } r.registerRPCHandlers() - r.registerSubscribers() + go r.registerSubscribers() return r } @@ -89,38 +94,56 @@ func NewRegularSync(cfg *Config) *Service { // Service is responsible for handling all run time p2p related operations as the // main entry point for network messages. type Service struct { - ctx context.Context - cancel context.CancelFunc - p2p p2p.P2P - db db.NoHeadAccessDatabase - attPool attestations.Pool - exitPool *voluntaryexits.Pool - slashingPool *slashings.Pool - chain blockchainService - slotToPendingBlocks map[uint64]*ethpb.SignedBeaconBlock - seenPendingBlocks map[[32]byte]bool - blkRootToPendingAtts map[[32]byte][]*ethpb.AggregateAttestationAndProof - pendingAttsLock sync.RWMutex - pendingQueueLock sync.RWMutex - chainStarted bool - initialSync Checker - validateBlockLock sync.RWMutex - stateNotifier statefeed.Notifier - blockNotifier blockfeed.Notifier - blocksRateLimiter *leakybucket.Collector - attestationNotifier operation.Notifier - stateSummaryCache *cache.StateSummaryCache + ctx context.Context + cancel context.CancelFunc + p2p p2p.P2P + db db.NoHeadAccessDatabase + attPool attestations.Pool + exitPool *voluntaryexits.Pool + slashingPool *slashings.Pool + chain blockchainService + slotToPendingBlocks map[uint64]*ethpb.SignedBeaconBlock + seenPendingBlocks map[[32]byte]bool + blkRootToPendingAtts map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof + pendingAttsLock sync.RWMutex + pendingQueueLock sync.RWMutex + chainStarted bool + initialSync Checker + validateBlockLock sync.RWMutex + stateNotifier statefeed.Notifier + blockNotifier blockfeed.Notifier + blocksRateLimiter *leakybucket.Collector + attestationNotifier operation.Notifier + seenBlockLock sync.RWMutex + seenBlockCache *lru.Cache + seenAttestationLock sync.RWMutex + seenAttestationCache *lru.Cache + seenExitLock sync.RWMutex + seenExitCache *lru.Cache + seenProposerSlashingLock sync.RWMutex + seenProposerSlashingCache *lru.Cache + seenAttesterSlashingLock sync.RWMutex + seenAttesterSlashingCache *lru.Cache + stateSummaryCache *cache.StateSummaryCache + stateGen *stategen.State } // Start the regular sync service. func (r *Service) Start() { - r.p2p.AddConnectionHandler(r.sendRPCStatusRequest) + if err := r.initCaches(); err != nil { + panic(err) + } + + r.p2p.AddConnectionHandler(r.reValidatePeer) r.p2p.AddDisconnectionHandler(r.removeDisconnectedPeerStatus) + r.p2p.AddPingMethod(r.sendPingRequest) r.processPendingBlocksQueue() r.processPendingAttsQueue() r.maintainPeerStatuses() r.resyncIfBehind() - r.refreshENR() + + // Update sync metrics. 
+ runutil.RunEvery(r.ctx, time.Second*10, r.updateMetrics) } // Stop the regular sync service. @@ -137,14 +160,46 @@ func (r *Service) Status() error { } // If our head slot is on a previous epoch and our peers are reporting their head block are // in the most recent epoch, then we might be out of sync. - if headEpoch := helpers.SlotToEpoch(r.chain.HeadSlot()); headEpoch < helpers.SlotToEpoch(r.chain.CurrentSlot())-1 && - headEpoch < r.p2p.Peers().CurrentEpoch()-1 { + if headEpoch := helpers.SlotToEpoch(r.chain.HeadSlot()); headEpoch+1 < helpers.SlotToEpoch(r.chain.CurrentSlot()) && + headEpoch+1 < r.p2p.Peers().CurrentEpoch() { return errors.New("out of sync") } } return nil } +// This initializes the caches to update seen beacon objects coming in from the wire +// and prevent DoS. +func (r *Service) initCaches() error { + blkCache, err := lru.New(seenBlockSize) + if err != nil { + return err + } + attCache, err := lru.New(seenAttSize) + if err != nil { + return err + } + exitCache, err := lru.New(seenExitSize) + if err != nil { + return err + } + attesterSlashingCache, err := lru.New(seenAttesterSlashingSize) + if err != nil { + return err + } + proposerSlashingCache, err := lru.New(seenProposerSlashingSize) + if err != nil { + return err + } + r.seenBlockCache = blkCache + r.seenAttestationCache = attCache + r.seenExitCache = exitCache + r.seenAttesterSlashingCache = attesterSlashingCache + r.seenProposerSlashingCache = proposerSlashingCache + + return nil +} + // Checker defines a struct which can verify whether a node is currently // synchronizing a chain with the rest of peers in the network. type Checker interface { @@ -152,13 +207,3 @@ type Checker interface { Status() error Resync() error } - -// This runs every epoch to refresh the current node's ENR. 
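initCaches wires up one bounded LRU per gossip object type (blocks, attestations, exits, attester and proposer slashings) so that objects already seen can be dropped before any expensive verification, which is the DoS concern the comment mentions. A sketch of that pattern with github.com/hashicorp/golang-lru; the single cache and the string key format here are illustrative only, the real keys are composed by the set*Seen helpers referenced elsewhere in this diff:

package main

import (
    "fmt"

    lru "github.com/hashicorp/golang-lru"
)

const seenBlockSize = 1000

type seenTracker struct {
    seenBlocks *lru.Cache
}

func newSeenTracker() (*seenTracker, error) {
    c, err := lru.New(seenBlockSize) // evicts the least recently used entry once full
    if err != nil {
        return nil, err
    }
    return &seenTracker{seenBlocks: c}, nil
}

// blockKey is illustrative only: slot plus proposer index.
func blockKey(slot, proposerIdx uint64) string {
    return fmt.Sprintf("%d-%d", slot, proposerIdx)
}

func (s *seenTracker) markBlockSeen(slot, proposerIdx uint64) {
    s.seenBlocks.Add(blockKey(slot, proposerIdx), true)
}

func (s *seenTracker) hasSeenBlock(slot, proposerIdx uint64) bool {
    return s.seenBlocks.Contains(blockKey(slot, proposerIdx))
}

func main() {
    t, err := newSeenTracker()
    if err != nil {
        panic(err)
    }
    fmt.Println(t.hasSeenBlock(7, 42)) // false
    t.markBlockSeen(7, 42)
    fmt.Println(t.hasSeenBlock(7, 42)) // true: duplicate gossip can be ignored
}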
-func (r *Service) refreshENR() { - ctx := context.Background() - refreshTime := time.Duration(refreshRate) * time.Second - runutil.RunEvery(ctx, refreshTime, func() { - currentEpoch := helpers.SlotToEpoch(helpers.SlotsSince(r.chain.GenesisTime())) - r.p2p.RefreshENR(currentEpoch) - }) -} diff --git a/beacon-chain/sync/service_test.go b/beacon-chain/sync/service_test.go new file mode 100644 index 0000000000..d9cf9c48b0 --- /dev/null +++ b/beacon-chain/sync/service_test.go @@ -0,0 +1,33 @@ +package sync + +import ( + "testing" + "time" + + mockChain "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" + p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing" + stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" + mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" +) + +func TestService_StatusZeroEpoch(t *testing.T) { + bState, err := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 0}) + if err != nil { + t.Fatal(err) + } + r := &Service{ + p2p: p2ptest.NewTestP2P(t), + initialSync: new(mockSync.Sync), + chain: &mockChain.ChainService{ + Genesis: time.Now(), + State: bState, + }, + } + r.chainStarted = true + + err = r.Status() + if err != nil { + t.Errorf("Wanted non failing status but got: %v", err) + } +} diff --git a/beacon-chain/sync/subscriber.go b/beacon-chain/sync/subscriber.go index 3f93e5d240..d4be1dde03 100644 --- a/beacon-chain/sync/subscriber.go +++ b/beacon-chain/sync/subscriber.go @@ -5,12 +5,14 @@ import ( "fmt" "reflect" "runtime/debug" + "strings" "time" "github.com/gogo/protobuf/proto" "github.com/libp2p/go-libp2p-core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" pb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" + "go.opencensus.io/trace" "github.com/prysmaticlabs/prysm/beacon-chain/core/feed" statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/beacon-chain/p2p" @@ -18,11 +20,12 @@ import ( "github.com/prysmaticlabs/prysm/shared/messagehandler" "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/roughtime" + "github.com/prysmaticlabs/prysm/shared/slotutil" "github.com/prysmaticlabs/prysm/shared/traceutil" - "go.opencensus.io/trace" ) const pubsubMessageTimeout = 30 * time.Second +const maximumGossipClockDisparity = 500 * time.Millisecond // subHandler represents handler for a given subscription. type subHandler func(context.Context, proto.Message) error @@ -40,72 +43,69 @@ func (r *Service) noopValidator(ctx context.Context, _ peer.ID, msg *pubsub.Mess // Register PubSub subscribers func (r *Service) registerSubscribers() { - go func() { - // Wait until chain start. - stateChannel := make(chan *feed.Event, 1) - stateSub := r.stateNotifier.StateFeed().Subscribe(stateChannel) - defer stateSub.Unsubscribe() - for r.chainStarted == false { - select { - case event := <-stateChannel: - if event.Type == statefeed.Initialized { - data, ok := event.Data.(*statefeed.InitializedData) - if !ok { - log.Error("Event feed data is not type *statefeed.InitializedData") - return - } - log.WithField("starttime", data.StartTime).Debug("Received state initialized event") - if data.StartTime.After(roughtime.Now()) { - stateSub.Unsubscribe() - time.Sleep(roughtime.Until(data.StartTime)) - } - r.chainStarted = true + // Wait until chain start. 
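The reworked Status() check a few hunks above (headEpoch+1 < currentEpoch rather than headEpoch < currentEpoch-1) matters because epochs are uint64 values: right after genesis the old subtraction wraps around and the comparison can come out spuriously true, which is the regression TestService_StatusZeroEpoch pins down. A tiny illustration of the difference:

package main

import "fmt"

func main() {
    var headEpoch, currentEpoch uint64 // both 0 right after genesis

    // Old form: currentEpoch-1 underflows to math.MaxUint64, so the
    // comparison is spuriously true and the node would report "out of sync".
    fmt.Println(headEpoch < currentEpoch-1) // true

    // New form: no subtraction on the unsigned value, so epoch 0 is handled correctly.
    fmt.Println(headEpoch+1 < currentEpoch) // false
}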
+ stateChannel := make(chan *feed.Event, 1) + stateSub := r.stateNotifier.StateFeed().Subscribe(stateChannel) + defer stateSub.Unsubscribe() + for r.chainStarted == false { + select { + case event := <-stateChannel: + if event.Type == statefeed.Initialized { + data, ok := event.Data.(*statefeed.InitializedData) + if !ok { + log.Error("Event feed data is not type *statefeed.InitializedData") + return } - case <-r.ctx.Done(): - log.Debug("Context closed, exiting goroutine") - return - case err := <-stateSub.Err(): - log.WithError(err).Error("Subscription to state notifier failed") - return + log.WithField("starttime", data.StartTime).Debug("Received state initialized event") + if data.StartTime.After(roughtime.Now()) { + stateSub.Unsubscribe() + time.Sleep(roughtime.Until(data.StartTime)) + } + r.chainStarted = true } + case <-r.ctx.Done(): + log.Debug("Context closed, exiting goroutine") + return + case err := <-stateSub.Err(): + log.WithError(err).Error("Subscription to state notifier failed") + return } - }() + } r.subscribe( - "/eth2/beacon_block", + "/eth2/%x/beacon_block", r.validateBeaconBlockPubSub, r.beaconBlockSubscriber, ) r.subscribe( - "/eth2/beacon_aggregate_and_proof", + "/eth2/%x/beacon_aggregate_and_proof", r.validateAggregateAndProof, r.beaconAggregateProofSubscriber, ) r.subscribe( - "/eth2/voluntary_exit", + "/eth2/%x/voluntary_exit", r.validateVoluntaryExit, r.voluntaryExitSubscriber, ) r.subscribe( - "/eth2/proposer_slashing", + "/eth2/%x/proposer_slashing", r.validateProposerSlashing, r.proposerSlashingSubscriber, ) r.subscribe( - "/eth2/attester_slashing", + "/eth2/%x/attester_slashing", r.validateAttesterSlashing, r.attesterSlashingSubscriber, ) - if featureconfig.Get().EnableDynamicCommitteeSubnets { - r.subscribeDynamicWithSubnets( - "/eth2/committee_index%d_beacon_attestation", - r.committeeIndices, /* determineSubsLen */ + if featureconfig.Get().DisableDynamicCommitteeSubnets { + r.subscribeDynamic( + "/eth2/%x/committee_index%d_beacon_attestation", + r.committeesCount, /* determineSubsLen */ r.validateCommitteeIndexBeaconAttestation, /* validator */ r.committeeIndexBeaconAttestationSubscriber, /* message handler */ ) } else { - r.subscribeDynamic( - "/eth2/committee_index%d_beacon_attestation", - r.committeesCount, /* determineSubsLen */ + r.subscribeDynamicWithSubnets( + "/eth2/%x/committee_index%d_beacon_attestation", r.validateCommitteeIndexBeaconAttestation, /* validator */ r.committeeIndexBeaconAttestationSubscriber, /* message handler */ ) @@ -119,7 +119,7 @@ func (r *Service) subscribe(topic string, validator pubsub.Validator, handle sub if base == nil { panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic)) } - return r.subscribeWithBase(base, topic, validator, handle) + return r.subscribeWithBase(base, r.addDigestToTopic(topic), validator, handle) } func (r *Service) subscribeWithBase(base proto.Message, topic string, validator pubsub.Validator, handle subHandler) *pubsub.Subscription { @@ -211,75 +211,46 @@ func wrapAndReportValidation(topic string, v pubsub.Validator) (string, pubsub.V // maintained. 
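Every gossip topic template now carries a %x slot for the fork digest ("/eth2/%x/beacon_block", "/eth2/%x/committee_index%d_beacon_attestation", and so on), and the subscribe path fills it in via the addDigestToTopic helper added further down, so nodes on different forks naturally end up on disjoint topics. A short sketch of that formatting; the digest value is invented for the example:

package main

import "fmt"

func main() {
    // Illustrative digest; the real one comes from r.p2p.ForkDigest().
    digest := [4]byte{0xb5, 0x30, 0x3f, 0x2a}

    blockTopic := fmt.Sprintf("/eth2/%x/beacon_block", digest)
    fmt.Println(blockTopic) // /eth2/b5303f2a/beacon_block

    // Attestation subnet topics also take the committee index.
    for _, idx := range []uint64{0, 1, 63} {
        fmt.Println(fmt.Sprintf("/eth2/%x/committee_index%d_beacon_attestation", digest, idx))
    }
}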
func (r *Service) subscribeDynamicWithSubnets( topicFormat string, - determineSubIndices func() []uint64, validate pubsub.Validator, handle subHandler, ) { base := p2p.GossipTopicMappings[topicFormat] if base == nil { - panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topicFormat)) + log.Fatalf("%s is not mapped to any message in GossipTopicMappings", topicFormat) + } + digest, err := r.p2p.ForkDigest() + if err != nil { + log.WithError(err).Fatal("Could not compute fork digest") } - subscriptions := make(map[uint64]*pubsub.Subscription, params.BeaconConfig().MaxCommitteesPerSlot) + genesis := r.chain.GenesisTime() + ticker := slotutil.GetSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot) - stateChannel := make(chan *feed.Event, 1) - stateSub := r.stateNotifier.StateFeed().Subscribe(stateChannel) go func() { for { select { case <-r.ctx.Done(): - stateSub.Unsubscribe() + ticker.Done() return - case <-stateChannel: + case currentSlot := <-ticker.C(): if r.chainStarted && r.initialSync.Syncing() { continue } - // Update desired topic indices. - wantedSubs := determineSubIndices() + // Update desired topic indices for aggregator + wantedSubs := r.aggregatorCommitteeIndices(currentSlot) // Resize as appropriate. - for k, v := range subscriptions { - var wanted bool - for _, idx := range wantedSubs { - if k == idx { - wanted = true - break - } - } - if !wanted && v != nil { - v.Cancel() - if err := r.p2p.PubSub().UnregisterTopicValidator(fmt.Sprintf(topicFormat, k)); err != nil { - log.WithError(err).Error("Failed to unregister topic validator") - } - delete(subscriptions, k) - } - } + r.reValidateSubscriptions(subscriptions, wantedSubs, topicFormat) + for _, idx := range wantedSubs { if _, exists := subscriptions[idx]; !exists { - // do not subscribe if we have no peers in the same - // subnet - topic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.Attestation{})] - subnetTopic := fmt.Sprintf(topic, idx) - numOfPeers := r.p2p.PubSub().ListPeers(subnetTopic) - if len(r.p2p.Peers().SubscribedToSubnet(idx)) == 0 && len(numOfPeers) == 0 { - log.Debugf("No peers found subscribed to attestation gossip subnet with "+ - "committee index %d. Searching network for peers subscribed to the subnet.", idx) - go func(idx uint64) { - peerExists, err := r.p2p.FindPeersWithSubnet(idx) - if err != nil { - log.Errorf("Could not search for peers: %v", err) - return - } - // do not subscribe if we couldn't find a connected peer. 
- if !peerExists { - return - } - subscriptions[idx] = r.subscribeWithBase(base, subnetTopic, validate, handle) - }(idx) - continue - } - subscriptions[idx] = r.subscribeWithBase(base, subnetTopic, validate, handle) + r.subscribeMissingSubnet(subscriptions, idx, base, digest, validate, handle) } } + // find desired subs for attesters + attesterSubs := r.attesterCommitteeIndices(currentSlot) + for _, idx := range attesterSubs { + r.lookupAttesterSubnets(digest, idx) + } } } }() @@ -293,9 +264,12 @@ func (r *Service) subscribeDynamicWithSubnets( func (r *Service) subscribeDynamic(topicFormat string, determineSubsLen func() int, validate pubsub.Validator, handle subHandler) { base := p2p.GossipTopicMappings[topicFormat] if base == nil { - panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topicFormat)) + log.Fatalf("%s is not mapped to any message in GossipTopicMappings", topicFormat) + } + digest, err := r.p2p.ForkDigest() + if err != nil { + log.WithError(err).Fatal("Could not compute fork digest") } - var subscriptions []*pubsub.Subscription stateChannel := make(chan *feed.Event, 1) @@ -324,7 +298,7 @@ func (r *Service) subscribeDynamic(topicFormat string, determineSubsLen func() i } } else if len(subscriptions) < wantedSubs { // Increase topics for i := len(subscriptions); i < wantedSubs; i++ { - sub := r.subscribeWithBase(base, fmt.Sprintf(topicFormat, i), validate, handle) + sub := r.subscribeWithBase(base, fmt.Sprintf(topicFormat, digest, i), validate, handle) subscriptions = append(subscriptions, sub) } } @@ -332,3 +306,87 @@ func (r *Service) subscribeDynamic(topicFormat string, determineSubsLen func() i } }() } + +// revalidate that our currently connected subnets are valid. +func (r *Service) reValidateSubscriptions(subscriptions map[uint64]*pubsub.Subscription, + wantedSubs []uint64, topicFormat string) { + for k, v := range subscriptions { + var wanted bool + for _, idx := range wantedSubs { + if k == idx { + wanted = true + break + } + } + if !wanted && v != nil { + v.Cancel() + if err := r.p2p.PubSub().UnregisterTopicValidator(fmt.Sprintf(topicFormat, k)); err != nil { + log.WithError(err).Error("Failed to unregister topic validator") + } + delete(subscriptions, k) + } + } +} + +// subscribe missing subnets for our aggregators. +func (r *Service) subscribeMissingSubnet(subscriptions map[uint64]*pubsub.Subscription, idx uint64, + base proto.Message, digest [4]byte, validate pubsub.Validator, handle subHandler) { + // do not subscribe if we have no peers in the same + // subnet + topic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.Attestation{})] + subnetTopic := fmt.Sprintf(topic, digest, idx) + if !r.validPeersExist(subnetTopic, idx) { + log.Debugf("No peers found subscribed to attestation gossip subnet with "+ + "committee index %d. Searching network for peers subscribed to the subnet.", idx) + go func(idx uint64) { + peerExists, err := r.p2p.FindPeersWithSubnet(idx) + if err != nil { + log.Errorf("Could not search for peers: %v", err) + return + } + // do not subscribe if we couldn't find a connected peer. + if !peerExists { + return + } + subscriptions[idx] = r.subscribeWithBase(base, subnetTopic, validate, handle) + }(idx) + return + } + subscriptions[idx] = r.subscribeWithBase(base, subnetTopic, validate, handle) +} + +// lookup peers for attester specific subnets. 
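reValidateSubscriptions runs on every slot tick: subscriptions that are no longer wanted are cancelled and their topic validators unregistered, and whatever is wanted but missing is handed to subscribeMissingSubnet. A simplified sketch of that reconciliation over a plain map, with a single cancel closure standing in for Subscription.Cancel plus UnregisterTopicValidator:

package main

import "fmt"

// reconcileSubnets cancels subscriptions that are no longer wanted and reports
// which wanted subnets still need a new subscription.
func reconcileSubnets(current map[uint64]func(), wanted []uint64) []uint64 {
    wantedSet := make(map[uint64]bool, len(wanted))
    for _, idx := range wanted {
        wantedSet[idx] = true
    }
    // Cancel and forget subscriptions we no longer want.
    for idx, cancel := range current {
        if !wantedSet[idx] {
            cancel()
            delete(current, idx)
        }
    }
    // Anything wanted but not present still needs a subscription.
    var missing []uint64
    for _, idx := range wanted {
        if _, ok := current[idx]; !ok {
            missing = append(missing, idx)
        }
    }
    return missing
}

func main() {
    current := map[uint64]func(){
        3: func() { fmt.Println("cancelled subscription to subnet 3") },
        5: func() { fmt.Println("cancelled subscription to subnet 5") },
    }
    missing := reconcileSubnets(current, []uint64{5, 9})
    fmt.Println("still subscribed:", len(current))      // 1 (subnet 5)
    fmt.Println("need new subscriptions for:", missing) // [9]
}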
+func (r *Service) lookupAttesterSubnets(digest [4]byte, idx uint64) { + topic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.Attestation{})] + subnetTopic := fmt.Sprintf(topic, digest, idx) + if !r.validPeersExist(subnetTopic, idx) { + log.Debugf("No peers found subscribed to attestation gossip subnet with "+ + "committee index %d. Searching network for peers subscribed to the subnet.", idx) + go func(idx uint64) { + // perform a search for peers with the desired committee index. + _, err := r.p2p.FindPeersWithSubnet(idx) + if err != nil { + log.Errorf("Could not search for peers: %v", err) + return + } + }(idx) + } +} + +// find if we have peers who are subscribed to the same subnet +func (r *Service) validPeersExist(subnetTopic string, idx uint64) bool { + numOfPeers := r.p2p.PubSub().ListPeers(subnetTopic + r.p2p.Encoding().ProtocolSuffix()) + return len(r.p2p.Peers().SubscribedToSubnet(idx)) > 0 || len(numOfPeers) > 0 +} + +// Add fork digest to topic. +func (r *Service) addDigestToTopic(topic string) string { + if !strings.Contains(topic, "%x") { + log.Fatal("Topic does not have appropriate formatter for digest") + } + digest, err := r.p2p.ForkDigest() + if err != nil { + log.WithError(err).Fatal("Could not compute fork digest") + } + return fmt.Sprintf(topic, digest) +} diff --git a/beacon-chain/sync/subscriber_beacon_aggregate_proof.go b/beacon-chain/sync/subscriber_beacon_aggregate_proof.go index 7ef85c2226..6bea5d9725 100644 --- a/beacon-chain/sync/subscriber_beacon_aggregate_proof.go +++ b/beacon-chain/sync/subscriber_beacon_aggregate_proof.go @@ -2,6 +2,7 @@ package sync import ( "context" + "errors" "fmt" "github.com/gogo/protobuf/proto" @@ -11,10 +12,15 @@ import ( // beaconAggregateProofSubscriber forwards the incoming validated aggregated attestation and proof to the // attestation pool for processing. 
func (r *Service) beaconAggregateProofSubscriber(ctx context.Context, msg proto.Message) error { - a, ok := msg.(*ethpb.AggregateAttestationAndProof) + a, ok := msg.(*ethpb.SignedAggregateAttestationAndProof) if !ok { - return fmt.Errorf("message was not type *eth.AggregateAttestationAndProof, type=%T", msg) + return fmt.Errorf("message was not type *eth.SignedAggregateAttestationAndProof, type=%T", msg) } - return r.attPool.SaveAggregatedAttestation(a.Aggregate) + if a.Message.Aggregate == nil || a.Message.Aggregate.Data == nil { + return errors.New("nil aggregate") + } + r.setAggregatorIndexSlotSeen(a.Message.Aggregate.Data.Slot, a.Message.AggregatorIndex) + + return r.attPool.SaveAggregatedAttestation(a.Message.Aggregate) } diff --git a/beacon-chain/sync/subscriber_beacon_aggregate_proof_test.go b/beacon-chain/sync/subscriber_beacon_aggregate_proof_test.go index 9fc6898a4a..436cb41730 100644 --- a/beacon-chain/sync/subscriber_beacon_aggregate_proof_test.go +++ b/beacon-chain/sync/subscriber_beacon_aggregate_proof_test.go @@ -5,22 +5,28 @@ import ( "reflect" "testing" + lru "github.com/hashicorp/golang-lru" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-bitfield" "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations" ) func TestBeaconAggregateProofSubscriber_CanSave(t *testing.T) { + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } r := &Service{ - attPool: attestations.NewPool(), + attPool: attestations.NewPool(), + seenAttestationCache: c, } - a := &ethpb.AggregateAttestationAndProof{Aggregate: &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0x07}, Data: &ethpb.AttestationData{Slot: 4}}, AggregatorIndex: 100} + a := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{Aggregate: &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0x07}}, AggregatorIndex: 100}} if err := r.beaconAggregateProofSubscriber(context.Background(), a); err != nil { t.Fatal(err) } - if !reflect.DeepEqual(r.attPool.AggregatedAttestations(), []*ethpb.Attestation{a.Aggregate}) { + if !reflect.DeepEqual(r.attPool.AggregatedAttestations(), []*ethpb.Attestation{a.Message.Aggregate}) { t.Error("Did not save aggregated attestation") } } diff --git a/beacon-chain/sync/subscriber_beacon_blocks.go b/beacon-chain/sync/subscriber_beacon_blocks.go index 4e7d97a7e5..d0d3d72ae8 100644 --- a/beacon-chain/sync/subscriber_beacon_blocks.go +++ b/beacon-chain/sync/subscriber_beacon_blocks.go @@ -6,12 +6,10 @@ import ( "github.com/gogo/protobuf/proto" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-ssz" "github.com/prysmaticlabs/prysm/beacon-chain/core/feed" blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/state/interop" - "github.com/prysmaticlabs/prysm/shared/bytesutil" ) func (r *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message) error { @@ -24,39 +22,10 @@ func (r *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message) return errors.New("nil block") } + r.setSeenBlockIndexSlot(signed.Block.Slot, signed.Block.ProposerIndex) + block := signed.Block - headState, err := r.chain.HeadState(ctx) - if err != nil { - log.Errorf("Head state is not available: %v", err) - return nil - } - // Ignore block older than last finalized checkpoint.
- if block.Slot < helpers.StartSlot(headState.FinalizedCheckpointEpoch()) { - log.Debugf("Received a block older than finalized checkpoint, %d < %d", - block.Slot, helpers.StartSlot(headState.FinalizedCheckpointEpoch())) - return nil - } - - blockRoot, err := ssz.HashTreeRoot(block) - if err != nil { - log.Errorf("Could not sign root block: %v", err) - return nil - } - - if r.db.HasBlock(ctx, blockRoot) { - return nil - } - - // Handle block when the parent is unknown. - if !r.db.HasBlock(ctx, bytesutil.ToBytes32(block.ParentRoot)) { - r.pendingQueueLock.Lock() - r.slotToPendingBlocks[block.Slot] = signed - r.seenPendingBlocks[blockRoot] = true - r.pendingQueueLock.Unlock() - return nil - } - // Broadcast the block on a feed to notify other services in the beacon node // of a received block (even if it does not process correctly through a state transition). r.blockNotifier.BlockFeed().Send(&feed.Event{ @@ -66,7 +35,7 @@ func (r *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message) }, }) - err = r.chain.ReceiveBlockNoPubsub(ctx, signed) + err := r.chain.ReceiveBlockNoPubsub(ctx, signed) if err != nil { interop.WriteBlockToDisk(signed, true /*failed*/) } diff --git a/beacon-chain/sync/subscriber_beacon_blocks_test.go b/beacon-chain/sync/subscriber_beacon_blocks_test.go index ecce1bed38..540b263fa2 100644 --- a/beacon-chain/sync/subscriber_beacon_blocks_test.go +++ b/beacon-chain/sync/subscriber_beacon_blocks_test.go @@ -1,72 +1,19 @@ package sync import ( - "context" - "fmt" "reflect" "testing" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-bitfield" - "github.com/prysmaticlabs/go-ssz" - mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" - dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations" - stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" - pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" - "github.com/prysmaticlabs/prysm/shared/params" - "github.com/prysmaticlabs/prysm/shared/testutil" "github.com/sirupsen/logrus" - logTest "github.com/sirupsen/logrus/hooks/test" ) func init() { logrus.SetLevel(logrus.DebugLevel) } -func TestRegularSyncBeaconBlockSubscriber_FilterByFinalizedEpoch(t *testing.T) { - hook := logTest.NewGlobal() - db := dbtest.SetupDB(t) - defer dbtest.TeardownDB(t, db) - - s, err := stateTrie.InitializeFromProto(&pb.BeaconState{ - FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 1}, - }) - if err != nil { - t.Fatal(err) - } - parent := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{}} - if err := db.SaveBlock(context.Background(), parent); err != nil { - t.Fatal(err) - } - parentRoot, err := ssz.HashTreeRoot(parent.Block) - if err != nil { - t.Fatal(err) - } - chain := &mock.ChainService{State: s} - r := &Service{ - db: db, - chain: chain, - blockNotifier: chain.BlockNotifier(), - attPool: attestations.NewPool(), - } - - b := ðpb.SignedBeaconBlock{ - Block: ðpb.BeaconBlock{Slot: 1, ParentRoot: parentRoot[:], Body: ðpb.BeaconBlockBody{}}, - } - if err := r.beaconBlockSubscriber(context.Background(), b); err != nil { - t.Fatal(err) - } - testutil.AssertLogsContain(t, hook, fmt.Sprintf("Received a block older than finalized checkpoint, 1 < %d", params.BeaconConfig().SlotsPerEpoch)) - - hook.Reset() - b.Block.Slot = params.BeaconConfig().SlotsPerEpoch - if err := r.beaconBlockSubscriber(context.Background(), b); err != nil { - t.Fatal(err) - } - testutil.AssertLogsDoNotContain(t, hook, "Received a block older 
than finalized checkpoint") -} - func TestDeleteAttsInPool(t *testing.T) { r := &Service{ attPool: attestations.NewPool(), diff --git a/beacon-chain/sync/subscriber_committee_index_beacon_attestation.go b/beacon-chain/sync/subscriber_committee_index_beacon_attestation.go index b44367f52f..fe1a4580ee 100644 --- a/beacon-chain/sync/subscriber_committee_index_beacon_attestation.go +++ b/beacon-chain/sync/subscriber_committee_index_beacon_attestation.go @@ -11,6 +11,7 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/core/feed" "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" + "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/sliceutil" ) @@ -20,6 +21,11 @@ func (r *Service) committeeIndexBeaconAttestationSubscriber(ctx context.Context, return fmt.Errorf("message was not type *eth.Attestation, type=%T", msg) } + if a.Data == nil { + return errors.New("nil attestation") + } + r.setSeenCommitteeIndicesSlot(a.Data.Slot, a.Data.CommitteeIndex, a.AggregationBits) + exists, err := r.attPool.HasAggregatedAttestation(a) if err != nil { return errors.Wrap(err, "failed to determine if attestation pool has this atttestation") @@ -48,8 +54,22 @@ func (r *Service) committeesCount() int { return int(helpers.SlotCommitteeCount(uint64(len(activeValidatorIndices)))) } -func (r *Service) committeeIndices() []uint64 { - currentEpoch := helpers.SlotToEpoch(r.chain.HeadSlot()) - return sliceutil.UnionUint64(cache.CommitteeIDs.GetIDs(currentEpoch), - cache.CommitteeIDs.GetIDs(currentEpoch+1)) +func (r *Service) aggregatorCommitteeIndices(currentSlot uint64) []uint64 { + endEpoch := helpers.SlotToEpoch(currentSlot) + 1 + endSlot := endEpoch * params.BeaconConfig().SlotsPerEpoch + commIds := []uint64{} + for i := currentSlot; i <= endSlot; i++ { + commIds = append(commIds, cache.CommitteeIDs.GetAggregatorCommitteeIDs(i)...) + } + return sliceutil.SetUint64(commIds) +} + +func (r *Service) attesterCommitteeIndices(currentSlot uint64) []uint64 { + endEpoch := helpers.SlotToEpoch(currentSlot) + 1 + endSlot := endEpoch * params.BeaconConfig().SlotsPerEpoch + commIds := []uint64{} + for i := currentSlot; i <= endSlot; i++ { + commIds = append(commIds, cache.CommitteeIDs.GetAttesterCommitteeIDs(i)...) 
+ } + return sliceutil.SetUint64(commIds) } diff --git a/beacon-chain/sync/subscriber_committee_index_beacon_attestation_test.go b/beacon-chain/sync/subscriber_committee_index_beacon_attestation_test.go index c82f04b191..3d80d79908 100644 --- a/beacon-chain/sync/subscriber_committee_index_beacon_attestation_test.go +++ b/beacon-chain/sync/subscriber_committee_index_beacon_attestation_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + lru "github.com/hashicorp/golang-lru" eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-bitfield" "github.com/prysmaticlabs/go-ssz" @@ -15,14 +16,16 @@ import ( dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations" p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing" - beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state" mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing" - pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" + "github.com/prysmaticlabs/prysm/shared/featureconfig" "github.com/prysmaticlabs/prysm/shared/testutil" ) func TestService_committeeIndexBeaconAttestationSubscriber_ValidMessage(t *testing.T) { p := p2ptest.NewTestP2P(t) + fc := featureconfig.Get() + fc.DisableDynamicCommitteeSubnets = true + featureconfig.Init(fc) ctx := context.Background() db := dbtest.SetupDB(t) @@ -42,14 +45,16 @@ func TestService_committeeIndexBeaconAttestationSubscriber_ValidMessage(t *testi if err := db.SaveBlock(ctx, blk); err != nil { t.Fatal(err) } - savedState, err := beaconstate.InitializeFromProto(&pb.BeaconState{}) - if err != nil { - t.Fatal(err) - } + + savedState := testutil.NewBeaconState() if err := db.SaveState(context.Background(), savedState, root); err != nil { t.Fatal(err) } + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } r := &Service{ attPool: attestations.NewPool(), chain: &mock.ChainService{ @@ -57,14 +62,15 @@ func TestService_committeeIndexBeaconAttestationSubscriber_ValidMessage(t *testi Genesis: time.Now(), ValidAttestation: true, }, - chainStarted: true, - p2p: p, - db: db, - ctx: ctx, - stateNotifier: (&mock.ChainService{}).StateNotifier(), - attestationNotifier: (&mock.ChainService{}).OperationNotifier(), - initialSync: &mockSync.Sync{IsSyncing: false}, - stateSummaryCache: cache.NewStateSummaryCache(), + chainStarted: true, + p2p: p, + db: db, + ctx: ctx, + stateNotifier: (&mock.ChainService{}).StateNotifier(), + attestationNotifier: (&mock.ChainService{}).OperationNotifier(), + initialSync: &mockSync.Sync{IsSyncing: false}, + seenAttestationCache: c, + stateSummaryCache: cache.NewStateSummaryCache(), } r.registerSubscribers() r.stateNotifier.StateFeed().Send(&feed.Event{ @@ -80,10 +86,10 @@ func TestService_committeeIndexBeaconAttestationSubscriber_ValidMessage(t *testi BeaconBlockRoot: root[:], }, AggregationBits: bitfield.Bitlist{0b0101}, - Signature: sKeys[0].Sign([]byte("foo"), 0).Marshal(), + Signature: sKeys[0].Sign([]byte("foo")).Marshal(), } - p.ReceivePubSub("/eth2/committee_index0_beacon_attestation", att) + p.ReceivePubSub("/eth2/%x/committee_index0_beacon_attestation", att) time.Sleep(time.Second) diff --git a/beacon-chain/sync/subscriber_handlers.go b/beacon-chain/sync/subscriber_handlers.go index 8c92705522..6bf5c596d5 100644 --- a/beacon-chain/sync/subscriber_handlers.go +++ b/beacon-chain/sync/subscriber_handlers.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" ethpb 
"github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-ssz" "github.com/prysmaticlabs/prysm/shared/bytesutil" @@ -15,6 +16,12 @@ func (r *Service) voluntaryExitSubscriber(ctx context.Context, msg proto.Message if !ok { return fmt.Errorf("wrong type, expected: *ethpb.SignedVoluntaryExit got: %T", msg) } + + if ve.Exit == nil { + return errors.New("exit can't be nil") + } + r.setExitIndexSeen(ve.Exit.ValidatorIndex) + s, err := r.chain.HeadState(ctx) if err != nil { return err @@ -37,7 +44,10 @@ func (r *Service) attesterSlashingSubscriber(ctx context.Context, msg proto.Mess if s == nil { return fmt.Errorf("no state found for block root %#x", as.Attestation_1.Data.BeaconBlockRoot) } - return r.slashingPool.InsertAttesterSlashing(ctx, s, as) + if err := r.slashingPool.InsertAttesterSlashing(ctx, s, as); err != nil { + return errors.Wrap(err, "could not insert attester slashing into pool") + } + r.setAttesterSlashingIndicesSeen(as.Attestation_1.AttestingIndices, as.Attestation_2.AttestingIndices) } return nil } @@ -57,7 +67,10 @@ func (r *Service) proposerSlashingSubscriber(ctx context.Context, msg proto.Mess if s == nil { return fmt.Errorf("no state found for block root %#x", root) } - return r.slashingPool.InsertProposerSlashing(ctx, s, ps) + if err := r.slashingPool.InsertProposerSlashing(ctx, s, ps); err != nil { + return errors.Wrap(err, "could not insert proposer slashing into pool") + } + r.setProposerSlashingIndexSeen(ps.Header_1.Header.ProposerIndex) } return nil } diff --git a/beacon-chain/sync/subscriber_test.go b/beacon-chain/sync/subscriber_test.go index 52c80def73..b03927a27c 100644 --- a/beacon-chain/sync/subscriber_test.go +++ b/beacon-chain/sync/subscriber_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/gogo/protobuf/proto" + lru "github.com/hashicorp/golang-lru" pb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-ssz" mockChain "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" @@ -31,7 +32,7 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) { p2p: p2p, initialSync: &mockSync.Sync{IsSyncing: false}, } - topic := "/eth2/voluntary_exit" + topic := "/eth2/%x/voluntary_exit" var wg sync.WaitGroup wg.Add(1) @@ -61,18 +62,23 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) { d := db.SetupDB(t) defer db.TeardownDB(t, d) chainService := &mockChain.ChainService{} - r := Service{ - ctx: ctx, - p2p: p2p, - initialSync: &mockSync.Sync{IsSyncing: false}, - slashingPool: slashings.NewPool(), - chain: chainService, - db: d, + c, err := lru.New(10) + if err != nil { + t.Fatal(err) } - topic := "/eth2/attester_slashing" + r := Service{ + ctx: ctx, + p2p: p2p, + initialSync: &mockSync.Sync{IsSyncing: false}, + slashingPool: slashings.NewPool(), + chain: chainService, + db: d, + seenAttesterSlashingCache: c, + } + topic := "/eth2/%x/attester_slashing" var wg sync.WaitGroup wg.Add(1) - params.OverrideBeaconConfig(params.MinimalSpecConfig()) + params.OverrideBeaconConfig(params.MainnetConfig()) r.subscribe(topic, r.noopValidator, func(ctx context.Context, msg proto.Message) error { if err := r.attesterSlashingSubscriber(ctx, msg); err != nil { t.Fatal(err) @@ -80,7 +86,7 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) { wg.Done() return nil }) - beaconState, privKeys := testutil.DeterministicGenesisState(t, params.BeaconConfig().MinGenesisActiveValidatorCount) + beaconState, privKeys := testutil.DeterministicGenesisState(t, 64) chainService.State = beaconState r.chainStarted = true 
attesterSlashing, err := testutil.GenerateAttesterSlashingForValidator( @@ -112,18 +118,23 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) { chainService := &mockChain.ChainService{} d := db.SetupDB(t) defer db.TeardownDB(t, d) - r := Service{ - ctx: ctx, - p2p: p2p, - initialSync: &mockSync.Sync{IsSyncing: false}, - slashingPool: slashings.NewPool(), - chain: chainService, - db: d, + c, err := lru.New(10) + if err != nil { + t.Fatal(err) } - topic := "/eth2/proposer_slashing" + r := Service{ + ctx: ctx, + p2p: p2p, + initialSync: &mockSync.Sync{IsSyncing: false}, + slashingPool: slashings.NewPool(), + chain: chainService, + db: d, + seenProposerSlashingCache: c, + } + topic := "/eth2/%x/proposer_slashing" var wg sync.WaitGroup wg.Add(1) - params.OverrideBeaconConfig(params.MinimalSpecConfig()) + params.OverrideBeaconConfig(params.MainnetConfig()) r.subscribe(topic, r.noopValidator, func(ctx context.Context, msg proto.Message) error { if err := r.proposerSlashingSubscriber(ctx, msg); err != nil { t.Fatal(err) @@ -131,7 +142,7 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) { wg.Done() return nil }) - beaconState, privKeys := testutil.DeterministicGenesisState(t, params.BeaconConfig().MinGenesisActiveValidatorCount) + beaconState, privKeys := testutil.DeterministicGenesisState(t, 64) chainService.State = beaconState r.chainStarted = true proposerSlashing, err := testutil.GenerateProposerSlashingForValidator( @@ -168,8 +179,9 @@ func TestSubscribe_WaitToSync(t *testing.T) { initialSync: &mockSync.Sync{IsSyncing: false}, } - topic := "/eth2/beacon_block" - r.registerSubscribers() + topic := "/eth2/%x/beacon_block" + go r.registerSubscribers() + time.Sleep(100 * time.Millisecond) i := r.stateNotifier.StateFeed().Send(&feed.Event{ Type: statefeed.Initialized, Data: &statefeed.InitializedData{ @@ -190,7 +202,7 @@ func TestSubscribe_WaitToSync(t *testing.T) { Block: &pb.BeaconBlock{ ParentRoot: testutil.Random32Bytes(t), }, - Signature: sk.Sign([]byte("data"), 0).Marshal(), + Signature: sk.Sign([]byte("data")).Marshal(), } p2p.ReceivePubSub(topic, msg) // wait for chainstart to be sent diff --git a/beacon-chain/sync/validate_aggregate_proof.go b/beacon-chain/sync/validate_aggregate_proof.go index 0325574120..501efd0168 100644 --- a/beacon-chain/sync/validate_aggregate_proof.go +++ b/beacon-chain/sync/validate_aggregate_proof.go @@ -8,7 +8,6 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/pkg/errors" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-ssz" "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/state" @@ -45,13 +44,21 @@ func (r *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms traceutil.AnnotateError(span, err) return false } - m, ok := raw.(*ethpb.AggregateAttestationAndProof) + m, ok := raw.(*ethpb.SignedAggregateAttestationAndProof) if !ok { return false } + if m.Message == nil || m.Message.Aggregate == nil || m.Message.Aggregate.Data == nil { + return false + } + // Verify this is the first aggregate received from the aggregator with index and slot. + if r.hasSeenAggregatorIndexSlot(m.Message.Aggregate.Data.Slot, m.Message.AggregatorIndex) { + return false + } + // Verify aggregate attestation has not already been seen via aggregate gossip, within a block, or through the creation locally. 
- seen, err := r.attPool.HasAggregatedAttestation(m.Aggregate) + seen, err := r.attPool.HasAggregatedAttestation(m.Message.Aggregate) if err != nil { traceutil.AnnotateError(span, err) return false @@ -67,27 +74,25 @@ func (r *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms return false } - if !featureconfig.Get().DisableStrictAttestationPubsubVerification && !r.chain.IsValidAttestation(ctx, m.Aggregate) { + if !featureconfig.Get().DisableStrictAttestationPubsubVerification && !r.chain.IsValidAttestation(ctx, m.Message.Aggregate) { return false } + r.setAggregatorIndexSlotSeen(m.Message.Aggregate.Data.Slot, m.Message.AggregatorIndex) + msg.ValidatorData = m return true } -func (r *Service) validateAggregatedAtt(ctx context.Context, a *ethpb.AggregateAttestationAndProof) bool { +func (r *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.SignedAggregateAttestationAndProof) bool { ctx, span := trace.StartSpan(ctx, "sync.validateAggregatedAtt") defer span.End() - attSlot := a.Aggregate.Data.Slot - - // Verify attestation slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots. - currentSlot := uint64(roughtime.Now().Unix()-r.chain.GenesisTime().Unix()) / params.BeaconConfig().SecondsPerSlot - if attSlot > currentSlot || currentSlot > attSlot+params.BeaconConfig().AttestationPropagationSlotRange { - traceutil.AnnotateError(span, fmt.Errorf("attestation slot out of range %d <= %d <= %d", attSlot, currentSlot, attSlot+params.BeaconConfig().AttestationPropagationSlotRange)) + attSlot := signed.Message.Aggregate.Data.Slot + if err := validateAggregateAttTime(attSlot, uint64(r.chain.GenesisTime().Unix())); err != nil { + traceutil.AnnotateError(span, err) return false - } s, err := r.chain.HeadState(ctx) @@ -106,19 +111,25 @@ func (r *Service) validateAggregatedAtt(ctx context.Context, a *ethpb.AggregateA } // Verify validator index is within the aggregate's committee. - if err := validateIndexInCommittee(ctx, s, a.Aggregate, a.AggregatorIndex); err != nil { + if err := validateIndexInCommittee(ctx, s, signed.Message.Aggregate, signed.Message.AggregatorIndex); err != nil { traceutil.AnnotateError(span, errors.Wrapf(err, "Could not validate index in committee")) return false } // Verify selection proof reflects to the right validator and signature is valid. - if err := validateSelection(ctx, s, a.Aggregate.Data, a.AggregatorIndex, a.SelectionProof); err != nil { - traceutil.AnnotateError(span, errors.Wrapf(err, "Could not validate selection for validator %d", a.AggregatorIndex)) + if err := validateSelection(ctx, s, signed.Message.Aggregate.Data, signed.Message.AggregatorIndex, signed.Message.SelectionProof); err != nil { + traceutil.AnnotateError(span, errors.Wrapf(err, "Could not validate selection for validator %d", signed.Message.AggregatorIndex)) + return false + } + + // Verify the aggregator's signature is valid. + if err := validateAggregatorSignature(s, signed); err != nil { + traceutil.AnnotateError(span, errors.Wrapf(err, "Could not verify aggregator signature %d", signed.Message.AggregatorIndex)) return false } // Verify aggregated attestation has a valid signature. 
- if err := blocks.VerifyAttestation(ctx, s, a.Aggregate); err != nil { + if err := blocks.VerifyAttestation(ctx, s, signed.Message.Aggregate); err != nil { traceutil.AnnotateError(span, err) return false } @@ -126,20 +137,38 @@ func (r *Service) validateAggregatedAtt(ctx context.Context, a *ethpb.AggregateA return true } -func (r *Service) validateBlockInAttestation(ctx context.Context, a *ethpb.AggregateAttestationAndProof) bool { +func (r *Service) validateBlockInAttestation(ctx context.Context, s *ethpb.SignedAggregateAttestationAndProof) bool { + a := s.Message // Verify the block being voted and the processed state is in DB. The block should have passed validation if it's in the DB. blockRoot := bytesutil.ToBytes32(a.Aggregate.Data.BeaconBlockRoot) - hasStateSummary := featureconfig.Get().NewStateMgmt && r.db.HasStateSummary(ctx, blockRoot) || r.stateSummaryCache.Has(blockRoot) + hasStateSummary := !featureconfig.Get().DisableNewStateMgmt && r.db.HasStateSummary(ctx, blockRoot) || r.stateSummaryCache.Has(blockRoot) hasState := r.db.HasState(ctx, blockRoot) || hasStateSummary hasBlock := r.db.HasBlock(ctx, blockRoot) if !(hasState && hasBlock) { // A node doesn't have the block, it'll request from peer while saving the pending attestation to a queue. - r.savePendingAtt(a) + r.savePendingAtt(s) return false } return true } +// Returns true if the node has received aggregate for the aggregator with index and slot. +func (r *Service) hasSeenAggregatorIndexSlot(slot uint64, aggregatorIndex uint64) bool { + r.seenAttestationLock.RLock() + defer r.seenAttestationLock.RUnlock() + b := append(bytesutil.Bytes32(slot), bytesutil.Bytes32(aggregatorIndex)...) + _, seen := r.seenAttestationCache.Get(string(b)) + return seen +} + +// Set aggregate's aggregator index slot as seen. +func (r *Service) setAggregatorIndexSlotSeen(slot uint64, aggregatorIndex uint64) { + r.seenAttestationLock.Lock() + defer r.seenAttestationLock.Unlock() + b := append(bytesutil.Bytes32(slot), bytesutil.Bytes32(aggregatorIndex)...) + r.seenAttestationCache.Add(string(b), true) +} + // This validates the aggregator's index in state is within the attesting indices of the attestation. func validateIndexInCommittee(ctx context.Context, s *stateTrie.BeaconState, a *ethpb.Attestation, validatorIndex uint64) error { ctx, span := trace.StartSpan(ctx, "sync.validateIndexInCommittee") @@ -164,6 +193,24 @@ func validateIndexInCommittee(ctx context.Context, s *stateTrie.BeaconState, a * return nil } +// Validates that the incoming aggregate attestation is in the desired time range. +func validateAggregateAttTime(attSlot uint64, genesisTime uint64) error { + // in milliseconds + attTime := 1000 * (genesisTime + (attSlot * params.BeaconConfig().SecondsPerSlot)) + attSlotRange := attSlot + params.BeaconConfig().AttestationPropagationSlotRange + attTimeRange := 1000 * (genesisTime + (attSlotRange * params.BeaconConfig().SecondsPerSlot)) + currentTimeInSec := roughtime.Now().Unix() + currentTime := 1000 * currentTimeInSec + + // Verify attestation slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots. 
+ currentSlot := (uint64(currentTimeInSec) - genesisTime) / params.BeaconConfig().SecondsPerSlot + if attTime-uint64(maximumGossipClockDisparity.Milliseconds()) > uint64(currentTime) || + uint64(currentTime-maximumGossipClockDisparity.Milliseconds()) > attTimeRange { + return fmt.Errorf("attestation slot out of range %d <= %d <= %d", attSlot, currentSlot, attSlot+params.BeaconConfig().AttestationPropagationSlotRange) + } + return nil +} + // This validates selection proof by validating it's from the correct validator index of the slot and selection // proof is a valid signature. func validateSelection(ctx context.Context, s *stateTrie.BeaconState, data *ethpb.AttestationData, validatorIndex uint64, proof []byte) error { @@ -182,11 +229,11 @@ func validateSelection(ctx context.Context, s *stateTrie.BeaconState, data *ethp return fmt.Errorf("validator is not an aggregator for slot %d", data.Slot) } - domain, err := helpers.Domain(s.Fork(), helpers.SlotToEpoch(data.Slot), params.BeaconConfig().DomainBeaconAttester) + domain, err := helpers.Domain(s.Fork(), helpers.SlotToEpoch(data.Slot), params.BeaconConfig().DomainBeaconAttester, s.GenesisValidatorRoot()) if err != nil { return err } - slotMsg, err := ssz.HashTreeRoot(data.Slot) + slotMsg, err := helpers.ComputeSigningRoot(data.Slot, domain) if err != nil { return err } @@ -199,9 +246,26 @@ func validateSelection(ctx context.Context, s *stateTrie.BeaconState, data *ethp if err != nil { return err } - if !slotSig.Verify(slotMsg[:], pubKey, domain) { + if !slotSig.Verify(slotMsg[:], pubKey) { return errors.New("could not validate slot signature") } return nil } + +// This verifies aggregator signature over the signed aggregate and proof object. +func validateAggregatorSignature(s *stateTrie.BeaconState, a *ethpb.SignedAggregateAttestationAndProof) error { + aggregator, err := s.ValidatorAtIndex(a.Message.AggregatorIndex) + if err != nil { + return err + } + + currentEpoch := helpers.SlotToEpoch(a.Message.Aggregate.Data.Slot) + domain, err := helpers.Domain(s.Fork(), currentEpoch, params.BeaconConfig().DomainAggregateAndProof, s.GenesisValidatorRoot()) + if err != nil { + return err + } + + return helpers.VerifySigningRoot(a.Message, aggregator.PublicKey, a.Signature, domain) + +} diff --git a/beacon-chain/sync/validate_aggregate_proof_test.go b/beacon-chain/sync/validate_aggregate_proof_test.go index c65aabaf38..d38cffd4b5 100644 --- a/beacon-chain/sync/validate_aggregate_proof_test.go +++ b/beacon-chain/sync/validate_aggregate_proof_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + lru "github.com/hashicorp/golang-lru" pubsub "github.com/libp2p/go-libp2p-pubsub" pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" @@ -20,9 +21,7 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations" "github.com/prysmaticlabs/prysm/beacon-chain/p2p" p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing" - beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state" mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing" - pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/attestationutil" "github.com/prysmaticlabs/prysm/shared/bls" "github.com/prysmaticlabs/prysm/shared/bytesutil" @@ -60,7 +59,7 @@ func TestVerifyIndexInCommittee_CanVerify(t *testing.T) { } wanted := "validator index 1000 is not within the committee" - if err := validateIndexInCommittee(ctx, s, att, 1000); 
!strings.Contains(err.Error(), wanted) { + if err := validateIndexInCommittee(ctx, s, att, 1000); err == nil || !strings.Contains(err.Error(), wanted) { t.Error("Did not receive wanted error") } } @@ -72,11 +71,11 @@ func TestVerifySelection_NotAnAggregator(t *testing.T) { validators := uint64(2048) beaconState, privKeys := testutil.DeterministicGenesisState(t, validators) - sig := privKeys[0].Sign([]byte{}, 0) + sig := privKeys[0].Sign([]byte{'A'}) data := ðpb.AttestationData{} wanted := "validator is not an aggregator for slot" - if err := validateSelection(ctx, beaconState, data, 0, sig.Marshal()); !strings.Contains(err.Error(), wanted) { + if err := validateSelection(ctx, beaconState, data, 0, sig.Marshal()); err == nil || !strings.Contains(err.Error(), wanted) { t.Error("Did not receive wanted error") } } @@ -86,11 +85,11 @@ func TestVerifySelection_BadSignature(t *testing.T) { validators := uint64(256) beaconState, privKeys := testutil.DeterministicGenesisState(t, validators) - sig := privKeys[0].Sign([]byte{}, 0) + sig := privKeys[0].Sign([]byte{'A'}) data := ðpb.AttestationData{} wanted := "could not validate slot signature" - if err := validateSelection(ctx, beaconState, data, 0, sig.Marshal()); !strings.Contains(err.Error(), wanted) { + if err := validateSelection(ctx, beaconState, data, 0, sig.Marshal()); err == nil || !strings.Contains(err.Error(), wanted) { t.Error("Did not receive wanted error") } } @@ -101,15 +100,15 @@ func TestVerifySelection_CanVerify(t *testing.T) { beaconState, privKeys := testutil.DeterministicGenesisState(t, validators) data := ðpb.AttestationData{} - slotRoot, err := ssz.HashTreeRoot(data.Slot) + domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } - domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester) + slotRoot, err := helpers.ComputeSigningRoot(data.Slot, domain) if err != nil { t.Fatal(err) } - sig := privKeys[0].Sign(slotRoot[:], domain) + sig := privKeys[0].Sign(slotRoot[:]) if err := validateSelection(ctx, beaconState, data, 0, sig.Marshal()); err != nil { t.Fatal(err) @@ -133,18 +132,24 @@ func TestValidateAggregateAndProof_NoBlock(t *testing.T) { Aggregate: att, AggregatorIndex: 0, } + signedAggregateAndProof := ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof} + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } r := &Service{ p2p: p, db: db, initialSync: &mockSync.Sync{IsSyncing: false}, attPool: attestations.NewPool(), - blkRootToPendingAtts: make(map[[32]byte][]*ethpb.AggregateAttestationAndProof), + blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof), + seenAttestationCache: c, stateSummaryCache: cache.NewStateSummaryCache(), } buf := new(bytes.Buffer) - if _, err := p.Encoding().Encode(buf, aggregateAndProof); err != nil { + if _, err := p.Encoding().Encode(buf, signedAggregateAndProof); err != nil { t.Fatal(err) } @@ -152,7 +157,7 @@ func TestValidateAggregateAndProof_NoBlock(t *testing.T) { Message: &pubsubpb.Message{ Data: buf.Bytes(), TopicIDs: []string{ - p2p.GossipTypeMapping[reflect.TypeOf(aggregateAndProof)], + p2p.GossipTypeMapping[reflect.TypeOf(signedAggregateAndProof)], }, }, } @@ -178,10 +183,7 @@ func TestValidateAggregateAndProof_NotWithinSlotRange(t *testing.T) { if err != nil { t.Fatal(err) } - s, err := beaconstate.InitializeFromProto(&pb.BeaconState{}) - if err != nil { - t.Fatal(err) - } + s := 
testutil.NewBeaconState() if err := db.SaveState(context.Background(), s, root); err != nil { t.Fatal(err) } @@ -201,22 +203,29 @@ func TestValidateAggregateAndProof_NotWithinSlotRange(t *testing.T) { aggregateAndProof := ðpb.AggregateAttestationAndProof{ Aggregate: att, } + signedAggregateAndProof := ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof} if err := beaconState.SetGenesisTime(uint64(time.Now().Unix())); err != nil { t.Fatal(err) } + + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } r := &Service{ p2p: p, db: db, initialSync: &mockSync.Sync{IsSyncing: false}, chain: &mock.ChainService{Genesis: time.Now(), State: beaconState}, - attPool: attestations.NewPool(), - stateSummaryCache: cache.NewStateSummaryCache(), + attPool: attestations.NewPool(), + seenAttestationCache: c, + stateSummaryCache: cache.NewStateSummaryCache(), } buf := new(bytes.Buffer) - if _, err := p.Encoding().Encode(buf, aggregateAndProof); err != nil { + if _, err := p.Encoding().Encode(buf, signedAggregateAndProof); err != nil { t.Fatal(err) } @@ -224,7 +233,7 @@ func TestValidateAggregateAndProof_NotWithinSlotRange(t *testing.T) { Message: &pubsubpb.Message{ Data: buf.Bytes(), TopicIDs: []string{ - p2p.GossipTypeMapping[reflect.TypeOf(aggregateAndProof)], + p2p.GossipTypeMapping[reflect.TypeOf(signedAggregateAndProof)], }, }, } @@ -233,10 +242,10 @@ func TestValidateAggregateAndProof_NotWithinSlotRange(t *testing.T) { t.Error("Expected validate to fail") } - att.Data.Slot = 1<<64 - 1 + att.Data.Slot = 1<<32 - 1 buf = new(bytes.Buffer) - if _, err := p.Encoding().Encode(buf, aggregateAndProof); err != nil { + if _, err := p.Encoding().Encode(buf, signedAggregateAndProof); err != nil { t.Fatal(err) } @@ -244,7 +253,7 @@ func TestValidateAggregateAndProof_NotWithinSlotRange(t *testing.T) { Message: &pubsubpb.Message{ Data: buf.Bytes(), TopicIDs: []string{ - p2p.GossipTypeMapping[reflect.TypeOf(aggregateAndProof)], + p2p.GossipTypeMapping[reflect.TypeOf(signedAggregateAndProof)], }, }, } @@ -285,23 +294,28 @@ func TestValidateAggregateAndProof_ExistedInPool(t *testing.T) { aggregateAndProof := ðpb.AggregateAttestationAndProof{ Aggregate: att, } + signedAggregateAndProof := ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof} if err := beaconState.SetGenesisTime(uint64(time.Now().Unix())); err != nil { t.Fatal(err) } + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } r := &Service{ attPool: attestations.NewPool(), p2p: p, db: db, initialSync: &mockSync.Sync{IsSyncing: false}, chain: &mock.ChainService{Genesis: time.Now(), - State: beaconState, - }, - blkRootToPendingAtts: make(map[[32]byte][]*ethpb.AggregateAttestationAndProof), + State: beaconState}, + seenAttestationCache: c, + blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof), } buf := new(bytes.Buffer) - if _, err := p.Encoding().Encode(buf, aggregateAndProof); err != nil { + if _, err := p.Encoding().Encode(buf, signedAggregateAndProof); err != nil { t.Fatal(err) } @@ -309,7 +323,7 @@ func TestValidateAggregateAndProof_ExistedInPool(t *testing.T) { Message: &pubsubpb.Message{ Data: buf.Bytes(), TopicIDs: []string{ - p2p.GossipTypeMapping[reflect.TypeOf(aggregateAndProof)], + p2p.GossipTypeMapping[reflect.TypeOf(signedAggregateAndProof)], }, }, } @@ -338,10 +352,7 @@ func TestValidateAggregateAndProof_CanValidate(t *testing.T) { if err != nil { t.Fatal(err) } - s, err := beaconstate.InitializeFromProto(&pb.BeaconState{}) - if err != nil { - t.Fatal(err) - } + s := 
testutil.NewBeaconState() if err := db.SaveState(context.Background(), s, root); err != nil { t.Fatal(err) } @@ -365,36 +376,52 @@ func TestValidateAggregateAndProof_CanValidate(t *testing.T) { if err != nil { t.Error(err) } - hashTreeRoot, err := ssz.HashTreeRoot(att.Data) - if err != nil { - t.Error(err) - } - domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester) + domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } + hashTreeRoot, err := helpers.ComputeSigningRoot(att.Data, domain) + if err != nil { + t.Error(err) + } sigs := make([]*bls.Signature, len(attestingIndices)) for i, indice := range attestingIndices { - sig := privKeys[indice].Sign(hashTreeRoot[:], domain) + sig := privKeys[indice].Sign(hashTreeRoot[:]) sigs[i] = sig } att.Signature = bls.AggregateSignatures(sigs).Marshal()[:] - slotRoot, err := ssz.HashTreeRoot(att.Data.Slot) + slotRoot, err := helpers.ComputeSigningRoot(att.Data.Slot, domain) if err != nil { t.Fatal(err) } - sig := privKeys[154].Sign(slotRoot[:], domain) + sig := privKeys[33].Sign(slotRoot[:]) aggregateAndProof := ðpb.AggregateAttestationAndProof{ SelectionProof: sig.Marshal(), Aggregate: att, - AggregatorIndex: 154, + AggregatorIndex: 33, } + signedAggregateAndProof := ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof} + + domain, err = helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainAggregateAndProof, beaconState.GenesisValidatorRoot()) + if err != nil { + t.Fatal(err) + } + signingRoot, err := helpers.ComputeSigningRoot(signedAggregateAndProof.Message, domain) + if err != nil { + t.Error(err) + } + aggreSig := privKeys[33].Sign(signingRoot[:]).Marshal() + signedAggregateAndProof.Signature = aggreSig[:] if err := beaconState.SetGenesisTime(uint64(time.Now().Unix())); err != nil { t.Fatal(err) } + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } r := &Service{ p2p: p, db: db, @@ -405,12 +432,13 @@ func TestValidateAggregateAndProof_CanValidate(t *testing.T) { FinalizedCheckPoint: ðpb.Checkpoint{ Epoch: 0, }}, - attPool: attestations.NewPool(), - stateSummaryCache: cache.NewStateSummaryCache(), + attPool: attestations.NewPool(), + seenAttestationCache: c, + stateSummaryCache: cache.NewStateSummaryCache(), } buf := new(bytes.Buffer) - if _, err := p.Encoding().Encode(buf, aggregateAndProof); err != nil { + if _, err := p.Encoding().Encode(buf, signedAggregateAndProof); err != nil { t.Fatal(err) } @@ -418,7 +446,7 @@ func TestValidateAggregateAndProof_CanValidate(t *testing.T) { Message: &pubsubpb.Message{ Data: buf.Bytes(), TopicIDs: []string{ - p2p.GossipTypeMapping[reflect.TypeOf(aggregateAndProof)], + p2p.GossipTypeMapping[reflect.TypeOf(signedAggregateAndProof)], }, }, } @@ -431,3 +459,125 @@ func TestValidateAggregateAndProof_CanValidate(t *testing.T) { t.Error("Did not set validator data") } } + +func TestVerifyIndexInCommittee_SeenAggregatorSlot(t *testing.T) { + db := dbtest.SetupDB(t) + defer dbtest.TeardownDB(t, db) + p := p2ptest.NewTestP2P(t) + + validators := uint64(256) + beaconState, privKeys := testutil.DeterministicGenesisState(t, validators) + + b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{}} + if err := db.SaveBlock(context.Background(), b); err != nil { + t.Fatal(err) + } + root, err := ssz.HashTreeRoot(b.Block) + if err != nil { + t.Fatal(err) + } + s := testutil.NewBeaconState() + if err := db.SaveState(context.Background(), s, root); 
err != nil { + t.Fatal(err) + } + + aggBits := bitfield.NewBitlist(3) + aggBits.SetBitAt(0, true) + att := ðpb.Attestation{ + Data: ðpb.AttestationData{ + BeaconBlockRoot: root[:], + Source: ðpb.Checkpoint{Epoch: 0, Root: []byte("hello-world")}, + Target: ðpb.Checkpoint{Epoch: 0, Root: []byte("hello-world")}, + }, + AggregationBits: aggBits, + } + + committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex) + if err != nil { + t.Error(err) + } + attestingIndices := attestationutil.AttestingIndices(att.AggregationBits, committee) + domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot()) + if err != nil { + t.Fatal(err) + } + hashTreeRoot, err := helpers.ComputeSigningRoot(att.Data, domain) + if err != nil { + t.Error(err) + } + sigs := make([]*bls.Signature, len(attestingIndices)) + for i, indice := range attestingIndices { + sig := privKeys[indice].Sign(hashTreeRoot[:]) + sigs[i] = sig + } + att.Signature = bls.AggregateSignatures(sigs).Marshal()[:] + + slotRoot, err := helpers.ComputeSigningRoot(att.Data.Slot, domain) + if err != nil { + t.Fatal(err) + } + + sig := privKeys[33].Sign(slotRoot[:]) + aggregateAndProof := ðpb.AggregateAttestationAndProof{ + SelectionProof: sig.Marshal(), + Aggregate: att, + AggregatorIndex: 33, + } + signedAggregateAndProof := ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof} + + domain, err = helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainAggregateAndProof, beaconState.GenesisValidatorRoot()) + if err != nil { + t.Fatal(err) + } + signingRoot, err := helpers.ComputeSigningRoot(signedAggregateAndProof.Message, domain) + if err != nil { + t.Error(err) + } + aggreSig := privKeys[33].Sign(signingRoot[:]).Marshal() + signedAggregateAndProof.Signature = aggreSig[:] + + if err := beaconState.SetGenesisTime(uint64(time.Now().Unix())); err != nil { + t.Fatal(err) + } + + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } + r := &Service{ + p2p: p, + db: db, + initialSync: &mockSync.Sync{IsSyncing: false}, + chain: &mock.ChainService{Genesis: time.Now(), + State: beaconState, + ValidAttestation: true, + FinalizedCheckPoint: ðpb.Checkpoint{ + Epoch: 0, + }}, + attPool: attestations.NewPool(), + seenAttestationCache: c, + stateSummaryCache: cache.NewStateSummaryCache(), + } + + buf := new(bytes.Buffer) + if _, err := p.Encoding().Encode(buf, signedAggregateAndProof); err != nil { + t.Fatal(err) + } + + msg := &pubsub.Message{ + Message: &pubsubpb.Message{ + Data: buf.Bytes(), + TopicIDs: []string{ + p2p.GossipTypeMapping[reflect.TypeOf(signedAggregateAndProof)], + }, + }, + } + + if !r.validateAggregateAndProof(context.Background(), "", msg) { + t.Fatal("Validated status is false") + } + time.Sleep(10 * time.Millisecond) // Wait for cached value to pass through buffers. 
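The test above exercises the new per-(slot, aggregator index) dedup in validate_aggregate_proof.go: the first validateAggregateAndProof call succeeds and marks the pair as seen, so the second call (shown just below) must fail. A minimal sketch of that cache pattern follows, assuming hashicorp/golang-lru and using encoding/binary for the fixed-width key in place of Prysm's bytesutil helpers.

package main

import (
	"encoding/binary"
	"fmt"
	"sync"

	lru "github.com/hashicorp/golang-lru"
)

// seenTracker mimics the seenAttestationCache usage in the diff: a small LRU
// keyed by the concatenation of fixed-width encodings of (slot, aggregator
// index), guarded by an RWMutex.
type seenTracker struct {
	lock  sync.RWMutex
	cache *lru.Cache
}

// key concatenates two 8-byte little-endian encodings into one cache key.
func key(slot, idx uint64) string {
	b := make([]byte, 16)
	binary.LittleEndian.PutUint64(b[:8], slot)
	binary.LittleEndian.PutUint64(b[8:], idx)
	return string(b)
}

func (t *seenTracker) hasSeen(slot, idx uint64) bool {
	t.lock.RLock()
	defer t.lock.RUnlock()
	_, seen := t.cache.Get(key(slot, idx))
	return seen
}

func (t *seenTracker) markSeen(slot, idx uint64) {
	t.lock.Lock()
	defer t.lock.Unlock()
	t.cache.Add(key(slot, idx), true)
}

func main() {
	c, err := lru.New(256)
	if err != nil {
		panic(err)
	}
	t := &seenTracker{cache: c}
	fmt.Println(t.hasSeen(5, 33)) // false: first aggregate from this aggregator at this slot
	t.markSeen(5, 33)
	fmt.Println(t.hasSeen(5, 33)) // true: duplicates are rejected
}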
+ if r.validateAggregateAndProof(context.Background(), "", msg) { + t.Fatal("Validated status is true") + } +} diff --git a/beacon-chain/sync/validate_attester_slashing.go b/beacon-chain/sync/validate_attester_slashing.go index 502ad93fae..724fea7fa1 100644 --- a/beacon-chain/sync/validate_attester_slashing.go +++ b/beacon-chain/sync/validate_attester_slashing.go @@ -2,13 +2,16 @@ package sync import ( "context" + "sort" "github.com/libp2p/go-libp2p-core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks" "github.com/prysmaticlabs/prysm/beacon-chain/core/state" + "github.com/prysmaticlabs/prysm/shared/hashutil" "github.com/prysmaticlabs/prysm/shared/params" + "github.com/prysmaticlabs/prysm/shared/sliceutil" "github.com/prysmaticlabs/prysm/shared/traceutil" "go.opencensus.io/trace" ) @@ -41,6 +44,13 @@ func (r *Service) validateAttesterSlashing(ctx context.Context, pid peer.ID, msg return false } + if slashing == nil || slashing.Attestation_1 == nil || slashing.Attestation_2 == nil { + return false + } + if r.hasSeenAttesterSlashingIndices(slashing.Attestation_1.AttestingIndices, slashing.Attestation_2.AttestingIndices) { + return false + } + // Retrieve head state, advance state to the epoch slot used specified in slashing message. s, err := r.chain.HeadState(ctx) if err != nil { @@ -66,3 +76,40 @@ func (r *Service) validateAttesterSlashing(ctx context.Context, pid peer.ID, msg msg.ValidatorData = slashing // Used in downstream subscriber return true } + +// Returns true if the node has already received a valid attester slashing with the attesting indices. +func (r *Service) hasSeenAttesterSlashingIndices(indices1 []uint64, indices2 []uint64) bool { + r.seenAttesterSlashingLock.RLock() + defer r.seenAttesterSlashingLock.RUnlock() + + slashableIndices := sliceutil.IntersectionUint64(indices1, indices2) + sort.SliceStable(slashableIndices, func(i, j int) bool { + return slashableIndices[i] < slashableIndices[j] + }) + IndicesInBytes := make([]byte, 0, len(slashableIndices)) + for _, i := range slashableIndices { + IndicesInBytes = append(IndicesInBytes, byte(i)) + } + b := hashutil.FastSum256(IndicesInBytes) + + _, seen := r.seenAttesterSlashingCache.Get(b) + return seen +} + +// Set attester slashing indices in attester slashing cache. 
+func (r *Service) setAttesterSlashingIndicesSeen(indices1 []uint64, indices2 []uint64) { + r.seenAttesterSlashingLock.Lock() + defer r.seenAttesterSlashingLock.Unlock() + + slashableIndices := sliceutil.IntersectionUint64(indices1, indices2) + sort.SliceStable(slashableIndices, func(i, j int) bool { + return slashableIndices[i] < slashableIndices[j] + }) + IndicesInBytes := make([]byte, 0, len(slashableIndices)) + for _, i := range slashableIndices { + IndicesInBytes = append(IndicesInBytes, byte(i)) + } + b := hashutil.FastSum256(IndicesInBytes) + + r.seenAttesterSlashingCache.Add(b, true) +} diff --git a/beacon-chain/sync/validate_attester_slashing_test.go b/beacon-chain/sync/validate_attester_slashing_test.go index 8f807088d5..93752d4f92 100644 --- a/beacon-chain/sync/validate_attester_slashing_test.go +++ b/beacon-chain/sync/validate_attester_slashing_test.go @@ -8,10 +8,10 @@ import ( "testing" "time" + lru "github.com/hashicorp/golang-lru" pubsub "github.com/libp2p/go-libp2p-pubsub" pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-ssz" mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/p2p" @@ -40,16 +40,16 @@ func setupValidAttesterSlashing(t *testing.T) (*ethpb.AttesterSlashing, *stateTr }, AttestingIndices: []uint64{0, 1}, } - hashTreeRoot, err := ssz.HashTreeRoot(att1.Data) - if err != nil { - t.Error(err) - } - domain, err := helpers.Domain(state.Fork(), 0, params.BeaconConfig().DomainBeaconAttester) + domain, err := helpers.Domain(state.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, state.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } - sig0 := privKeys[0].Sign(hashTreeRoot[:], domain) - sig1 := privKeys[1].Sign(hashTreeRoot[:], domain) + hashTreeRoot, err := helpers.ComputeSigningRoot(att1.Data, domain) + if err != nil { + t.Error(err) + } + sig0 := privKeys[0].Sign(hashTreeRoot[:]) + sig1 := privKeys[1].Sign(hashTreeRoot[:]) aggregateSig := bls.AggregateSignatures([]*bls.Signature{sig0, sig1}) att1.Signature = aggregateSig.Marshal()[:] @@ -60,12 +60,12 @@ func setupValidAttesterSlashing(t *testing.T) (*ethpb.AttesterSlashing, *stateTr }, AttestingIndices: []uint64{0, 1}, } - hashTreeRoot, err = ssz.HashTreeRoot(att2.Data) + hashTreeRoot, err = helpers.ComputeSigningRoot(att2.Data, domain) if err != nil { t.Error(err) } - sig0 = privKeys[0].Sign(hashTreeRoot[:], domain) - sig1 = privKeys[1].Sign(hashTreeRoot[:], domain) + sig0 = privKeys[0].Sign(hashTreeRoot[:]) + sig1 = privKeys[1].Sign(hashTreeRoot[:]) aggregateSig = bls.AggregateSignatures([]*bls.Signature{sig0, sig1}) att2.Signature = aggregateSig.Marshal()[:] @@ -93,10 +93,15 @@ func TestValidateAttesterSlashing_ValidSlashing(t *testing.T) { slashing, s := setupValidAttesterSlashing(t) + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } r := &Service{ - p2p: p, - chain: &mock.ChainService{State: s}, - initialSync: &mockSync.Sync{IsSyncing: false}, + p2p: p, + chain: &mock.ChainService{State: s}, + initialSync: &mockSync.Sync{IsSyncing: false}, + seenAttesterSlashingCache: c, } buf := new(bytes.Buffer) @@ -131,10 +136,15 @@ func TestValidateAttesterSlashing_ContextTimeout(t *testing.T) { ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } r := &Service{ - p2p: p, - chain: &mock.ChainService{State: state}, - 
initialSync: &mockSync.Sync{IsSyncing: false}, + p2p: p, + chain: &mock.ChainService{State: state}, + initialSync: &mockSync.Sync{IsSyncing: false}, + seenAttesterSlashingCache: c, } buf := new(bytes.Buffer) diff --git a/beacon-chain/sync/validate_beacon_blocks.go b/beacon-chain/sync/validate_beacon_blocks.go index 22c4750213..f292363e95 100644 --- a/beacon-chain/sync/validate_beacon_blocks.go +++ b/beacon-chain/sync/validate_beacon_blocks.go @@ -7,8 +7,9 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-ssz" + "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" - "github.com/prysmaticlabs/prysm/shared/bls" + "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/traceutil" "go.opencensus.io/trace" ) @@ -46,10 +47,21 @@ func (r *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms return false } + if blk.Block == nil { + return false + } + // Verify the block is the first block received for the proposer for the slot. + if r.hasSeenBlockIndexSlot(blk.Block.Slot, blk.Block.ProposerIndex) { + return false + } + blockRoot, err := ssz.HashTreeRoot(blk.Block) if err != nil { return false } + if r.db.HasBlock(ctx, blockRoot) { + return false + } r.pendingQueueLock.RLock() if r.seenPendingBlocks[blockRoot] { @@ -58,20 +70,74 @@ func (r *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms } r.pendingQueueLock.RUnlock() - if err := helpers.VerifySlotTime(uint64(r.chain.GenesisTime().Unix()), blk.Block.Slot); err != nil { + if err := helpers.VerifySlotTime(uint64(r.chain.GenesisTime().Unix()), blk.Block.Slot, maximumGossipClockDisparity); err != nil { log.WithError(err).WithField("blockSlot", blk.Block.Slot).Warn("Rejecting incoming block.") return false } - if r.chain.FinalizedCheckpt().Epoch > helpers.SlotToEpoch(blk.Block.Slot) { - log.Debug("Block older than finalized checkpoint received,rejecting it") + if helpers.StartSlot(r.chain.FinalizedCheckpt().Epoch) >= blk.Block.Slot { + log.Debug("Block slot older/equal than last finalized epoch start slot, rejecting it") return false } - if _, err = bls.SignatureFromBytes(blk.Signature); err != nil { + // Handle block when the parent is unknown. 
+ if !r.db.HasBlock(ctx, bytesutil.ToBytes32(blk.Block.ParentRoot)) { + r.pendingQueueLock.Lock() + r.slotToPendingBlocks[blk.Block.Slot] = blk + r.seenPendingBlocks[blockRoot] = true + r.pendingQueueLock.Unlock() + return false + } + + hasStateSummaryDB := r.db.HasStateSummary(ctx, bytesutil.ToBytes32(blk.Block.ParentRoot)) + hasStateSummaryCache := r.stateSummaryCache.Has(bytesutil.ToBytes32(blk.Block.ParentRoot)) + if !hasStateSummaryDB && !hasStateSummaryCache { + log.WithError(err).WithField("blockSlot", blk.Block.Slot).Warn("No access to parent state") + return false + } + parentState, err := r.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(blk.Block.ParentRoot)) + if err != nil { + log.WithError(err).WithField("blockSlot", blk.Block.Slot).Warn("Could not get parent state") + return false + } + + if err := blocks.VerifyBlockHeaderSignature(parentState, blk); err != nil { + log.WithError(err).WithField("blockSlot", blk.Block.Slot).Warn("Could not verify block signature") + return false + } + + err = parentState.SetSlot(blk.Block.Slot) + if err != nil { + log.WithError(err).WithField("blockSlot", blk.Block.Slot).Warn("Could not set parent state slot") + return false + } + idx, err := helpers.BeaconProposerIndex(parentState) + if err != nil { + log.WithError(err).WithField("blockSlot", blk.Block.Slot).Warn("Could not get proposer index using parent state") + return false + } + if blk.Block.ProposerIndex != idx { + log.WithError(err).WithField("blockSlot", blk.Block.Slot).Warn("Incorrect proposer index") return false } msg.ValidatorData = blk // Used in downstream subscriber return true } + +// Returns true if the block is not the first block proposed for the proposer for the slot. +func (r *Service) hasSeenBlockIndexSlot(slot uint64, proposerIdx uint64) bool { + r.seenBlockLock.RLock() + defer r.seenBlockLock.RUnlock() + b := append(bytesutil.Bytes32(slot), bytesutil.Bytes32(proposerIdx)...) + _, seen := r.seenBlockCache.Get(string(b)) + return seen +} + +// Set block proposer index and slot as seen for incoming blocks. +func (r *Service) setSeenBlockIndexSlot(slot uint64, proposerIdx uint64) { + r.seenBlockLock.Lock() + defer r.seenBlockLock.Unlock() + b := append(bytesutil.Bytes32(slot), bytesutil.Bytes32(proposerIdx)...) 
+ r.seenBlockCache.Add(string(b), true) +} diff --git a/beacon-chain/sync/validate_beacon_blocks_test.go b/beacon-chain/sync/validate_beacon_blocks_test.go index b0296dc356..fb3cf9cc81 100644 --- a/beacon-chain/sync/validate_beacon_blocks_test.go +++ b/beacon-chain/sync/validate_beacon_blocks_test.go @@ -7,17 +7,26 @@ import ( "testing" "time" + lru "github.com/hashicorp/golang-lru" pubsub "github.com/libp2p/go-libp2p-pubsub" pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" + "github.com/prysmaticlabs/go-ssz" mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" + "github.com/prysmaticlabs/prysm/beacon-chain/cache" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" + "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations" "github.com/prysmaticlabs/prysm/beacon-chain/p2p" p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bls" "github.com/prysmaticlabs/prysm/shared/bytesutil" + "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/testutil" + logTest "github.com/sirupsen/logrus/hooks/test" ) // General note for writing validation tests: Use a random value for any field @@ -38,6 +47,10 @@ func TestValidateBeaconBlockPubSub_InvalidSignature(t *testing.T) { p := p2ptest.NewTestP2P(t) + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } r := &Service{ db: db, p2p: p, @@ -46,6 +59,7 @@ func TestValidateBeaconBlockPubSub_InvalidSignature(t *testing.T) { FinalizedCheckPoint: ðpb.Checkpoint{ Epoch: 0, }}, + seenBlockCache: c, } buf := new(bytes.Buffer) @@ -83,11 +97,17 @@ func TestValidateBeaconBlockPubSub_BlockAlreadyPresentInDB(t *testing.T) { t.Fatal(err) } + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } r := &Service{ - db: db, - p2p: p, - initialSync: &mockSync.Sync{IsSyncing: false}, - chain: &mock.ChainService{Genesis: time.Now()}, + db: db, + p2p: p, + initialSync: &mockSync.Sync{IsSyncing: false}, + chain: &mock.ChainService{Genesis: time.Now()}, + seenBlockCache: c, + stateSummaryCache: cache.NewStateSummaryCache(), } buf := new(bytes.Buffer) @@ -104,38 +124,82 @@ func TestValidateBeaconBlockPubSub_BlockAlreadyPresentInDB(t *testing.T) { }, } result := r.validateBeaconBlockPubSub(ctx, "", m) - if result { t.Error("Expected false result, got true") } } -func TestValidateBeaconBlockPubSub_ValidSignature(t *testing.T) { +func TestValidateBeaconBlockPubSub_ValidProposerSignature(t *testing.T) { db := dbtest.SetupDB(t) defer dbtest.TeardownDB(t, db) p := p2ptest.NewTestP2P(t) ctx := context.Background() - b := []byte("sk") - b32 := bytesutil.ToBytes32(b) - sk, err := bls.SecretKeyFromBytes(b32[:]) + beaconState, privKeys := testutil.DeterministicGenesisState(t, 100) + parentBlock := ðpb.SignedBeaconBlock{ + Block: ðpb.BeaconBlock{ + ProposerIndex: 0, + Slot: 0, + }, + } + if err := db.SaveBlock(ctx, parentBlock); err != nil { + t.Fatal(err) + } + bRoot, err := ssz.HashTreeRoot(parentBlock.Block) + if err := db.SaveState(ctx, beaconState, bRoot); err != nil { + t.Fatal(err) + } + if err := db.SaveStateSummary(ctx, &pb.StateSummary{ + Root: bRoot[:], + }); err != nil { + t.Fatal(err) + } + copied := beaconState.Copy() + if err := 
copied.SetSlot(1); err != nil { + t.Fatal(err) + } + proposerIdx, err := helpers.BeaconProposerIndex(copied) if err != nil { t.Fatal(err) } msg := ðpb.SignedBeaconBlock{ Block: ðpb.BeaconBlock{ - ParentRoot: testutil.Random32Bytes(t), + ProposerIndex: proposerIdx, + Slot: 1, + ParentRoot: bRoot[:], }, - Signature: sk.Sign([]byte("data"), 0).Marshal(), } + domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot()) + if err != nil { + t.Fatal(err) + } + signingRoot, err := helpers.ComputeSigningRoot(msg.Block, domain) + if err != nil { + t.Error(err) + } + blockSig := privKeys[proposerIdx].Sign(signingRoot[:]).Marshal() + msg.Signature = blockSig[:] + + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } + stateSummaryCache := cache.NewStateSummaryCache() + stateGen := stategen.New(db, stateSummaryCache) r := &Service{ db: db, p2p: p, initialSync: &mockSync.Sync{IsSyncing: false}, - chain: &mock.ChainService{Genesis: time.Now(), + chain: &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0), + State: beaconState, FinalizedCheckPoint: ðpb.Checkpoint{ Epoch: 0, }}, + seenBlockCache: c, + slotToPendingBlocks: make(map[uint64]*ethpb.SignedBeaconBlock), + seenPendingBlocks: make(map[[32]byte]bool), + stateSummaryCache: stateSummaryCache, + stateGen: stateGen, } buf := new(bytes.Buffer) @@ -175,7 +239,7 @@ func TestValidateBeaconBlockPubSub_Syncing(t *testing.T) { Block: ðpb.BeaconBlock{ ParentRoot: testutil.Random32Bytes(t), }, - Signature: sk.Sign([]byte("data"), 0).Marshal(), + Signature: sk.Sign([]byte("data")).Marshal(), } r := &Service{ @@ -223,14 +287,21 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromFuture(t *testing.T) { ParentRoot: testutil.Random32Bytes(t), Slot: 1000, }, - Signature: sk.Sign([]byte("data"), 0).Marshal(), + Signature: sk.Sign([]byte("data")).Marshal(), } + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } r := &Service{ - p2p: p, - db: db, - initialSync: &mockSync.Sync{IsSyncing: false}, - chain: &mock.ChainService{Genesis: time.Now()}, + p2p: p, + db: db, + initialSync: &mockSync.Sync{IsSyncing: false}, + chain: &mock.ChainService{Genesis: time.Now()}, + seenBlockCache: c, + slotToPendingBlocks: make(map[uint64]*ethpb.SignedBeaconBlock), + seenPendingBlocks: make(map[[32]byte]bool), } buf := new(bytes.Buffer) @@ -267,10 +338,14 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromThePast(t *testing.T) { ParentRoot: testutil.Random32Bytes(t), Slot: 10, }, - Signature: sk.Sign([]byte("data"), 0).Marshal(), + Signature: sk.Sign([]byte("data")).Marshal(), } genesisTime := time.Now() + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } r := &Service{ db: db, p2p: p, @@ -280,6 +355,7 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromThePast(t *testing.T) { FinalizedCheckPoint: ðpb.Checkpoint{ Epoch: 1, }}, + seenBlockCache: c, } buf := new(bytes.Buffer) @@ -300,3 +376,158 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromThePast(t *testing.T) { t.Error("Expected false result, got true") } } + +func TestValidateBeaconBlockPubSub_SeenProposerSlot(t *testing.T) { + db := dbtest.SetupDB(t) + defer dbtest.TeardownDB(t, db) + p := p2ptest.NewTestP2P(t) + ctx := context.Background() + beaconState, privKeys := testutil.DeterministicGenesisState(t, 100) + parentBlock := ðpb.SignedBeaconBlock{ + Block: ðpb.BeaconBlock{ + ProposerIndex: 0, + Slot: 0, + }, + } + if err := db.SaveBlock(ctx, parentBlock); err != nil { + 
t.Fatal(err) + } + bRoot, err := ssz.HashTreeRoot(parentBlock.Block) + if err := db.SaveState(ctx, beaconState, bRoot); err != nil { + t.Fatal(err) + } + if err != nil { + t.Fatal(err) + } + proposerIdx, err := helpers.BeaconProposerIndex(beaconState) + if err != nil { + t.Fatal(err) + } + + msg := ðpb.SignedBeaconBlock{ + Block: ðpb.BeaconBlock{ + ProposerIndex: proposerIdx, + Slot: 1, + ParentRoot: bRoot[:], + }, + } + + domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot()) + if err != nil { + t.Fatal(err) + } + signingRoot, err := helpers.ComputeSigningRoot(msg.Block, domain) + if err != nil { + t.Error(err) + } + blockSig := privKeys[proposerIdx].Sign(signingRoot[:]).Marshal() + msg.Signature = blockSig[:] + + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } + r := &Service{ + db: db, + p2p: p, + initialSync: &mockSync.Sync{IsSyncing: false}, + chain: &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0), + State: beaconState, + FinalizedCheckPoint: ðpb.Checkpoint{ + Epoch: 0, + }}, + seenBlockCache: c, + slotToPendingBlocks: make(map[uint64]*ethpb.SignedBeaconBlock), + seenPendingBlocks: make(map[[32]byte]bool), + stateSummaryCache: cache.NewStateSummaryCache(), + } + + buf := new(bytes.Buffer) + if _, err := p.Encoding().Encode(buf, msg); err != nil { + t.Fatal(err) + } + m := &pubsub.Message{ + Message: &pubsubpb.Message{ + Data: buf.Bytes(), + TopicIDs: []string{ + p2p.GossipTypeMapping[reflect.TypeOf(msg)], + }, + }, + } + r.setSeenBlockIndexSlot(msg.Block.Slot, msg.Block.ProposerIndex) + time.Sleep(10 * time.Millisecond) // Wait for cached value to pass through buffers. + result := r.validateBeaconBlockPubSub(ctx, "", m) + if result { + t.Error("Expected false result, got true") + } +} + +func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) { + hook := logTest.NewGlobal() + db := dbtest.SetupDB(t) + defer dbtest.TeardownDB(t, db) + p := p2ptest.NewTestP2P(t) + + parent := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{}} + if err := db.SaveBlock(context.Background(), parent); err != nil { + t.Fatal(err) + } + parentRoot, err := ssz.HashTreeRoot(parent.Block) + if err != nil { + t.Fatal(err) + } + chain := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0), + FinalizedCheckPoint: ðpb.Checkpoint{ + Epoch: 1, + }} + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } + r := &Service{ + db: db, + p2p: p, + chain: chain, + blockNotifier: chain.BlockNotifier(), + attPool: attestations.NewPool(), + seenBlockCache: c, + initialSync: &mockSync.Sync{IsSyncing: false}, + } + + b := ðpb.SignedBeaconBlock{ + Block: ðpb.BeaconBlock{Slot: 1, ParentRoot: parentRoot[:], Body: ðpb.BeaconBlockBody{}}, + } + buf := new(bytes.Buffer) + if _, err := p.Encoding().Encode(buf, b); err != nil { + t.Fatal(err) + } + m := &pubsub.Message{ + Message: &pubsubpb.Message{ + Data: buf.Bytes(), + TopicIDs: []string{ + p2p.GossipTypeMapping[reflect.TypeOf(b)], + }, + }, + } + + r.validateBeaconBlockPubSub(context.Background(), "", m) + testutil.AssertLogsContain(t, hook, "Block slot older/equal than last finalized epoch start slot, rejecting it") + + hook.Reset() + b.Block.Slot = params.BeaconConfig().SlotsPerEpoch + buf = new(bytes.Buffer) + if _, err := p.Encoding().Encode(buf, b); err != nil { + t.Fatal(err) + } + m = &pubsub.Message{ + Message: &pubsubpb.Message{ + Data: buf.Bytes(), + TopicIDs: 
[]string{
+				p2p.GossipTypeMapping[reflect.TypeOf(b)],
+			},
+		},
+	}
+
+	r.validateBeaconBlockPubSub(context.Background(), "", m)
+	testutil.AssertLogsDoNotContain(t, hook, "Block slot older/equal than last finalized epoch start slot, rejecting it")
+}
diff --git a/beacon-chain/sync/validate_committee_index_beacon_attestation.go b/beacon-chain/sync/validate_committee_index_beacon_attestation.go
index 9e84c63f0c..42e6879c25 100644
--- a/beacon-chain/sync/validate_committee_index_beacon_attestation.go
+++ b/beacon-chain/sync/validate_committee_index_beacon_attestation.go
@@ -9,11 +9,9 @@ import (
 	"github.com/libp2p/go-libp2p-core/peer"
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
-	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
 	"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
 	"github.com/prysmaticlabs/prysm/shared/bytesutil"
 	"github.com/prysmaticlabs/prysm/shared/featureconfig"
-	"github.com/prysmaticlabs/prysm/shared/params"
 	"github.com/prysmaticlabs/prysm/shared/traceutil"
 	"go.opencensus.io/trace"
 )
@@ -55,8 +53,22 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
 		return false
 	}
 
+	if att.Data == nil {
+		return false
+	}
+	// Verify this is the first attestation received for the participating validator for the slot.
+	if s.hasSeenCommitteeIndicesSlot(att.Data.Slot, att.Data.CommitteeIndex, att.AggregationBits) {
+		return false
+	}
+
 	// The attestation's committee index (attestation.data.index) is for the correct subnet.
-	if !strings.HasPrefix(originalTopic, fmt.Sprintf(format, att.Data.CommitteeIndex)) {
+	digest, err := s.p2p.ForkDigest()
+	if err != nil {
+		log.WithError(err).Error("Failed to compute fork digest")
+		traceutil.AnnotateError(span, err)
+		return false
+	}
+	if !strings.HasPrefix(originalTopic, fmt.Sprintf(format, digest, att.Data.CommitteeIndex)) {
 		return false
 	}
 
@@ -66,21 +78,19 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
 	}
 
 	// Attestation's slot is within ATTESTATION_PROPAGATION_SLOT_RANGE.
-	currentSlot := helpers.SlotsSince(s.chain.GenesisTime())
-	upper := att.Data.Slot + params.BeaconConfig().AttestationPropagationSlotRange
-	lower := att.Data.Slot
-	if currentSlot > upper || currentSlot < lower {
+	if err := validateAggregateAttTime(att.Data.Slot, uint64(s.chain.GenesisTime().Unix())); err != nil {
+		traceutil.AnnotateError(span, err)
 		return false
 	}
 
 	// Verify the block being voted and the processed state is in DB and. The block should have passed validation if it's in the DB.
 	blockRoot := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
-	hasStateSummary := featureconfig.Get().NewStateMgmt && s.db.HasStateSummary(ctx, blockRoot) || s.stateSummaryCache.Has(blockRoot)
+	hasStateSummary := !featureconfig.Get().DisableNewStateMgmt && s.db.HasStateSummary(ctx, blockRoot) || s.stateSummaryCache.Has(blockRoot)
 	hasState := s.db.HasState(ctx, blockRoot) || hasStateSummary
 	hasBlock := s.db.HasBlock(ctx, blockRoot)
 	if !(hasState && hasBlock) {
 		// A node doesn't have the block, it'll request from peer while saving the pending attestation to a queue.
- s.savePendingAtt(ð.AggregateAttestationAndProof{Aggregate: att}) + s.savePendingAtt(ð.SignedAggregateAttestationAndProof{Message: ð.AggregateAttestationAndProof{Aggregate: att}}) return false } @@ -89,7 +99,28 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p return false } + s.setSeenCommitteeIndicesSlot(att.Data.Slot, att.Data.CommitteeIndex, att.AggregationBits) + msg.ValidatorData = att return true } + +// Returns true if the attestation was already seen for the participating validator for the slot. +func (s *Service) hasSeenCommitteeIndicesSlot(slot uint64, committeeID uint64, aggregateBits []byte) bool { + s.seenAttestationLock.RLock() + defer s.seenAttestationLock.RUnlock() + b := append(bytesutil.Bytes32(slot), bytesutil.Bytes32(committeeID)...) + b = append(b, aggregateBits...) + _, seen := s.seenAttestationCache.Get(string(b)) + return seen +} + +// Set committee's indices and slot as seen for incoming attestations. +func (s *Service) setSeenCommitteeIndicesSlot(slot uint64, committeeID uint64, aggregateBits []byte) { + s.seenAttestationLock.Lock() + defer s.seenAttestationLock.Unlock() + b := append(bytesutil.Bytes32(slot), bytesutil.Bytes32(committeeID)...) + b = append(b, aggregateBits...) + s.seenAttestationCache.Add(string(b), true) +} diff --git a/beacon-chain/sync/validate_committee_index_beacon_attestation_test.go b/beacon-chain/sync/validate_committee_index_beacon_attestation_test.go index 6b84889b3f..5c916b1ad5 100644 --- a/beacon-chain/sync/validate_committee_index_beacon_attestation_test.go +++ b/beacon-chain/sync/validate_committee_index_beacon_attestation_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + lru "github.com/hashicorp/golang-lru" pubsub "github.com/libp2p/go-libp2p-pubsub" pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" @@ -15,11 +16,10 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/cache" dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing" - beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state" mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing" - pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/params" + "github.com/prysmaticlabs/prysm/shared/testutil" ) func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) { @@ -31,12 +31,18 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) { Genesis: time.Now().Add(time.Duration(-64*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second), // 64 slots ago ValidAttestation: true, } + + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } s := &Service{ initialSync: &mockSync.Sync{IsSyncing: false}, p2p: p, db: db, chain: chain, - blkRootToPendingAtts: make(map[[32]byte][]*ethpb.AggregateAttestationAndProof), + blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof), + seenAttestationCache: c, stateSummaryCache: cache.NewStateSummaryCache(), } @@ -54,10 +60,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) { t.Fatal(err) } - savedState, err := beaconstate.InitializeFromProto(&pb.BeaconState{}) - if err != nil { - t.Fatal(err) - } + savedState := testutil.NewBeaconState() if err := db.SaveState(context.Background(), savedState, validBlockRoot); err != nil { t.Fatal(err) } @@ -79,10 +82,24 @@ func 
TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
 					Slot:            63,
 				},
 			},
-			topic:                     "/eth2/committee_index1_beacon_attestation",
+			topic:                     "/eth2/00000000/committee_index1_beacon_attestation",
 			validAttestationSignature: true,
 			want:                      true,
 		},
+		{
+			name: "alreadySeen",
+			msg: &ethpb.Attestation{
+				AggregationBits: bitfield.Bitlist{0b1010},
+				Data: &ethpb.AttestationData{
+					BeaconBlockRoot: validBlockRoot[:],
+					CommitteeIndex:  1,
+					Slot:            63,
+				},
+			},
+			topic:                     "/eth2/00000000/committee_index1_beacon_attestation",
+			validAttestationSignature: true,
+			want:                      false,
+		},
 		{
 			name: "wrong committee index",
 			msg: &ethpb.Attestation{
@@ -93,7 +110,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
 					Slot:            63,
 				},
 			},
-			topic:                     "/eth2/committee_index3_beacon_attestation",
+			topic:                     "/eth2/00000000/committee_index3_beacon_attestation",
 			validAttestationSignature: true,
 			want:                      false,
 		},
@@ -107,7 +124,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
 					Slot:            63,
 				},
 			},
-			topic:                     "/eth2/committee_index1_beacon_attestation",
+			topic:                     "/eth2/00000000/committee_index1_beacon_attestation",
 			validAttestationSignature: true,
 			want:                      false,
 		},
@@ -121,7 +138,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
 					Slot:            63,
 				},
 			},
-			topic:                     "/eth2/committee_index1_beacon_attestation",
+			topic:                     "/eth2/00000000/committee_index1_beacon_attestation",
 			validAttestationSignature: true,
 			want:                      false,
 		},
@@ -135,7 +152,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
 					Slot:            63,
 				},
 			},
-			topic:                     "/eth2/committee_index1_beacon_attestation",
+			topic:                     "/eth2/00000000/committee_index1_beacon_attestation",
 			validAttestationSignature: false,
 			want:                      false,
 		},
@@ -156,7 +173,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
 		}
 		chain.ValidAttestation = tt.validAttestationSignature
 		if s.validateCommitteeIndexBeaconAttestation(ctx, "" /*peerID*/, m) != tt.want {
-			t.Errorf("Did not received wanted validation. Got %v, wanted %v", !tt.want, tt.want)
+			t.Fatalf("Did not receive wanted validation. Got %v, wanted %v", !tt.want, tt.want)
 		}
 		if tt.want && m.ValidatorData == nil {
 			t.Error("Expected validator data to be set")
diff --git a/beacon-chain/sync/validate_proposer_slashing.go b/beacon-chain/sync/validate_proposer_slashing.go
index 79bcff959b..504f250ecf 100644
--- a/beacon-chain/sync/validate_proposer_slashing.go
+++ b/beacon-chain/sync/validate_proposer_slashing.go
@@ -41,6 +41,13 @@ func (r *Service) validateProposerSlashing(ctx context.Context, pid peer.ID, msg
 		return false
 	}
 
+	if slashing.Header_1 == nil || slashing.Header_1.Header == nil {
+		return false
+	}
+	if r.hasSeenProposerSlashingIndex(slashing.Header_1.Header.ProposerIndex) {
+		return false
+	}
+
 	// Retrieve head state, advance state to the epoch slot used specified in slashing message.
 	s, err := r.chain.HeadState(ctx)
 	if err != nil {
@@ -65,3 +72,18 @@ func (r *Service) validateProposerSlashing(ctx context.Context, pid peer.ID, msg
 	msg.ValidatorData = slashing // Used in downstream subscriber
 	return true
 }
+
+// Returns true if the node has already received a valid proposer slashing for the proposer with index `i`.
+func (r *Service) hasSeenProposerSlashingIndex(i uint64) bool {
+	r.seenProposerSlashingLock.RLock()
+	defer r.seenProposerSlashingLock.RUnlock()
+	_, seen := r.seenProposerSlashingCache.Get(i)
+	return seen
+}
+
+// Set proposer slashing index in proposer slashing cache.
+func (r *Service) setProposerSlashingIndexSeen(i uint64) { + r.seenProposerSlashingLock.Lock() + defer r.seenProposerSlashingLock.Unlock() + r.seenProposerSlashingCache.Add(i, true) +} diff --git a/beacon-chain/sync/validate_proposer_slashing_test.go b/beacon-chain/sync/validate_proposer_slashing_test.go index a8ff56ea0f..35f0a6862b 100644 --- a/beacon-chain/sync/validate_proposer_slashing_test.go +++ b/beacon-chain/sync/validate_proposer_slashing_test.go @@ -8,10 +8,10 @@ import ( "testing" "time" + lru "github.com/hashicorp/golang-lru" pubsub "github.com/libp2p/go-libp2p-pubsub" pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-ssz" mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/p2p" @@ -64,6 +64,7 @@ func setupValidProposerSlashing(t *testing.T) (*ethpb.ProposerSlashing, *stateTr state.Fork(), helpers.CurrentEpoch(state), params.BeaconConfig().DomainBeaconProposer, + state.GenesisValidatorRoot(), ) if err != nil { t.Fatal(err) @@ -74,36 +75,37 @@ func setupValidProposerSlashing(t *testing.T) (*ethpb.ProposerSlashing, *stateTr someRoot2 := [32]byte{4, 5, 6} header1 := ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 0, - ParentRoot: someRoot[:], - StateRoot: someRoot[:], - BodyRoot: someRoot[:], + ProposerIndex: 1, + Slot: 0, + ParentRoot: someRoot[:], + StateRoot: someRoot[:], + BodyRoot: someRoot[:], }, } - signingRoot, err := ssz.HashTreeRoot(header1.Header) + signingRoot, err := helpers.ComputeSigningRoot(header1.Header, domain) if err != nil { t.Errorf("Could not get signing root of beacon block header: %v", err) } - header1.Signature = privKey.Sign(signingRoot[:], domain).Marshal()[:] + header1.Signature = privKey.Sign(signingRoot[:]).Marshal()[:] header2 := ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 0, - ParentRoot: someRoot2[:], - StateRoot: someRoot2[:], - BodyRoot: someRoot2[:], + ProposerIndex: 1, + Slot: 0, + ParentRoot: someRoot2[:], + StateRoot: someRoot2[:], + BodyRoot: someRoot2[:], }, } - signingRoot, err = ssz.HashTreeRoot(header2.Header) + signingRoot, err = helpers.ComputeSigningRoot(header2.Header, domain) if err != nil { t.Errorf("Could not get signing root of beacon block header: %v", err) } - header2.Signature = privKey.Sign(signingRoot[:], domain).Marshal()[:] + header2.Signature = privKey.Sign(signingRoot[:]).Marshal()[:] slashing := ðpb.ProposerSlashing{ - ProposerIndex: 1, - Header_1: header1, - Header_2: header2, + Header_1: header1, + Header_2: header2, } val, err := state.ValidatorAtIndex(1) if err != nil { @@ -128,10 +130,15 @@ func TestValidateProposerSlashing_ValidSlashing(t *testing.T) { slashing, s := setupValidProposerSlashing(t) + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } r := &Service{ - p2p: p, - chain: &mock.ChainService{State: s}, - initialSync: &mockSync.Sync{IsSyncing: false}, + p2p: p, + chain: &mock.ChainService{State: s}, + initialSync: &mockSync.Sync{IsSyncing: false}, + seenProposerSlashingCache: c, } buf := new(bytes.Buffer) @@ -165,10 +172,15 @@ func TestValidateProposerSlashing_ContextTimeout(t *testing.T) { ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } r := &Service{ - p2p: p, - chain: &mock.ChainService{State: state}, - initialSync: &mockSync.Sync{IsSyncing: false}, + p2p: 
p, + chain: &mock.ChainService{State: state}, + initialSync: &mockSync.Sync{IsSyncing: false}, + seenProposerSlashingCache: c, } buf := new(bytes.Buffer) diff --git a/beacon-chain/sync/validate_voluntary_exit.go b/beacon-chain/sync/validate_voluntary_exit.go index 6d3d18e9cd..fa7b164c36 100644 --- a/beacon-chain/sync/validate_voluntary_exit.go +++ b/beacon-chain/sync/validate_voluntary_exit.go @@ -41,6 +41,13 @@ func (r *Service) validateVoluntaryExit(ctx context.Context, pid peer.ID, msg *p return false } + if exit.Exit == nil { + return false + } + if r.hasSeenExitIndex(exit.Exit.ValidatorIndex) { + return false + } + s, err := r.chain.HeadState(ctx) if err != nil { return false @@ -54,7 +61,7 @@ func (r *Service) validateVoluntaryExit(ctx context.Context, pid peer.ID, msg *p if err != nil { return false } - if err := blocks.VerifyExit(val, exitedEpochSlot, s.Fork(), exit); err != nil { + if err := blocks.VerifyExit(val, exitedEpochSlot, s.Fork(), exit, s.GenesisValidatorRoot()); err != nil { return false } @@ -62,3 +69,18 @@ func (r *Service) validateVoluntaryExit(ctx context.Context, pid peer.ID, msg *p return true } + +// Returns true if the node has already received a valid exit request for the validator with index `i`. +func (r *Service) hasSeenExitIndex(i uint64) bool { + r.seenExitLock.RLock() + defer r.seenExitLock.RUnlock() + _, seen := r.seenExitCache.Get(i) + return seen +} + +// Set exit request index `i` in seen exit request cache. +func (r *Service) setExitIndexSeen(i uint64) { + r.seenExitLock.Lock() + defer r.seenExitLock.Unlock() + r.seenExitCache.Add(i, true) +} diff --git a/beacon-chain/sync/validate_voluntary_exit_test.go b/beacon-chain/sync/validate_voluntary_exit_test.go index 2f669ff4ef..254e21707d 100644 --- a/beacon-chain/sync/validate_voluntary_exit_test.go +++ b/beacon-chain/sync/validate_voluntary_exit_test.go @@ -7,10 +7,10 @@ import ( "reflect" "testing" + lru "github.com/hashicorp/golang-lru" pubsub "github.com/libp2p/go-libp2p-pubsub" pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-ssz" mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/p2p" @@ -51,17 +51,17 @@ func setupValidExit(t *testing.T) (*ethpb.SignedVoluntaryExit, *stateTrie.Beacon ); err != nil { t.Fatal(err) } - signingRoot, err := ssz.HashTreeRoot(exit.Exit) - if err != nil { - t.Error(err) - } - domain, err := helpers.Domain(state.Fork(), helpers.CurrentEpoch(state), params.BeaconConfig().DomainVoluntaryExit) + domain, err := helpers.Domain(state.Fork(), helpers.CurrentEpoch(state), params.BeaconConfig().DomainVoluntaryExit, state.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } + signingRoot, err := helpers.ComputeSigningRoot(exit.Exit, domain) + if err != nil { + t.Error(err) + } priv := bls.RandKey() - sig := priv.Sign(signingRoot[:], domain) + sig := priv.Sign(signingRoot[:]) exit.Signature = sig.Marshal() val, err := state.ValidatorAtIndex(0) @@ -87,12 +87,17 @@ func TestValidateVoluntaryExit_ValidExit(t *testing.T) { exit, s := setupValidExit(t) + c, err := lru.New(10) + if err != nil { + t.Fatal(err) + } r := &Service{ p2p: p, chain: &mock.ChainService{ State: s, }, - initialSync: &mockSync.Sync{IsSyncing: false}, + initialSync: &mockSync.Sync{IsSyncing: false}, + seenExitCache: c, } buf := new(bytes.Buffer) diff --git a/beacon-chain/usage.go b/beacon-chain/usage.go 
index 83501f4ad3..311bf07c33 100644 --- a/beacon-chain/usage.go +++ b/beacon-chain/usage.go @@ -92,7 +92,7 @@ var appHelpFlagGroups = []flagGroup{ flags.SetGCPercent, flags.UnsafeSync, flags.SlotsPerArchivedPoint, - flags.EnableDiscv5, + flags.DisableDiscv5, }, }, { @@ -103,6 +103,7 @@ var appHelpFlagGroups = []flagGroup{ cmd.P2PHostDNS, cmd.P2PMaxPeers, cmd.P2PPrivKey, + cmd.P2PMetadata, cmd.P2PWhitelist, cmd.StaticPeers, cmd.EnableUPnPFlag, diff --git a/endtoend/BUILD.bazel b/endtoend/BUILD.bazel index 2c050c870a..14009e9d1c 100644 --- a/endtoend/BUILD.bazel +++ b/endtoend/BUILD.bazel @@ -16,6 +16,7 @@ go_test( "//beacon-chain", "//slasher", "//validator", + "//tools/bootnode", "@com_github_ethereum_go_ethereum//cmd/geth", ], shard_count = 4, diff --git a/endtoend/README.md b/endtoend/README.md index b20b4a985c..75a9f77810 100644 --- a/endtoend/README.md +++ b/endtoend/README.md @@ -12,7 +12,7 @@ Evaluators have 3 parts, the name for it's test name, a `policy` which declares ## Current end-to-end tests * Minimal Config - 4 beacon nodes, 64 validators, running for 6 epochs -* ~~Demo Config - 2 beacon nodes, 16,384 validators, running for 5 epochs~~ Disabled for now +* ~~Mainnet Config - 2 beacon nodes, 16,384 validators, running for 5 epochs~~ Disabled for now ## Instructions If you wish to run all the E2E tests, you can run them through bazel with: diff --git a/endtoend/components/beacon_node.go b/endtoend/components/beacon_node.go index b193213cbf..826aa8f391 100644 --- a/endtoend/components/beacon_node.go +++ b/endtoend/components/beacon_node.go @@ -16,20 +16,17 @@ import ( ) // StartBeaconNodes starts the requested amount of beacon nodes, passing in the deposit contract given. -func StartBeaconNodes(t *testing.T, config *types.E2EConfig) ([]string, []int) { - var multiAddrs []string +func StartBeaconNodes(t *testing.T, config *types.E2EConfig, enr string) []int { var processIDs []int for i := 0; i < e2e.TestParams.BeaconNodeCount; i++ { - multiAddr, pID := StartNewBeaconNode(t, config, multiAddrs) - multiAddrs = append(multiAddrs, multiAddr) + pID := StartNewBeaconNode(t, config, i, enr) processIDs = append(processIDs, pID) } - return multiAddrs, processIDs + return processIDs } // StartNewBeaconNode starts a fresh beacon node, connecting to all passed in beacon nodes. 
-func StartNewBeaconNode(t *testing.T, config *types.E2EConfig, multiAddrs []string) (string, int) { - index := len(multiAddrs) +func StartNewBeaconNode(t *testing.T, config *types.E2EConfig, index int, enr string) int { binaryPath, found := bazel.FindBinary("beacon-chain", "beacon-chain") if !found { t.Log(binaryPath) @@ -49,59 +46,88 @@ func StartNewBeaconNode(t *testing.T, config *types.E2EConfig, multiAddrs []stri fmt.Sprintf("--http-web3provider=http://127.0.0.1:%d", e2e.TestParams.Eth1RPCPort), fmt.Sprintf("--web3provider=ws://127.0.0.1:%d", e2e.TestParams.Eth1RPCPort+1), fmt.Sprintf("--min-sync-peers=%d", e2e.TestParams.BeaconNodeCount-1), - fmt.Sprintf("--p2p-udp-port=%d", e2e.TestParams.BeaconNodeRPCPort+index+10), //12200 - fmt.Sprintf("--p2p-tcp-port=%d", e2e.TestParams.BeaconNodeRPCPort+index+20), //13200 - fmt.Sprintf("--monitoring-port=%d", e2e.TestParams.BeaconNodeRPCPort+index+30), //8280 - fmt.Sprintf("--grpc-gateway-port=%d", e2e.TestParams.BeaconNodeRPCPort+index+40), // 3400 + fmt.Sprintf("--p2p-udp-port=%d", e2e.TestParams.BeaconNodeRPCPort+index+10), + fmt.Sprintf("--p2p-tcp-port=%d", e2e.TestParams.BeaconNodeRPCPort+index+20), + fmt.Sprintf("--monitoring-port=%d", e2e.TestParams.BeaconNodeRPCPort+index+30), + fmt.Sprintf("--grpc-gateway-port=%d", e2e.TestParams.BeaconNodeRPCPort+index+40), fmt.Sprintf("--contract-deployment-block=%d", 0), fmt.Sprintf("--rpc-max-page-size=%d", params.BeaconConfig().MinGenesisActiveValidatorCount), "--force-clear-db", - "--no-discovery", + fmt.Sprintf("--bootstrap-node=%s", enr), + "--verbosity=debug", } args = append(args, featureconfig.E2EBeaconChainFlags...) args = append(args, config.BeaconFlags...) - // After the first node is made, have all following nodes connect to all previously made nodes. - if index >= 1 { - for p := 0; p < index; p++ { - args = append(args, fmt.Sprintf("--peer=%s", multiAddrs[p])) - } - } - cmd := exec.Command(binaryPath, args...) t.Logf("Starting beacon chain %d with flags: %s", index, strings.Join(args[2:], " ")) if err := cmd.Start(); err != nil { t.Fatalf("Failed to start beacon node: %v", err) } - if err = helpers.WaitForTextInFile(stdOutFile, "Node started p2p server"); err != nil { + if err = helpers.WaitForTextInFile(stdOutFile, "RPC-API listening on port"); err != nil { t.Fatalf("could not find multiaddr for node %d, this means the node had issues starting: %v", index, err) } - multiAddr, err := getMultiAddrFromLogFile(stdOutFile.Name()) - if err != nil { - t.Fatalf("could not get multiaddr for node %d: %v", index, err) - } - - return multiAddr, cmd.Process.Pid + return cmd.Process.Pid } -func getMultiAddrFromLogFile(name string) (string, error) { +// StartBootnode starts a bootnode and returns its ENR and process ID. +func StartBootnode(t *testing.T) (string, int) { + binaryPath, found := bazel.FindBinary("tools/bootnode", "bootnode") + if !found { + t.Log(binaryPath) + t.Fatal("boot node binary not found") + } + + stdOutFile, err := helpers.DeleteAndCreateFile(e2e.TestParams.LogPath, e2e.BootNodeLogFileName) + if err != nil { + t.Fatal(err) + } + + args := []string{ + fmt.Sprintf("--log-file=%s", stdOutFile.Name()), + fmt.Sprintf("--discv5-port=%d", e2e.TestParams.BootNodePort), + fmt.Sprintf("--kad-port=%d", e2e.TestParams.BootNodePort+10), + fmt.Sprintf("--metrics-port=%d", e2e.TestParams.BootNodePort+20), + } + + cmd := exec.Command(binaryPath, args...) 
+	cmd.Stdout = stdOutFile
+	cmd.Stderr = stdOutFile
+	t.Logf("Starting boot node with flags: %s", strings.Join(args[1:], " "))
+	if err := cmd.Start(); err != nil {
+		t.Fatalf("Failed to start boot node: %v", err)
+	}
+
+	if err = helpers.WaitForTextInFile(stdOutFile, "Running bootnode"); err != nil {
+		t.Fatalf("could not find enr for bootnode, this means the bootnode had issues starting: %v", err)
+	}
+
+	enr, err := getENRFromLogFile(stdOutFile.Name())
+	if err != nil {
+		t.Fatalf("could not get enr for bootnode: %v", err)
+	}
+
+	return enr, cmd.Process.Pid
+}
+
+func getENRFromLogFile(name string) (string, error) {
 	byteContent, err := ioutil.ReadFile(name)
 	if err != nil {
 		return "", err
 	}
 	contents := string(byteContent)
 
-	searchText := "\"Node started p2p server\" multiAddr=\""
+	searchText := "Running bootnode: "
 	startIdx := strings.Index(contents, searchText)
 	if startIdx == -1 {
-		return "", fmt.Errorf("did not find peer text in %s", contents)
+		return "", fmt.Errorf("did not find ENR text in %s", contents)
 	}
 	startIdx += len(searchText)
-	endIdx := strings.Index(contents[startIdx:], "\"")
+	endIdx := strings.Index(contents[startIdx:], " prefix=bootnode")
 	if endIdx == -1 {
-		return "", fmt.Errorf("did not find peer text in %s", contents)
+		return "", fmt.Errorf("did not find ENR text in %s", contents)
 	}
-	return contents[startIdx : startIdx+endIdx], nil
+	return contents[startIdx : startIdx+endIdx-1], nil
 }
diff --git a/endtoend/components/eth1.go b/endtoend/components/eth1.go
index 06bfc7804e..748c5e022b 100644
--- a/endtoend/components/eth1.go
+++ b/endtoend/components/eth1.go
@@ -52,7 +52,7 @@ func StartEth1Node(t *testing.T) (string, int) {
 		"--wsaddr=0.0.0.0",
 		"--wsorigins=\"*\"",
 		"--dev",
-		"--dev.period=0",
+		"--dev.period=2",
 		"--ipcdisable",
 	}
 	cmd := exec.Command(binaryPath, args...)
diff --git a/endtoend/endtoend_test.go b/endtoend/endtoend_test.go
index 916036715d..86dbde051a 100644
--- a/endtoend/endtoend_test.go
+++ b/endtoend/endtoend_test.go
@@ -30,18 +30,14 @@ func runEndToEndTest(t *testing.T, config *types.E2EConfig) {
 	t.Logf("Log Path: %s\n\n", e2e.TestParams.LogPath)
 
 	keystorePath, eth1PID := components.StartEth1Node(t)
-	multiAddrs, bProcessIDs := components.StartBeaconNodes(t, config)
+	bootnodeENR, _ := components.StartBootnode(t)
+	bProcessIDs := components.StartBeaconNodes(t, config, bootnodeENR)
 	valProcessIDs := components.StartValidators(t, config, keystorePath)
 	processIDs := append(valProcessIDs, bProcessIDs...)
processIDs = append(processIDs, eth1PID) defer helpers.LogOutput(t, config) defer helpers.KillProcesses(t, processIDs) - if config.TestSlasher { - slasherPIDs := components.StartSlashers(t) - defer helpers.KillProcesses(t, slasherPIDs) - } - beaconLogFile, err := os.Open(path.Join(e2e.TestParams.LogPath, fmt.Sprintf(e2e.BeaconNodeLogFileName, 0))) if err != nil { t.Fatal(err) @@ -77,6 +73,11 @@ func runEndToEndTest(t *testing.T, config *types.E2EConfig) { epochSeconds := params.BeaconConfig().SecondsPerSlot * params.BeaconConfig().SlotsPerEpoch genesisTime := time.Unix(genesis.GenesisTime.Seconds+int64(epochSeconds/2), 0) + if config.TestSlasher { + slasherPIDs := components.StartSlashers(t) + defer helpers.KillProcesses(t, slasherPIDs) + } + ticker := helpers.GetEpochTicker(genesisTime, epochSeconds) for currentEpoch := range ticker.C() { for _, evaluator := range config.Evaluators { @@ -104,9 +105,8 @@ func runEndToEndTest(t *testing.T, config *types.E2EConfig) { return } - multiAddr, processID := components.StartNewBeaconNode(t, config, multiAddrs) - multiAddrs = append(multiAddrs, multiAddr) index := e2e.TestParams.BeaconNodeCount + processID := components.StartNewBeaconNode(t, config, index, bootnodeENR) syncConn, err := grpc.Dial(fmt.Sprintf("127.0.0.1:%d", e2e.TestParams.BeaconNodeRPCPort+index), grpc.WithInsecure()) if err != nil { t.Fatalf("Failed to dial: %v", err) diff --git a/endtoend/evaluators/BUILD.bazel b/endtoend/evaluators/BUILD.bazel index d165b17af6..cbddb3f8b7 100644 --- a/endtoend/evaluators/BUILD.bazel +++ b/endtoend/evaluators/BUILD.bazel @@ -12,6 +12,7 @@ go_library( importpath = "github.com/prysmaticlabs/prysm/endtoend/evaluators", visibility = ["//endtoend:__subpackages__"], deps = [ + "//beacon-chain/core/helpers:go_default_library", "//endtoend/types:go_default_library", "//shared/bytesutil:go_default_library", "//shared/params:go_default_library", @@ -21,7 +22,6 @@ go_library( "@com_github_pkg_errors//:go_default_library", "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", "@com_github_prysmaticlabs_go_bitfield//:go_default_library", - "@com_github_prysmaticlabs_go_ssz//:go_default_library", "@org_golang_google_grpc//:go_default_library", ], ) diff --git a/endtoend/evaluators/slashing.go b/endtoend/evaluators/slashing.go index 27dd73e3bf..278a876443 100644 --- a/endtoend/evaluators/slashing.go +++ b/endtoend/evaluators/slashing.go @@ -6,7 +6,7 @@ import ( ptypes "github.com/gogo/protobuf/types" eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-bitfield" - "github.com/prysmaticlabs/go-ssz" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/endtoend/types" "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/params" @@ -80,15 +80,15 @@ func insertDoubleAttestationIntoPool(conns ...*grpc.ClientConn) error { blockRoot := bytesutil.ToBytes32([]byte("muahahahaha I'm an evil validator")) attData.BeaconBlockRoot = blockRoot[:] - dataRoot, err := ssz.HashTreeRoot(attData) + req := ð.DomainRequest{ + Epoch: chainHead.HeadEpoch, + Domain: params.BeaconConfig().DomainBeaconAttester[:], + } + resp, err := valClient.DomainData(ctx, req) if err != nil { return err } - - domainResp, err := valClient.DomainData(ctx, ð.DomainRequest{ - Epoch: attData.Target.Epoch, - Domain: params.BeaconConfig().DomainBeaconAttester[:], - }) + signingRoot, err := helpers.ComputeSigningRoot(attData, resp.SignatureDomain) if err != nil { return err } @@ -106,7 
+106,7 @@ func insertDoubleAttestationIntoPool(conns ...*grpc.ClientConn) error {
 		att := &eth.Attestation{
 			AggregationBits: attBitfield,
 			Data:            attData,
-			Signature:       privKeys[committee[i]].Sign(dataRoot[:], domainResp.SignatureDomain).Marshal(),
+			Signature:       privKeys[committee[i]].Sign(signingRoot[:]).Marshal(),
 		}
 		for _, conn := range conns {
 			client := eth.NewBeaconNodeValidatorClient(conn)
diff --git a/endtoend/helpers/helpers.go b/endtoend/helpers/helpers.go
index 1f510f64da..9560eaef1c 100644
--- a/endtoend/helpers/helpers.go
+++ b/endtoend/helpers/helpers.go
@@ -140,5 +140,4 @@ func LogErrorOutput(t *testing.T, file io.Reader, title string, index int) {
 		}
 		t.Log(err)
 	}
-	t.Logf("===================== End of %s %d error output ====================\n", title, index)
 }
diff --git a/endtoend/minimal_antiflake_e2e_1_test.go b/endtoend/minimal_antiflake_e2e_1_test.go
index 5b8914f49d..84a50d6a92 100644
--- a/endtoend/minimal_antiflake_e2e_1_test.go
+++ b/endtoend/minimal_antiflake_e2e_1_test.go
@@ -17,12 +17,13 @@ func TestEndToEnd_AntiFlake_MinimalConfig_1(t *testing.T) {
 	minimalConfig := &types.E2EConfig{
 		BeaconFlags:    []string{"--minimal-config", "--custom-genesis-delay=10"},
 		ValidatorFlags: []string{"--minimal-config"},
-		EpochsToRun:    3,
+		EpochsToRun:    4,
 		TestSync:       false,
 		TestSlasher:    false,
 		Evaluators: []types.Evaluator{
 			ev.PeersConnect,
 			ev.ValidatorsAreActive,
+			ev.ValidatorsParticipating,
 		},
 	}
 	if err := e2eParams.Init(4); err != nil {
diff --git a/endtoend/minimal_antiflake_e2e_2_test.go b/endtoend/minimal_antiflake_e2e_2_test.go
index 547ea48769..e788b80d04 100644
--- a/endtoend/minimal_antiflake_e2e_2_test.go
+++ b/endtoend/minimal_antiflake_e2e_2_test.go
@@ -17,12 +17,13 @@ func TestEndToEnd_AntiFlake_MinimalConfig_2(t *testing.T) {
 	minimalConfig := &types.E2EConfig{
 		BeaconFlags:    []string{"--minimal-config", "--custom-genesis-delay=10"},
 		ValidatorFlags: []string{"--minimal-config"},
-		EpochsToRun:    3,
+		EpochsToRun:    4,
 		TestSync:       false,
 		TestSlasher:    false,
 		Evaluators: []types.Evaluator{
 			ev.PeersConnect,
 			ev.ValidatorsAreActive,
+			ev.ValidatorsParticipating,
 		},
 	}
 	if err := e2eParams.Init(4); err != nil {
diff --git a/endtoend/params/params.go b/endtoend/params/params.go
index d0c7c6fb4a..a1bb3c8ac8 100644
--- a/endtoend/params/params.go
+++ b/endtoend/params/params.go
@@ -2,7 +2,9 @@ package params
 
 import (
 	"errors"
+	"fmt"
 	"os"
+	"path"
 	"strconv"
 
 	"github.com/bazelbuild/rules_go/go/tools/bazel"
@@ -17,6 +19,7 @@ type Params struct {
 	BeaconNodeCount       int
 	Eth1RPCPort           int
 	ContractAddress       common.Address
+	BootNodePort          int
 	BeaconNodeRPCPort     int
 	BeaconNodeMetricsPort int
 	ValidatorMetricsPort  int
@@ -27,6 +30,9 @@ type Params struct {
 // TestParams is the globally accessible var for getting config elements.
 var TestParams *Params
 
+// BootNodeLogFileName is the file name used for the bootnode logs.
+var BootNodeLogFileName = "bootnode.log"
+
 // BeaconNodeLogFileName is the file name used for the beacon chain node logs.
 var BeaconNodeLogFileName = "beacon-%d.log"
 
@@ -54,12 +60,13 @@ func Init(beaconNodeCount int) error {
 	}
 
 	TestParams = &Params{
-		TestPath:              testPath,
+		TestPath:              path.Join(testPath, fmt.Sprintf("shard-%d", testIndex)),
 		LogPath:               logPath,
 		TestShardIndex:        testIndex,
 		BeaconNodeCount:       beaconNodeCount,
 		Eth1RPCPort:           3100 + testIndex*100, // Multiplying 100 here so the test index doesn't conflict with the other node ports.
- BeaconNodeRPCPort: 4100 + testIndex*100, + BootNodePort: 4100 + testIndex*100, + BeaconNodeRPCPort: 4150 + testIndex*100, BeaconNodeMetricsPort: 5100 + testIndex*100, ValidatorMetricsPort: 6100 + testIndex*100, SlasherRPCPort: 7100 + testIndex*100, diff --git a/proto/beacon/db/attestation_container.pb.go b/proto/beacon/db/attestation_container.pb.go index 462366c136..077567e979 100755 --- a/proto/beacon/db/attestation_container.pb.go +++ b/proto/beacon/db/attestation_container.pb.go @@ -5,13 +5,14 @@ package db import ( fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" v1alpha1 "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" github_com_prysmaticlabs_go_bitfield "github.com/prysmaticlabs/go-bitfield" - io "io" - math "math" - math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/proto/beacon/db/finalized_block_root_container.pb.go b/proto/beacon/db/finalized_block_root_container.pb.go index 446e609773..f1cbb44283 100755 --- a/proto/beacon/db/finalized_block_root_container.pb.go +++ b/proto/beacon/db/finalized_block_root_container.pb.go @@ -5,10 +5,11 @@ package db import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/proto/beacon/db/powchain.pb.go b/proto/beacon/db/powchain.pb.go index 4cb1b16eba..297a2fb606 100755 --- a/proto/beacon/db/powchain.pb.go +++ b/proto/beacon/db/powchain.pb.go @@ -5,12 +5,13 @@ package db import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" - v1alpha1 "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - v1 "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" io "io" math "math" math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" + v1alpha1 "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" + v1 "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/proto/beacon/p2p/v1/BUILD.bazel b/proto/beacon/p2p/v1/BUILD.bazel index 8f96cb49a4..600ca0d2ee 100644 --- a/proto/beacon/p2p/v1/BUILD.bazel +++ b/proto/beacon/p2p/v1/BUILD.bazel @@ -26,6 +26,8 @@ ssz_gen_marshal( ], objs = [ "BeaconBlocksByRangeRequest", + "ENRForkID", + "MetaData", "Fork", "HistoricalBatch", "Status", diff --git a/proto/beacon/p2p/v1/archive.pb.go b/proto/beacon/p2p/v1/archive.pb.go index 6178484eeb..5ce5c621e0 100755 --- a/proto/beacon/p2p/v1/archive.pb.go +++ b/proto/beacon/p2p/v1/archive.pb.go @@ -5,12 +5,13 @@ package ethereum_beacon_p2p_v1 import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - v1alpha1 "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" io "io" math "math" math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + v1alpha1 "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" ) // Reference imports to suppress errors if they are not otherwise used. 
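Editor's note (not part of the patch): the BUILD.bazel change above registers ENRForkID and MetaData for SSZ marshalling, and the messages.pb.go diff that follows replaces Status.HeadForkVersion with a 4-byte ForkDigest. The sketch below is only an illustration of how these regenerated types can be constructed and compared; the statusCompatible helper, the zero digest, and the example values are made-up placeholders, not code from this change.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)

// statusCompatible is a hypothetical check: peers on different forks advertise
// different 4-byte fork digests in their Status messages, so comparing the raw
// digest bytes is enough to decide whether the handshake should continue.
func statusCompatible(local, remote *pb.Status) bool {
	return bytes.Equal(local.ForkDigest, remote.ForkDigest)
}

func main() {
	digest := make([]byte, 4) // placeholder fork digest (ssz-size:"4")

	local := &pb.Status{ForkDigest: digest, FinalizedEpoch: 0, HeadSlot: 0}
	remote := &pb.Status{ForkDigest: digest}
	fmt.Println("same fork:", statusCompatible(local, remote))

	// ENRForkID is what a node advertises in its ENR so peers can filter by
	// fork before dialing; 1<<64-1 stands in for a far-future next-fork epoch.
	enrFork := &pb.ENRForkID{
		CurrentForkDigest: digest,
		NextForkVersion:   make([]byte, 4),
		NextForkEpoch:     1<<64 - 1,
	}
	fmt.Println("enr digest matches:", bytes.Equal(enrFork.CurrentForkDigest, local.ForkDigest))

	// MetaData carries a sequence number plus the attestation subnets the node
	// listens on, encoded as a 64-bit bitvector.
	md := &pb.MetaData{
		SeqNumber: 1,
		Attnets:   bitfield.Bitvector64(make([]byte, 8)),
	}
	fmt.Println("metadata seq:", md.SeqNumber, "attnets bytes:", len(md.Attnets))
}
```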
diff --git a/proto/beacon/p2p/v1/messages.pb.go b/proto/beacon/p2p/v1/messages.pb.go index da1cfa7ad2..d7e5a02c56 100755 --- a/proto/beacon/p2p/v1/messages.pb.go +++ b/proto/beacon/p2p/v1/messages.pb.go @@ -5,11 +5,13 @@ package ethereum_beacon_p2p_v1 import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_prysmaticlabs_go_bitfield "github.com/prysmaticlabs/go-bitfield" ) // Reference imports to suppress errors if they are not otherwise used. @@ -24,7 +26,7 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type Status struct { - HeadForkVersion []byte `protobuf:"bytes,1,opt,name=head_fork_version,json=headForkVersion,proto3" json:"head_fork_version,omitempty" ssz-size:"4"` + ForkDigest []byte `protobuf:"bytes,1,opt,name=fork_digest,json=forkDigest,proto3" json:"fork_digest,omitempty" ssz-size:"4"` FinalizedRoot []byte `protobuf:"bytes,2,opt,name=finalized_root,json=finalizedRoot,proto3" json:"finalized_root,omitempty" ssz-size:"32"` FinalizedEpoch uint64 `protobuf:"varint,3,opt,name=finalized_epoch,json=finalizedEpoch,proto3" json:"finalized_epoch,omitempty"` HeadRoot []byte `protobuf:"bytes,4,opt,name=head_root,json=headRoot,proto3" json:"head_root,omitempty" ssz-size:"32"` @@ -67,9 +69,9 @@ func (m *Status) XXX_DiscardUnknown() { var xxx_messageInfo_Status proto.InternalMessageInfo -func (m *Status) GetHeadForkVersion() []byte { +func (m *Status) GetForkDigest() []byte { if m != nil { - return m.HeadForkVersion + return m.ForkDigest } return nil } @@ -103,10 +105,9 @@ func (m *Status) GetHeadSlot() uint64 { } type BeaconBlocksByRangeRequest struct { - HeadBlockRoot []byte `protobuf:"bytes,1,opt,name=head_block_root,json=headBlockRoot,proto3" json:"head_block_root,omitempty" ssz-size:"32"` - StartSlot uint64 `protobuf:"varint,2,opt,name=start_slot,json=startSlot,proto3" json:"start_slot,omitempty"` - Count uint64 `protobuf:"varint,3,opt,name=count,proto3" json:"count,omitempty"` - Step uint64 `protobuf:"varint,4,opt,name=step,proto3" json:"step,omitempty"` + StartSlot uint64 `protobuf:"varint,1,opt,name=start_slot,json=startSlot,proto3" json:"start_slot,omitempty"` + Count uint64 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + Step uint64 `protobuf:"varint,3,opt,name=step,proto3" json:"step,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -145,13 +146,6 @@ func (m *BeaconBlocksByRangeRequest) XXX_DiscardUnknown() { var xxx_messageInfo_BeaconBlocksByRangeRequest proto.InternalMessageInfo -func (m *BeaconBlocksByRangeRequest) GetHeadBlockRoot() []byte { - if m != nil { - return m.HeadBlockRoot - } - return nil -} - func (m *BeaconBlocksByRangeRequest) GetStartSlot() uint64 { if m != nil { return m.StartSlot @@ -173,9 +167,129 @@ func (m *BeaconBlocksByRangeRequest) GetStep() uint64 { return 0 } +type ENRForkID struct { + CurrentForkDigest []byte `protobuf:"bytes,1,opt,name=current_fork_digest,json=currentForkDigest,proto3" json:"current_fork_digest,omitempty" ssz-size:"4"` + NextForkVersion []byte `protobuf:"bytes,2,opt,name=next_fork_version,json=nextForkVersion,proto3" json:"next_fork_version,omitempty" ssz-size:"4"` + NextForkEpoch uint64 `protobuf:"varint,3,opt,name=next_fork_epoch,json=nextForkEpoch,proto3" json:"next_fork_epoch,omitempty"` + XXX_NoUnkeyedLiteral 
struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ENRForkID) Reset() { *m = ENRForkID{} } +func (m *ENRForkID) String() string { return proto.CompactTextString(m) } +func (*ENRForkID) ProtoMessage() {} +func (*ENRForkID) Descriptor() ([]byte, []int) { + return fileDescriptor_a1d590cda035b632, []int{2} +} +func (m *ENRForkID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ENRForkID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ENRForkID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ENRForkID) XXX_Merge(src proto.Message) { + xxx_messageInfo_ENRForkID.Merge(m, src) +} +func (m *ENRForkID) XXX_Size() int { + return m.Size() +} +func (m *ENRForkID) XXX_DiscardUnknown() { + xxx_messageInfo_ENRForkID.DiscardUnknown(m) +} + +var xxx_messageInfo_ENRForkID proto.InternalMessageInfo + +func (m *ENRForkID) GetCurrentForkDigest() []byte { + if m != nil { + return m.CurrentForkDigest + } + return nil +} + +func (m *ENRForkID) GetNextForkVersion() []byte { + if m != nil { + return m.NextForkVersion + } + return nil +} + +func (m *ENRForkID) GetNextForkEpoch() uint64 { + if m != nil { + return m.NextForkEpoch + } + return 0 +} + +type MetaData struct { + SeqNumber uint64 `protobuf:"varint,1,opt,name=seq_number,json=seqNumber,proto3" json:"seq_number,omitempty"` + Attnets github_com_prysmaticlabs_go_bitfield.Bitvector64 `protobuf:"bytes,2,opt,name=attnets,proto3,casttype=github.com/prysmaticlabs/go-bitfield.Bitvector64" json:"attnets,omitempty" ssz-size:"64"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetaData) Reset() { *m = MetaData{} } +func (m *MetaData) String() string { return proto.CompactTextString(m) } +func (*MetaData) ProtoMessage() {} +func (*MetaData) Descriptor() ([]byte, []int) { + return fileDescriptor_a1d590cda035b632, []int{3} +} +func (m *MetaData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetaData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetaData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetaData) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetaData.Merge(m, src) +} +func (m *MetaData) XXX_Size() int { + return m.Size() +} +func (m *MetaData) XXX_DiscardUnknown() { + xxx_messageInfo_MetaData.DiscardUnknown(m) +} + +var xxx_messageInfo_MetaData proto.InternalMessageInfo + +func (m *MetaData) GetSeqNumber() uint64 { + if m != nil { + return m.SeqNumber + } + return 0 +} + +func (m *MetaData) GetAttnets() github_com_prysmaticlabs_go_bitfield.Bitvector64 { + if m != nil { + return m.Attnets + } + return nil +} + func init() { proto.RegisterType((*Status)(nil), "ethereum.beacon.p2p.v1.Status") proto.RegisterType((*BeaconBlocksByRangeRequest)(nil), "ethereum.beacon.p2p.v1.BeaconBlocksByRangeRequest") + proto.RegisterType((*ENRForkID)(nil), "ethereum.beacon.p2p.v1.ENRForkID") + proto.RegisterType((*MetaData)(nil), "ethereum.beacon.p2p.v1.MetaData") } func init() { @@ -183,30 +297,38 @@ func init() { } var fileDescriptor_a1d590cda035b632 = []byte{ - // 365 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x6e, 0xb2, 0x40, - 0x10, 0xc7, 0xb3, 0x7e, 0x68, 0x3e, 0x37, 0xfa, 0xf9, 0xb9, 0x69, 0x1a, 0x62, 0x53, 0x35, 0x5c, - 0xea, 0x45, 0x88, 0xda, 0x43, 0xdb, 0xf4, 0x44, 0xd2, 0x3e, 0x00, 0x26, 0xbd, 0x12, 0xc0, 0x11, - 0x88, 0xc8, 0x50, 0x76, 0x31, 0xa9, 0x4f, 0xd3, 0xc7, 0xe9, 0xb1, 0x4f, 0x60, 0x1a, 0x1f, 0xc1, - 0x43, 0xcf, 0x0d, 0x83, 0xa9, 0x27, 0x6f, 0xbb, 0x33, 0xbf, 0xf9, 0xfd, 0x19, 0x96, 0x1b, 0x59, - 0x8e, 0x0a, 0x2d, 0x1f, 0xbc, 0x00, 0x53, 0x2b, 0x9b, 0x66, 0xd6, 0x66, 0x62, 0xad, 0x41, 0x4a, - 0x2f, 0x04, 0x69, 0x52, 0x53, 0x5c, 0x82, 0x8a, 0x20, 0x87, 0x62, 0x6d, 0x56, 0x98, 0x99, 0x4d, - 0x33, 0x73, 0x33, 0xe9, 0x8d, 0xc3, 0x58, 0x45, 0x85, 0x6f, 0x06, 0xb8, 0xb6, 0x42, 0x0c, 0xd1, - 0x22, 0xdc, 0x2f, 0x96, 0x74, 0xab, 0xc4, 0xe5, 0xa9, 0xd2, 0x18, 0xdf, 0x8c, 0x37, 0xe6, 0xca, - 0x53, 0x85, 0x14, 0x8f, 0xbc, 0x1b, 0x81, 0xb7, 0x70, 0x97, 0x98, 0xaf, 0xdc, 0x0d, 0xe4, 0x32, - 0xc6, 0x54, 0x67, 0x43, 0x36, 0x6a, 0xd9, 0xff, 0x0f, 0xbb, 0x41, 0x4b, 0xca, 0xed, 0x58, 0xc6, - 0x5b, 0x78, 0x30, 0x6e, 0x0d, 0xa7, 0x53, 0xa2, 0xcf, 0x98, 0xaf, 0x5e, 0x2a, 0x50, 0xdc, 0xf1, - 0x7f, 0xcb, 0x38, 0xf5, 0x92, 0x78, 0x0b, 0x0b, 0x37, 0x47, 0x54, 0x7a, 0x8d, 0x46, 0xbb, 0x87, - 0xdd, 0xa0, 0x7d, 0x1a, 0x9d, 0x4d, 0x0d, 0xa7, 0xfd, 0x0b, 0x3a, 0x88, 0x4a, 0xdc, 0xf0, 0xce, - 0x69, 0x12, 0x32, 0x0c, 0x22, 0xfd, 0xcf, 0x90, 0x8d, 0x34, 0xe7, 0x24, 0x7c, 0x2a, 0xab, 0xc2, - 0xe4, 0x4d, 0xfa, 0x40, 0xb2, 0x6b, 0xe7, 0xec, 0x7f, 0x4b, 0x86, 0xc4, 0x57, 0x47, 0x5e, 0x26, - 0xa8, 0xf4, 0x3a, 0x29, 0xa9, 0x39, 0x4f, 0x50, 0x19, 0xef, 0x8c, 0xf7, 0x6c, 0xfa, 0x73, 0x76, - 0x82, 0xc1, 0x4a, 0xda, 0x6f, 0x8e, 0x97, 0x86, 0xe0, 0xc0, 0x6b, 0x01, 0x52, 0x89, 0x7b, 0x4e, - 0x1b, 0xba, 0x7e, 0xd9, 0xac, 0x12, 0xd9, 0xd9, 0x7d, 0x4a, 0x92, 0x2c, 0x14, 0x7b, 0xcd, 0xb9, - 0x54, 0x5e, 0xae, 0xaa, 0xdc, 0x1a, 0xe5, 0x36, 0xa9, 0x52, 0x06, 0x8b, 0x0b, 0x5e, 0x0f, 0xb0, - 0x48, 0xd5, 0x71, 0xc9, 0xea, 0x22, 0x04, 0xd7, 0xa4, 0x82, 0x8c, 0xd6, 0xd2, 0x1c, 0x3a, 0xdb, - 0xad, 0x8f, 0x7d, 0x9f, 0x7d, 0xee, 0xfb, 0xec, 0x6b, 0xdf, 0x67, 0x7e, 0x83, 0x1e, 0x6c, 0xf6, - 0x13, 0x00, 0x00, 0xff, 0xff, 0x12, 0xe0, 0xa8, 0xc9, 0x1d, 0x02, 0x00, 0x00, + // 490 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xb1, 0x6e, 0xd3, 0x40, + 0x18, 0xc7, 0x65, 0x48, 0x4b, 0x7b, 0x24, 0x84, 0x1c, 0x08, 0x45, 0x45, 0x24, 0x95, 0x07, 0xe8, + 0x12, 0x9b, 0xa4, 0x51, 0x85, 0x10, 0x03, 0xb2, 0x92, 0x4a, 0x0c, 0x74, 0x70, 0x25, 0xd6, 0xe8, + 0xec, 0x7c, 0x71, 0x4e, 0x71, 0xfc, 0x39, 0x77, 0x9f, 0x23, 0x9a, 0x47, 0xe0, 0x75, 0x78, 0x09, + 0x46, 0x9e, 0x20, 0x42, 0x59, 0xd9, 0x3a, 0x32, 0x21, 0x9f, 0xdd, 0xba, 0x08, 0x21, 0xb1, 0xf9, + 0xbe, 0xfb, 0xfd, 0xfe, 0xba, 0xfb, 0xfb, 0x98, 0x9d, 0x2a, 0x24, 0x74, 0x03, 0x10, 0x21, 0x26, + 0x6e, 0x3a, 0x48, 0xdd, 0x75, 0xdf, 0x5d, 0x82, 0xd6, 0x22, 0x02, 0xed, 0x98, 0x4d, 0xfe, 0x0c, + 0x68, 0x0e, 0x0a, 0xb2, 0xa5, 0x53, 0x60, 0x4e, 0x3a, 0x48, 0x9d, 0x75, 0xff, 0xa8, 0x17, 0x49, + 0x9a, 0x67, 0x81, 0x13, 0xe2, 0xd2, 0x8d, 0x30, 0x42, 0xd7, 0xe0, 0x41, 0x36, 0x33, 0xab, 0x22, + 0x38, 0xff, 0x2a, 0x62, 0xec, 0x9f, 0x16, 0xdb, 0xbf, 0x24, 0x41, 0x99, 0xe6, 0x7d, 0xf6, 0x70, + 0x86, 0x6a, 0x31, 0x99, 0xca, 0x08, 0x34, 0xb5, 0xad, 0x63, 0xeb, 0xa4, 0xee, 0x3d, 0xbe, 0xde, + 0x76, 0xeb, 0x5a, 0x6f, 0x7a, 0x5a, 0x6e, 0xe0, 0xad, 0x3d, 0xb4, 0x7d, 0x96, 0x43, 0x23, 0xc3, + 0xf0, 0x37, 0xec, 0xd1, 0x4c, 0x26, 0x22, 0x96, 0x1b, 0x98, 0x4e, 0x14, 0x22, 0xb5, 0xef, 0x19, + 0xab, 0x75, 0xbd, 0xed, 
0x36, 0x2a, 0xeb, 0x74, 0x60, 0xfb, 0x8d, 0x5b, 0xd0, 0x47, 0x24, 0xfe, + 0x8a, 0x35, 0x2b, 0x13, 0x52, 0x0c, 0xe7, 0xed, 0xfb, 0xc7, 0xd6, 0x49, 0xcd, 0xaf, 0x02, 0xc7, + 0xf9, 0x94, 0x3b, 0xec, 0x70, 0x0e, 0xa2, 0x4c, 0xaf, 0xfd, 0x2b, 0xfd, 0x20, 0x67, 0x4c, 0xf0, + 0xf3, 0x92, 0xd7, 0x31, 0x52, 0x7b, 0xcf, 0x44, 0x9a, 0xcd, 0xcb, 0x18, 0xc9, 0x06, 0x76, 0xe4, + 0x99, 0xb6, 0xbc, 0x18, 0xc3, 0x85, 0xf6, 0xae, 0x7c, 0x91, 0x44, 0xe0, 0xc3, 0x2a, 0xcb, 0x6f, + 0xf3, 0x82, 0x31, 0x4d, 0x42, 0x51, 0xe1, 0x5a, 0xc6, 0x3d, 0x34, 0x93, 0x5c, 0xe6, 0x4f, 0xd9, + 0x5e, 0x88, 0x59, 0x52, 0xdc, 0xb1, 0xe6, 0x17, 0x0b, 0xce, 0x59, 0x4d, 0x13, 0xa4, 0xe5, 0xe9, + 0xcd, 0xb7, 0xfd, 0xd5, 0x62, 0x87, 0xe3, 0x0b, 0xff, 0x1c, 0xd5, 0xe2, 0xc3, 0x88, 0xbf, 0x67, + 0x4f, 0xc2, 0x4c, 0x29, 0x48, 0x68, 0xf2, 0x3f, 0xfd, 0xb6, 0x4a, 0xf8, 0xbc, 0xaa, 0xf9, 0x1d, + 0x6b, 0x25, 0xf0, 0xb9, 0xd4, 0xd7, 0xa0, 0xb4, 0xc4, 0xa4, 0x6c, 0xfa, 0x6f, 0xbf, 0x99, 0xa3, + 0xb9, 0xfc, 0xa9, 0x00, 0xf9, 0x4b, 0xd6, 0xac, 0xec, 0xbb, 0x55, 0x37, 0x6e, 0x48, 0xd3, 0xb4, + 0xfd, 0xc5, 0x62, 0x07, 0x1f, 0x81, 0xc4, 0x48, 0x90, 0x30, 0x5d, 0xc0, 0x6a, 0x92, 0x64, 0xcb, + 0x00, 0xd4, 0x6d, 0x17, 0xb0, 0xba, 0x30, 0x03, 0x3e, 0x61, 0x0f, 0x04, 0x51, 0x02, 0xa4, 0xcb, + 0x73, 0x8c, 0xff, 0xfc, 0x27, 0x67, 0x43, 0xfb, 0xd7, 0xb6, 0xfb, 0xfa, 0xce, 0x5b, 0x4c, 0xd5, + 0x95, 0x5e, 0x0a, 0x92, 0x61, 0x2c, 0x02, 0xed, 0x46, 0xd8, 0x0b, 0x24, 0xcd, 0x24, 0xc4, 0x53, + 0xc7, 0x93, 0xb4, 0x86, 0x90, 0x50, 0x9d, 0x0d, 0xfd, 0x9b, 0x54, 0xaf, 0xfe, 0x6d, 0xd7, 0xb1, + 0xbe, 0xef, 0x3a, 0xd6, 0x8f, 0x5d, 0xc7, 0x0a, 0xf6, 0xcd, 0x63, 0x3d, 0xfd, 0x1d, 0x00, 0x00, + 0xff, 0xff, 0xd5, 0xcc, 0x55, 0xef, 0x19, 0x03, 0x00, 0x00, } func (m *Status) Marshal() (dAtA []byte, err error) { @@ -257,10 +379,10 @@ func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if len(m.HeadForkVersion) > 0 { - i -= len(m.HeadForkVersion) - copy(dAtA[i:], m.HeadForkVersion) - i = encodeVarintMessages(dAtA, i, uint64(len(m.HeadForkVersion))) + if len(m.ForkDigest) > 0 { + i -= len(m.ForkDigest) + copy(dAtA[i:], m.ForkDigest) + i = encodeVarintMessages(dAtA, i, uint64(len(m.ForkDigest))) i-- dAtA[i] = 0xa } @@ -294,28 +416,106 @@ func (m *BeaconBlocksByRangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, err if m.Step != 0 { i = encodeVarintMessages(dAtA, i, uint64(m.Step)) i-- - dAtA[i] = 0x20 + dAtA[i] = 0x18 } if m.Count != 0 { i = encodeVarintMessages(dAtA, i, uint64(m.Count)) i-- - dAtA[i] = 0x18 + dAtA[i] = 0x10 } if m.StartSlot != 0 { i = encodeVarintMessages(dAtA, i, uint64(m.StartSlot)) i-- - dAtA[i] = 0x10 + dAtA[i] = 0x8 } - if len(m.HeadBlockRoot) > 0 { - i -= len(m.HeadBlockRoot) - copy(dAtA[i:], m.HeadBlockRoot) - i = encodeVarintMessages(dAtA, i, uint64(len(m.HeadBlockRoot))) + return len(dAtA) - i, nil +} + +func (m *ENRForkID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ENRForkID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ENRForkID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.NextForkEpoch != 0 { + i = encodeVarintMessages(dAtA, i, uint64(m.NextForkEpoch)) + i-- + dAtA[i] = 0x18 + } + if len(m.NextForkVersion) > 0 { + i 
-= len(m.NextForkVersion) + copy(dAtA[i:], m.NextForkVersion) + i = encodeVarintMessages(dAtA, i, uint64(len(m.NextForkVersion))) + i-- + dAtA[i] = 0x12 + } + if len(m.CurrentForkDigest) > 0 { + i -= len(m.CurrentForkDigest) + copy(dAtA[i:], m.CurrentForkDigest) + i = encodeVarintMessages(dAtA, i, uint64(len(m.CurrentForkDigest))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } +func (m *MetaData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetaData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetaData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Attnets) > 0 { + i -= len(m.Attnets) + copy(dAtA[i:], m.Attnets) + i = encodeVarintMessages(dAtA, i, uint64(len(m.Attnets))) + i-- + dAtA[i] = 0x12 + } + if m.SeqNumber != 0 { + i = encodeVarintMessages(dAtA, i, uint64(m.SeqNumber)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func encodeVarintMessages(dAtA []byte, offset int, v uint64) int { offset -= sovMessages(v) base := offset @@ -333,7 +533,7 @@ func (m *Status) Size() (n int) { } var l int _ = l - l = len(m.HeadForkVersion) + l = len(m.ForkDigest) if l > 0 { n += 1 + l + sovMessages(uint64(l)) } @@ -363,10 +563,6 @@ func (m *BeaconBlocksByRangeRequest) Size() (n int) { } var l int _ = l - l = len(m.HeadBlockRoot) - if l > 0 { - n += 1 + l + sovMessages(uint64(l)) - } if m.StartSlot != 0 { n += 1 + sovMessages(uint64(m.StartSlot)) } @@ -382,6 +578,48 @@ func (m *BeaconBlocksByRangeRequest) Size() (n int) { return n } +func (m *ENRForkID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CurrentForkDigest) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + l = len(m.NextForkVersion) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + if m.NextForkEpoch != 0 { + n += 1 + sovMessages(uint64(m.NextForkEpoch)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MetaData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SeqNumber != 0 { + n += 1 + sovMessages(uint64(m.SeqNumber)) + } + l = len(m.Attnets) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovMessages(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -419,7 +657,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HeadForkVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ForkDigest", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -446,9 +684,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.HeadForkVersion = append(m.HeadForkVersion[:0], dAtA[iNdEx:postIndex]...) - if m.HeadForkVersion == nil { - m.HeadForkVersion = []byte{} + m.ForkDigest = append(m.ForkDigest[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ForkDigest == nil { + m.ForkDigest = []byte{} } iNdEx = postIndex case 2: @@ -611,9 +849,120 @@ func (m *BeaconBlocksByRangeRequest) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: BeaconBlocksByRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartSlot", wireType) + } + m.StartSlot = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartSlot |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) + } + m.Step = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Step |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ENRForkID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ENRForkID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ENRForkID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HeadBlockRoot", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentForkDigest", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -640,16 +989,16 @@ func (m *BeaconBlocksByRangeRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.HeadBlockRoot = append(m.HeadBlockRoot[:0], dAtA[iNdEx:postIndex]...) - if m.HeadBlockRoot == nil { - m.HeadBlockRoot = []byte{} + m.CurrentForkDigest = append(m.CurrentForkDigest[:0], dAtA[iNdEx:postIndex]...) 
+ if m.CurrentForkDigest == nil { + m.CurrentForkDigest = []byte{} } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartSlot", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextForkVersion", wireType) } - m.StartSlot = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMessages @@ -659,16 +1008,31 @@ func (m *BeaconBlocksByRangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.StartSlot |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } + if byteLen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextForkVersion = append(m.NextForkVersion[:0], dAtA[iNdEx:postIndex]...) + if m.NextForkVersion == nil { + m.NextForkVersion = []byte{} + } + iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NextForkEpoch", wireType) } - m.Count = 0 + m.NextForkEpoch = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMessages @@ -678,16 +1042,70 @@ func (m *BeaconBlocksByRangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= uint64(b&0x7F) << shift + m.NextForkEpoch |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 4: + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetaData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetaData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetaData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SeqNumber", wireType) } - m.Step = 0 + m.SeqNumber = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMessages @@ -697,11 +1115,45 @@ func (m *BeaconBlocksByRangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Step |= uint64(b&0x7F) << shift + m.SeqNumber |= uint64(b&0x7F) << shift if b < 0x80 { break } } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attnets", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attnets = append(m.Attnets[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Attnets == nil { + m.Attnets = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMessages(dAtA[iNdEx:]) diff --git a/proto/beacon/p2p/v1/messages.proto b/proto/beacon/p2p/v1/messages.proto index 2246e49626..76d138c2b0 100644 --- a/proto/beacon/p2p/v1/messages.proto +++ b/proto/beacon/p2p/v1/messages.proto @@ -5,7 +5,7 @@ package ethereum.beacon.p2p.v1; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; message Status { - bytes head_fork_version = 1 [(gogoproto.moretags) = "ssz-size:\"4\""]; + bytes fork_digest = 1 [(gogoproto.moretags) = "ssz-size:\"4\""]; bytes finalized_root = 2 [(gogoproto.moretags) = "ssz-size:\"32\""]; uint64 finalized_epoch = 3; bytes head_root = 4 [(gogoproto.moretags) = "ssz-size:\"32\""]; @@ -13,8 +13,25 @@ message Status { } message BeaconBlocksByRangeRequest { - bytes head_block_root = 1 [(gogoproto.moretags) = "ssz-size:\"32\""]; - uint64 start_slot = 2; - uint64 count = 3; - uint64 step = 4; + uint64 start_slot = 1; + uint64 count = 2; + uint64 step = 3; } + +message ENRForkID { + bytes current_fork_digest = 1 [(gogoproto.moretags) = "ssz-size:\"4\""]; + bytes next_fork_version = 2 [(gogoproto.moretags) = "ssz-size:\"4\""]; + uint64 next_fork_epoch = 3; +} +/* + Spec Definition: + MetaData + ( + seq_number: uint64 + attnets: Bitvector[ATTESTATION_SUBNET_COUNT] + ) +*/ +message MetaData { + uint64 seq_number =1; + bytes attnets = 2 [(gogoproto.moretags) = "ssz-size:\"8\"", (gogoproto.casttype) = "github.com/prysmaticlabs/go-bitfield.Bitvector64"]; +} \ No newline at end of file diff --git a/proto/beacon/p2p/v1/types.pb.go b/proto/beacon/p2p/v1/types.pb.go index b1a6800e1f..52f23d415d 100755 --- a/proto/beacon/p2p/v1/types.pb.go +++ b/proto/beacon/p2p/v1/types.pb.go @@ -5,13 +5,14 @@ package ethereum_beacon_p2p_v1 import ( fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" v1alpha1 "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" github_com_prysmaticlabs_go_bitfield "github.com/prysmaticlabs/go-bitfield" - io "io" - math "math" - math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. 
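The messages.proto hunk above is the source of the regenerated code: Status gains fork_digest, BeaconBlocksByRangeRequest drops head_block_root so start_slot/count/step move down to tags 1-3 (a renumbering that changes the wire encoding of that request), and two new containers appear, ENRForkID and MetaData. A short sketch of building the new messages with the generated types; import paths and values are illustrative only, and the proto Marshal calls here are just to exercise the generated code (on the wire and in the ENR these objects are SSZ-encoded):

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)

func main() {
	// MetaData advertises a peer's sequence number plus an 8-byte bitvector
	// of attestation subnets (cast to go-bitfield's Bitvector64 by gogoproto).
	md := &pb.MetaData{
		SeqNumber: 3,
		Attnets:   bitfield.NewBitvector64(),
	}
	md.Attnets.SetBitAt(5, true) // example: claim membership in subnet 5

	// The range request now identifies blocks by slot range alone.
	req := &pb.BeaconBlocksByRangeRequest{StartSlot: 100, Count: 64, Step: 1}

	// ENRForkID corresponds to the fork ID a node advertises to its peers.
	enrForkID := &pb.ENRForkID{
		CurrentForkDigest: []byte{0x01, 0x02, 0x03, 0x04}, // placeholder digest
		NextForkVersion:   []byte{0x00, 0x00, 0x00, 0x00},
		NextForkEpoch:     1<<64 - 1, // far-future epoch when no fork is scheduled
	}

	mdEnc, err := md.Marshal()
	if err != nil {
		panic(err)
	}
	reqEnc, err := req.Marshal()
	if err != nil {
		panic(err)
	}
	enrEnc, err := enrForkID.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Printf("metadata=%d bytes, range request=%d bytes, enr fork id=%d bytes\n",
		len(mdEnc), len(reqEnc), len(enrEnc))
}
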
@@ -27,8 +28,9 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type BeaconState struct { GenesisTime uint64 `protobuf:"varint,1001,opt,name=genesis_time,json=genesisTime,proto3" json:"genesis_time,omitempty"` - Slot uint64 `protobuf:"varint,1002,opt,name=slot,proto3" json:"slot,omitempty"` - Fork *Fork `protobuf:"bytes,1003,opt,name=fork,proto3" json:"fork,omitempty"` + GenesisValidatorsRoot []byte `protobuf:"bytes,1002,opt,name=genesis_validators_root,json=genesisValidatorsRoot,proto3" json:"genesis_validators_root,omitempty" ssz-size:"32"` + Slot uint64 `protobuf:"varint,1003,opt,name=slot,proto3" json:"slot,omitempty"` + Fork *Fork `protobuf:"bytes,1004,opt,name=fork,proto3" json:"fork,omitempty"` LatestBlockHeader *v1alpha1.BeaconBlockHeader `protobuf:"bytes,2001,opt,name=latest_block_header,json=latestBlockHeader,proto3" json:"latest_block_header,omitempty"` BlockRoots [][]byte `protobuf:"bytes,2002,rep,name=block_roots,json=blockRoots,proto3" json:"block_roots,omitempty" ssz-size:"8192,32"` StateRoots [][]byte `protobuf:"bytes,2003,rep,name=state_roots,json=stateRoots,proto3" json:"state_roots,omitempty" ssz-size:"8192,32"` @@ -91,6 +93,13 @@ func (m *BeaconState) GetGenesisTime() uint64 { return 0 } +func (m *BeaconState) GetGenesisValidatorsRoot() []byte { + if m != nil { + return m.GenesisValidatorsRoot + } + return nil +} + func (m *BeaconState) GetSlot() uint64 { if m != nil { return m.Slot @@ -523,6 +532,171 @@ func (m *StateSummary) GetRoot() []byte { return nil } +type SigningRoot struct { + ObjectRoot []byte `protobuf:"bytes,1,opt,name=object_root,json=objectRoot,proto3" json:"object_root,omitempty" ssz-size:"32"` + Domain []byte `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty" ssz-size:"32"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SigningRoot) Reset() { *m = SigningRoot{} } +func (m *SigningRoot) String() string { return proto.CompactTextString(m) } +func (*SigningRoot) ProtoMessage() {} +func (*SigningRoot) Descriptor() ([]byte, []int) { + return fileDescriptor_e719e7d82cfa7b0d, []int{6} +} +func (m *SigningRoot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SigningRoot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SigningRoot.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SigningRoot) XXX_Merge(src proto.Message) { + xxx_messageInfo_SigningRoot.Merge(m, src) +} +func (m *SigningRoot) XXX_Size() int { + return m.Size() +} +func (m *SigningRoot) XXX_DiscardUnknown() { + xxx_messageInfo_SigningRoot.DiscardUnknown(m) +} + +var xxx_messageInfo_SigningRoot proto.InternalMessageInfo + +func (m *SigningRoot) GetObjectRoot() []byte { + if m != nil { + return m.ObjectRoot + } + return nil +} + +func (m *SigningRoot) GetDomain() []byte { + if m != nil { + return m.Domain + } + return nil +} + +type ForkData struct { + CurrentVersion []byte `protobuf:"bytes,4,opt,name=current_version,json=currentVersion,proto3" json:"current_version,omitempty" ssz-size:"4"` + GenesisValidatorsRoot []byte `protobuf:"bytes,2,opt,name=genesis_validators_root,json=genesisValidatorsRoot,proto3" json:"genesis_validators_root,omitempty" ssz-size:"32"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} 
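SigningRoot and ForkData, added above, mirror the v0.11.1 spec containers of the same names: SigningRoot pairs an object root with a 32-byte domain, and ForkData combines a fork version with the genesis validators root. The fork digest carried by the renamed Status field is defined as the first four bytes of the hash tree root of a ForkData container. A rough sketch of that computation, assuming an SSZ helper along the lines of HashTreeRoot from github.com/prysmaticlabs/go-ssz (the helper the client actually uses may differ):

package main

import (
	"fmt"

	ssz "github.com/prysmaticlabs/go-ssz"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)

// forkDigest sketches the spec's compute_fork_digest: hash-tree-root the
// ForkData container and keep the first four bytes. The ssz.HashTreeRoot
// call is an assumption about the helper library, not taken from this patch.
func forkDigest(currentVersion, genesisValidatorsRoot []byte) ([4]byte, error) {
	var digest [4]byte
	root, err := ssz.HashTreeRoot(&pb.ForkData{
		CurrentVersion:        currentVersion,
		GenesisValidatorsRoot: genesisValidatorsRoot,
	})
	if err != nil {
		return digest, err
	}
	copy(digest[:], root[:4])
	return digest, nil
}

func main() {
	d, err := forkDigest([]byte{0, 0, 0, 0}, make([]byte, 32))
	if err != nil {
		panic(err)
	}
	fmt.Printf("fork digest: %x\n", d)
}
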
+ +func (m *ForkData) Reset() { *m = ForkData{} } +func (m *ForkData) String() string { return proto.CompactTextString(m) } +func (*ForkData) ProtoMessage() {} +func (*ForkData) Descriptor() ([]byte, []int) { + return fileDescriptor_e719e7d82cfa7b0d, []int{7} +} +func (m *ForkData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ForkData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ForkData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ForkData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ForkData.Merge(m, src) +} +func (m *ForkData) XXX_Size() int { + return m.Size() +} +func (m *ForkData) XXX_DiscardUnknown() { + xxx_messageInfo_ForkData.DiscardUnknown(m) +} + +var xxx_messageInfo_ForkData proto.InternalMessageInfo + +func (m *ForkData) GetCurrentVersion() []byte { + if m != nil { + return m.CurrentVersion + } + return nil +} + +func (m *ForkData) GetGenesisValidatorsRoot() []byte { + if m != nil { + return m.GenesisValidatorsRoot + } + return nil +} + +type SignedAggregateAndProof struct { + Message *v1alpha1.AggregateAttestationAndProof `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` + Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty" ssz-size:"96"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignedAggregateAndProof) Reset() { *m = SignedAggregateAndProof{} } +func (m *SignedAggregateAndProof) String() string { return proto.CompactTextString(m) } +func (*SignedAggregateAndProof) ProtoMessage() {} +func (*SignedAggregateAndProof) Descriptor() ([]byte, []int) { + return fileDescriptor_e719e7d82cfa7b0d, []int{8} +} +func (m *SignedAggregateAndProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignedAggregateAndProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignedAggregateAndProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignedAggregateAndProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignedAggregateAndProof.Merge(m, src) +} +func (m *SignedAggregateAndProof) XXX_Size() int { + return m.Size() +} +func (m *SignedAggregateAndProof) XXX_DiscardUnknown() { + xxx_messageInfo_SignedAggregateAndProof.DiscardUnknown(m) +} + +var xxx_messageInfo_SignedAggregateAndProof proto.InternalMessageInfo + +func (m *SignedAggregateAndProof) GetMessage() *v1alpha1.AggregateAttestationAndProof { + if m != nil { + return m.Message + } + return nil +} + +func (m *SignedAggregateAndProof) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + func init() { proto.RegisterType((*BeaconState)(nil), "ethereum.beacon.p2p.v1.BeaconState") proto.RegisterType((*Fork)(nil), "ethereum.beacon.p2p.v1.Fork") @@ -530,80 +704,93 @@ func init() { proto.RegisterType((*ValidatorLatestVote)(nil), "ethereum.beacon.p2p.v1.ValidatorLatestVote") proto.RegisterType((*HistoricalBatch)(nil), "ethereum.beacon.p2p.v1.HistoricalBatch") proto.RegisterType((*StateSummary)(nil), "ethereum.beacon.p2p.v1.StateSummary") + proto.RegisterType((*SigningRoot)(nil), "ethereum.beacon.p2p.v1.SigningRoot") + proto.RegisterType((*ForkData)(nil), 
"ethereum.beacon.p2p.v1.ForkData") + proto.RegisterType((*SignedAggregateAndProof)(nil), "ethereum.beacon.p2p.v1.SignedAggregateAndProof") } func init() { proto.RegisterFile("proto/beacon/p2p/v1/types.proto", fileDescriptor_e719e7d82cfa7b0d) } var fileDescriptor_e719e7d82cfa7b0d = []byte{ - // 1088 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xcd, 0x6f, 0x1b, 0x45, - 0x14, 0xd7, 0x26, 0x06, 0xda, 0x89, 0x1b, 0xa7, 0xe3, 0xaa, 0x59, 0xd2, 0x90, 0x35, 0x2b, 0xd1, - 0x46, 0xa8, 0x59, 0x67, 0x9d, 0xc4, 0x4e, 0x52, 0xd1, 0x8a, 0x6d, 0x8b, 0x0a, 0x02, 0x09, 0x6d, - 0x21, 0x12, 0x12, 0x62, 0x35, 0x5e, 0x8f, 0xbd, 0x43, 0xd6, 0x3b, 0xab, 0x9d, 0xb1, 0x95, 0x44, - 0x42, 0x1c, 0x38, 0x71, 0x40, 0x5c, 0xf8, 0x07, 0xe0, 0xbf, 0x00, 0x4e, 0x7c, 0x1c, 0x38, 0xf2, - 0x75, 0x81, 0x83, 0x85, 0x72, 0x03, 0x4e, 0xf8, 0xc8, 0x09, 0xcd, 0xec, 0xa7, 0xa9, 0x0d, 0x3e, - 0xf4, 0xb6, 0xfb, 0xde, 0xef, 0xf7, 0x7b, 0x6f, 0xde, 0x7b, 0xfb, 0x66, 0x81, 0x16, 0x46, 0x94, - 0xd3, 0x7a, 0x1b, 0x23, 0x97, 0x06, 0xf5, 0xb0, 0x11, 0xd6, 0x87, 0x66, 0x9d, 0x9f, 0x86, 0x98, - 0x19, 0xd2, 0x03, 0xaf, 0x62, 0xee, 0xe1, 0x08, 0x0f, 0xfa, 0x46, 0x8c, 0x31, 0xc2, 0x46, 0x68, - 0x0c, 0xcd, 0xb5, 0x0d, 0xcc, 0xbd, 0xfa, 0xd0, 0x44, 0x7e, 0xe8, 0x21, 0xb3, 0x8e, 0x38, 0xc7, - 0x8c, 0x23, 0x4e, 0x04, 0x40, 0xf0, 0xd6, 0xb4, 0x09, 0x7f, 0xcc, 0x75, 0xda, 0x3e, 0x75, 0x8f, - 0x13, 0xc0, 0xfa, 0x04, 0x60, 0x88, 0x7c, 0xd2, 0x41, 0x9c, 0x46, 0x89, 0x77, 0xab, 0x47, 0xb8, - 0x37, 0x68, 0x1b, 0x2e, 0xed, 0xd7, 0x7b, 0xb4, 0x47, 0xeb, 0xd2, 0xdc, 0x1e, 0x74, 0xe5, 0x5b, - 0x9c, 0xb4, 0x78, 0x8a, 0xe1, 0xfa, 0x47, 0x65, 0xb0, 0x64, 0xc9, 0x18, 0x0f, 0x39, 0xe2, 0x18, - 0xea, 0xa0, 0xdc, 0xc3, 0x01, 0x66, 0x84, 0x39, 0x9c, 0xf4, 0xb1, 0xfa, 0xfb, 0x53, 0x35, 0x65, - 0xb3, 0x64, 0x2f, 0x25, 0xc6, 0x37, 0x48, 0x1f, 0xc3, 0x2a, 0x28, 0x31, 0x9f, 0x72, 0xf5, 0x8f, - 0xd8, 0x27, 0x5f, 0xa0, 0x09, 0x4a, 0x5d, 0x1a, 0x1d, 0xab, 0x7f, 0x0a, 0xe3, 0x52, 0x63, 0xdd, - 0x98, 0x7e, 0x7c, 0xe3, 0x25, 0x1a, 0x1d, 0xdb, 0x12, 0x0a, 0xdf, 0x02, 0x55, 0x1f, 0x89, 0xe3, - 0xc7, 0xc7, 0x73, 0x3c, 0x8c, 0x3a, 0x38, 0x52, 0x7f, 0xa8, 0x48, 0x85, 0xcd, 0x5c, 0x01, 0x73, - 0xcf, 0x48, 0x0f, 0x6c, 0xc4, 0xd9, 0x5a, 0x82, 0xf1, 0x40, 0x12, 0xec, 0xcb, 0xb1, 0x4a, 0xc1, - 0x04, 0xf7, 0xc1, 0x52, 0xac, 0x19, 0x51, 0xca, 0x99, 0xfa, 0x63, 0xa5, 0xb6, 0xb8, 0x59, 0xb6, - 0xae, 0x8e, 0x47, 0x1a, 0x64, 0xec, 0x6c, 0x8b, 0x91, 0x33, 0x7c, 0xa8, 0xef, 0x9b, 0x07, 0x8d, - 0x9b, 0x3b, 0x0d, 0xdd, 0x06, 0x12, 0x6b, 0x0b, 0xa8, 0x60, 0x8a, 0x7e, 0xe0, 0x84, 0xf9, 0xd3, - 0xff, 0x30, 0x25, 0x36, 0x66, 0xda, 0x60, 0xc5, 0x23, 0x8c, 0xd3, 0x88, 0xb8, 0xc8, 0x4f, 0xe8, - 0x3f, 0xc7, 0xf4, 0xeb, 0xe3, 0x91, 0xa6, 0xe7, 0xf4, 0x3b, 0x82, 0x5b, 0x13, 0xef, 0x7d, 0x74, - 0x72, 0xa8, 0x9b, 0xcd, 0x56, 0xab, 0xd5, 0x30, 0x9b, 0xba, 0x5d, 0xc9, 0x05, 0x62, 0xcd, 0x17, - 0xc0, 0x45, 0xcc, 0x3d, 0xd3, 0xe9, 0x20, 0x8e, 0xd4, 0xcf, 0x57, 0x65, 0x61, 0xb4, 0x19, 0x85, - 0xb9, 0xcf, 0x3d, 0xf3, 0x1e, 0xe2, 0xc8, 0xbe, 0x80, 0x93, 0x27, 0xf8, 0x36, 0xa8, 0x64, 0x74, - 0x67, 0x48, 0x39, 0x66, 0xea, 0x17, 0xab, 0xb5, 0xc5, 0x39, 0x44, 0x2c, 0x38, 0x1e, 0x69, 0xcb, - 0x79, 0x8a, 0xdb, 0x8d, 0x5d, 0xdd, 0xbe, 0x94, 0x0a, 0x1f, 0x09, 0x29, 0xb8, 0x05, 0x60, 0xac, - 0x8e, 0x43, 0xca, 0x08, 0x77, 0x48, 0xd0, 0xc1, 0x27, 0xea, 0x97, 0xab, 0x72, 0x2a, 0x56, 0x24, - 0x36, 0xf6, 0xbc, 0x2c, 0x1c, 0xf0, 0x1d, 0x00, 0xb2, 0x61, 0x65, 0xea, 0xa7, 0x9a, 0xcc, 0xa3, - 0x36, 0x23, 0x8f, 0xa3, 0x14, 0x69, 0x5d, 0x1b, 0x8f, 0xb4, 0xd5, 0x42, 0x22, 
0x07, 0x07, 0x7b, - 0xa6, 0xd9, 0x6c, 0xb4, 0x5a, 0xad, 0xa6, 0x6e, 0x17, 0x14, 0xe1, 0x3e, 0xb8, 0xd0, 0x46, 0x3e, - 0x0a, 0x5c, 0xcc, 0xd4, 0xcf, 0x84, 0x7a, 0xe9, 0xbf, 0xb9, 0x19, 0x1a, 0xde, 0x02, 0xe5, 0x08, - 0x05, 0x1d, 0x44, 0x9d, 0x3e, 0x39, 0xc1, 0x4c, 0xfd, 0xf0, 0x86, 0xec, 0xda, 0xea, 0x78, 0xa4, - 0x55, 0xf3, 0xae, 0x35, 0xf7, 0xf6, 0x76, 0x9a, 0xb2, 0xeb, 0x4b, 0x31, 0xfa, 0x35, 0x01, 0x86, - 0x0d, 0x70, 0x91, 0xf9, 0x88, 0x79, 0x24, 0xe8, 0x31, 0xf5, 0x2f, 0x43, 0xc6, 0xad, 0x8e, 0x47, - 0x5a, 0x65, 0x72, 0x5c, 0x74, 0x3b, 0x87, 0xc1, 0xf7, 0xc1, 0xb5, 0x30, 0xc2, 0x43, 0x42, 0x07, - 0xcc, 0xc1, 0x21, 0x75, 0x3d, 0xa7, 0xb0, 0x07, 0x98, 0xfa, 0x4b, 0x53, 0xd6, 0xe6, 0xf9, 0x59, - 0xdf, 0xd0, 0xeb, 0x38, 0xe8, 0x90, 0xa0, 0xf7, 0x62, 0xce, 0xf9, 0x57, 0xbb, 0x76, 0xb7, 0x0f, - 0x9a, 0xba, 0xfd, 0x74, 0x1a, 0xe3, 0xbe, 0x08, 0x51, 0x40, 0x33, 0xf8, 0x1e, 0x58, 0x73, 0x07, - 0x51, 0x84, 0x03, 0x3e, 0x2d, 0xfe, 0xaf, 0x8f, 0x27, 0xbe, 0x9a, 0x84, 0x78, 0x34, 0x3c, 0x03, - 0xf0, 0xdd, 0x01, 0xe3, 0xa4, 0x4b, 0x5c, 0x69, 0x71, 0xda, 0x84, 0x33, 0xf5, 0xab, 0xdb, 0x35, - 0x65, 0xb3, 0x6c, 0xdd, 0x1d, 0x8f, 0xb4, 0x72, 0x5e, 0x3c, 0x53, 0xff, 0x7b, 0xa4, 0xd5, 0x0b, - 0x5b, 0x2d, 0x8c, 0x4e, 0x59, 0x1f, 0x71, 0xe2, 0xfa, 0xa8, 0xcd, 0xea, 0x3d, 0xba, 0xd5, 0x26, - 0xbc, 0x4b, 0xb0, 0xdf, 0x31, 0x2c, 0xc2, 0x87, 0xd8, 0xe5, 0x34, 0xda, 0xb5, 0x2f, 0x4f, 0xe8, - 0x5b, 0x84, 0x33, 0xd8, 0x05, 0xcf, 0x64, 0x45, 0x4f, 0xbc, 0xb8, 0xe3, 0xb8, 0x1e, 0x76, 0x8f, - 0x43, 0x4a, 0x02, 0xae, 0x7e, 0x7d, 0x5b, 0x7e, 0x5f, 0xcf, 0xce, 0x18, 0xc9, 0xbb, 0x19, 0xd2, - 0xce, 0xba, 0xf7, 0x4a, 0xaa, 0x93, 0x3b, 0x61, 0x07, 0xac, 0xa7, 0xb5, 0x9d, 0x1a, 0xe6, 0x9b, - 0xb9, 0xc3, 0xa4, 0x3d, 0x9a, 0x16, 0xe5, 0x4d, 0x70, 0xa5, 0x4b, 0x02, 0xe4, 0x93, 0xb3, 0x49, - 0xf5, 0x6f, 0xe7, 0x56, 0xaf, 0x66, 0xfc, 0xdc, 0xa8, 0x7f, 0xa2, 0x80, 0x92, 0x58, 0xd1, 0xf0, - 0x16, 0x58, 0xc9, 0xaa, 0x35, 0xc4, 0x11, 0x23, 0x34, 0x50, 0x15, 0xd9, 0x9f, 0x95, 0xc9, 0xfe, - 0xec, 0xea, 0x76, 0x25, 0x45, 0x1e, 0xc5, 0x40, 0x78, 0x00, 0x2a, 0x69, 0x09, 0x52, 0xee, 0xc2, - 0x0c, 0xee, 0x72, 0x02, 0x4c, 0xa9, 0x57, 0xc0, 0x13, 0x72, 0x22, 0xd5, 0x45, 0xb9, 0x46, 0xe2, - 0x17, 0xfd, 0xe3, 0x05, 0x00, 0x1f, 0x9d, 0x3a, 0xd8, 0x07, 0x2b, 0xa8, 0xd7, 0x8b, 0x70, 0xaf, - 0x30, 0x45, 0x71, 0x92, 0xd6, 0xc4, 0x3c, 0x36, 0xb6, 0x77, 0xf7, 0xc5, 0x18, 0xdd, 0x9c, 0x77, - 0x8c, 0x7c, 0xc2, 0xb8, 0x5d, 0x29, 0x68, 0xcb, 0x09, 0x3a, 0x04, 0x25, 0xb9, 0x88, 0x17, 0x64, - 0x89, 0xaf, 0xcf, 0x28, 0x71, 0x21, 0x41, 0xb9, 0x8e, 0x25, 0x07, 0xde, 0x00, 0x15, 0x12, 0xb8, - 0xfe, 0x40, 0x1c, 0xd2, 0xe9, 0x60, 0x1f, 0x9d, 0x26, 0x27, 0x5c, 0xce, 0xcc, 0xf7, 0x84, 0x15, - 0x3e, 0x07, 0x96, 0xc3, 0x88, 0x86, 0x94, 0xe1, 0x28, 0xd9, 0xa8, 0x25, 0x89, 0xbb, 0x94, 0x5a, - 0xe5, 0x36, 0xd5, 0xef, 0x80, 0x6a, 0xb6, 0x23, 0x5f, 0x95, 0xf7, 0x9f, 0x58, 0xca, 0x79, 0xf9, - 0x94, 0x42, 0xf9, 0x20, 0x04, 0x25, 0x71, 0x1f, 0xc5, 0x4d, 0xb0, 0xe5, 0xb3, 0xfe, 0x81, 0x02, - 0x2a, 0x0f, 0xb2, 0xeb, 0xc6, 0x42, 0xdc, 0xf5, 0x60, 0x6b, 0xf2, 0xda, 0x54, 0xe6, 0xbe, 0x35, - 0x5b, 0x93, 0xb7, 0xe6, 0xc2, 0xbc, 0x97, 0xa6, 0xde, 0x04, 0x65, 0xf9, 0xe3, 0xf1, 0x70, 0xd0, - 0xef, 0xa3, 0xe8, 0x54, 0x64, 0x2a, 0xff, 0x2d, 0x94, 0xc2, 0xaf, 0xc5, 0x94, 0xec, 0xad, 0xf2, - 0x77, 0xe7, 0x1b, 0xca, 0xf7, 0xe7, 0x1b, 0xca, 0x6f, 0xe7, 0x1b, 0x4a, 0xfb, 0x49, 0xf9, 0x33, - 0xb3, 0xf3, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0e, 0x9c, 0x5c, 0x20, 0x95, 0x09, 0x00, 0x00, + // 1243 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 
0xff, 0xac, 0x56, 0xcd, 0x8f, 0xdb, 0xc4, + 0x1b, 0x96, 0xb7, 0xf9, 0xf5, 0x63, 0x92, 0x6e, 0xb6, 0xb3, 0xfd, 0x35, 0xa6, 0x2d, 0xeb, 0xc5, + 0x12, 0x6d, 0x41, 0xdd, 0xa4, 0xf6, 0x6e, 0x93, 0xdd, 0x56, 0xb4, 0x6a, 0xda, 0xa2, 0xb6, 0xa2, + 0x52, 0xe5, 0x42, 0x25, 0x24, 0x84, 0x35, 0xb1, 0x27, 0xf6, 0x74, 0x6d, 0x8f, 0xe5, 0x99, 0x44, + 0xdd, 0x4a, 0x88, 0x03, 0x27, 0x4e, 0x70, 0xe0, 0xc2, 0x11, 0xfe, 0x0b, 0xe0, 0xc4, 0xc7, 0x81, + 0x23, 0x5f, 0x97, 0x72, 0x88, 0xd0, 0xde, 0xf8, 0xb8, 0x90, 0x23, 0x27, 0x34, 0xe3, 0xaf, 0x84, + 0x26, 0x10, 0x24, 0x6e, 0x9e, 0x99, 0xe7, 0x79, 0xde, 0x77, 0xde, 0xf7, 0xf5, 0x3b, 0x2f, 0xd0, + 0xe2, 0x84, 0x72, 0xda, 0xea, 0x61, 0xe4, 0xd0, 0xa8, 0x15, 0x9b, 0x71, 0x6b, 0x68, 0xb4, 0xf8, + 0x5e, 0x8c, 0x59, 0x53, 0x9e, 0xc0, 0x13, 0x98, 0xfb, 0x38, 0xc1, 0x83, 0xb0, 0x99, 0x62, 0x9a, + 0xb1, 0x19, 0x37, 0x87, 0xc6, 0xc9, 0x35, 0xcc, 0xfd, 0xd6, 0xd0, 0x40, 0x41, 0xec, 0x23, 0xa3, + 0x85, 0x38, 0xc7, 0x8c, 0x23, 0x4e, 0x04, 0x40, 0xf0, 0x4e, 0x6a, 0x53, 0xe7, 0x29, 0xd7, 0xee, + 0x05, 0xd4, 0xd9, 0xcd, 0x00, 0xa7, 0xa7, 0x00, 0x43, 0x14, 0x10, 0x17, 0x71, 0x9a, 0x64, 0xa7, + 0x1b, 0x1e, 0xe1, 0xfe, 0xa0, 0xd7, 0x74, 0x68, 0xd8, 0xf2, 0xa8, 0x47, 0x5b, 0x72, 0xbb, 0x37, + 0xe8, 0xcb, 0x55, 0xea, 0xb4, 0xf8, 0x4a, 0xe1, 0xfa, 0x93, 0x1a, 0xa8, 0x76, 0xa5, 0x8d, 0xfb, + 0x1c, 0x71, 0x0c, 0x75, 0x50, 0xf3, 0x70, 0x84, 0x19, 0x61, 0x36, 0x27, 0x21, 0x56, 0x7f, 0x3e, + 0xb4, 0xae, 0x9c, 0xab, 0x58, 0xd5, 0x6c, 0xf3, 0x55, 0x12, 0x62, 0x78, 0x07, 0x34, 0x72, 0x4c, + 0x61, 0x9d, 0xd9, 0x09, 0xa5, 0x5c, 0xfd, 0x45, 0xc0, 0x6b, 0xdd, 0x63, 0xe3, 0x91, 0x76, 0x94, + 0xb1, 0xc7, 0x1b, 0x8c, 0x3c, 0xc6, 0x97, 0xf4, 0x4d, 0x53, 0xb7, 0xfe, 0x9f, 0x51, 0x1e, 0x14, + 0x0c, 0x8b, 0x52, 0x0e, 0x57, 0x41, 0x85, 0x05, 0x94, 0xab, 0xbf, 0xa6, 0x76, 0xe4, 0x02, 0x1a, + 0xa0, 0xd2, 0xa7, 0xc9, 0xae, 0xfa, 0x9b, 0xd8, 0xac, 0x9a, 0xa7, 0x9b, 0xb3, 0x43, 0xd9, 0x7c, + 0x99, 0x26, 0xbb, 0x96, 0x84, 0xc2, 0xd7, 0xc1, 0x6a, 0x80, 0x44, 0x28, 0xd3, 0x50, 0xd9, 0x3e, + 0x46, 0x2e, 0x4e, 0xd4, 0x6f, 0xeb, 0x52, 0xe1, 0x5c, 0xa9, 0x80, 0xb9, 0xdf, 0xcc, 0x83, 0xd7, + 0x4c, 0x6f, 0xde, 0x15, 0x8c, 0x5b, 0x92, 0x60, 0x1d, 0x4b, 0x55, 0x26, 0xb6, 0xe0, 0x36, 0xa8, + 0xa6, 0x9a, 0xe2, 0x86, 0x4c, 0xfd, 0xae, 0xbe, 0x7e, 0xe0, 0x5c, 0xad, 0x7b, 0x62, 0x3c, 0xd2, + 0x60, 0x79, 0xc5, 0x6d, 0x63, 0xc7, 0x3c, 0x2f, 0xee, 0x09, 0x24, 0x56, 0xdc, 0x8d, 0x09, 0xa6, + 0xc8, 0x2d, 0xce, 0x98, 0xdf, 0xff, 0x03, 0x53, 0x62, 0x53, 0xa6, 0x05, 0x56, 0x7c, 0xc2, 0x38, + 0x4d, 0x88, 0x83, 0x82, 0x8c, 0xfe, 0x43, 0x4a, 0x3f, 0x33, 0x1e, 0x69, 0x7a, 0x49, 0xbf, 0x2a, + 0xb8, 0xeb, 0x62, 0x1d, 0xa2, 0x47, 0x97, 0x74, 0xa3, 0xdd, 0xe9, 0x74, 0x4c, 0xa3, 0xad, 0x5b, + 0xf5, 0x52, 0x20, 0xd5, 0x7c, 0x09, 0x1c, 0xc1, 0xdc, 0x37, 0x6c, 0x17, 0x71, 0xa4, 0x7e, 0xd2, + 0x90, 0x81, 0xd1, 0xe6, 0x04, 0xe6, 0x26, 0xf7, 0x8d, 0x1b, 0x88, 0x23, 0xeb, 0x30, 0xce, 0xbe, + 0xe0, 0x1b, 0xa0, 0x5e, 0xd0, 0xed, 0x21, 0xe5, 0x98, 0xa9, 0x9f, 0x36, 0xd6, 0x0f, 0x2c, 0x20, + 0xd2, 0x85, 0xe3, 0x91, 0xb6, 0x5c, 0xba, 0x78, 0xc1, 0xdc, 0xd2, 0xad, 0xa3, 0xb9, 0xf0, 0x03, + 0x21, 0x05, 0x37, 0x00, 0x4c, 0xd5, 0x71, 0x4c, 0x19, 0xe1, 0x36, 0x89, 0x5c, 0xfc, 0x48, 0xfd, + 0xac, 0x21, 0xab, 0x62, 0x45, 0x62, 0xd3, 0x93, 0xdb, 0xe2, 0x00, 0xbe, 0x09, 0x40, 0x59, 0x7a, + 0xea, 0x47, 0x9a, 0xf4, 0x63, 0x7d, 0x8e, 0x1f, 0x45, 0xc9, 0x75, 0x4f, 0x8d, 0x47, 0x5a, 0x63, + 0xc2, 0x91, 0x9d, 0x9d, 0x8b, 0x86, 0xd1, 0x36, 0x3b, 0x9d, 0x4e, 0x5b, 0xb7, 0x26, 0x14, 0xe1, + 0x36, 0x38, 0xdc, 0x43, 0x01, 0x8a, 0x1c, 0xcc, 0xd4, 0x8f, 0x85, 0x7a, 0xe5, 
0xef, 0xb9, 0x05, + 0x1a, 0x5e, 0x06, 0xb5, 0x04, 0x45, 0x2e, 0xa2, 0x76, 0x48, 0x1e, 0x61, 0xa6, 0xbe, 0x7b, 0x56, + 0x66, 0xad, 0x31, 0x1e, 0x69, 0xab, 0x65, 0xd6, 0xda, 0x17, 0x2f, 0x6e, 0xb6, 0x65, 0xd6, 0xab, + 0x29, 0xfa, 0xae, 0x00, 0x43, 0x13, 0x1c, 0x61, 0x01, 0x62, 0x3e, 0x89, 0x3c, 0xa6, 0xfe, 0xde, + 0x94, 0x76, 0x57, 0xc7, 0x23, 0xad, 0x3e, 0x5d, 0x2e, 0xba, 0x55, 0xc2, 0xe0, 0xdb, 0xe0, 0x54, + 0x9c, 0xe0, 0x21, 0xa1, 0x03, 0x66, 0xe3, 0x98, 0x3a, 0xbe, 0x3d, 0xd1, 0x53, 0x98, 0xfa, 0xa4, + 0x2d, 0x63, 0xf3, 0xe2, 0xbc, 0x7f, 0xe8, 0x1e, 0x8e, 0x5c, 0x12, 0x79, 0xd7, 0x4a, 0xce, 0x5f, + 0xd2, 0xb5, 0x75, 0x61, 0xa7, 0xad, 0x5b, 0xcf, 0xe4, 0x36, 0x6e, 0x0a, 0x13, 0x13, 0x68, 0x06, + 0xdf, 0x02, 0x27, 0x9d, 0x41, 0x92, 0xe0, 0x88, 0xcf, 0xb2, 0xff, 0xe3, 0x7f, 0x63, 0x5f, 0xcd, + 0x4c, 0x3c, 0x6d, 0x9e, 0x01, 0xf8, 0x70, 0xc0, 0x38, 0xe9, 0x13, 0x47, 0xee, 0xd8, 0x3d, 0xc2, + 0x99, 0xfa, 0xf9, 0x15, 0xd9, 0x88, 0xae, 0x8f, 0x47, 0x5a, 0xad, 0x0c, 0x9e, 0xa1, 0xff, 0x31, + 0xd2, 0x5a, 0x13, 0x1d, 0x32, 0x4e, 0xf6, 0x58, 0x88, 0x38, 0x71, 0x02, 0xd4, 0x63, 0x2d, 0x8f, + 0x6e, 0xf4, 0x08, 0xef, 0x13, 0x1c, 0xb8, 0xcd, 0x2e, 0xe1, 0x43, 0xec, 0x70, 0x9a, 0x6c, 0x59, + 0xc7, 0xa6, 0xf4, 0xbb, 0x84, 0x33, 0xd8, 0x07, 0xcf, 0x16, 0x41, 0xcf, 0x4e, 0xb1, 0x6b, 0x3b, + 0x3e, 0x76, 0x76, 0x63, 0x4a, 0x22, 0xae, 0x7e, 0x71, 0x45, 0xfe, 0x5f, 0xcf, 0xcd, 0x29, 0xc9, + 0xeb, 0x05, 0xd2, 0x2a, 0xb2, 0x77, 0x27, 0xd7, 0x29, 0x0f, 0xa1, 0x0b, 0x4e, 0xe7, 0xb1, 0x9d, + 0x69, 0xe6, 0xcb, 0x85, 0xcd, 0xe4, 0x39, 0x9a, 0x65, 0xe5, 0x35, 0x70, 0xbc, 0x4f, 0x22, 0x14, + 0x90, 0xc7, 0xd3, 0xea, 0x5f, 0x2d, 0xac, 0xbe, 0x5a, 0xf0, 0xcb, 0x4d, 0xfd, 0x03, 0x05, 0x54, + 0x44, 0x8b, 0x86, 0x97, 0xc1, 0x4a, 0x11, 0xad, 0x21, 0x4e, 0x18, 0xa1, 0x91, 0xaa, 0xc8, 0xfc, + 0xac, 0x4c, 0xe7, 0x67, 0x4b, 0xb7, 0xea, 0x39, 0xf2, 0x41, 0x0a, 0x84, 0x3b, 0xa0, 0x9e, 0x87, + 0x20, 0xe7, 0x2e, 0xcd, 0xe1, 0x2e, 0x67, 0xc0, 0x9c, 0x7a, 0x1c, 0xfc, 0x4f, 0x56, 0xa4, 0x7a, + 0x40, 0xb6, 0x91, 0x74, 0xa1, 0xbf, 0xb7, 0x04, 0xe0, 0xd3, 0x55, 0x07, 0x43, 0xb0, 0x82, 0x3c, + 0x2f, 0xc1, 0xde, 0x44, 0x15, 0xa5, 0x4e, 0x76, 0xa7, 0xea, 0xd1, 0xbc, 0xb0, 0xb5, 0x2d, 0xca, + 0xe8, 0xfc, 0xa2, 0x65, 0x14, 0x10, 0xc6, 0xad, 0xfa, 0x84, 0xb6, 0xac, 0xa0, 0x4b, 0xa0, 0x22, + 0x1b, 0xf1, 0x92, 0x0c, 0xf1, 0x99, 0x39, 0x21, 0x9e, 0x70, 0x50, 0xb6, 0x63, 0xc9, 0x81, 0x67, + 0x41, 0x9d, 0x44, 0x4e, 0x30, 0x10, 0x97, 0xb4, 0x5d, 0x1c, 0xa0, 0xbd, 0xec, 0x86, 0xcb, 0xc5, + 0xf6, 0x0d, 0xb1, 0x0b, 0x9f, 0x07, 0xcb, 0x71, 0x42, 0x63, 0xca, 0x70, 0x92, 0x75, 0xd4, 0x8a, + 0xc4, 0x1d, 0xcd, 0x77, 0x65, 0x37, 0xd5, 0xaf, 0x82, 0xd5, 0xa2, 0x47, 0xbe, 0x22, 0xdf, 0x3f, + 0xd1, 0x94, 0xcb, 0xf0, 0x29, 0x13, 0xe1, 0x83, 0x10, 0x54, 0xe4, 0x53, 0x2f, 0x93, 0x60, 0xc9, + 0x6f, 0xfd, 0x1d, 0x05, 0xd4, 0x6f, 0x15, 0xcf, 0x4d, 0x17, 0x71, 0xc7, 0x87, 0x9d, 0xe9, 0x67, + 0x53, 0x59, 0xf8, 0xd5, 0xec, 0x4c, 0xbf, 0x9a, 0x4b, 0x8b, 0x3e, 0x9a, 0x7a, 0x1b, 0xd4, 0xe4, + 0x10, 0x73, 0x7f, 0x10, 0x86, 0x28, 0xd9, 0x13, 0x9e, 0xca, 0xd9, 0x42, 0x99, 0x18, 0x2d, 0x66, + 0x79, 0x1f, 0x80, 0xea, 0x7d, 0xe2, 0x45, 0x24, 0xf2, 0xe4, 0x48, 0x62, 0x82, 0x2a, 0xed, 0x3d, + 0xc4, 0x0e, 0x4f, 0x47, 0x1a, 0x65, 0xde, 0x44, 0x03, 0x52, 0x94, 0xe4, 0xbc, 0x00, 0x0e, 0xba, + 0x34, 0x44, 0x24, 0xaf, 0xcd, 0x19, 0xf0, 0x0c, 0xa0, 0xbf, 0xaf, 0x80, 0xc3, 0xe2, 0xaf, 0x90, + 0x8f, 0xea, 0x8c, 0xe2, 0xae, 0x2c, 0x58, 0xdc, 0xb7, 0xe7, 0x4f, 0x61, 0x4b, 0xff, 0x6e, 0x08, + 0xd3, 0x3f, 0x54, 0x40, 0x43, 0x44, 0x00, 0xbb, 0xd7, 0xb2, 0x2a, 0xc5, 0xd7, 0x22, 0xf7, 0x5e, + 0x42, 
0x69, 0x1f, 0xde, 0x05, 0x87, 0x42, 0xcc, 0x18, 0xf2, 0xb0, 0x8c, 0x44, 0xd5, 0xdc, 0x9c, + 0x57, 0xaa, 0x05, 0xb5, 0xac, 0xd9, 0x5c, 0xc5, 0xca, 0x35, 0x60, 0x0b, 0x1c, 0x61, 0xc4, 0x8b, + 0x10, 0x1f, 0x24, 0x78, 0xb6, 0x9f, 0xa2, 0xdb, 0x97, 0x98, 0x6e, 0xed, 0xeb, 0xfd, 0x35, 0xe5, + 0x9b, 0xfd, 0x35, 0xe5, 0xa7, 0xfd, 0x35, 0xa5, 0x77, 0x50, 0x4e, 0xad, 0x9b, 0x7f, 0x06, 0x00, + 0x00, 0xff, 0xff, 0x5d, 0x05, 0xc0, 0x7f, 0x7e, 0x0b, 0x00, 0x00, } func (m *BeaconState) Marshal() (dAtA []byte, err error) { @@ -902,14 +1089,23 @@ func (m *BeaconState) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x3e i-- - dAtA[i] = 0xda + dAtA[i] = 0xe2 } if m.Slot != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Slot)) i-- dAtA[i] = 0x3e i-- - dAtA[i] = 0xd0 + dAtA[i] = 0xd8 + } + if len(m.GenesisValidatorsRoot) > 0 { + i -= len(m.GenesisValidatorsRoot) + copy(dAtA[i:], m.GenesisValidatorsRoot) + i = encodeVarintTypes(dAtA, i, uint64(len(m.GenesisValidatorsRoot))) + i-- + dAtA[i] = 0x3e + i-- + dAtA[i] = 0xd2 } if m.GenesisTime != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.GenesisTime)) @@ -1146,6 +1342,134 @@ func (m *StateSummary) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SigningRoot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SigningRoot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SigningRoot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0x12 + } + if len(m.ObjectRoot) > 0 { + i -= len(m.ObjectRoot) + copy(dAtA[i:], m.ObjectRoot) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ObjectRoot))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ForkData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ForkData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ForkData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.CurrentVersion) > 0 { + i -= len(m.CurrentVersion) + copy(dAtA[i:], m.CurrentVersion) + i = encodeVarintTypes(dAtA, i, uint64(len(m.CurrentVersion))) + i-- + dAtA[i] = 0x22 + } + if len(m.GenesisValidatorsRoot) > 0 { + i -= len(m.GenesisValidatorsRoot) + copy(dAtA[i:], m.GenesisValidatorsRoot) + i = encodeVarintTypes(dAtA, i, uint64(len(m.GenesisValidatorsRoot))) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *SignedAggregateAndProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignedAggregateAndProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*SignedAggregateAndProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0x12 + } + if m.Message != nil { + { + size, err := m.Message.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { offset -= sovTypes(v) base := offset @@ -1166,6 +1490,10 @@ func (m *BeaconState) Size() (n int) { if m.GenesisTime != 0 { n += 2 + sovTypes(uint64(m.GenesisTime)) } + l = len(m.GenesisValidatorsRoot) + if l > 0 { + n += 2 + l + sovTypes(uint64(l)) + } if m.Slot != 0 { n += 2 + sovTypes(uint64(m.Slot)) } @@ -1379,6 +1707,66 @@ func (m *StateSummary) Size() (n int) { return n } +func (m *SigningRoot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ObjectRoot) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ForkData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.GenesisValidatorsRoot) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CurrentVersion) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SignedAggregateAndProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Message != nil { + l = m.Message.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovTypes(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1434,6 +1822,40 @@ func (m *BeaconState) Unmarshal(dAtA []byte) error { } } case 1002: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenesisValidatorsRoot", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GenesisValidatorsRoot = append(m.GenesisValidatorsRoot[:0], dAtA[iNdEx:postIndex]...) 
+ if m.GenesisValidatorsRoot == nil { + m.GenesisValidatorsRoot = []byte{} + } + iNdEx = postIndex + case 1003: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Slot", wireType) } @@ -1452,7 +1874,7 @@ func (m *BeaconState) Unmarshal(dAtA []byte) error { break } } - case 1003: + case 1004: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Fork", wireType) } @@ -2797,6 +3219,374 @@ func (m *StateSummary) Unmarshal(dAtA []byte) error { } return nil } +func (m *SigningRoot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SigningRoot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SigningRoot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectRoot", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ObjectRoot = append(m.ObjectRoot[:0], dAtA[iNdEx:postIndex]...) + if m.ObjectRoot == nil { + m.ObjectRoot = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = append(m.Domain[:0], dAtA[iNdEx:postIndex]...) + if m.Domain == nil { + m.Domain = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ForkData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ForkData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ForkData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenesisValidatorsRoot", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GenesisValidatorsRoot = append(m.GenesisValidatorsRoot[:0], dAtA[iNdEx:postIndex]...) + if m.GenesisValidatorsRoot == nil { + m.GenesisValidatorsRoot = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentVersion", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentVersion = append(m.CurrentVersion[:0], dAtA[iNdEx:postIndex]...) + if m.CurrentVersion == nil { + m.CurrentVersion = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignedAggregateAndProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignedAggregateAndProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignedAggregateAndProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Message == nil { + m.Message = &v1alpha1.AggregateAttestationAndProof{} + } + if err := m.Message.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) + if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipTypes(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/proto/beacon/p2p/v1/types.proto b/proto/beacon/p2p/v1/types.proto index f54114b14f..725fc68cb6 100644 --- a/proto/beacon/p2p/v1/types.proto +++ b/proto/beacon/p2p/v1/types.proto @@ -9,8 +9,9 @@ import "github.com/gogo/protobuf/gogoproto/gogo.proto"; message BeaconState { // Versioning [1001-2000] uint64 genesis_time = 1001; - uint64 slot = 1002; - Fork fork = 1003; + bytes genesis_validators_root = 1002 [(gogoproto.moretags) = "ssz-size:\"32\""]; + uint64 slot = 1003; + Fork fork = 1004; // History [2001-3000] ethereum.eth.v1alpha1.BeaconBlockHeader latest_block_header = 2001; @@ -84,3 +85,24 @@ message StateSummary { // The block root of the state. bytes root = 2; } + +message SigningRoot { + // The root of the object being signed. 
+ bytes object_root = 1 [(gogoproto.moretags) = "ssz-size:\"32\""]; + // The domain for the particular object being signed. + bytes domain = 2 [(gogoproto.moretags) = "ssz-size:\"32\""]; +} + +message ForkData { + // The current version of the fork. + bytes current_version = 4 [(gogoproto.moretags) = "ssz-size:\"4\""]; + // The genesis validators root of the fork. + bytes genesis_validators_root = 2 [(gogoproto.moretags) = "ssz-size:\"32\""]; +} + +message SignedAggregateAndProof { + // The aggregated and proof from the aggregator. + ethereum.eth.v1alpha1.AggregateAttestationAndProof message = 1; + // The signature of the aggregator signing the aggregated and proof object. + bytes signature = 2 [(gogoproto.moretags) = "ssz-size:\"96\""]; +} diff --git a/proto/slashing/slashing.pb.go b/proto/slashing/slashing.pb.go deleted file mode 100755 index dc99b8a44e..0000000000 --- a/proto/slashing/slashing.pb.go +++ /dev/null @@ -1,1199 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: proto/slashing/slashing.proto - -package ethereum_slashing - -import ( - context "context" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - v1alpha1 "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - github_com_prysmaticlabs_go_bitfield "github.com/prysmaticlabs/go-bitfield" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
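The types.proto hunk just above makes BeaconState carry genesis_validators_root, renumbering slot and fork to tags 1003 and 1004 (which changes those fields' wire tags in the regenerated code), and adds the SigningRoot, ForkData and SignedAggregateAndProof containers; the listing that continues below is the wholesale deletion of the checked-in proto/slashing/slashing.pb.go. As a small sketch of the new aggregate wrapper (the AggregateAttestationAndProof field name is assumed from the ethereumapis package; values are placeholders):

package main

import (
	"fmt"

	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)

func main() {
	// SignedAggregateAndProof wraps the unsigned aggregate-and-proof message
	// with the aggregator's 96-byte BLS signature.
	signed := &pb.SignedAggregateAndProof{
		Message: &ethpb.AggregateAttestationAndProof{
			AggregatorIndex: 5, // assumed field name; placeholder value
		},
		Signature: make([]byte, 96),
	}
	enc, err := signed.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Printf("signed aggregate and proof encodes to %d bytes\n", len(enc))
}
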
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type ProposerSlashingResponse struct { - ProposerSlashing []*v1alpha1.ProposerSlashing `protobuf:"bytes,1,rep,name=proposer_slashing,json=proposerSlashing,proto3" json:"proposer_slashing,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProposerSlashingResponse) Reset() { *m = ProposerSlashingResponse{} } -func (m *ProposerSlashingResponse) String() string { return proto.CompactTextString(m) } -func (*ProposerSlashingResponse) ProtoMessage() {} -func (*ProposerSlashingResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_da7e95107d0081b4, []int{0} -} -func (m *ProposerSlashingResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProposerSlashingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProposerSlashingResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProposerSlashingResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProposerSlashingResponse.Merge(m, src) -} -func (m *ProposerSlashingResponse) XXX_Size() int { - return m.Size() -} -func (m *ProposerSlashingResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ProposerSlashingResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ProposerSlashingResponse proto.InternalMessageInfo - -func (m *ProposerSlashingResponse) GetProposerSlashing() []*v1alpha1.ProposerSlashing { - if m != nil { - return m.ProposerSlashing - } - return nil -} - -type AttesterSlashingResponse struct { - AttesterSlashing []*v1alpha1.AttesterSlashing `protobuf:"bytes,1,rep,name=attester_slashing,json=attesterSlashing,proto3" json:"attester_slashing,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AttesterSlashingResponse) Reset() { *m = AttesterSlashingResponse{} } -func (m *AttesterSlashingResponse) String() string { return proto.CompactTextString(m) } -func (*AttesterSlashingResponse) ProtoMessage() {} -func (*AttesterSlashingResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_da7e95107d0081b4, []int{1} -} -func (m *AttesterSlashingResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AttesterSlashingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AttesterSlashingResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AttesterSlashingResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttesterSlashingResponse.Merge(m, src) -} -func (m *AttesterSlashingResponse) XXX_Size() int { - return m.Size() -} -func (m *AttesterSlashingResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AttesterSlashingResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AttesterSlashingResponse proto.InternalMessageInfo - -func (m *AttesterSlashingResponse) GetAttesterSlashing() []*v1alpha1.AttesterSlashing { - if m != nil { - return m.AttesterSlashing - } - return nil -} - -type ProposalHistory struct { - EpochBits github_com_prysmaticlabs_go_bitfield.Bitlist 
`protobuf:"bytes,1,opt,name=epoch_bits,json=epochBits,proto3,casttype=github.com/prysmaticlabs/go-bitfield.Bitlist" json:"epoch_bits,omitempty"` - LatestEpochWritten uint64 `protobuf:"varint,2,opt,name=latest_epoch_written,json=latestEpochWritten,proto3" json:"latest_epoch_written,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProposalHistory) Reset() { *m = ProposalHistory{} } -func (m *ProposalHistory) String() string { return proto.CompactTextString(m) } -func (*ProposalHistory) ProtoMessage() {} -func (*ProposalHistory) Descriptor() ([]byte, []int) { - return fileDescriptor_da7e95107d0081b4, []int{2} -} -func (m *ProposalHistory) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProposalHistory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProposalHistory.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProposalHistory) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProposalHistory.Merge(m, src) -} -func (m *ProposalHistory) XXX_Size() int { - return m.Size() -} -func (m *ProposalHistory) XXX_DiscardUnknown() { - xxx_messageInfo_ProposalHistory.DiscardUnknown(m) -} - -var xxx_messageInfo_ProposalHistory proto.InternalMessageInfo - -func (m *ProposalHistory) GetEpochBits() github_com_prysmaticlabs_go_bitfield.Bitlist { - if m != nil { - return m.EpochBits - } - return nil -} - -func (m *ProposalHistory) GetLatestEpochWritten() uint64 { - if m != nil { - return m.LatestEpochWritten - } - return 0 -} - -type AttestationHistory struct { - TargetToSource map[uint64]uint64 `protobuf:"bytes,1,rep,name=target_to_source,json=targetToSource,proto3" json:"target_to_source,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - LatestEpochWritten uint64 `protobuf:"varint,2,opt,name=latest_epoch_written,json=latestEpochWritten,proto3" json:"latest_epoch_written,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AttestationHistory) Reset() { *m = AttestationHistory{} } -func (m *AttestationHistory) String() string { return proto.CompactTextString(m) } -func (*AttestationHistory) ProtoMessage() {} -func (*AttestationHistory) Descriptor() ([]byte, []int) { - return fileDescriptor_da7e95107d0081b4, []int{3} -} -func (m *AttestationHistory) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AttestationHistory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AttestationHistory.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AttestationHistory) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttestationHistory.Merge(m, src) -} -func (m *AttestationHistory) XXX_Size() int { - return m.Size() -} -func (m *AttestationHistory) XXX_DiscardUnknown() { - xxx_messageInfo_AttestationHistory.DiscardUnknown(m) -} - -var xxx_messageInfo_AttestationHistory proto.InternalMessageInfo - -func (m *AttestationHistory) GetTargetToSource() map[uint64]uint64 { - if m != nil { - return m.TargetToSource - } - return nil -} - -func (m *AttestationHistory) GetLatestEpochWritten() uint64 { - if m != nil { - 
return m.LatestEpochWritten - } - return 0 -} - -func init() { - proto.RegisterType((*ProposerSlashingResponse)(nil), "ethereum.slashing.ProposerSlashingResponse") - proto.RegisterType((*AttesterSlashingResponse)(nil), "ethereum.slashing.AttesterSlashingResponse") - proto.RegisterType((*ProposalHistory)(nil), "ethereum.slashing.ProposalHistory") - proto.RegisterType((*AttestationHistory)(nil), "ethereum.slashing.AttestationHistory") - proto.RegisterMapType((map[uint64]uint64)(nil), "ethereum.slashing.AttestationHistory.TargetToSourceEntry") -} - -func init() { proto.RegisterFile("proto/slashing/slashing.proto", fileDescriptor_da7e95107d0081b4) } - -var fileDescriptor_da7e95107d0081b4 = []byte{ - // 483 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0xd5, 0xb6, 0x05, 0xc4, 0x52, 0x41, 0xba, 0x54, 0x28, 0x8a, 0x44, 0x1a, 0xe5, 0x42, 0x10, - 0x74, 0xdd, 0x96, 0x0b, 0x70, 0xab, 0xa5, 0x4a, 0xed, 0x09, 0xe4, 0x44, 0xe2, 0x68, 0xad, 0xed, - 0xa9, 0xbd, 0xea, 0xc6, 0xbb, 0xda, 0x1d, 0x17, 0xf2, 0x1f, 0x7c, 0x14, 0x47, 0xbe, 0x00, 0xa1, - 0x9c, 0x11, 0x1f, 0xc0, 0x09, 0x79, 0xed, 0x54, 0xa1, 0x49, 0x24, 0x7a, 0x9b, 0x99, 0x37, 0xf3, - 0xde, 0xf3, 0x78, 0x87, 0x3e, 0x37, 0x56, 0xa3, 0x0e, 0x9c, 0x12, 0xae, 0x90, 0x65, 0x7e, 0x13, - 0x70, 0x5f, 0x67, 0x7b, 0x80, 0x05, 0x58, 0xa8, 0xa6, 0x7c, 0x01, 0xf4, 0x0e, 0x00, 0x8b, 0xe0, - 0xfa, 0x58, 0x28, 0x53, 0x88, 0xe3, 0x20, 0x01, 0x91, 0xea, 0x32, 0x4e, 0x94, 0x4e, 0xaf, 0x9a, - 0x99, 0xde, 0x61, 0x2e, 0xb1, 0xa8, 0x12, 0x9e, 0xea, 0x69, 0x90, 0xeb, 0x5c, 0x07, 0xbe, 0x9c, - 0x54, 0x97, 0x3e, 0x6b, 0xf4, 0xea, 0xa8, 0x69, 0x1f, 0x1a, 0xda, 0xfd, 0x68, 0xb5, 0xd1, 0x0e, - 0xec, 0xb8, 0xd5, 0x88, 0xc0, 0x19, 0x5d, 0x3a, 0x60, 0x13, 0xba, 0x67, 0x5a, 0x2c, 0x5e, 0x18, - 0xe8, 0x92, 0xc1, 0xf6, 0xe8, 0xd1, 0xc9, 0x0b, 0x7e, 0x63, 0x0d, 0xb0, 0xe0, 0x0b, 0x43, 0x7c, - 0x85, 0xab, 0x63, 0x6e, 0x55, 0x6a, 0xc5, 0x53, 0x44, 0x70, 0xb8, 0x5e, 0x51, 0xb4, 0xd8, 0xff, - 0x2a, 0xae, 0x70, 0x75, 0xc4, 0xad, 0xca, 0xf0, 0x2b, 0xa1, 0x4f, 0x1a, 0x63, 0x42, 0x9d, 0x4b, - 0x87, 0xda, 0xce, 0xd8, 0x07, 0x4a, 0xc1, 0xe8, 0xb4, 0x88, 0x13, 0x89, 0xae, 0x4b, 0x06, 0x64, - 0xb4, 0x1b, 0x1e, 0xfd, 0xf9, 0x71, 0xf0, 0x7a, 0x69, 0x7d, 0xc6, 0xce, 0xdc, 0x54, 0xa0, 0x4c, - 0x95, 0x48, 0x5c, 0x90, 0xeb, 0xc3, 0x44, 0xe2, 0xa5, 0x04, 0x95, 0xf1, 0x50, 0xa2, 0x92, 0x0e, - 0xa3, 0x87, 0x9e, 0x23, 0x94, 0xe8, 0xd8, 0x11, 0xdd, 0x57, 0xa2, 0x16, 0x8e, 0x1b, 0xde, 0xcf, - 0x56, 0x22, 0x42, 0xd9, 0xdd, 0x1a, 0x90, 0xd1, 0x4e, 0xc4, 0x1a, 0xec, 0xac, 0x86, 0x3e, 0x35, - 0xc8, 0xf0, 0x37, 0xa1, 0xac, 0x71, 0x2f, 0x50, 0xea, 0x72, 0xe1, 0x2c, 0xa5, 0x1d, 0x14, 0x36, - 0x07, 0x8c, 0x51, 0xc7, 0x4e, 0x57, 0x36, 0x85, 0x76, 0x05, 0xef, 0xf8, 0xca, 0x7b, 0xe0, 0xab, - 0x04, 0x7c, 0xe2, 0xa7, 0x27, 0x7a, 0xec, 0x67, 0xcf, 0x4a, 0xb4, 0xb3, 0xe8, 0x31, 0xfe, 0x53, - 0xbc, 0xbb, 0xdb, 0xde, 0x29, 0x7d, 0xba, 0x86, 0x98, 0x75, 0xe8, 0xf6, 0x15, 0xcc, 0xfc, 0x02, - 0x77, 0xa2, 0x3a, 0x64, 0xfb, 0xf4, 0xde, 0xb5, 0x50, 0x15, 0xb4, 0x5c, 0x4d, 0xf2, 0x7e, 0xeb, - 0x2d, 0x39, 0xf9, 0x45, 0xe8, 0x03, 0xff, 0x53, 0xc0, 0x32, 0x43, 0x9f, 0x5d, 0x38, 0x9f, 0x88, - 0x44, 0xc1, 0xd2, 0x57, 0xb0, 0x97, 0x1b, 0x7e, 0xf4, 0x45, 0x99, 0xc1, 0x17, 0xc8, 0x96, 0x5a, - 0x7b, 0xaf, 0x36, 0x2e, 0x64, 0xcd, 0xdb, 0xd2, 0xb4, 0xb3, 0xa4, 0x18, 0xd6, 0x27, 0xc3, 0xf8, - 0x06, 0xad, 0xb1, 0xcc, 0x4b, 0xc8, 0x42, 0x7f, 0x5d, 0xbe, 0xf3, 0x1c, 0x44, 0x06, 0x76, 0xad, - 0xe0, 0xa6, 0xf3, 0x09, 0x77, 0xbf, 0xcd, 0xfb, 0xe4, 
0xfb, 0xbc, 0x4f, 0x7e, 0xce, 0xfb, 0x24, - 0xb9, 0xef, 0xef, 0xed, 0xcd, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x11, 0xe3, 0x4b, 0x92, 0xf3, - 0x03, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// SlasherClient is the client API for Slasher service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type SlasherClient interface { - IsSlashableAttestation(ctx context.Context, in *v1alpha1.IndexedAttestation, opts ...grpc.CallOption) (*AttesterSlashingResponse, error) - IsSlashableBlock(ctx context.Context, in *v1alpha1.SignedBeaconBlockHeader, opts ...grpc.CallOption) (*ProposerSlashingResponse, error) -} - -type slasherClient struct { - cc *grpc.ClientConn -} - -func NewSlasherClient(cc *grpc.ClientConn) SlasherClient { - return &slasherClient{cc} -} - -func (c *slasherClient) IsSlashableAttestation(ctx context.Context, in *v1alpha1.IndexedAttestation, opts ...grpc.CallOption) (*AttesterSlashingResponse, error) { - out := new(AttesterSlashingResponse) - err := c.cc.Invoke(ctx, "/ethereum.slashing.Slasher/IsSlashableAttestation", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *slasherClient) IsSlashableBlock(ctx context.Context, in *v1alpha1.SignedBeaconBlockHeader, opts ...grpc.CallOption) (*ProposerSlashingResponse, error) { - out := new(ProposerSlashingResponse) - err := c.cc.Invoke(ctx, "/ethereum.slashing.Slasher/IsSlashableBlock", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// SlasherServer is the server API for Slasher service. -type SlasherServer interface { - IsSlashableAttestation(context.Context, *v1alpha1.IndexedAttestation) (*AttesterSlashingResponse, error) - IsSlashableBlock(context.Context, *v1alpha1.SignedBeaconBlockHeader) (*ProposerSlashingResponse, error) -} - -// UnimplementedSlasherServer can be embedded to have forward compatible implementations. 
-type UnimplementedSlasherServer struct { -} - -func (*UnimplementedSlasherServer) IsSlashableAttestation(ctx context.Context, req *v1alpha1.IndexedAttestation) (*AttesterSlashingResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method IsSlashableAttestation not implemented") -} -func (*UnimplementedSlasherServer) IsSlashableBlock(ctx context.Context, req *v1alpha1.SignedBeaconBlockHeader) (*ProposerSlashingResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method IsSlashableBlock not implemented") -} - -func RegisterSlasherServer(s *grpc.Server, srv SlasherServer) { - s.RegisterService(&_Slasher_serviceDesc, srv) -} - -func _Slasher_IsSlashableAttestation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1alpha1.IndexedAttestation) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SlasherServer).IsSlashableAttestation(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/ethereum.slashing.Slasher/IsSlashableAttestation", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SlasherServer).IsSlashableAttestation(ctx, req.(*v1alpha1.IndexedAttestation)) - } - return interceptor(ctx, in, info, handler) -} - -func _Slasher_IsSlashableBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1alpha1.SignedBeaconBlockHeader) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SlasherServer).IsSlashableBlock(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/ethereum.slashing.Slasher/IsSlashableBlock", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SlasherServer).IsSlashableBlock(ctx, req.(*v1alpha1.SignedBeaconBlockHeader)) - } - return interceptor(ctx, in, info, handler) -} - -var _Slasher_serviceDesc = grpc.ServiceDesc{ - ServiceName: "ethereum.slashing.Slasher", - HandlerType: (*SlasherServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "IsSlashableAttestation", - Handler: _Slasher_IsSlashableAttestation_Handler, - }, - { - MethodName: "IsSlashableBlock", - Handler: _Slasher_IsSlashableBlock_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "proto/slashing/slashing.proto", -} - -func (m *ProposerSlashingResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProposerSlashingResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProposerSlashingResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ProposerSlashing) > 0 { - for iNdEx := len(m.ProposerSlashing) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ProposerSlashing[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSlashing(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *AttesterSlashingResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() 
- dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AttesterSlashingResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AttesterSlashingResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.AttesterSlashing) > 0 { - for iNdEx := len(m.AttesterSlashing) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AttesterSlashing[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSlashing(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ProposalHistory) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProposalHistory) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProposalHistory) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.LatestEpochWritten != 0 { - i = encodeVarintSlashing(dAtA, i, uint64(m.LatestEpochWritten)) - i-- - dAtA[i] = 0x10 - } - if len(m.EpochBits) > 0 { - i -= len(m.EpochBits) - copy(dAtA[i:], m.EpochBits) - i = encodeVarintSlashing(dAtA, i, uint64(len(m.EpochBits))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AttestationHistory) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AttestationHistory) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AttestationHistory) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.LatestEpochWritten != 0 { - i = encodeVarintSlashing(dAtA, i, uint64(m.LatestEpochWritten)) - i-- - dAtA[i] = 0x10 - } - if len(m.TargetToSource) > 0 { - for k := range m.TargetToSource { - v := m.TargetToSource[k] - baseI := i - i = encodeVarintSlashing(dAtA, i, uint64(v)) - i-- - dAtA[i] = 0x10 - i = encodeVarintSlashing(dAtA, i, uint64(k)) - i-- - dAtA[i] = 0x8 - i = encodeVarintSlashing(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintSlashing(dAtA []byte, offset int, v uint64) int { - offset -= sovSlashing(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ProposerSlashingResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ProposerSlashing) > 0 { - for _, e := range m.ProposerSlashing { - l = e.Size() - n += 1 + l + sovSlashing(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AttesterSlashingResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if 
len(m.AttesterSlashing) > 0 { - for _, e := range m.AttesterSlashing { - l = e.Size() - n += 1 + l + sovSlashing(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ProposalHistory) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.EpochBits) - if l > 0 { - n += 1 + l + sovSlashing(uint64(l)) - } - if m.LatestEpochWritten != 0 { - n += 1 + sovSlashing(uint64(m.LatestEpochWritten)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AttestationHistory) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.TargetToSource) > 0 { - for k, v := range m.TargetToSource { - _ = k - _ = v - mapEntrySize := 1 + sovSlashing(uint64(k)) + 1 + sovSlashing(uint64(v)) - n += mapEntrySize + 1 + sovSlashing(uint64(mapEntrySize)) - } - } - if m.LatestEpochWritten != 0 { - n += 1 + sovSlashing(uint64(m.LatestEpochWritten)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovSlashing(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozSlashing(x uint64) (n int) { - return sovSlashing(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ProposerSlashingResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSlashing - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProposerSlashingResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProposerSlashingResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProposerSlashing", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSlashing - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSlashing - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSlashing - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProposerSlashing = append(m.ProposerSlashing, &v1alpha1.ProposerSlashing{}) - if err := m.ProposerSlashing[len(m.ProposerSlashing)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSlashing(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSlashing - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthSlashing - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AttesterSlashingResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSlashing - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AttesterSlashingResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AttesterSlashingResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AttesterSlashing", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSlashing - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSlashing - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSlashing - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AttesterSlashing = append(m.AttesterSlashing, &v1alpha1.AttesterSlashing{}) - if err := m.AttesterSlashing[len(m.AttesterSlashing)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSlashing(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSlashing - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthSlashing - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProposalHistory) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSlashing - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProposalHistory: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProposalHistory: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EpochBits", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSlashing - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSlashing - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthSlashing - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EpochBits = append(m.EpochBits[:0], dAtA[iNdEx:postIndex]...) 
- if m.EpochBits == nil { - m.EpochBits = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LatestEpochWritten", wireType) - } - m.LatestEpochWritten = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSlashing - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LatestEpochWritten |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipSlashing(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSlashing - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthSlashing - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AttestationHistory) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSlashing - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AttestationHistory: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AttestationHistory: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetToSource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSlashing - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSlashing - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSlashing - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TargetToSource == nil { - m.TargetToSource = make(map[uint64]uint64) - } - var mapkey uint64 - var mapvalue uint64 - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSlashing - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSlashing - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - } else if fieldNum == 2 { - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSlashing - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - } else { - iNdEx = entryPreIndex - skippy, err := skipSlashing(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSlashing - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.TargetToSource[mapkey] = mapvalue - iNdEx = postIndex - case 2: - if 
wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LatestEpochWritten", wireType) - } - m.LatestEpochWritten = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSlashing - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LatestEpochWritten |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipSlashing(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSlashing - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthSlashing - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSlashing(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSlashing - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSlashing - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSlashing - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthSlashing - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupSlashing - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthSlashing - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthSlashing = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSlashing = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupSlashing = fmt.Errorf("proto: unexpected end of group") -) diff --git a/proto/testing/ssz_static_test.go b/proto/testing/ssz_static_test.go index dad6c5c8c4..7f51077dc0 100644 --- a/proto/testing/ssz_static_test.go +++ b/proto/testing/ssz_static_test.go @@ -130,8 +130,13 @@ func UnmarshalledSSZ(t *testing.T, serializedBytes []byte, folderName string) (i return nil, nil case "Eth1Data": obj = ðpb.Eth1Data{} + case "Eth1Block": + t.Skip("Unused type") + return nil, nil case "Fork": obj = &pb.Fork{} + case "ForkData": + obj = &pb.ForkData{} case "HistoricalBatch": obj = &pb.HistoricalBatch{} case "IndexedAttestation": @@ -140,12 +145,16 @@ func UnmarshalledSSZ(t *testing.T, serializedBytes []byte, folderName string) (i obj = &pb.PendingAttestation{} case "ProposerSlashing": obj = ðpb.ProposerSlashing{} + case "SignedAggregateAndProof": + obj = &pb.SignedAggregateAndProof{} case "SignedBeaconBlock": obj = ðpb.SignedBeaconBlock{} case "SignedBeaconBlockHeader": obj = ðpb.SignedBeaconBlockHeader{} case "SignedVoluntaryExit": obj = ðpb.SignedVoluntaryExit{} + case "SigningRoot": + obj = &pb.SigningRoot{} case "Validator": obj 
= ðpb.Validator{} case "VoluntaryExit": diff --git a/shared/benchutil/benchmark_files/bState1Epoch-128Atts-16384Vals.ssz b/shared/benchutil/benchmark_files/bState1Epoch-128Atts-16384Vals.ssz index af65a82e29..c168b002ad 100644 Binary files a/shared/benchutil/benchmark_files/bState1Epoch-128Atts-16384Vals.ssz and b/shared/benchutil/benchmark_files/bState1Epoch-128Atts-16384Vals.ssz differ diff --git a/shared/benchutil/benchmark_files/bState2Epochs-128Atts-16384Vals.ssz b/shared/benchutil/benchmark_files/bState2Epochs-128Atts-16384Vals.ssz index 83e66a04bc..9f26b9522f 100644 Binary files a/shared/benchutil/benchmark_files/bState2Epochs-128Atts-16384Vals.ssz and b/shared/benchutil/benchmark_files/bState2Epochs-128Atts-16384Vals.ssz differ diff --git a/shared/benchutil/benchmark_files/fullBlock-128Atts-16384Vals.ssz b/shared/benchutil/benchmark_files/fullBlock-128Atts-16384Vals.ssz index aa0361e705..0abb802357 100644 Binary files a/shared/benchutil/benchmark_files/fullBlock-128Atts-16384Vals.ssz and b/shared/benchutil/benchmark_files/fullBlock-128Atts-16384Vals.ssz differ diff --git a/shared/bls/BUILD.bazel b/shared/bls/BUILD.bazel index 0ab26be629..86f3695ed3 100644 --- a/shared/bls/BUILD.bazel +++ b/shared/bls/BUILD.bazel @@ -8,13 +8,11 @@ go_library( importpath = "github.com/prysmaticlabs/prysm/shared/bls", visibility = ["//visibility:public"], deps = [ - "//shared/bytesutil:go_default_library", "//shared/featureconfig:go_default_library", "//shared/hashutil:go_default_library", "//shared/params:go_default_library", "@com_github_dgraph_io_ristretto//:go_default_library", "@com_github_pkg_errors//:go_default_library", - "@com_github_sirupsen_logrus//:go_default_library", "@herumi_bls_eth_go_binary//:go_default_library", ], ) diff --git a/shared/bls/bls.go b/shared/bls/bls.go index a334bc77a4..c05a909963 100644 --- a/shared/bls/bls.go +++ b/shared/bls/bls.go @@ -10,27 +10,23 @@ import ( "github.com/dgraph-io/ristretto" bls12 "github.com/herumi/bls-eth-go-binary/bls" "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/featureconfig" "github.com/prysmaticlabs/prysm/shared/hashutil" "github.com/prysmaticlabs/prysm/shared/params" - "github.com/sirupsen/logrus" ) func init() { - err := bls12.Init(bls12.BLS12_381) - if err != nil { + if err := bls12.Init(bls12.BLS12_381); err != nil { + panic(err) + } + if err := bls12.SetETHmode(1); err != nil { panic(err) } - bls12.SetETHserialization(true) } // DomainByteLength length of domain byte array. const DomainByteLength = 4 -// ForkVersionByteLength length of fork version byte array. -const ForkVersionByteLength = 4 - var maxKeys = int64(100000) var pubkeyCache, _ = ristretto.NewCache(&ristretto.Config{ NumCounters: maxKeys, @@ -134,11 +130,11 @@ func concatMsgAndDomain(msg []byte, domain uint64) []byte { } // Sign a message using a secret key - in a beacon/validator client. -func (s *SecretKey) Sign(msg []byte, domain uint64) *Signature { +func (s *SecretKey) Sign(msg []byte) *Signature { if featureconfig.Get().SkipBLSVerify { return &Signature{} } - signature := s.p.SignHashWithDomain(concatMsgAndDomain(msg, domain)) + signature := s.p.SignByte(msg) return &Signature{s: signature} } @@ -172,18 +168,18 @@ func (p *PublicKey) Aggregate(p2 *PublicKey) *PublicKey { return p } -// Verify a bls signature given a public key, a message, and a domain. -func (s *Signature) Verify(msg []byte, pub *PublicKey, domain uint64) bool { +// Verify a bls signature given a public key, a message. 
+func (s *Signature) Verify(msg []byte, pub *PublicKey) bool { if featureconfig.Get().SkipBLSVerify { return true } - return s.s.VerifyHashWithDomain(pub.p, concatMsgAndDomain(msg, domain)) + return s.s.VerifyByte(pub.p, msg) } // VerifyAggregate verifies each public key against its respective message. // This is vulnerable to rogue public-key attack. Each user must // provide a proof-of-knowledge of the public key. -func (s *Signature) VerifyAggregate(pubKeys []*PublicKey, msg [][32]byte, domain uint64) bool { +func (s *Signature) VerifyAggregate(pubKeys []*PublicKey, msg [][32]byte) bool { if featureconfig.Get().SkipBLSVerify { return true } @@ -194,36 +190,52 @@ func (s *Signature) VerifyAggregate(pubKeys []*PublicKey, msg [][32]byte, domain if size != len(msg) { return false } - hashWithDomains := make([]byte, 0, size*concatMsgDomainSize) + hashes := make([][]byte, 0, len(msg)) var rawKeys []bls12.PublicKey for i := 0; i < size; i++ { - hashWithDomains = append(hashWithDomains, concatMsgAndDomain(msg[i][:], domain)...) + hashes = append(hashes, msg[i][:]) rawKeys = append(rawKeys, *pubKeys[i].p) } - return s.s.VerifyAggregateHashWithDomain(rawKeys, hashWithDomains) + return s.s.VerifyAggregateHashes(rawKeys, hashes) } -// VerifyAggregateCommon verifies each public key against its respective message. +// AggregateVerify verifies each public key against its respective message. // This is vulnerable to rogue public-key attack. Each user must // provide a proof-of-knowledge of the public key. -func (s *Signature) VerifyAggregateCommon(pubKeys []*PublicKey, msg [32]byte, domain uint64) bool { +func (s *Signature) AggregateVerify(pubKeys []*PublicKey, msgs [][32]byte) bool { + if featureconfig.Get().SkipBLSVerify { + return true + } + size := len(pubKeys) + if size == 0 { + return false + } + if size != len(msgs) { + return false + } + msgSlices := []byte{} + var rawKeys []bls12.PublicKey + for i := 0; i < size; i++ { + msgSlices = append(msgSlices, msgs[i][:]...) + rawKeys = append(rawKeys, *pubKeys[i].p) + } + return s.s.AggregateVerify(rawKeys, msgSlices) +} + +// FastAggregateVerify verifies all the provided pubkeys with their aggregated signature. +func (s *Signature) FastAggregateVerify(pubKeys []*PublicKey, msg [32]byte) bool { if featureconfig.Get().SkipBLSVerify { return true } if len(pubKeys) == 0 { return false } - aggregated, err := pubKeys[0].Copy() - if err != nil { - logrus.WithError(err).Error("Failed to copy public key") - return false + rawKeys := make([]bls12.PublicKey, len(pubKeys)) + for i := 0; i < len(pubKeys); i++ { + rawKeys[i] = *pubKeys[i].p } - for i := 1; i < len(pubKeys); i++ { - aggregated.p.Add(pubKeys[i].p) - } - - return s.s.VerifyHashWithDomain(aggregated.p, concatMsgAndDomain(msg[:], domain)) + return s.s.FastAggregateVerify(rawKeys, msg[:]) } // NewAggregateSignature creates a blank aggregate signature. @@ -262,35 +274,6 @@ func (s *Signature) Marshal() []byte { return s.s.Serialize() } -// Domain returns the bls domain given by the domain type and the operation 4 byte fork version. -// -// Spec pseudocode definition: -// def get_domain(state: BeaconState, domain_type: DomainType, message_epoch: Epoch=None) -> Domain: -// """ -// Return the signature domain (fork version concatenated with domain type) of a message. 
-// """ -// epoch = get_current_epoch(state) if message_epoch is None else message_epoch -// fork_version = state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version -// return compute_domain(domain_type, fork_version) -func Domain(domainType [DomainByteLength]byte, forkVersion [ForkVersionByteLength]byte) uint64 { - b := make([]byte, 8) - copy(b[:4], domainType[:4]) - copy(b[4:], forkVersion[:4]) - return bytesutil.FromBytes8(b) -} - -// ComputeDomain returns the domain version for BLS private key to sign and verify with a zeroed 4-byte -// array as the fork version. -// -// def compute_domain(domain_type: DomainType, fork_version: Version=Version()) -> Domain: -// """ -// Return the domain for the ``domain_type`` and ``fork_version``. -// """ -// return Domain(domain_type + fork_version) -func ComputeDomain(domainType [DomainByteLength]byte) uint64 { - return Domain(domainType, [4]byte{0, 0, 0, 0}) -} - // HashWithDomain hashes 32 byte message and uint64 domain parameters a Fp2 element func HashWithDomain(messageHash [32]byte, domain [8]byte) []byte { xReBytes := [41]byte{} diff --git a/shared/bls/bls_test.go b/shared/bls/bls_test.go index 4d9497575c..97b9dba6b0 100644 --- a/shared/bls/bls_test.go +++ b/shared/bls/bls_test.go @@ -29,8 +29,8 @@ func TestSignVerify(t *testing.T) { priv := bls.RandKey() pub := priv.PublicKey() msg := []byte("hello") - sig := priv.Sign(msg, 0) - if !sig.Verify(msg, pub, 0) { + sig := priv.Sign(msg) + if !sig.Verify(msg, pub) { t.Error("Signature did not verify") } } @@ -43,13 +43,13 @@ func TestVerifyAggregate(t *testing.T) { msg := [32]byte{'h', 'e', 'l', 'l', 'o', byte(i)} priv := bls.RandKey() pub := priv.PublicKey() - sig := priv.Sign(msg[:], 0) + sig := priv.Sign(msg[:]) pubkeys = append(pubkeys, pub) sigs = append(sigs, sig) msgs = append(msgs, msg) } aggSig := bls.AggregateSignatures(sigs) - if !aggSig.VerifyAggregate(pubkeys, msgs, 0) { + if !aggSig.VerifyAggregate(pubkeys, msgs) { t.Error("Signature did not verify") } } @@ -61,12 +61,12 @@ func TestVerifyAggregateCommon(t *testing.T) { for i := 0; i < 100; i++ { priv := bls.RandKey() pub := priv.PublicKey() - sig := priv.Sign(msg[:], 0) + sig := priv.Sign(msg[:]) pubkeys = append(pubkeys, pub) sigs = append(sigs, sig) } aggSig := bls.AggregateSignatures(sigs) - if !aggSig.VerifyAggregateCommon(pubkeys, msg, 0) { + if !aggSig.FastAggregateVerify(pubkeys, msg) { t.Error("Signature did not verify") } } @@ -77,32 +77,12 @@ func TestVerifyAggregate_ReturnsFalseOnEmptyPubKeyList(t *testing.T) { msg := [32]byte{'h', 'e', 'l', 'l', 'o'} aggSig := bls.AggregateSignatures(sigs) - if aggSig.VerifyAggregateCommon(pubkeys, msg, 0 /*domain*/) != false { + if aggSig.FastAggregateVerify(pubkeys, msg) != false { t.Error("Expected VerifyAggregate to return false with empty input " + "of public keys.") } } -func TestComputeDomain_OK(t *testing.T) { - tests := []struct { - epoch uint64 - domainType uint64 - domain uint64 - }{ - {epoch: 1, domainType: 4, domain: 4}, - {epoch: 2, domainType: 4, domain: 4}, - {epoch: 2, domainType: 5, domain: 5}, - {epoch: 3, domainType: 4, domain: 4}, - {epoch: 3, domainType: 5, domain: 5}, - } - for _, tt := range tests { - domain := bls.ComputeDomain(bytesutil.ToBytes4(bytesutil.Bytes4(tt.domainType))) - if domain != tt.domain { - t.Errorf("wanted domain version: %d, got: %d", tt.domain, domain) - } - } -} - func TestSecretKeyFromBytes(t *testing.T) { tests := []struct { name string diff --git a/shared/bls/spectest/BUILD.bazel 
b/shared/bls/spectest/BUILD.bazel index 9a567c6cb1..a92fe6477f 100644 --- a/shared/bls/spectest/BUILD.bazel +++ b/shared/bls/spectest/BUILD.bazel @@ -4,12 +4,11 @@ go_library( name = "go_default_library", testonly = True, srcs = [ - "aggregate_pubkeys_test.yaml.go", - "aggregate_sigs_test.yaml.go", - "msg_hash_compressed_test.yaml.go", - "msg_hash_uncompressed_test.yaml.go", - "priv_to_pub_test.yaml.go", - "sign_msg_test.yaml.go", + "aggregate_test.yaml.go", + "aggregate_verify_test.yaml.go", + "fast_aggregate_verify_test.yaml.go", + "sign_test.yaml.go", + "verify_test.yaml.go", ], importpath = "github.com/prysmaticlabs/prysm/shared/bls/spectest", visibility = ["//visibility:public"], @@ -19,12 +18,11 @@ go_test( name = "go_default_test", size = "small", srcs = [ - "aggregate_pubkeys_test.go", - "aggregate_sigs_test.go", - "msg_hash_compressed_test.go", - "msg_hash_uncompressed_test.go", - "priv_to_pub_test.go", - "sign_msg_test.go", + "aggregate_test.go", + "aggregate_verify_test.go", + "fast_aggregate_verify_test.go", + "sign_test.go", + "verify_test.go", ], data = [ "@eth2_spec_tests_general//:test_data", @@ -36,6 +34,5 @@ go_test( "//shared/bytesutil:go_default_library", "//shared/testutil:go_default_library", "@com_github_ghodss_yaml//:go_default_library", - "@herumi_bls_eth_go_binary//:go_default_library", ], ) diff --git a/shared/bls/spectest/aggregate_pubkeys_test.go b/shared/bls/spectest/aggregate_pubkeys_test.go deleted file mode 100644 index d5dd2480ca..0000000000 --- a/shared/bls/spectest/aggregate_pubkeys_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package spectest - -import ( - "bytes" - "encoding/hex" - "testing" - - "github.com/ghodss/yaml" - "github.com/prysmaticlabs/prysm/shared/bls" - "github.com/prysmaticlabs/prysm/shared/testutil" -) - -func TestAggregatePubkeysYaml(t *testing.T) { - file, err := testutil.BazelFileBytes("tests/general/phase0/bls/aggregate_pubkeys/small/agg_pub_keys/data.yaml") - if err != nil { - t.Fatalf("Failed to read file: %v", err) - } - - test := &AggregatePubkeysTest{} - if err := yaml.Unmarshal(file, test); err != nil { - t.Fatalf("Failed to unmarshal: %v", err) - } - - pubBytes, err := hex.DecodeString(test.Input[0][2:]) - if err != nil { - t.Fatalf("Cannot decode string to bytes: %v", err) - } - pk, err := bls.PublicKeyFromBytes(pubBytes) - if err != nil { - t.Fatal(err) - } - for _, pk2 := range test.Input[1:] { - pubBytes2, err := hex.DecodeString(pk2[2:]) - if err != nil { - t.Fatalf("Cannot decode string to bytes: %v", err) - } - p, err := bls.PublicKeyFromBytes(pubBytes2) - if err != nil { - t.Fatal(err) - } - pk.Aggregate(p) - } - - outputBytes, err := hex.DecodeString(test.Output[2:]) - if err != nil { - t.Fatalf("Cannot decode string to bytes: %v", err) - } - if !bytes.Equal(outputBytes, pk.Marshal()) { - t.Fatalf("Output does not equal marshaled aggregated public "+ - "key bytes. wanted %#x but got %#x", outputBytes, pk.Marshal()) - } -} diff --git a/shared/bls/spectest/aggregate_pubkeys_test.yaml.go b/shared/bls/spectest/aggregate_pubkeys_test.yaml.go deleted file mode 100644 index 90840e6deb..0000000000 --- a/shared/bls/spectest/aggregate_pubkeys_test.yaml.go +++ /dev/null @@ -1,9 +0,0 @@ -// Code generated by yaml_to_go. DO NOT EDIT. 
-// source: aggregate_pubkeys.yaml - -package spectest - -type AggregatePubkeysTest struct { - Input []string `json:"input"` - Output string `json:"output" ssz:"size=48"` -} diff --git a/shared/bls/spectest/aggregate_sigs_test.go b/shared/bls/spectest/aggregate_test.go similarity index 88% rename from shared/bls/spectest/aggregate_sigs_test.go rename to shared/bls/spectest/aggregate_test.go index 3aef858b1d..241fe01c2c 100644 --- a/shared/bls/spectest/aggregate_sigs_test.go +++ b/shared/bls/spectest/aggregate_test.go @@ -7,12 +7,12 @@ import ( "testing" "github.com/ghodss/yaml" - "github.com/prysmaticlabs/prysm/shared/bls" + bls "github.com/prysmaticlabs/prysm/shared/bls" "github.com/prysmaticlabs/prysm/shared/testutil" ) -func TestAggregateSignaturesYaml(t *testing.T) { - testFolders, testFolderPath := testutil.TestFolders(t, "general", "bls/aggregate_sigs/small") +func TestAggregateYaml(t *testing.T) { + testFolders, testFolderPath := testutil.TestFolders(t, "general", "bls/aggregate/small") for _, folder := range testFolders { t.Run(folder.Name(), func(t *testing.T) { @@ -21,7 +21,7 @@ func TestAggregateSignaturesYaml(t *testing.T) { t.Fatalf("Failed to read file: %v", err) } - test := &AggregateSigsTest{} + test := &AggregateTest{} if err := yaml.Unmarshal(file, test); err != nil { t.Fatalf("Failed to unmarshal: %v", err) } diff --git a/shared/bls/spectest/aggregate_sigs_test.yaml.go b/shared/bls/spectest/aggregate_test.yaml.go similarity index 69% rename from shared/bls/spectest/aggregate_sigs_test.yaml.go rename to shared/bls/spectest/aggregate_test.yaml.go index ce2df7239e..1b179c28d7 100644 --- a/shared/bls/spectest/aggregate_sigs_test.yaml.go +++ b/shared/bls/spectest/aggregate_test.yaml.go @@ -1,9 +1,9 @@ // Code generated by yaml_to_go. DO NOT EDIT. 
-// source: aggregate_sigs.yaml +// source: aggregate.yaml package spectest -type AggregateSigsTest struct { +type AggregateTest struct { Input []string `json:"input"` Output string `json:"output" ssz:"size=96"` } diff --git a/shared/bls/spectest/aggregate_verify_test.go b/shared/bls/spectest/aggregate_verify_test.go new file mode 100644 index 0000000000..39938d40e3 --- /dev/null +++ b/shared/bls/spectest/aggregate_verify_test.go @@ -0,0 +1,68 @@ +package spectest + +import ( + "encoding/hex" + "path" + "testing" + + "github.com/ghodss/yaml" + bls "github.com/prysmaticlabs/prysm/shared/bls" + "github.com/prysmaticlabs/prysm/shared/bytesutil" + "github.com/prysmaticlabs/prysm/shared/testutil" +) + +func TestAggregateVerifyYaml(t *testing.T) { + testFolders, testFolderPath := testutil.TestFolders(t, "general", "bls/aggregate_verify/small") + + for i, folder := range testFolders { + t.Run(folder.Name(), func(t *testing.T) { + file, err := testutil.BazelFileBytes(path.Join(testFolderPath, folder.Name(), "data.yaml")) + if err != nil { + t.Fatalf("Failed to read file: %v", err) + } + test := &AggregateVerifyTest{} + if err := yaml.Unmarshal(file, test); err != nil { + t.Fatalf("Failed to unmarshal: %v", err) + } + + pubkeys := make([]*bls.PublicKey, 0, len(test.Input.Pairs)) + msgs := make([][32]byte, 0, len(test.Input.Pairs)) + for _, pair := range test.Input.Pairs { + pkBytes, err := hex.DecodeString(pair.Pubkey[2:]) + if err != nil { + t.Fatalf("Cannot decode string to bytes: %v", err) + } + pk, err := bls.PublicKeyFromBytes(pkBytes) + if err != nil { + t.Fatalf("Cannot unmarshal input to secret key: %v", err) + } + pubkeys = append(pubkeys, pk) + msgBytes, err := hex.DecodeString(pair.Message[2:]) + if err != nil { + t.Fatalf("Cannot decode string to bytes: %v", err) + } + if len(msgBytes) != 32 { + t.Fatalf("Message: %#x is not 32 bytes", msgBytes) + } + msgs = append(msgs, bytesutil.ToBytes32(msgBytes)) + } + sigBytes, err := hex.DecodeString(test.Input.Signature[2:]) + if err != nil { + t.Fatalf("Cannot decode string to bytes: %v", err) + } + sig, err := bls.SignatureFromBytes(sigBytes) + if err != nil { + if test.Output == false { + return + } + t.Fatalf("Cannot unmarshal input to signature: %v", err) + } + + verified := sig.AggregateVerify(pubkeys, msgs) + if verified != test.Output { + t.Fatalf("Signature does not match the expected verification output. "+ + "Expected %#v but received %#v for test case %d", test.Output, verified, i) + } + }) + } +} diff --git a/shared/bls/spectest/aggregate_verify_test.yaml.go b/shared/bls/spectest/aggregate_verify_test.yaml.go new file mode 100644 index 0000000000..ab84cb3fca --- /dev/null +++ b/shared/bls/spectest/aggregate_verify_test.yaml.go @@ -0,0 +1,15 @@ +// Code generated by yaml_to_go. DO NOT EDIT. 
+// source: aggregate_verify.yaml + +package spectest + +type AggregateVerifyTest struct { + Input struct { + Pairs []struct { + Pubkey string `json:"pubkey"` + Message string `json:"message"` + } `json:"pairs"` + Signature string `json:"signature"` + } `json:"input"` + Output bool `json:"output"` +} diff --git a/shared/bls/spectest/fast_aggregate_verify_test.go b/shared/bls/spectest/fast_aggregate_verify_test.go new file mode 100644 index 0000000000..9a318ff77f --- /dev/null +++ b/shared/bls/spectest/fast_aggregate_verify_test.go @@ -0,0 +1,66 @@ +package spectest + +import ( + "encoding/hex" + "path" + "testing" + + "github.com/ghodss/yaml" + bls "github.com/prysmaticlabs/prysm/shared/bls" + "github.com/prysmaticlabs/prysm/shared/bytesutil" + "github.com/prysmaticlabs/prysm/shared/testutil" +) + +func TestFastAggregateVerifyYaml(t *testing.T) { + testFolders, testFolderPath := testutil.TestFolders(t, "general", "bls/fast_aggregate_verify/small") + + for i, folder := range testFolders { + t.Run(folder.Name(), func(t *testing.T) { + file, err := testutil.BazelFileBytes(path.Join(testFolderPath, folder.Name(), "data.yaml")) + if err != nil { + t.Fatalf("Failed to read file: %v", err) + } + test := &FastAggregateVerifyTest{} + if err := yaml.Unmarshal(file, test); err != nil { + t.Fatalf("Failed to unmarshal: %v", err) + } + + pubkeys := make([]*bls.PublicKey, len(test.Input.Pubkeys)) + for j, raw := range test.Input.Pubkeys { + pkBytes, err := hex.DecodeString(raw[2:]) + if err != nil { + t.Fatalf("Cannot decode string to bytes: %v", err) + } + pk, err := bls.PublicKeyFromBytes(pkBytes) + if err != nil { + t.Fatalf("Cannot unmarshal input to secret key: %v", err) + } + pubkeys[j] = pk + } + + msgBytes, err := hex.DecodeString(test.Input.Message[2:]) + if err != nil { + t.Fatalf("Cannot decode string to bytes: %v", err) + } + + sigBytes, err := hex.DecodeString(test.Input.Signature[2:]) + if err != nil { + t.Fatalf("Cannot decode string to bytes: %v", err) + } + sig, err := bls.SignatureFromBytes(sigBytes) + if err != nil { + if test.Output == false { + return + } + t.Fatalf("Cannot unmarshal input to signature: %v", err) + } + + verified := sig.FastAggregateVerify(pubkeys, bytesutil.ToBytes32(msgBytes)) + if verified != test.Output { + t.Fatalf("Signature does not match the expected verification output. "+ + "Expected %#v but received %#v for test case %d", test.Output, verified, i) + } + t.Log("Success") + }) + } +} diff --git a/shared/bls/spectest/fast_aggregate_verify_test.yaml.go b/shared/bls/spectest/fast_aggregate_verify_test.yaml.go new file mode 100644 index 0000000000..b0e1b42e51 --- /dev/null +++ b/shared/bls/spectest/fast_aggregate_verify_test.yaml.go @@ -0,0 +1,13 @@ +// Code generated by yaml_to_go. DO NOT EDIT. 
+// source: fast_aggregate_verify.yaml + +package spectest + +type FastAggregateVerifyTest struct { + Input struct { + Pubkeys []string `json:"pubkeys"` + Message string `json:"message"` + Signature string `json:"signature"` + } `json:"input"` + Output bool `json:"output"` +} diff --git a/shared/bls/spectest/msg_hash_compressed_test.go b/shared/bls/spectest/msg_hash_compressed_test.go deleted file mode 100644 index 0601e78c0e..0000000000 --- a/shared/bls/spectest/msg_hash_compressed_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package spectest - -import ( - "bytes" - "encoding/hex" - "path" - "testing" - - "github.com/ghodss/yaml" - bls2 "github.com/herumi/bls-eth-go-binary/bls" - "github.com/prysmaticlabs/prysm/shared/bls" - "github.com/prysmaticlabs/prysm/shared/bytesutil" - "github.com/prysmaticlabs/prysm/shared/testutil" -) - -// Note: This actually tests the underlying library as we don't have a need for -// HashG2Compressed in our local BLS API. -func TestMsgHashCompressed(t *testing.T) { - testFolders, testFolderPath := testutil.TestFolders(t, "general", "bls/msg_hash_compressed/small") - - for _, folder := range testFolders { - t.Run(folder.Name(), func(t *testing.T) { - file, err := testutil.BazelFileBytes(path.Join(testFolderPath, folder.Name(), "data.yaml")) - if err != nil { - t.Fatalf("Failed to read file: %v", err) - } - test := &MsgHashCompressedTest{} - if err := yaml.Unmarshal(file, test); err != nil { - t.Fatalf("Failed to unmarshal: %v", err) - } - - msgBytes, err := hex.DecodeString(test.Input.Message[2:]) - if err != nil { - t.Fatalf("Cannot decode string to bytes: %v", err) - } - domain, err := hex.DecodeString(test.Input.Domain[2:]) - if err != nil { - t.Fatalf("Cannot decode string to bytes: %v", err) - } - hash := bls.HashWithDomain( - bytesutil.ToBytes32(msgBytes), - bytesutil.ToBytes8(domain), - ) - g2Point := &bls2.G2{} - fp2Point := &bls2.Fp2{} - err = fp2Point.Deserialize(hash) - if err != nil { - t.Fatal(err) - } - err = bls2.MapToG2(g2Point, fp2Point) - if err != nil { - t.Fatal(err) - } - compressedHash := g2Point.Serialize() - - var buf []byte - for _, innerString := range test.Output { - slice, err := hex.DecodeString(innerString[2:]) - if err != nil { - t.Fatalf("Cannot decode string to bytes: %v", err) - } - buf = append(buf, slice...) - } - if !bytes.Equal(buf, compressedHash[:]) { - t.Logf("Domain=%d", domain) - t.Fatalf("Hash does not match the expected output. "+ - "Expected %#x but received %#x", buf, compressedHash) - } - t.Logf("Success. Domain=%d", domain) - }) - } -} diff --git a/shared/bls/spectest/msg_hash_compressed_test.yaml.go b/shared/bls/spectest/msg_hash_compressed_test.yaml.go deleted file mode 100644 index 39d32d2a00..0000000000 --- a/shared/bls/spectest/msg_hash_compressed_test.yaml.go +++ /dev/null @@ -1,12 +0,0 @@ -// Code generated by yaml_to_go. DO NOT EDIT. 
-// source: g2_compressed.yaml - -package spectest - -type MsgHashCompressedTest struct { - Input struct { - Message string `json:"message"` - Domain string `json:"domain"` - } `json:"input"` - Output []string `json:"output"` -} diff --git a/shared/bls/spectest/msg_hash_uncompressed_test.go b/shared/bls/spectest/msg_hash_uncompressed_test.go deleted file mode 100644 index 10a35d2652..0000000000 --- a/shared/bls/spectest/msg_hash_uncompressed_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package spectest - -import ( - "bytes" - "encoding/hex" - "path" - "testing" - - "github.com/ghodss/yaml" - bls2 "github.com/herumi/bls-eth-go-binary/bls" - "github.com/prysmaticlabs/prysm/shared/bls" - "github.com/prysmaticlabs/prysm/shared/bytesutil" - "github.com/prysmaticlabs/prysm/shared/testutil" -) - -// Note: This actually tests the underlying library as we don't have a need for -// HashG2Uncompressed in our local BLS API. -func TestMsgHashUncompressed(t *testing.T) { - t.Skip("The python uncompressed method does not match the go uncompressed method and this isn't very important") - testFolders, testFolderPath := testutil.TestFolders(t, "general", "bls/msg_hash_uncompressed/small") - - for _, folder := range testFolders { - t.Run(folder.Name(), func(t *testing.T) { - file, err := testutil.BazelFileBytes(path.Join(testFolderPath, folder.Name(), "data.yaml")) - if err != nil { - t.Fatalf("Failed to read file: %v", err) - } - test := &MsgHashUncompressedTest{} - if err := yaml.Unmarshal(file, test); err != nil { - t.Fatalf("Failed to unmarshal: %v", err) - } - - domain, err := hex.DecodeString(test.Input.Domain[2:]) - if err != nil { - t.Fatalf("Cannot decode string to bytes: %v", err) - } - msgBytes, err := hex.DecodeString(test.Input.Message[2:]) - if err != nil { - t.Fatalf("Cannot decode string to bytes: %v", err) - } - hash := bls.HashWithDomain( - bytesutil.ToBytes32(msgBytes), - bytesutil.ToBytes8(domain), - ) - sig := bls2.HashAndMapToSignature(hash) - uncompressed := sig.Serialize() - - var buf []byte - for _, outputStrings := range test.Output { - for _, innerString := range outputStrings { - slice, err := hex.DecodeString(innerString[2:]) - if err != nil { - t.Fatalf("Cannot decode string to bytes: %v", err) - } - buf = append(buf, slice...) - } - } - if !bytes.Equal(buf, uncompressed[:]) { - t.Logf("Domain=%d", domain) - t.Logf("Message=%#x", msgBytes) - t.Fatalf("Hash does not match the expected output. "+ - "Expected %#x but received %#x", buf, uncompressed) - } - t.Logf("Success. Domain=%d", domain) - }) - } -} diff --git a/shared/bls/spectest/msg_hash_uncompressed_test.yaml.go b/shared/bls/spectest/msg_hash_uncompressed_test.yaml.go deleted file mode 100644 index 08bee18f94..0000000000 --- a/shared/bls/spectest/msg_hash_uncompressed_test.yaml.go +++ /dev/null @@ -1,12 +0,0 @@ -// Code generated by yaml_to_go. DO NOT EDIT. 
-// source: g2_uncompressed.yaml - -package spectest - -type MsgHashUncompressedTest struct { - Input struct { - Message string `json:"message"` - Domain string `json:"domain"` - } `json:"input"` - Output [][]string `json:"output"` -} diff --git a/shared/bls/spectest/priv_to_pub_test.go b/shared/bls/spectest/priv_to_pub_test.go deleted file mode 100644 index 5cc6279f28..0000000000 --- a/shared/bls/spectest/priv_to_pub_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package spectest - -import ( - "bytes" - "encoding/hex" - "path" - "testing" - - "github.com/ghodss/yaml" - "github.com/prysmaticlabs/prysm/shared/bls" - "github.com/prysmaticlabs/prysm/shared/testutil" -) - -func TestPrivToPubYaml(t *testing.T) { - testFolders, testFolderPath := testutil.TestFolders(t, "general", "bls/priv_to_pub/small") - - for _, folder := range testFolders { - t.Run(folder.Name(), func(t *testing.T) { - file, err := testutil.BazelFileBytes(path.Join(testFolderPath, folder.Name(), "data.yaml")) - if err != nil { - t.Fatalf("Failed to read file: %v", err) - } - test := &PrivToPubTest{} - if err := yaml.Unmarshal(file, test); err != nil { - t.Fatalf("Failed to unmarshal: %v", err) - } - - pkBytes, err := hex.DecodeString(test.Input[2:]) - if err != nil { - t.Fatalf("Cannot decode string to bytes: %v", err) - } - sk, err := bls.SecretKeyFromBytes(pkBytes) - if err != nil { - t.Fatalf("Cannot unmarshal input to secret key: %v", err) - } - - outputBytes, err := hex.DecodeString(test.Output[2:]) - if err != nil { - t.Fatalf("Cannot decode string to bytes: %v", err) - } - if !bytes.Equal(outputBytes, sk.PublicKey().Marshal()) { - t.Fatalf("Output does not marshaled public key bytes wanted %#x but got %#x", outputBytes, sk.PublicKey().Marshal()) - } - }) - } -} diff --git a/shared/bls/spectest/priv_to_pub_test.yaml.go b/shared/bls/spectest/priv_to_pub_test.yaml.go deleted file mode 100644 index 151b67149e..0000000000 --- a/shared/bls/spectest/priv_to_pub_test.yaml.go +++ /dev/null @@ -1,9 +0,0 @@ -// Code generated by yaml_to_go. DO NOT EDIT. 
-// source: priv_to_pub.yaml - -package spectest - -type PrivToPubTest struct { - Input string `json:"input"` - Output string `json:"output"` -} diff --git a/shared/bls/spectest/sign_msg_test.go b/shared/bls/spectest/sign_test.go similarity index 69% rename from shared/bls/spectest/sign_msg_test.go rename to shared/bls/spectest/sign_test.go index ebd068962b..9c81c2aefc 100644 --- a/shared/bls/spectest/sign_msg_test.go +++ b/shared/bls/spectest/sign_test.go @@ -2,20 +2,19 @@ package spectest import ( "bytes" - "encoding/binary" "encoding/hex" "path" "testing" "github.com/ghodss/yaml" - "github.com/prysmaticlabs/prysm/shared/bls" + bls "github.com/prysmaticlabs/prysm/shared/bls" "github.com/prysmaticlabs/prysm/shared/testutil" ) func TestSignMessageYaml(t *testing.T) { - testFolders, testFolderPath := testutil.TestFolders(t, "general", "bls/sign_msg/small") + testFolders, testFolderPath := testutil.TestFolders(t, "general", "bls/sign/small") - for _, folder := range testFolders { + for i, folder := range testFolders { t.Run(folder.Name(), func(t *testing.T) { file, err := testutil.BazelFileBytes(path.Join(testFolderPath, folder.Name(), "data.yaml")) if err != nil { @@ -39,23 +38,22 @@ func TestSignMessageYaml(t *testing.T) { if err != nil { t.Fatalf("Cannot decode string to bytes: %v", err) } - domain, err := hex.DecodeString(test.Input.Domain[2:]) - if err != nil { - t.Fatalf("Cannot decode string to bytes: %v", err) + sig := sk.Sign(msgBytes) + + if !sig.Verify(msgBytes, sk.PublicKey()) { + t.Fatal("could not verify signature") } - num := binary.LittleEndian.Uint64(domain) - sig := sk.Sign(msgBytes, num) outputBytes, err := hex.DecodeString(test.Output[2:]) if err != nil { t.Fatalf("Cannot decode string to bytes: %v", err) } + if !bytes.Equal(outputBytes, sig.Marshal()) { - t.Logf("Domain=%d", domain) - t.Fatalf("Signature does not match the expected output. "+ - "Expected %#x but received %#x", outputBytes, sig.Marshal()) + t.Fatalf("Test Case %d: Signature does not match the expected output. "+ + "Expected %#x but received %#x", i, outputBytes, sig.Marshal()) } - t.Logf("Success. 
Domain=%d", domain) + t.Log("Success") }) } } diff --git a/shared/bls/spectest/sign_msg_test.yaml.go b/shared/bls/spectest/sign_test.yaml.go similarity index 88% rename from shared/bls/spectest/sign_msg_test.yaml.go rename to shared/bls/spectest/sign_test.yaml.go index df76c5be22..501cb07472 100644 --- a/shared/bls/spectest/sign_msg_test.yaml.go +++ b/shared/bls/spectest/sign_test.yaml.go @@ -7,7 +7,6 @@ type SignMsgTest struct { Input struct { Privkey string `json:"privkey"` Message string `json:"message"` - Domain string `json:"domain"` } `json:"input"` Output string `json:"output"` } diff --git a/shared/bls/spectest/verify_test.go b/shared/bls/spectest/verify_test.go new file mode 100644 index 0000000000..fb2a1f08b3 --- /dev/null +++ b/shared/bls/spectest/verify_test.go @@ -0,0 +1,61 @@ +package spectest + +import ( + "encoding/hex" + "path" + "testing" + + "github.com/ghodss/yaml" + bls "github.com/prysmaticlabs/prysm/shared/bls" + "github.com/prysmaticlabs/prysm/shared/testutil" +) + +func TestVerifyMessageYaml(t *testing.T) { + testFolders, testFolderPath := testutil.TestFolders(t, "general", "bls/verify/small") + + for i, folder := range testFolders { + t.Run(folder.Name(), func(t *testing.T) { + file, err := testutil.BazelFileBytes(path.Join(testFolderPath, folder.Name(), "data.yaml")) + if err != nil { + t.Fatalf("Failed to read file: %v", err) + } + test := &VerifyMsgTest{} + if err := yaml.Unmarshal(file, test); err != nil { + t.Fatalf("Failed to unmarshal: %v", err) + } + + pkBytes, err := hex.DecodeString(test.Input.Pubkey[2:]) + if err != nil { + t.Fatalf("Cannot decode string to bytes: %v", err) + } + pk, err := bls.PublicKeyFromBytes(pkBytes) + if err != nil { + t.Fatalf("Cannot unmarshal input to secret key: %v", err) + } + + msgBytes, err := hex.DecodeString(test.Input.Message[2:]) + if err != nil { + t.Fatalf("Cannot decode string to bytes: %v", err) + } + + sigBytes, err := hex.DecodeString(test.Input.Signature[2:]) + if err != nil { + t.Fatalf("Cannot decode string to bytes: %v", err) + } + sig, err := bls.SignatureFromBytes(sigBytes) + if err != nil { + if test.Output == false { + return + } + t.Fatalf("Cannot unmarshal input to signature: %v", err) + } + + verified := sig.Verify(msgBytes, pk) + if verified != test.Output { + t.Fatalf("Signature does not match the expected verification output. "+ + "Expected %#v but received %#v for test case %d", test.Output, verified, i) + } + t.Log("Success") + }) + } +} diff --git a/shared/bls/spectest/verify_test.yaml.go b/shared/bls/spectest/verify_test.yaml.go new file mode 100644 index 0000000000..e45c3c721f --- /dev/null +++ b/shared/bls/spectest/verify_test.yaml.go @@ -0,0 +1,13 @@ +// Code generated by yaml_to_go. DO NOT EDIT. +// source: verify.yaml + +package spectest + +type VerifyMsgTest struct { + Input struct { + Pubkey string `json:"pubkey"` + Message string `json:"message"` + Signature string `json:"signature"` + } `json:"input"` + Output bool `json:"output"` +} diff --git a/shared/bytesutil/bytes.go b/shared/bytesutil/bytes.go index 7ce8b0e7ff..c8c9931449 100644 --- a/shared/bytesutil/bytes.go +++ b/shared/bytesutil/bytes.go @@ -133,6 +133,15 @@ func ToBytes48(x []byte) [48]byte { return y } +// ToBytes64 is a convenience method for converting a byte slice to a fix +// sized 64 byte array. This method will truncate the input if it is larger +// than 64 bytes. +func ToBytes64(x []byte) [64]byte { + var y [64]byte + copy(y[:], x) + return y +} + // ToBool is a convenience method for converting a byte to a bool. 
// This method will use the first bit of the 0 byte to generate the returned value. func ToBool(x byte) bool { diff --git a/shared/cmd/flags.go b/shared/cmd/flags.go index dd8ffcf96a..7fb8bde867 100644 --- a/shared/cmd/flags.go +++ b/shared/cmd/flags.go @@ -112,6 +112,12 @@ var ( Usage: "The file containing the private key to use in communications with other peers.", Value: "", } + // P2PMetadata defines a flag to specify the location of the peer metadata file. + P2PMetadata = &cli.StringFlag{ + Name: "p2p-metadata", + Usage: "The file containing the metadata to communicate with other peers.", + Value: "", + } // P2PMaxPeers defines a flag to specify the max number of peers in libp2p. P2PMaxPeers = &cli.Int64Flag{ Name: "p2p-max-peers", diff --git a/shared/featureconfig/config.go b/shared/featureconfig/config.go index 10cc504d63..82400cdc2d 100644 --- a/shared/featureconfig/config.go +++ b/shared/featureconfig/config.go @@ -28,11 +28,10 @@ var log = logrus.WithField("prefix", "flags") // Flags is a struct to represent which features the client will perform on runtime. type Flags struct { - NoCustomConfig bool // NoCustomConfigFlag determines whether to launch a beacon chain using real parameters or demo parameters. MinimalConfig bool // MinimalConfig as defined in the spec. WriteSSZStateTransitions bool // WriteSSZStateTransitions to tmp directory. InitSyncNoVerify bool // InitSyncNoVerify when initial syncing w/o verifying block's contents. - EnableDynamicCommitteeSubnets bool // Enables dynamic attestation committee subnets via p2p. + DisableDynamicCommitteeSubnets bool // Disables dynamic attestation committee subnets via p2p. SkipBLSVerify bool // Skips BLS verification across the runtime. EnableBackupWebhook bool // EnableBackupWebhook to allow database backups to trigger from monitoring port /db/backup. PruneEpochBoundaryStates bool // PruneEpochBoundaryStates prunes the epoch boundary state before last finalized check point. @@ -47,7 +46,7 @@ type Flags struct { CheckHeadState bool // CheckHeadState checks the current headstate before retrieving the desired state from the db. EnableNoise bool // EnableNoise enables the beacon node to use NOISE instead of SECIO when performing a handshake with another peer. DontPruneStateStartUp bool // DontPruneStateStartUp disables pruning state upon beacon node start up. - NewStateMgmt bool // NewStateMgmt enables the new experimental state mgmt service. + DisableNewStateMgmt bool // NewStateMgmt disables the new state mgmt service. DisableInitSyncQueue bool // DisableInitSyncQueue disables the new initial sync implementation. EnableFieldTrie bool // EnableFieldTrie enables the state from using field specific tries when computing the root. EnableBlockHTR bool // EnableBlockHTR enables custom hashing of our beacon blocks. 
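The surrounding featureconfig hunks invert the polarity of two feature gates: dynamic attestation committee subnets and the new state management service become the default behavior, and the renamed Disable* fields and flags opt out of them rather than the old Enable* flags opting in. A minimal sketch (not part of this patch) of how a caller reads the inverted gates, assuming the featureconfig.Get() accessor used elsewhere in this diff:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/shared/featureconfig"
)

func main() {
	cfg := featureconfig.Get()
	// Both services are now on unless explicitly turned off via the new disable flags.
	fmt.Println("dynamic committee subnets enabled:", !cfg.DisableDynamicCommitteeSubnets)
	fmt.Println("new state mgmt enabled:", !cfg.DisableNewStateMgmt)
}
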
@@ -105,9 +104,9 @@ func ConfigureBeaconChain(ctx *cli.Context) { log.Warn("UNSAFE: Disabled fork choice for updating chain head") cfg.DisableForkChoice = true } - if ctx.Bool(enableDynamicCommitteeSubnets.Name) { - log.Warn("Enabled dynamic attestation committee subnets") - cfg.EnableDynamicCommitteeSubnets = true + if ctx.Bool(disableDynamicCommitteeSubnets.Name) { + log.Warn("Disabled dynamic attestation committee subnets") + cfg.DisableDynamicCommitteeSubnets = true } cfg.EnableSSZCache = true if ctx.Bool(disableSSZCache.Name) { @@ -172,9 +171,9 @@ func ConfigureBeaconChain(ctx *cli.Context) { log.Warn("Not enabling state pruning upon start up") cfg.DontPruneStateStartUp = true } - if ctx.Bool(newStateMgmt.Name) { - log.Warn("Enabling experimental state management service") - cfg.NewStateMgmt = true + if ctx.Bool(disableNewStateMgmt.Name) { + log.Warn("Disabling state management service") + cfg.DisableNewStateMgmt = true } if ctx.Bool(disableInitSyncQueue.Name) { log.Warn("Disabled initial sync queue") @@ -227,27 +226,12 @@ func complainOnDeprecatedFlags(ctx *cli.Context) { } func configureConfig(ctx *cli.Context, cfg *Flags) *Flags { - if ctx.Bool(noCustomConfigFlag.Name) { - log.Warn("Using default mainnet config") - cfg.NoCustomConfig = true - } if ctx.Bool(minimalConfigFlag.Name) { log.Warn("Using minimal config") cfg.MinimalConfig = true - } - // Use custom config values if the --no-custom-config flag is not set. - if !cfg.NoCustomConfig { - if cfg.MinimalConfig { - log.WithField( - "config", "minimal-spec", - ).Info("Using custom chain parameters") - params.UseMinimalConfig() - } else { - log.WithField( - "config", "demo", - ).Info("Using custom chain parameters") - params.UseDemoBeaconConfig() - } + params.UseMinimalConfig() + } else { + log.Warn("Using default mainnet config") } return cfg } diff --git a/shared/featureconfig/flags.go b/shared/featureconfig/flags.go index d3a4af378f..d9e755e2c2 100644 --- a/shared/featureconfig/flags.go +++ b/shared/featureconfig/flags.go @@ -9,10 +9,6 @@ var ( Name: "broadcast-slashing", Usage: "Broadcast slashings from slashing pool.", } - noCustomConfigFlag = &cli.BoolFlag{ - Name: "no-custom-config", - Usage: "Run the beacon chain with the real parameters from phase 0.", - } minimalConfigFlag = &cli.BoolFlag{ Name: "minimal-config", Usage: "Use minimal config with parameters as defined in the spec.", @@ -21,9 +17,9 @@ var ( Name: "interop-write-ssz-state-transitions", Usage: "Write ssz states to disk after attempted state transition", } - enableDynamicCommitteeSubnets = &cli.BoolFlag{ - Name: "enable-dynamic-committee-subnets", - Usage: "Enable dynamic committee attestation subnets.", + disableDynamicCommitteeSubnets = &cli.BoolFlag{ + Name: "disable-dynamic-committee-subnets", + Usage: "Disable dynamic committee attestation subnets.", } // disableForkChoiceUnsafeFlag disables using the LMD-GHOST fork choice to update // the head of the chain based on attestations and instead accepts any valid received block @@ -120,9 +116,9 @@ var ( Name: "dont-prune-state-start-up", Usage: "Don't prune historical states upon start up", } - newStateMgmt = &cli.BoolFlag{ - Name: "new-state-mgmt", - Usage: "This enables the usage of experimental state mgmt service across Prysm", + disableNewStateMgmt = &cli.BoolFlag{ + Name: "disable-new-state-mgmt", + Usage: "This disables the usage of state mgmt service across Prysm", } disableInitSyncQueue = &cli.BoolFlag{ Name: "disable-init-sync-queue", @@ -146,6 +142,16 @@ var ( const deprecatedUsage = "DEPRECATED. 
DO NOT USE." var ( + deprecatedEnableDynamicCommitteeSubnets = &cli.BoolFlag{ + Name: "enable-dynamic-committee-subnets", + Usage: deprecatedUsage, + Hidden: true, + } + deprecatedNoCustomConfigFlag = &cli.BoolFlag{ + Name: "no-custom-config", + Usage: deprecatedUsage, + Hidden: true, + } deprecatedEnableInitSyncQueue = &cli.BoolFlag{ Name: "enable-initial-sync-queue", Usage: deprecatedUsage, @@ -272,6 +278,11 @@ var ( Usage: deprecatedUsage, Hidden: true, } + deprecatedDiscv5Flag = &cli.BoolFlag{ + Name: "enable-discv5", + Usage: deprecatedUsage, + Hidden: true, + } deprecatedEnableSSZCache = &cli.BoolFlag{ Name: "enable-ssz-cache", Usage: deprecatedUsage, @@ -280,6 +291,8 @@ var ( ) var deprecatedFlags = []cli.Flag{ + deprecatedEnableDynamicCommitteeSubnets, + deprecatedNoCustomConfigFlag, deprecatedEnableInitSyncQueue, deprecatedEnableFinalizedBlockRootIndexFlag, deprecatedScatterFlag, @@ -305,6 +318,7 @@ var deprecatedFlags = []cli.Flag{ deprecatedInitSyncCacheStateFlag, deprecatedProtectAttesterFlag, deprecatedProtectProposerFlag, + deprecatedDiscv5Flag, deprecatedEnableSSZCache, } @@ -323,12 +337,11 @@ var E2EValidatorFlags = []string{ // BeaconChainFlags contains a list of all the feature flags that apply to the beacon-chain client. var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{ - noCustomConfigFlag, customGenesisDelayFlag, minimalConfigFlag, writeSSZStateTransitionsFlag, disableForkChoiceUnsafeFlag, - enableDynamicCommitteeSubnets, + disableDynamicCommitteeSubnets, disableSSZCache, enableEth1DataVoteCacheFlag, initSyncVerifyEverythingFlag, @@ -345,7 +358,7 @@ var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{ enableNoiseHandshake, dontPruneStateStartUp, broadcastSlashingFlag, - newStateMgmt, + disableNewStateMgmt, disableInitSyncQueue, enableFieldTrie, enableCustomBlockHTR, @@ -360,6 +373,4 @@ var E2EBeaconChainFlags = []string{ "--enable-state-gen-sig-verify", "--check-head-state", "--enable-state-field-trie", - // TODO(5123): This flag currently fails E2E. Commenting until it's resolved. 
- //"--enable-dynamic-committee-subnets", } diff --git a/shared/interop/BUILD.bazel b/shared/interop/BUILD.bazel index 1ce437bc1d..2947fbcec3 100644 --- a/shared/interop/BUILD.bazel +++ b/shared/interop/BUILD.bazel @@ -9,6 +9,7 @@ go_library( importpath = "github.com/prysmaticlabs/prysm/shared/interop", visibility = ["//visibility:public"], deps = [ + "//beacon-chain/core/helpers:go_default_library", "//beacon-chain/core/state:go_default_library", "//proto/beacon/p2p/v1:go_default_library", "//shared/bls:go_default_library", diff --git a/shared/interop/generate_genesis_state.go b/shared/interop/generate_genesis_state.go index 059c383e80..7a90c85dc5 100644 --- a/shared/interop/generate_genesis_state.go +++ b/shared/interop/generate_genesis_state.go @@ -6,6 +6,7 @@ import ( "github.com/pkg/errors" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-ssz" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/state" pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bls" @@ -149,8 +150,15 @@ func createDepositData(privKey *bls.SecretKey, pubKey *bls.PublicKey) (*ethpb.De if err != nil { return nil, err } - domain := bls.ComputeDomain(params.BeaconConfig().DomainDeposit) - di.Signature = privKey.Sign(sr[:], domain).Marshal() + domain, err := helpers.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil) + if err != nil { + return nil, err + } + root, err := ssz.HashTreeRoot(&pb.SigningRoot{ObjectRoot: sr[:], Domain: domain}) + if err != nil { + return nil, err + } + di.Signature = privKey.Sign(root[:]).Marshal() return di, nil } diff --git a/shared/keystore/BUILD.bazel b/shared/keystore/BUILD.bazel index 45f60ced7f..42ec165cc0 100644 --- a/shared/keystore/BUILD.bazel +++ b/shared/keystore/BUILD.bazel @@ -12,6 +12,8 @@ go_library( importpath = "github.com/prysmaticlabs/prysm/shared/keystore", visibility = ["//visibility:public"], deps = [ + "//beacon-chain/core/helpers:go_default_library", + "//proto/beacon/p2p/v1:go_default_library", "//shared/bls:go_default_library", "//shared/hashutil:go_default_library", "//shared/params:go_default_library", @@ -37,6 +39,7 @@ go_test( ], embed = [":go_default_library"], deps = [ + "//proto/beacon/p2p/v1:go_default_library", "//shared/bls:go_default_library", "//shared/bytesutil:go_default_library", "//shared/params:go_default_library", diff --git a/shared/keystore/deposit_input.go b/shared/keystore/deposit_input.go index 3217edfbc2..1b852d73bf 100644 --- a/shared/keystore/deposit_input.go +++ b/shared/keystore/deposit_input.go @@ -3,7 +3,8 @@ package keystore import ( ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-ssz" - "github.com/prysmaticlabs/prysm/shared/bls" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/hashutil" "github.com/prysmaticlabs/prysm/shared/params" ) @@ -34,8 +35,15 @@ func DepositInput(depositKey *Key, withdrawalKey *Key, amountInGwei uint64) (*et return nil, [32]byte{}, err } - domain := bls.ComputeDomain(params.BeaconConfig().DomainDeposit) - di.Signature = depositKey.SecretKey.Sign(sr[:], domain).Marshal() + domain, err := helpers.ComputeDomain(params.BeaconConfig().DomainDeposit, nil /*forkVersion*/, nil/*genesisValidatorsRoot*/) + if err != nil { + return nil, [32]byte{}, err + } + root, err := ssz.HashTreeRoot(&pb.SigningRoot{ObjectRoot: sr[:], 
Domain: domain}) + if err != nil { + return nil, [32]byte{}, err + } + di.Signature = depositKey.SecretKey.Sign(root[:]).Marshal() dr, err := ssz.HashTreeRoot(di) if err != nil { diff --git a/shared/keystore/deposit_input_test.go b/shared/keystore/deposit_input_test.go index 98647e23a3..68743fd7b5 100644 --- a/shared/keystore/deposit_input_test.go +++ b/shared/keystore/deposit_input_test.go @@ -5,13 +5,14 @@ import ( "testing" "github.com/prysmaticlabs/go-ssz" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bls" - "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/keystore" "github.com/prysmaticlabs/prysm/shared/params" ) func TestDepositInput_GeneratesPb(t *testing.T) { + t.Skip("To be resolved until 5119 gets in") k1, err := keystore.NewKey() if err != nil { t.Fatal(err) @@ -38,9 +39,12 @@ func TestDepositInput_GeneratesPb(t *testing.T) { if err != nil { t.Fatal(err) } - - dom := bytesutil.FromBytes4(params.BeaconConfig().DomainDeposit[:]) - if !sig.Verify(sr[:], k1.PublicKey, dom) { + dom := params.BeaconConfig().DomainDeposit + root, err := ssz.HashTreeRoot(&pb.SigningRoot{ObjectRoot: sr[:], Domain: dom[:]}) + if err != nil { + t.Fatal(err) + } + if !sig.Verify(root[:], k1.PublicKey) { t.Error("Invalid proof of deposit input signature") } } diff --git a/shared/params/config.go b/shared/params/config.go index e57ca44e4d..e507666ce0 100644 --- a/shared/params/config.go +++ b/shared/params/config.go @@ -26,6 +26,9 @@ type BeaconChainConfig struct { MinGenesisActiveValidatorCount uint64 `yaml:"MIN_GENESIS_ACTIVE_VALIDATOR_COUNT"` // MinGenesisActiveValidatorCount defines how many validator deposits needed to kick off beacon chain. MinGenesisTime uint64 `yaml:"MIN_GENESIS_TIME"` // MinGenesisTime is the time that needed to pass before kicking off beacon chain. TargetAggregatorsPerCommittee uint64 // TargetAggregatorsPerCommittee defines the number of aggregators inside one committee. + HysteresisQuotient uint64 // HysteresisQuotient defines the hysteresis quotient for effective balance calculations. + HysteresisDownwardMultiplier uint64 // HysteresisDownwardMultiplier defines the hysteresis downward multiplier for effective balance calculations. + HysteresisUpwardMultiplier uint64 // HysteresisUpwardMultiplier defines the hysteresis upward multiplier for effective balance calculations. // Gwei value constants. MinDepositAmount uint64 `yaml:"MIN_DEPOSIT_AMOUNT"` // MinDepositAmount is the maximal amount of Gwei a validator can send to the deposit contract at once. @@ -43,7 +46,7 @@ type BeaconChainConfig struct { SlotsPerEpoch uint64 `yaml:"SLOTS_PER_EPOCH"` // SlotsPerEpoch is the number of slots in an epoch. MinSeedLookahead uint64 `yaml:"MIN_SEED_LOOKAHEAD"` // SeedLookahead is the duration of randao look ahead seed. MaxSeedLookahead uint64 `yaml:"MAX_SEED_LOOKAHEAD"` // MaxSeedLookahead is the duration a validator has to wait for entry and exit in epoch. - SlotsPerEth1VotingPeriod uint64 `yaml:"SLOTS_PER_ETH1_VOTING_PERIOD"` // SlotsPerEth1VotingPeriod defines how often the merkle root of deposit receipts get updated in beacon node. + EpochsPerEth1VotingPeriod uint64 `yaml:"EPOCHS_PER_ETH1_VOTING_PERIOD"` // EpochsPerEth1VotingPeriod defines how often the merkle root of deposit receipts get updated in beacon node on per epoch basis. SlotsPerHistoricalRoot uint64 `yaml:"SLOTS_PER_HISTORICAL_ROOT"` // SlotsPerHistoricalRoot defines how often the historical root is saved. 
MinValidatorWithdrawabilityDelay uint64 `yaml:"MIN_VALIDATOR_WITHDRAWABILITY_DELAY"` // MinValidatorWithdrawabilityDelay is the shortest amount of time a validator has to wait to withdraw. PersistentCommitteePeriod uint64 `yaml:"PERSISTENT_COMMITTEE_PERIOD"` // PersistentCommitteePeriod is the minimum amount of epochs a validator must participate before exiting. @@ -51,7 +54,7 @@ type BeaconChainConfig struct { Eth1FollowDistance uint64 // Eth1FollowDistance is the number of eth1.0 blocks to wait before considering a new deposit for voting. This only applies after the chain as been started. SafeSlotsToUpdateJustified uint64 // SafeSlotsToUpdateJustified is the minimal slots needed to update justified check point. AttestationPropagationSlotRange uint64 // AttestationPropagationSlotRange is the maximum number of slots during which an attestation can be propagated. - + SecondsPerETH1Block uint64 `yaml:"SECONDS_PER_ETH1_BLOCK"` // SecondsPerETH1Block is the approximate time for a single eth1 block to be produced. // State list lengths EpochsPerHistoricalVector uint64 `yaml:"EPOCHS_PER_HISTORICAL_VECTOR"` // EpochsPerHistoricalVector defines max length in epoch to store old historical stats in beacon state. EpochsPerSlashingsVector uint64 `yaml:"EPOCHS_PER_SLASHINGS_VECTOR"` // EpochsPerSlashingsVector defines max length in epoch to store old stats to recompute slashing witness. @@ -73,11 +76,13 @@ type BeaconChainConfig struct { MaxVoluntaryExits uint64 `yaml:"MAX_VOLUNTARY_EXITS"` // MaxVoluntaryExits defines the maximum number of validator exits in a block. // BLS domain values. - DomainBeaconProposer [4]byte `yaml:"DOMAIN_BEACON_PROPOSER"` // DomainBeaconProposer defines the BLS signature domain for beacon proposal verification. - DomainRandao [4]byte `yaml:"DOMAIN_RANDAO"` // DomainRandao defines the BLS signature domain for randao verification. - DomainBeaconAttester [4]byte `yaml:"DOMAIN_ATTESTATION"` // DomainBeaconAttester defines the BLS signature domain for attestation verification. - DomainDeposit [4]byte `yaml:"DOMAIN_DEPOSIT"` // DomainDeposit defines the BLS signature domain for deposit verification. - DomainVoluntaryExit [4]byte `yaml:"DOMAIN_VOLUNTARY_EXIT"` // DomainVoluntaryExit defines the BLS signature domain for exit verification. + DomainBeaconProposer [4]byte `yaml:"DOMAIN_BEACON_PROPOSER"` // DomainBeaconProposer defines the BLS signature domain for beacon proposal verification. + DomainRandao [4]byte `yaml:"DOMAIN_RANDAO"` // DomainRandao defines the BLS signature domain for randao verification. + DomainBeaconAttester [4]byte `yaml:"DOMAIN_ATTESTATION"` // DomainBeaconAttester defines the BLS signature domain for attestation verification. + DomainDeposit [4]byte `yaml:"DOMAIN_DEPOSIT"` // DomainDeposit defines the BLS signature domain for deposit verification. + DomainVoluntaryExit [4]byte `yaml:"DOMAIN_VOLUNTARY_EXIT"` // DomainVoluntaryExit defines the BLS signature domain for exit verification. + DomainSelectionProof [4]byte `yaml:"DOMAIN_SELECTION_PROOF"` // DomainSelectionProof defines the BLS signature domain for selection proof. + DomainAggregateAndProof [4]byte `yaml:"DOMAIN_AGGREGATE_AND_PROOF"` // DomainAggregateAndProof defines the BLS signature domain for aggregate and proof. // Prysm constants. GweiPerEth uint64 // GweiPerEth is the amount of gwei corresponding to 1 eth. @@ -90,7 +95,6 @@ type BeaconChainConfig struct { WithdrawalPrivkeyFileName string // WithdrawalPrivKeyFileName specifies the string name of a withdrawal private key file. 
RPCSyncCheck time.Duration // Number of seconds to query the sync service, to find out if the node is synced or not. GoerliBlockTime uint64 // GoerliBlockTime is the number of seconds on avg a Goerli block is created. - GenesisForkVersion []byte `yaml:"GENESIS_FORK_VERSION"` // GenesisForkVersion is used to track fork version between state transitions. EmptySignature [96]byte // EmptySignature is used to represent a zeroed out BLS Signature. DefaultPageSize int // DefaultPageSize defines the default page size for RPC server request. MaxPeersToSync int // MaxPeersToSync describes the limit for number of peers in round robin sync. @@ -99,6 +103,12 @@ type BeaconChainConfig struct { // Slasher constants. WeakSubjectivityPeriod uint64 // WeakSubjectivityPeriod defines the time period expressed in number of epochs were proof of stake network should validate block headers and attestations for slashable events. PruneSlasherStoragePeriod uint64 // PruneSlasherStoragePeriod defines the time period expressed in number of epochs were proof of stake network should prune attestation and block header store. + + // Fork-related values. + GenesisForkVersion []byte `yaml:"GENESIS_FORK_VERSION"` // GenesisForkVersion is used to track fork version between state transitions. + NextForkVersion []byte `yaml:"NEXT_FORK_VERSION"` // NextForkVersion is used to track the upcoming fork version, if any. + NextForkEpoch uint64 `yaml:"NEXT_FORK_EPOCH"` // NextForkEpoch is used to track the epoch of the next fork, if any. + ForkVersionSchedule map[uint64][]byte // Schedule of fork versions by epoch number. } var defaultBeaconConfig = &BeaconChainConfig{ @@ -118,6 +128,9 @@ var defaultBeaconConfig = &BeaconChainConfig{ MinGenesisActiveValidatorCount: 16384, MinGenesisTime: 0, // Zero until a proper time is decided. TargetAggregatorsPerCommittee: 16, + HysteresisQuotient: 4, + HysteresisDownwardMultiplier: 1, + HysteresisUpwardMultiplier: 5, // Gwei value constants. MinDepositAmount: 1 * 1e9, @@ -135,7 +148,7 @@ var defaultBeaconConfig = &BeaconChainConfig{ SlotsPerEpoch: 32, MinSeedLookahead: 1, MaxSeedLookahead: 4, - SlotsPerEth1VotingPeriod: 1024, + EpochsPerEth1VotingPeriod: 32, SlotsPerHistoricalRoot: 8192, MinValidatorWithdrawabilityDelay: 256, PersistentCommitteePeriod: 2048, @@ -143,6 +156,7 @@ var defaultBeaconConfig = &BeaconChainConfig{ Eth1FollowDistance: 1024, SafeSlotsToUpdateJustified: 8, AttestationPropagationSlotRange: 32, + SecondsPerETH1Block: 14, // State list length constants. EpochsPerHistoricalVector: 65536, @@ -165,11 +179,13 @@ var defaultBeaconConfig = &BeaconChainConfig{ MaxVoluntaryExits: 16, // BLS domain values. - DomainBeaconProposer: bytesutil.ToBytes4(bytesutil.Bytes4(0)), - DomainBeaconAttester: bytesutil.ToBytes4(bytesutil.Bytes4(1)), - DomainRandao: bytesutil.ToBytes4(bytesutil.Bytes4(2)), - DomainDeposit: bytesutil.ToBytes4(bytesutil.Bytes4(3)), - DomainVoluntaryExit: bytesutil.ToBytes4(bytesutil.Bytes4(4)), + DomainBeaconProposer: bytesutil.ToBytes4(bytesutil.Bytes4(0)), + DomainBeaconAttester: bytesutil.ToBytes4(bytesutil.Bytes4(1)), + DomainRandao: bytesutil.ToBytes4(bytesutil.Bytes4(2)), + DomainDeposit: bytesutil.ToBytes4(bytesutil.Bytes4(3)), + DomainVoluntaryExit: bytesutil.ToBytes4(bytesutil.Bytes4(4)), + DomainSelectionProof: bytesutil.ToBytes4(bytesutil.Bytes4(5)), + DomainAggregateAndProof: bytesutil.ToBytes4(bytesutil.Bytes4(6)), // Prysm constants. 
GweiPerEth: 1000000000, @@ -182,7 +198,6 @@ var defaultBeaconConfig = &BeaconChainConfig{ ValidatorPrivkeyFileName: "/validatorprivatekey", RPCSyncCheck: 1, GoerliBlockTime: 14, // 14 seconds on average for a goerli block to be created. - GenesisForkVersion: []byte{0, 0, 0, 0}, EmptySignature: [96]byte{}, DefaultPageSize: 250, MaxPeersToSync: 15, @@ -191,6 +206,14 @@ var defaultBeaconConfig = &BeaconChainConfig{ // Slasher related values. WeakSubjectivityPeriod: 54000, PruneSlasherStoragePeriod: 10, + + // Fork related values. + GenesisForkVersion: []byte{0, 0, 0, 0}, + NextForkVersion: []byte{0, 0, 0, 0}, // Set to GenesisForkVersion unless there is a scheduled fork + NextForkEpoch: 1<<64 - 1, // Set to FarFutureEpoch unless there is a scheduled fork. + ForkVersionSchedule: map[uint64][]byte{ + // Any further forks must be specified here by their epoch number. + }, } var beaconConfig = defaultBeaconConfig @@ -206,24 +229,6 @@ func MainnetConfig() *BeaconChainConfig { return defaultBeaconConfig } -// DemoBeaconConfig retrieves the demo beacon chain config. This is mainnet config with 1/10th of -// mainnet deposit values. -func DemoBeaconConfig() *BeaconChainConfig { - demoConfig := *MainnetConfig() - - demoConfig.MinDepositAmount /= 10 - demoConfig.MaxEffectiveBalance /= 10 - demoConfig.EjectionBalance /= 10 - demoConfig.EffectiveBalanceIncrement /= 10 - - demoConfig.InactivityPenaltyQuotient /= 10 - - // Increment this number after a full testnet tear down. - demoConfig.GenesisForkVersion = []byte{0, 0, 0, 4} - - return &demoConfig -} - // MinimalSpecConfig retrieves the minimal config used in spec tests. func MinimalSpecConfig() *BeaconChainConfig { minimalConfig := *defaultBeaconConfig @@ -254,13 +259,14 @@ func MinimalSpecConfig() *BeaconChainConfig { minimalConfig.SlotsPerEpoch = 8 minimalConfig.MinSeedLookahead = 1 minimalConfig.MaxSeedLookahead = 4 - minimalConfig.SlotsPerEth1VotingPeriod = 16 + minimalConfig.EpochsPerEth1VotingPeriod = 2 minimalConfig.SlotsPerHistoricalRoot = 64 minimalConfig.MinValidatorWithdrawabilityDelay = 256 - minimalConfig.PersistentCommitteePeriod = 2048 + minimalConfig.PersistentCommitteePeriod = 128 minimalConfig.MinEpochsToInactivityPenalty = 4 minimalConfig.Eth1FollowDistance = 16 minimalConfig.SafeSlotsToUpdateJustified = 2 + minimalConfig.SecondsPerETH1Block = 14 // State vector lengths minimalConfig.EpochsPerHistoricalVector = 64 @@ -288,17 +294,13 @@ func MinimalSpecConfig() *BeaconChainConfig { minimalConfig.DomainRandao = bytesutil.ToBytes4(bytesutil.Bytes4(2)) minimalConfig.DomainDeposit = bytesutil.ToBytes4(bytesutil.Bytes4(3)) minimalConfig.DomainVoluntaryExit = bytesutil.ToBytes4(bytesutil.Bytes4(4)) + minimalConfig.GenesisForkVersion = []byte{0, 0, 0, 1} minimalConfig.DepositContractTreeDepth = 32 minimalConfig.FarFutureEpoch = 1<<64 - 1 return &minimalConfig } -// UseDemoBeaconConfig for beacon chain services. -func UseDemoBeaconConfig() { - beaconConfig = DemoBeaconConfig() -} - // UseMinimalConfig for beacon chain services. 
func UseMinimalConfig() { beaconConfig = MinimalSpecConfig() diff --git a/shared/testutil/BUILD.bazel b/shared/testutil/BUILD.bazel index 8321320d37..cca45f10bc 100644 --- a/shared/testutil/BUILD.bazel +++ b/shared/testutil/BUILD.bazel @@ -9,6 +9,7 @@ go_library( "helpers.go", "log.go", "spectest.go", + "state.go", "tempdir.go", "wait_timeout.go", ], @@ -18,6 +19,7 @@ go_library( "//beacon-chain/core/helpers:go_default_library", "//beacon-chain/core/state:go_default_library", "//beacon-chain/state:go_default_library", + "//beacon-chain/state/stateutil:go_default_library", "//proto/beacon/p2p/v1:go_default_library", "//shared/bls:go_default_library", "//shared/bytesutil:go_default_library", @@ -44,14 +46,15 @@ go_test( "block_test.go", "deposits_test.go", "helpers_test.go", + "state_test.go", ], embed = [":go_default_library"], deps = [ "//beacon-chain/core/helpers:go_default_library", "//beacon-chain/core/state:go_default_library", "//beacon-chain/core/state/stateutils:go_default_library", + "//proto/beacon/p2p/v1:go_default_library", "//shared/bytesutil:go_default_library", "//shared/params:go_default_library", - "@com_github_prysmaticlabs_go_ssz//:go_default_library", ], ) diff --git a/shared/testutil/block.go b/shared/testutil/block.go index 37abe35258..a53032689f 100644 --- a/shared/testutil/block.go +++ b/shared/testutil/block.go @@ -10,10 +10,10 @@ import ( "github.com/pkg/errors" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-bitfield" - "github.com/prysmaticlabs/go-ssz" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/state" stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" "github.com/prysmaticlabs/prysm/shared/bls" "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/params" @@ -112,7 +112,7 @@ func GenerateFullBlock( return nil, err } newHeader.StateRoot = prevStateRoot[:] - parentRoot, err := ssz.HashTreeRoot(newHeader) + parentRoot, err := stateutil.BlockHeaderRoot(newHeader) if err != nil { return nil, err } @@ -131,9 +131,15 @@ func GenerateFullBlock( return nil, err } + idx, err := helpers.BeaconProposerIndex(bState) + if err != nil { + return nil, err + } + block := ðpb.BeaconBlock{ - Slot: slot, - ParentRoot: parentRoot[:], + Slot: slot, + ParentRoot: parentRoot[:], + ProposerIndex: idx, Body: ðpb.BeaconBlockBody{ Eth1Data: eth1Data, RandaoReveal: reveal, @@ -164,37 +170,42 @@ func GenerateProposerSlashingForValidator( ) (*ethpb.ProposerSlashing, error) { header1 := ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: bState.Slot(), - BodyRoot: bytesutil.PadTo([]byte{0, 1, 0}, 32), + ProposerIndex: idx, + Slot: bState.Slot(), + BodyRoot: bytesutil.PadTo([]byte{0, 1, 0}, 32), + StateRoot: make([]byte, 32), + ParentRoot: make([]byte, 32), }, } - root, err := ssz.HashTreeRoot(header1.Header) - if err != nil { - return nil, err - } currentEpoch := helpers.CurrentEpoch(bState) - domain, err := helpers.Domain(bState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer) + domain, err := helpers.Domain(bState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, bState.GenesisValidatorRoot()) if err != nil { return nil, err } - header1.Signature = priv.Sign(root[:], domain).Marshal() + root, err := helpers.ComputeSigningRoot(header1.Header, domain) + if err != nil { + return nil, err + } + header1.Signature = 
priv.Sign(root[:]).Marshal() header2 := ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: bState.Slot(), - BodyRoot: bytesutil.PadTo([]byte{0, 2, 0}, 32), + ProposerIndex: idx, + Slot: bState.Slot(), + BodyRoot: bytesutil.PadTo([]byte{0, 2, 0}, 32), + StateRoot: make([]byte, 32), + ParentRoot: make([]byte, 32), }, } - root, err = ssz.HashTreeRoot(header2.Header) + root, err = helpers.ComputeSigningRoot(header2.Header, domain) if err != nil { return nil, err } - header2.Signature = priv.Sign(root[:], domain).Marshal() + header2.Signature = priv.Sign(root[:]).Marshal() return ðpb.ProposerSlashing{ - ProposerIndex: idx, - Header_1: header1, - Header_2: header2, + Header_1: header1, + Header_2: header2, }, nil } @@ -241,15 +252,15 @@ func GenerateAttesterSlashingForValidator( }, AttestingIndices: []uint64{idx}, } - dataRoot, err := ssz.HashTreeRoot(att1.Data) + domain, err := helpers.Domain(bState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconAttester, bState.GenesisValidatorRoot()) if err != nil { return nil, err } - domain, err := helpers.Domain(bState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconAttester) + dataRoot, err := helpers.ComputeSigningRoot(att1.Data, domain) if err != nil { return nil, err } - sig := priv.Sign(dataRoot[:], domain) + sig := priv.Sign(dataRoot[:]) att1.Signature = bls.AggregateSignatures([]*bls.Signature{sig}).Marshal() att2 := ðpb.IndexedAttestation{ @@ -267,11 +278,11 @@ func GenerateAttesterSlashingForValidator( }, AttestingIndices: []uint64{idx}, } - dataRoot, err = ssz.HashTreeRoot(att2.Data) + dataRoot, err = helpers.ComputeSigningRoot(att2.Data, domain) if err != nil { return nil, err } - sig = priv.Sign(dataRoot[:], domain) + sig = priv.Sign(dataRoot[:]) att2.Signature = bls.AggregateSignatures([]*bls.Signature{sig}).Marshal() return ðpb.AttesterSlashing{ @@ -387,7 +398,7 @@ func GenerateAttestations(bState *stateTrie.BeaconState, privs []*bls.SecretKey, ) } - domain, err := helpers.Domain(bState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconAttester) + domain, err := helpers.Domain(bState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconAttester, bState.GenesisValidatorRoot()) if err != nil { return nil, err } @@ -408,7 +419,7 @@ func GenerateAttestations(bState *stateTrie.BeaconState, privs []*bls.SecretKey, }, } - dataRoot, err := ssz.HashTreeRoot(attData) + dataRoot, err := helpers.ComputeSigningRoot(attData, domain) if err != nil { return nil, err } @@ -420,7 +431,7 @@ func GenerateAttestations(bState *stateTrie.BeaconState, privs []*bls.SecretKey, sigs := []*bls.Signature{} for b := i; b < i+bitsPerAtt; b++ { aggregationBits.SetBitAt(b, true) - sigs = append(sigs, privs[committee[b]].Sign(dataRoot[:], domain)) + sigs = append(sigs, privs[committee[b]].Sign(dataRoot[:])) } // bls.AggregateSignatures will return nil if sigs is 0. 
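The signing changes in the hunks above all follow the same v0.11.1 flow: a fork-aware domain is derived with the genesis validators root, folded into the message via a signing root, and Sign is called on that root with no domain argument. A minimal self-contained sketch of that flow, assuming the helpers used in this diff; the function name and the choice of attestation data are illustrative:

package example

import (
    ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/shared/bls"
    "github.com/prysmaticlabs/prysm/shared/params"
)

// signAttestationData sketches the new flow: domain -> signing root -> Sign(root).
func signAttestationData(st *stateTrie.BeaconState, priv *bls.SecretKey, data *ethpb.AttestationData) ([]byte, error) {
    domain, err := helpers.Domain(st.Fork(), helpers.CurrentEpoch(st), params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
    if err != nil {
        return nil, err
    }
    root, err := helpers.ComputeSigningRoot(data, domain)
    if err != nil {
        return nil, err
    }
    // The domain is no longer passed to Sign; it is already part of the signing root.
    return priv.Sign(root[:]).Marshal(), nil
}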
@@ -478,15 +489,15 @@ func generateVoluntaryExits( ValidatorIndex: valIndex, }, } - root, err := ssz.HashTreeRoot(exit.Exit) + domain, err := helpers.Domain(bState.Fork(), currentEpoch, params.BeaconConfig().DomainVoluntaryExit, bState.GenesisValidatorRoot()) if err != nil { return nil, err } - domain, err := helpers.Domain(bState.Fork(), currentEpoch, params.BeaconConfig().DomainVoluntaryExit) + root, err := helpers.ComputeSigningRoot(exit.Exit, domain) if err != nil { return nil, err } - exit.Signature = privs[valIndex].Sign(root[:], domain).Marshal() + exit.Signature = privs[valIndex].Sign(root[:]).Marshal() voluntaryExits[i] = exit } return voluntaryExits, nil diff --git a/shared/testutil/block_test.go b/shared/testutil/block_test.go index 78d1322036..29b2d4967a 100644 --- a/shared/testutil/block_test.go +++ b/shared/testutil/block_test.go @@ -91,7 +91,7 @@ func TestGenerateFullBlock_ValidProposerSlashings(t *testing.T) { t.Fatal(err) } - slashableIndice := block.Block.Body.ProposerSlashings[0].ProposerIndex + slashableIndice := block.Block.Body.ProposerSlashings[0].Header_1.Header.ProposerIndex if val, err := beaconState.ValidatorAtIndexReadOnly(slashableIndice); err != nil || !val.Slashed() { if err != nil { t.Fatal(err) diff --git a/shared/testutil/deposits.go b/shared/testutil/deposits.go index 89af3dc367..0c47c44e4b 100644 --- a/shared/testutil/deposits.go +++ b/shared/testutil/deposits.go @@ -7,8 +7,10 @@ import ( "github.com/pkg/errors" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-ssz" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/state" stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/bls" "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/hashutil" @@ -64,12 +66,19 @@ func DeterministicDepositsAndKeys(numDeposits uint64) ([]*ethpb.Deposit, []*bls. 
WithdrawalCredentials: withdrawalCreds[:], } - domain := bls.ComputeDomain(params.BeaconConfig().DomainDeposit) + domain, err := helpers.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil) + if err != nil { + return nil, nil, errors.Wrap(err, "could not compute domain") + } root, err := ssz.SigningRoot(depositData) if err != nil { return nil, nil, errors.Wrap(err, "could not get signing root of deposit data") } - depositData.Signature = secretKeys[i].Sign(root[:], domain).Marshal() + sigRoot, err := ssz.HashTreeRoot(&pb.SigningRoot{ObjectRoot: root[:], Domain: domain}) + if err != nil { + return nil, nil, err + } + depositData.Signature = secretKeys[i].Sign(sigRoot[:]).Marshal() deposit := ðpb.Deposit{ Data: depositData, @@ -156,6 +165,7 @@ func DeterministicGenesisState(t testing.TB, numValidators uint64) (*stateTrie.B if err != nil { t.Fatal(errors.Wrapf(err, "failed to get genesis beacon state of %d validators", numValidators)) } + ResetCache() return beaconState, privKeys } @@ -228,13 +238,20 @@ func DeterministicDepositsAndKeysSameValidator(numDeposits uint64) ([]*ethpb.Dep WithdrawalCredentials: withdrawalCreds[:], } - domain := bls.ComputeDomain(params.BeaconConfig().DomainDeposit) + domain, err := helpers.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil) + if err != nil { + return nil, nil, errors.Wrap(err, "could not compute domain") + } root, err := ssz.SigningRoot(depositData) if err != nil { return nil, nil, errors.Wrap(err, "could not get signing root of deposit data") } + sigRoot, err := ssz.HashTreeRoot(&pb.SigningRoot{ObjectRoot: root[:], Domain: domain}) + if err != nil { + return nil, nil, errors.Wrap(err, "could not get signing root of deposit data and domain") + } // Always use the same validator to sign - depositData.Signature = secretKeys[1].Sign(root[:], domain).Marshal() + depositData.Signature = secretKeys[1].Sign(sigRoot[:]).Marshal() deposit := ðpb.Deposit{ Data: depositData, diff --git a/shared/testutil/deposits_test.go b/shared/testutil/deposits_test.go index 05ac9bbe2e..1fa0b7d820 100644 --- a/shared/testutil/deposits_test.go +++ b/shared/testutil/deposits_test.go @@ -8,7 +8,9 @@ import ( ) func TestSetupInitialDeposits_1024Entries(t *testing.T) { + t.Skip("To be resolved until 5119 gets in") entries := 1 + ResetCache() deposits, privKeys, err := DeterministicDepositsAndKeys(uint64(entries)) if err != nil { t.Fatal(err) @@ -32,16 +34,20 @@ func TestSetupInitialDeposits_1024Entries(t *testing.T) { if !bytes.Equal(deposits[0].Data.WithdrawalCredentials, expectedWithdrawalCredentialsAt0) { t.Fatalf("incorrect withdrawal credentials, wanted %x but received %x", expectedWithdrawalCredentialsAt0, deposits[0].Data.WithdrawalCredentials) } - expectedDepositDataRootAt0 := []byte{0xc2, 0x58, 0x8b, 0xb0, 0x44, 0xf5, 0xe8, 0xaf, 0xb9, 0xb1, 0xcc, 0xb7, 0xe0, 0x83, 0x30, 0x35, 0x83, 0x18, 0xf2, 0x56, 0x27, 0x96, 0xfa, 0xad, 0xce, 0x92, 0x03, 0x50, 0x64, 0xaa, 0xf1, 0x3d} + + expectedDepositDataRootAt0 := []byte{0xe4, 0x55, 0x29, 0x79, 0x1d, 0x1e, 0xbd, 0x81, 0x2a, 0x48, 0x42, 0xe3, 0x94, 0xc0, 0x38, 0xf3, 0x24, 0x2a, 0xe2, 0x3b, 0xae, 0xa4, 0xf8, 0x0b, 0x74, 0xf4, 0xbd, 0xdd, 0xbe, 0xbb, 0xd6, 0x22} if !bytes.Equal(depositDataRoots[0][:], expectedDepositDataRootAt0) { t.Fatalf("incorrect deposit data root, wanted %x but received %x", expectedDepositDataRootAt0, depositDataRoots[0]) } - expectedSignatureAt0 := []byte{0xb3, 0xb9, 0x6e, 0xba, 0x50, 0xfa, 0x47, 0x49, 0x26, 0xfa, 0x46, 0xbb, 0xea, 0x3c, 0x8c, 0x73, 0x4c, 0x85, 0xc9, 0x70, 0x4e, 0x54, 0xb7, 
0x19, 0xe5, 0x4e, 0x1b, 0xc5, 0x83, 0x77, 0xdd, 0x00, 0x30, 0x0b, 0x9e, 0xe4, 0xb0, 0x5b, 0xb2, 0x7b, 0x81, 0x8b, 0x38, 0xeb, 0xa2, 0x89, 0xcb, 0xe0, 0x06, 0x7a, 0x34, 0x56, 0xbc, 0xb8, 0xad, 0x59, 0xd0, 0x17, 0xfc, 0xf0, 0x04, 0xe5, 0xf1, 0xc5, 0xff, 0x1b, 0xf2, 0xe4, 0x89, 0x6b, 0x53, 0x2f, 0x4a, 0xea, 0x4b, 0x4c, 0x47, 0x06, 0x9a, 0x26, 0xe3, 0x85, 0x98, 0xf3, 0xd3, 0x37, 0x04, 0x7b, 0x8d, 0x0b, 0xd5, 0x25, 0xe4, 0x9f, 0xfc, 0xd2} + + expectedSignatureAt0 := []byte{0xa7, 0xe5, 0x53, 0xde, 0x1b, 0x2a, 0x0c, 0x6f, 0x9f, 0xe1, 0x01, 0x44, 0x9d, 0x54, 0x55, 0x27, 0x55, 0xc6, 0x51, 0x01, 0xe2, 0x77, 0x0d, 0xfe, 0x10, 0xda, 0x1c, 0x34, 0x29, 0xc4, 0xc9, 0x94, 0x98, 0xd6, 0x69, 0x10, 0x09, 0x57, 0x91, 0x05, 0x89, 0xc0, 0x82, 0xdf, 0x53, 0x0f, 0x37, 0x49, 0x12, 0x1c, 0xc0, 0x70, 0x3b, 0x57, + 0x0f, 0x61, 0x7d, 0x39, 0x89, 0x35, 0xef, 0x8f, 0x4e, 0xe0, 0x25, 0x8b, 0xac, 0x17, 0xa6, 0xb4, 0x43, 0xe9, 0xba, 0xdd, 0x12, 0x80, 0x5b, 0x50, 0xa6, 0xe2, 0xab, 0x93, 0x80, 0xaf, 0x79, 0xe1, 0x7e, 0x14, 0x75, 0x94, 0x7c, 0xe3, 0xf3, 0x52, 0x7e, 0xf8} if !bytes.Equal(deposits[0].Data.Signature, expectedSignatureAt0) { t.Fatalf("incorrect signature, wanted %x but received %x", expectedSignatureAt0, deposits[0].Data.Signature) } entries = 1024 + ResetCache() deposits, privKeys, err = DeterministicDepositsAndKeys(uint64(entries)) if err != nil { t.Fatal(err) @@ -77,11 +83,11 @@ func TestSetupInitialDeposits_1024Entries(t *testing.T) { if !bytes.Equal(deposits[1023].Data.WithdrawalCredentials, expectedWithdrawalCredentialsAt1023) { t.Fatalf("incorrect withdrawal credentials, wanted %x but received %x", expectedWithdrawalCredentialsAt1023, deposits[1023].Data.WithdrawalCredentials) } - expectedDepositDataRootAt1023 := []byte{0x54, 0x45, 0x80, 0xf3, 0xc3, 0x87, 0xdd, 0xfb, 0x1f, 0xf7, 0x03, 0xab, 0x15, 0xc9, 0x5b, 0x56, 0x2c, 0x29, 0x04, 0x7b, 0x17, 0xb4, 0xa0, 0x19, 0x69, 0xd6, 0x45, 0x7d, 0xec, 0x4e, 0x87, 0xfc} + expectedDepositDataRootAt1023 := []byte{0xec, 0xd3, 0x60, 0x05, 0x49, 0xdc, 0x62, 0xb3, 0xe9, 0x55, 0xb3, 0x7e, 0x40, 0xab, 0x17, 0x9b, 0x5c, 0x62, 0x02, 0x99, 0xa9, 0x59, 0x5c, 0x99, 0xc8, 0xd3, 0xd5, 0xed, 0xc1, 0x51, 0xc8, 0x54} if !bytes.Equal(depositDataRoots[1023][:], expectedDepositDataRootAt1023) { t.Fatalf("incorrect deposit data root, wanted %x but received %x", expectedDepositDataRootAt1023, depositDataRoots[1023]) } - expectedSignatureAt1023 := []byte{0xa2, 0xad, 0x23, 0x3b, 0x6d, 0xa0, 0xd9, 0xf8, 0xb4, 0xac, 0xe0, 0xc9, 0xae, 0x25, 0x81, 0xfb, 0xca, 0x2d, 0x0a, 0xed, 0x6a, 0xdc, 0xd6, 0xda, 0x49, 0x0a, 0x75, 0xab, 0x3a, 0x3c, 0xc6, 0x37, 0xec, 0x65, 0xe3, 0x3d, 0xbc, 0x00, 0xad, 0xd8, 0x5f, 0x1e, 0x7b, 0x93, 0xcd, 0x63, 0x74, 0x8e, 0x0c, 0x28, 0x60, 0x4f, 0x99, 0x33, 0x6a, 0x29, 0x21, 0x57, 0xb6, 0xe0, 0x45, 0x9f, 0xaa, 0x10, 0xe9, 0x78, 0x02, 0x01, 0x68, 0x65, 0xcf, 0x6a, 0x4c, 0x2a, 0xd5, 0x5f, 0x37, 0xa1, 0x66, 0x05, 0x2b, 0x55, 0x86, 0xe7, 0x68, 0xb7, 0xfd, 0x76, 0xd5, 0x91, 0x3e, 0xeb, 0x6e, 0x46, 0x3f, 0x6d} + expectedSignatureAt1023 := []byte{0xac, 0xd2, 0xa6, 0x84, 0x07, 0x73, 0x23, 0x18, 0x8b, 0x77, 0x71, 0xd0, 0x0c, 0x98, 0x76, 0x1e, 0xc8, 0x88, 0xd0, 0x0e, 0x6c, 0xee, 0xe0, 0x55, 0xa4, 0x18, 0x98, 0x18, 0xb9, 0x02, 0x75, 0xee, 0xd3, 0xe6, 0xe1, 0x36, 0xed, 0x08, 0xfa, 0x19, 0x2f, 0x79, 0x12, 0x58, 0x11, 0x99, 0x50, 0x9a, 0x19, 0x78, 0x6b, 0x50, 0x14, 0xb2, 0x4b, 0x7f, 0xf5, 0xee, 0x3d, 0x96, 0xc8, 0xfc, 0x9e, 0x6b, 0x79, 0x63, 0x39, 0x2d, 0xb4, 0x74, 0xb0, 0xca, 0xb7, 0xd9, 0x21, 0x0d, 0x90, 0x79, 0x0c, 0xa7, 0x6b, 0xe9, 0x80, 0x3a, 0xe7, 0x54, 0x47, 0x38, 0xec, 0x5a, 0x71, 
0xfc, 0x89, 0x03, 0x2e, 0xd1} if !bytes.Equal(deposits[1023].Data.Signature, expectedSignatureAt1023) { t.Fatalf("incorrect signature, wanted %x but received %x", expectedSignatureAt1023, deposits[1023].Data.Signature) } diff --git a/shared/testutil/helpers.go b/shared/testutil/helpers.go index 2dc34f8c7c..d05dd01ccc 100644 --- a/shared/testutil/helpers.go +++ b/shared/testutil/helpers.go @@ -8,8 +8,6 @@ import ( "github.com/pkg/errors" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-ssz" - "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/state" stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" @@ -26,12 +24,16 @@ func RandaoReveal(beaconState *stateTrie.BeaconState, epoch uint64, privKeys []* } buf := make([]byte, 32) binary.LittleEndian.PutUint64(buf, epoch) - domain, err := helpers.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainRandao) + domain, err := helpers.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorRoot()) if err != nil { return nil, err } + root, err := helpers.ComputeSigningRoot(epoch, domain) + if err != nil { + return nil, errors.Wrap(err, "could not compute signing root of epoch") + } // We make the previous validator's index sign the message instead of the proposer. - epochSignature := privKeys[proposerIdx].Sign(buf, domain) + epochSignature := privKeys[proposerIdx].Sign(root[:]) return epochSignature.Marshal(), nil } @@ -47,8 +49,11 @@ func BlockSignature( return nil, err } block.StateRoot = s[:] - - blockRoot, err := ssz.HashTreeRoot(block) + domain, err := helpers.Domain(bState.Fork(), helpers.CurrentEpoch(bState), params.BeaconConfig().DomainBeaconProposer, bState.GenesisValidatorRoot()) + if err != nil { + return nil, err + } + blockRoot, err := helpers.ComputeSigningRoot(block, domain) if err != nil { return nil, err } @@ -62,14 +67,10 @@ func BlockSignature( if err != nil { return nil, err } - domain, err := helpers.Domain(bState.Fork(), helpers.CurrentEpoch(bState), params.BeaconConfig().DomainBeaconProposer) - if err != nil { - return nil, err - } if err := bState.SetSlot(currentSlot); err != nil { return nil, err } - return privKeys[proposerIdx].Sign(blockRoot[:], domain), nil + return privKeys[proposerIdx].Sign(blockRoot[:]), nil } // Random32Bytes generates a random 32 byte slice. 
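RandaoReveal and BlockSignature above now build their signing roots from the fork-aware domain instead of signing the raw epoch buffer or block root. A minimal usage sketch under the same assumptions as this package's own tests; the validator count and the length check are illustrative:

package testutil_test

import (
    "testing"

    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/shared/testutil"
)

func TestRandaoRevealSketch(t *testing.T) {
    // Deterministic state and keys, as used elsewhere in this package's tests.
    beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
    epoch := helpers.CurrentEpoch(beaconState)

    reveal, err := testutil.RandaoReveal(beaconState, epoch, privKeys)
    if err != nil {
        t.Fatal(err)
    }
    // A marshalled BLS signature is 96 bytes.
    if len(reveal) != 96 {
        t.Fatalf("expected a 96-byte signature, got %d bytes", len(reveal))
    }
}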
diff --git a/shared/testutil/helpers_test.go b/shared/testutil/helpers_test.go index 5c4cff2756..7b14db524f 100644 --- a/shared/testutil/helpers_test.go +++ b/shared/testutil/helpers_test.go @@ -5,7 +5,6 @@ import ( "encoding/binary" "testing" - "github.com/prysmaticlabs/go-ssz" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/shared/params" ) @@ -24,19 +23,21 @@ func TestBlockSignature(t *testing.T) { if err != nil { t.Error(err) } + if err := beaconState.SetSlot(beaconState.Slot() - 1); err != nil { - t.Fatal(err) - } - signingRoot, err := ssz.HashTreeRoot(block.Block) - if err != nil { t.Error(err) } epoch := helpers.SlotToEpoch(block.Block.Slot) - domain, err := helpers.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainBeaconProposer) + domain, err := helpers.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot()) if err != nil { t.Fatal(err) } - blockSig := privKeys[proposerIdx].Sign(signingRoot[:], domain).Marshal() + signingRoot, err := helpers.ComputeSigningRoot(block.Block, domain) + if err != nil { + t.Error(err) + } + + blockSig := privKeys[proposerIdx].Sign(signingRoot[:]).Marshal() signature, err := BlockSignature(beaconState, block.Block, privKeys) if err != nil { @@ -63,12 +64,16 @@ func TestRandaoReveal(t *testing.T) { } buf := make([]byte, 32) binary.LittleEndian.PutUint64(buf, epoch) - domain, err := helpers.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainRandao) + domain, err := helpers.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorRoot()) + if err != nil { + t.Fatal(err) + } + root, err := helpers.ComputeSigningRoot(epoch, domain) if err != nil { t.Fatal(err) } // We make the previous validator's index sign the message instead of the proposer. 
- epochSignature := privKeys[proposerIdx].Sign(buf, domain).Marshal() + epochSignature := privKeys[proposerIdx].Sign(root[:]).Marshal() if !bytes.Equal(randaoReveal[:], epochSignature[:]) { t.Errorf("Expected randao reveals to be equal, received %#x != %#x", randaoReveal[:], epochSignature[:]) diff --git a/shared/testutil/spectest.go b/shared/testutil/spectest.go index 32e4d03cc0..6bc5bb56a9 100644 --- a/shared/testutil/spectest.go +++ b/shared/testutil/spectest.go @@ -116,8 +116,8 @@ func RunBlockOperationTest( t.Fatalf("Failed to unmarshal: %v", err) } - if !proto.Equal(beaconState.CloneInnerState(), postBeaconState) { - diff, _ := messagediff.PrettyDiff(beaconState, postBeaconState) + if !proto.Equal(beaconState.InnerStateUnsafe(), postBeaconState) { + diff, _ := messagediff.PrettyDiff(beaconState.InnerStateUnsafe(), postBeaconState) t.Log(diff) t.Fatal("Post state does not match expected") } diff --git a/shared/testutil/state.go b/shared/testutil/state.go new file mode 100644 index 0000000000..6ff25932f9 --- /dev/null +++ b/shared/testutil/state.go @@ -0,0 +1,53 @@ +package testutil + +import ( + ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" + "github.com/prysmaticlabs/go-bitfield" + stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" + "github.com/prysmaticlabs/prysm/shared/params" +) + +var st, _ = stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{ + BlockRoots: filledByteSlice2D(params.BeaconConfig().SlotsPerHistoricalRoot, 32), + StateRoots: filledByteSlice2D(params.BeaconConfig().SlotsPerHistoricalRoot, 32), + Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector), + RandaoMixes: filledByteSlice2D(params.BeaconConfig().EpochsPerHistoricalVector, 32), + Validators: make([]*ethpb.Validator, 0), + CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, + Eth1Data: &ethpb.Eth1Data{ + DepositRoot: make([]byte, 32), + BlockHash: make([]byte, 32), + }, + Fork: &pb.Fork{ + PreviousVersion: make([]byte, 4), + CurrentVersion: make([]byte, 4), + }, + Eth1DataVotes: make([]*ethpb.Eth1Data, 0), + HistoricalRoots: make([][]byte, 0), + JustificationBits: bitfield.Bitvector4{0x0}, + FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, + LatestBlockHeader: &ethpb.BeaconBlockHeader{ + ParentRoot: make([]byte, 32), + StateRoot: make([]byte, 32), + BodyRoot: make([]byte, 32), + }, + PreviousEpochAttestations: make([]*pb.PendingAttestation, 0), + CurrentEpochAttestations: make([]*pb.PendingAttestation, 0), + PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, +}) + +// NewBeaconState creates a beacon state with minimum marshalable fields. +func NewBeaconState() *stateTrie.BeaconState { + return st.Copy() +} + +// SSZ will fill 2D byte slices with their respective values, so we must fill these in too for round +// trip testing.
+func filledByteSlice2D(len, innerLen uint64) [][]byte { + b := make([][]byte, len) + for i := uint64(0); i < len; i++ { + b[i] = make([]byte, innerLen) + } + return b +} diff --git a/shared/testutil/state_test.go b/shared/testutil/state_test.go new file mode 100644 index 0000000000..7aa4ed4b75 --- /dev/null +++ b/shared/testutil/state_test.go @@ -0,0 +1,23 @@ +package testutil + +import ( + "reflect" + "testing" + + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" +) + +func TestNewBeaconState(t *testing.T) { + st := NewBeaconState() + b, err := st.InnerStateUnsafe().MarshalSSZ() + if err != nil { + t.Fatal(err) + } + got := &pb.BeaconState{} + if err := got.UnmarshalSSZ(b); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(st.InnerStateUnsafe(), got) { + t.Fatal("State did not match after round trip marshal") + } +} diff --git a/slasher/beaconclient/BUILD.bazel b/slasher/beaconclient/BUILD.bazel index 499ea9adfd..6759712b4d 100644 --- a/slasher/beaconclient/BUILD.bazel +++ b/slasher/beaconclient/BUILD.bazel @@ -30,7 +30,9 @@ go_library( "@io_opencensus_go//plugin/ocgrpc:go_default_library", "@io_opencensus_go//trace:go_default_library", "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", "@org_golang_google_grpc//credentials:go_default_library", + "@org_golang_google_grpc//status:go_default_library", ], ) diff --git a/slasher/beaconclient/receivers.go b/slasher/beaconclient/receivers.go index 545d4a695e..6cbd0c5faf 100644 --- a/slasher/beaconclient/receivers.go +++ b/slasher/beaconclient/receivers.go @@ -2,6 +2,7 @@ package beaconclient import ( "context" + "errors" "io" "time" @@ -9,8 +10,14 @@ import ( ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/sirupsen/logrus" "go.opencensus.io/trace" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) +// reconnectPeriod is the frequency at which we try to restart our +// streams when the beacon chain node does not respond. +var reconnectPeriod = 5 * time.Second + // receiveBlocks starts a gRPC client stream listener to obtain // blocks from the beacon node. Upon receiving a block, the service // broadcasts it to a feed for other services in slasher to subscribe to. @@ -34,7 +41,23 @@ func (bs *Service) receiveBlocks(ctx context.Context) { return } if err != nil { - log.WithError(err).Error("Could not receive block from beacon node") + if e, ok := status.FromError(err); ok { + switch e.Code() { + case codes.Canceled: + stream, err = bs.restartBlockStream(ctx) + if err != nil { + log.WithError(err).Error("Could not restart stream") + return + } + break + default: + log.WithError(err).Errorf("Could not receive block from beacon node. rpc status: %v", e.Code()) + return + } + } else { + log.WithError(err).Error("Could not receive blocks from beacon node") + return + } } if res == nil { continue @@ -70,8 +93,23 @@ func (bs *Service) receiveAttestations(ctx context.Context) { return } if err != nil { - log.WithError(err).Error("Could not receive attestations from beacon node") - continue + if e, ok := status.FromError(err); ok { + switch e.Code() { + case codes.Canceled: + stream, err = bs.restartIndexedAttestationStream(ctx) + if err != nil { + log.WithError(err).Error("Could not restart stream") + return + } + break + default: + log.WithError(err).Errorf("Could not receive attestations from beacon node.
rpc status: %v", e.Code()) + return + } + } else { + log.WithError(err).Error("Could not receive attestations from beacon node") + return + } } if res == nil { continue @@ -119,3 +157,43 @@ func (bs *Service) collectReceivedAttestations(ctx context.Context) { } } } + +func (bs *Service) restartIndexedAttestationStream(ctx context.Context) (ethpb.BeaconChain_StreamIndexedAttestationsClient, error) { + ticker := time.NewTicker(reconnectPeriod) + for { + select { + case <-ticker.C: + log.Info("Context closed, attempting to restart attestation stream") + stream, err := bs.beaconClient.StreamIndexedAttestations(ctx, &ptypes.Empty{}) + if err != nil { + continue + } + log.Info("Attestation stream restarted...") + return stream, nil + case <-ctx.Done(): + log.Debug("Context closed, exiting reconnect routine") + return nil, errors.New("context closed, no longer attempting to restart stream") + } + } + +} + +func (bs *Service) restartBlockStream(ctx context.Context) (ethpb.BeaconChain_StreamBlocksClient, error) { + ticker := time.NewTicker(reconnectPeriod) + for { + select { + case <-ticker.C: + log.Info("Context closed, attempting to restart block stream") + stream, err := bs.beaconClient.StreamBlocks(ctx, &ptypes.Empty{}) + if err != nil { + continue + } + log.Info("Block stream restarted...") + return stream, nil + case <-ctx.Done(): + log.Debug("Context closed, exiting reconnect routine") + return nil, errors.New("context closed, no longer attempting to restart stream") + } + } + +} diff --git a/slasher/beaconclient/submit_test.go b/slasher/beaconclient/submit_test.go index 8cceedbdb3..f9d2463c28 100644 --- a/slasher/beaconclient/submit_test.go +++ b/slasher/beaconclient/submit_test.go @@ -24,16 +24,17 @@ func TestService_SubscribeDetectedProposerSlashings(t *testing.T) { } slashing := ðpb.ProposerSlashing{ - ProposerIndex: 5, Header_1: ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 5, + ProposerIndex: 5, + Slot: 5, }, Signature: make([]byte, 96), }, Header_2: ðpb.SignedBeaconBlockHeader{ Header: ðpb.BeaconBlockHeader{ - Slot: 5, + ProposerIndex: 5, + Slot: 5, }, Signature: make([]byte, 96), }, diff --git a/slasher/db/iface/interface.go b/slasher/db/iface/interface.go index 00c81a2771..441d18732a 100644 --- a/slasher/db/iface/interface.go +++ b/slasher/db/iface/interface.go @@ -50,8 +50,8 @@ type WriteAccessDatabase interface { SetLatestEpochDetected(ctx context.Context, epoch uint64) error // BlockHeader related methods. - SaveBlockHeader(ctx context.Context, validatorID uint64, blockHeader *ethpb.SignedBeaconBlockHeader) error - DeleteBlockHeader(ctx context.Context, validatorID uint64, blockHeader *ethpb.SignedBeaconBlockHeader) error + SaveBlockHeader(ctx context.Context, blockHeader *ethpb.SignedBeaconBlockHeader) error + DeleteBlockHeader(ctx context.Context, blockHeader *ethpb.SignedBeaconBlockHeader) error PruneBlockHistory(ctx context.Context, currentEpoch uint64, pruningEpochAge uint64) error // IndexedAttestations related methods. diff --git a/slasher/db/kv/block_header.go b/slasher/db/kv/block_header.go index ee498fdf0f..f841e69a17 100644 --- a/slasher/db/kv/block_header.go +++ b/slasher/db/kv/block_header.go @@ -26,15 +26,15 @@ func unmarshalBlockHeader(ctx context.Context, enc []byte) (*ethpb.SignedBeaconB return protoBlockHeader, nil } -// BlockHeaders accepts an epoch and validator id and returns the corresponding block header array. +// BlockHeaders accepts an slot and validator id and returns the corresponding block header array. 
// Returns nil if the block header for those values does not exist. -func (db *Store) BlockHeaders(ctx context.Context, epoch uint64, validatorID uint64) ([]*ethpb.SignedBeaconBlockHeader, error) { +func (db *Store) BlockHeaders(ctx context.Context, slot uint64, validatorID uint64) ([]*ethpb.SignedBeaconBlockHeader, error) { ctx, span := trace.StartSpan(ctx, "slasherDB.BlockHeaders") defer span.End() var blockHeaders []*ethpb.SignedBeaconBlockHeader err := db.view(func(tx *bolt.Tx) error { c := tx.Bucket(historicBlockHeadersBucket).Cursor() - prefix := encodeEpochValidatorID(epoch, validatorID) + prefix := encodeSlotValidatorID(slot, validatorID) for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { bh, err := unmarshalBlockHeader(ctx, v) if err != nil { @@ -47,11 +47,11 @@ func (db *Store) BlockHeaders(ctx context.Context, epoch uint64, validatorID uin return blockHeaders, err } -// HasBlockHeader accepts an epoch and validator id and returns true if the block header exists. -func (db *Store) HasBlockHeader(ctx context.Context, epoch uint64, validatorID uint64) bool { +// HasBlockHeader accepts a slot and validator id and returns true if the block header exists. +func (db *Store) HasBlockHeader(ctx context.Context, slot uint64, validatorID uint64) bool { ctx, span := trace.StartSpan(ctx, "slasherDB.HasBlockHeader") defer span.End() - prefix := encodeEpochValidatorID(epoch, validatorID) + prefix := encodeSlotValidatorID(slot, validatorID) var hasBlockHeader bool if err := db.view(func(tx *bolt.Tx) error { c := tx.Bucket(historicBlockHeadersBucket).Cursor() @@ -69,11 +69,11 @@ func (db *Store) HasBlockHeader(ctx context.Context, epoch uint64, validatorID u } // SaveBlockHeader accepts a block header and writes it to disk. -func (db *Store) SaveBlockHeader(ctx context.Context, validatorID uint64, blockHeader *ethpb.SignedBeaconBlockHeader) error { +func (db *Store) SaveBlockHeader(ctx context.Context, blockHeader *ethpb.SignedBeaconBlockHeader) error { ctx, span := trace.StartSpan(ctx, "slasherDB.SaveBlockHeader") defer span.End() epoch := helpers.SlotToEpoch(blockHeader.Header.Slot) - key := encodeEpochValidatorIDSig(epoch, validatorID, blockHeader.Signature) + key := encodeSlotValidatorIDSig(blockHeader.Header.Slot, blockHeader.Header.ProposerIndex, blockHeader.Signature) enc, err := proto.Marshal(blockHeader) if err != nil { return errors.Wrap(err, "failed to encode block") @@ -98,12 +98,11 @@ func (db *Store) SaveBlockHeader(ctx context.Context, validatorID uint64, blockH return nil } -// DeleteBlockHeader deletes a block header using the epoch and validator id. -func (db *Store) DeleteBlockHeader(ctx context.Context, validatorID uint64, blockHeader *ethpb.SignedBeaconBlockHeader) error { +// DeleteBlockHeader deletes a block header using the slot and validator id. 
+func (db *Store) DeleteBlockHeader(ctx context.Context, blockHeader *ethpb.SignedBeaconBlockHeader) error { ctx, span := trace.StartSpan(ctx, "slasherDB.DeleteBlockHeader") defer span.End() - epoch := helpers.SlotToEpoch(blockHeader.Header.Slot) - key := encodeEpochValidatorIDSig(epoch, validatorID, blockHeader.Signature) + key := encodeSlotValidatorIDSig(blockHeader.Header.Slot, blockHeader.Header.ProposerIndex, blockHeader.Signature) return db.update(func(tx *bolt.Tx) error { bucket := tx.Bucket(historicBlockHeadersBucket) if err := bucket.Delete(key); err != nil { @@ -121,10 +120,11 @@ func (db *Store) PruneBlockHistory(ctx context.Context, currentEpoch uint64, pru if pruneTill <= 0 { return nil } + pruneTillSlot := uint64(pruneTill) * params.BeaconConfig().SlotsPerEpoch return db.update(func(tx *bolt.Tx) error { bucket := tx.Bucket(historicBlockHeadersBucket) c := tx.Bucket(historicBlockHeadersBucket).Cursor() - for k, _ := c.First(); k != nil && bytesutil.FromBytes8(k[:8]) <= uint64(pruneTill); k, _ = c.Next() { + for k, _ := c.First(); k != nil && bytesutil.FromBytes8(k[:8]) <= pruneTillSlot; k, _ = c.Next() { if err := bucket.Delete(k); err != nil { return errors.Wrap(err, "failed to delete the block header from historical bucket") } diff --git a/slasher/db/kv/block_header_test.go b/slasher/db/kv/block_header_test.go index 658b03eecf..4eddf86798 100644 --- a/slasher/db/kv/block_header_test.go +++ b/slasher/db/kv/block_header_test.go @@ -19,15 +19,15 @@ func TestNilDBHistoryBlkHdr(t *testing.T) { defer teardownDB(t, db) ctx := context.Background() - epoch := uint64(1) + slot := uint64(1) validatorID := uint64(1) - hasBlockHeader := db.HasBlockHeader(ctx, epoch, validatorID) + hasBlockHeader := db.HasBlockHeader(ctx, slot, validatorID) if hasBlockHeader { t.Fatal("HasBlockHeader should return false") } - bPrime, err := db.BlockHeaders(ctx, epoch, validatorID) + bPrime, err := db.BlockHeaders(ctx, slot, validatorID) if err != nil { t.Fatalf("failed to get block: %v", err) } @@ -43,30 +43,29 @@ func TestSaveHistoryBlkHdr(t *testing.T) { ctx := context.Background() tests := []struct { - vID uint64 - bh *ethpb.SignedBeaconBlockHeader + bh *ethpb.SignedBeaconBlockHeader }{ { - vID: uint64(0), - bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in"), Header: ðpb.BeaconBlockHeader{Slot: 0}}, + bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in"), Header: ðpb.BeaconBlockHeader{Slot: 0, ProposerIndex: 0}}, }, { - vID: uint64(1), - bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 2nd"), Header: ðpb.BeaconBlockHeader{Slot: 0}}, + bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 2nd"), Header: ðpb.BeaconBlockHeader{Slot: 0, ProposerIndex: 1}}, }, { - vID: uint64(0), - bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 3rd"), Header: ðpb.BeaconBlockHeader{Slot: params.BeaconConfig().SlotsPerEpoch + 1}}, + bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 3rd"), Header: ðpb.BeaconBlockHeader{Slot: params.BeaconConfig().SlotsPerEpoch + 1, ProposerIndex: 0}}, + }, + { + bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 3rd"), Header: ðpb.BeaconBlockHeader{Slot: 1, ProposerIndex: 0}}, }, } for _, tt := range tests { - err := db.SaveBlockHeader(ctx, tt.vID, tt.bh) + err := db.SaveBlockHeader(ctx, tt.bh) if err != nil { t.Fatalf("save block failed: %v", err) } - bha, err := db.BlockHeaders(ctx, helpers.SlotToEpoch(tt.bh.Header.Slot), tt.vID) + bha, err := db.BlockHeaders(ctx, tt.bh.Header.Slot, tt.bh.Header.ProposerIndex) if err 
!= nil { t.Fatalf("failed to get block: %v", err) } @@ -86,32 +85,28 @@ func TestDeleteHistoryBlkHdr(t *testing.T) { ctx := context.Background() tests := []struct { - vID uint64 - bh *ethpb.SignedBeaconBlockHeader + bh *ethpb.SignedBeaconBlockHeader }{ { - vID: uint64(0), - bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in"), Header: ðpb.BeaconBlockHeader{Slot: 0}}, + bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in"), Header: ðpb.BeaconBlockHeader{Slot: 0, ProposerIndex: 0}}, }, { - vID: uint64(1), - bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 2nd"), Header: ðpb.BeaconBlockHeader{Slot: 0}}, + bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 2nd"), Header: ðpb.BeaconBlockHeader{Slot: 0, ProposerIndex: 1}}, }, { - vID: uint64(0), - bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 3rd"), Header: ðpb.BeaconBlockHeader{Slot: params.BeaconConfig().SlotsPerEpoch + 1}}, + bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 3rd"), Header: ðpb.BeaconBlockHeader{Slot: params.BeaconConfig().SlotsPerEpoch + 1, ProposerIndex: 0}}, }, } for _, tt := range tests { - err := db.SaveBlockHeader(ctx, tt.vID, tt.bh) + err := db.SaveBlockHeader(ctx, tt.bh) if err != nil { t.Fatalf("save block failed: %v", err) } } for _, tt := range tests { - bha, err := db.BlockHeaders(ctx, helpers.SlotToEpoch(tt.bh.Header.Slot), tt.vID) + bha, err := db.BlockHeaders(ctx, tt.bh.Header.Slot, tt.bh.Header.ProposerIndex) if err != nil { t.Fatalf("failed to get block: %v", err) } @@ -119,11 +114,11 @@ func TestDeleteHistoryBlkHdr(t *testing.T) { if bha == nil || !reflect.DeepEqual(bha[0], tt.bh) { t.Fatalf("get should return bh: %v", bha) } - err = db.DeleteBlockHeader(ctx, tt.vID, tt.bh) + err = db.DeleteBlockHeader(ctx, tt.bh) if err != nil { t.Fatalf("save block failed: %v", err) } - bh, err := db.BlockHeaders(ctx, helpers.SlotToEpoch(tt.bh.Header.Slot), tt.vID) + bh, err := db.BlockHeaders(ctx, tt.bh.Header.Slot, tt.bh.Header.ProposerIndex) if err != nil { t.Fatal(err) @@ -144,40 +139,39 @@ func TestHasHistoryBlkHdr(t *testing.T) { ctx := context.Background() tests := []struct { - vID uint64 - bh *ethpb.SignedBeaconBlockHeader + bh *ethpb.SignedBeaconBlockHeader }{ { - vID: uint64(0), - bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in"), Header: ðpb.BeaconBlockHeader{Slot: 0}}, + bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in"), Header: ðpb.BeaconBlockHeader{Slot: 0, ProposerIndex: 0}}, }, { - vID: uint64(1), - bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 2nd"), Header: ðpb.BeaconBlockHeader{Slot: 0}}, + bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 2nd"), Header: ðpb.BeaconBlockHeader{Slot: 0, ProposerIndex: 1}}, }, { - vID: uint64(0), - bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 3rd"), Header: ðpb.BeaconBlockHeader{Slot: params.BeaconConfig().SlotsPerEpoch + 1}}, + bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 3rd"), Header: ðpb.BeaconBlockHeader{Slot: params.BeaconConfig().SlotsPerEpoch + 1, ProposerIndex: 0}}, + }, + { + bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 4th"), Header: ðpb.BeaconBlockHeader{Slot: 1, ProposerIndex: 0}}, }, } for _, tt := range tests { - found := db.HasBlockHeader(ctx, helpers.SlotToEpoch(tt.bh.Header.Slot), tt.vID) + found := db.HasBlockHeader(ctx, tt.bh.Header.Slot, tt.bh.Header.ProposerIndex) if found { t.Fatal("has block header should return false for block headers that are not in db") } - err := 
db.SaveBlockHeader(ctx, tt.vID, tt.bh) + err := db.SaveBlockHeader(ctx, tt.bh) if err != nil { t.Fatalf("save block failed: %v", err) } } for _, tt := range tests { - err := db.SaveBlockHeader(ctx, tt.vID, tt.bh) + err := db.SaveBlockHeader(ctx, tt.bh) if err != nil { t.Fatalf("save block failed: %v", err) } - found := db.HasBlockHeader(ctx, helpers.SlotToEpoch(tt.bh.Header.Slot), tt.vID) + found := db.HasBlockHeader(ctx, tt.bh.Header.Slot, tt.bh.Header.ProposerIndex) if !found { t.Fatal("has block header should return true") @@ -193,38 +187,32 @@ func TestPruneHistoryBlkHdr(t *testing.T) { ctx := context.Background() tests := []struct { - vID uint64 - bh *ethpb.SignedBeaconBlockHeader + bh *ethpb.SignedBeaconBlockHeader }{ { - vID: uint64(0), - bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in"), Header: ðpb.BeaconBlockHeader{Slot: 0}}, + bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in"), Header: ðpb.BeaconBlockHeader{Slot: 0, ProposerIndex: 0}}, }, { - vID: uint64(1), - bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 2nd"), Header: ðpb.BeaconBlockHeader{Slot: 0}}, + bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 2nd"), Header: ðpb.BeaconBlockHeader{Slot: 0, ProposerIndex: 1}}, }, { - vID: uint64(0), - bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 3rd"), Header: ðpb.BeaconBlockHeader{Slot: params.BeaconConfig().SlotsPerEpoch + 1}}, + bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 3rd"), Header: ðpb.BeaconBlockHeader{Slot: params.BeaconConfig().SlotsPerEpoch + 1, ProposerIndex: 0}}, }, { - vID: uint64(0), - bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 4th"), Header: ðpb.BeaconBlockHeader{Slot: params.BeaconConfig().SlotsPerEpoch*2 + 1}}, + bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 4th"), Header: ðpb.BeaconBlockHeader{Slot: params.BeaconConfig().SlotsPerEpoch*2 + 1, ProposerIndex: 0}}, }, { - vID: uint64(0), - bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 5th"), Header: ðpb.BeaconBlockHeader{Slot: params.BeaconConfig().SlotsPerEpoch*3 + 1}}, + bh: ðpb.SignedBeaconBlockHeader{Signature: []byte("let me in 5th"), Header: ðpb.BeaconBlockHeader{Slot: params.BeaconConfig().SlotsPerEpoch*3 + 1, ProposerIndex: 0}}, }, } for _, tt := range tests { - err := db.SaveBlockHeader(ctx, tt.vID, tt.bh) + err := db.SaveBlockHeader(ctx, tt.bh) if err != nil { t.Fatalf("save block header failed: %v", err) } - bha, err := db.BlockHeaders(ctx, helpers.SlotToEpoch(tt.bh.Header.Slot), tt.vID) + bha, err := db.BlockHeaders(ctx, tt.bh.Header.Slot, tt.bh.Header.ProposerIndex) if err != nil { t.Fatalf("failed to get block header: %v", err) } @@ -241,11 +229,11 @@ func TestPruneHistoryBlkHdr(t *testing.T) { } for _, tt := range tests { - bha, err := db.BlockHeaders(ctx, helpers.SlotToEpoch(tt.bh.Header.Slot), tt.vID) + bha, err := db.BlockHeaders(ctx, tt.bh.Header.Slot, tt.bh.Header.ProposerIndex) if err != nil { t.Fatalf("failed to get block header: %v", err) } - if helpers.SlotToEpoch(tt.bh.Header.Slot) > currentEpoch-historyToKeep { + if helpers.SlotToEpoch(tt.bh.Header.Slot) >= currentEpoch-historyToKeep { if bha == nil || !reflect.DeepEqual(bha[0], tt.bh) { t.Fatalf("get should return bh: %v", bha) } diff --git a/slasher/db/kv/proposer_slashings_test.go b/slasher/db/kv/proposer_slashings_test.go index 4626aeafe2..0d793da02c 100644 --- a/slasher/db/kv/proposer_slashings_test.go +++ b/slasher/db/kv/proposer_slashings_test.go @@ -20,7 +20,7 @@ func TestStore_ProposerSlashingNilBucket(t 
*testing.T) { defer teardownDB(t, db) ctx := context.Background() - ps := ðpb.ProposerSlashing{ProposerIndex: 1} + ps := ðpb.ProposerSlashing{Header_1: ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{ProposerIndex: 1}}} has, _, err := db.HasProposerSlashing(ctx, ps) if err != nil { t.Fatalf("HasProposerSlashing should not return error: %v", err) @@ -45,22 +45,30 @@ func TestStore_SaveProposerSlashing(t *testing.T) { defer teardownDB(t, db) ctx := context.Background() - hdr := ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{}} tests := []struct { ss types.SlashingStatus ps *ethpb.ProposerSlashing }{ { ss: types.Active, - ps: ðpb.ProposerSlashing{ProposerIndex: 1, Header_1: hdr, Header_2: hdr}, + ps: ðpb.ProposerSlashing{ + Header_1: ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{ProposerIndex: 1}}, + Header_2: ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{ProposerIndex: 1}}, + }, }, { ss: types.Included, - ps: ðpb.ProposerSlashing{ProposerIndex: 2, Header_1: hdr, Header_2: hdr}, + ps: ðpb.ProposerSlashing{ + Header_1: ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{ProposerIndex: 2}}, + Header_2: ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{ProposerIndex: 2}}, + }, }, { ss: types.Reverted, - ps: ðpb.ProposerSlashing{ProposerIndex: 3, Header_1: hdr, Header_2: hdr}, + ps: ðpb.ProposerSlashing{ + Header_1: ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{ProposerIndex: 3}}, + Header_2: ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{ProposerIndex: 3}}, + }, }, } @@ -97,15 +105,15 @@ func TestStore_UpdateProposerSlashingStatus(t *testing.T) { }{ { ss: types.Active, - ps: ðpb.ProposerSlashing{ProposerIndex: 1}, + ps: ðpb.ProposerSlashing{Header_1: ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{ProposerIndex: 1}}}, }, { ss: types.Active, - ps: ðpb.ProposerSlashing{ProposerIndex: 2}, + ps: ðpb.ProposerSlashing{Header_1: ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{ProposerIndex: 2}}}, }, { ss: types.Active, - ps: ðpb.ProposerSlashing{ProposerIndex: 3}, + ps: ðpb.ProposerSlashing{Header_1: ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{ProposerIndex: 3}}}, }, } @@ -151,11 +159,19 @@ func TestStore_SaveProposerSlashings(t *testing.T) { defer teardownDB(t, db) ctx := context.Background() - hdr := ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{}} ps := []*ethpb.ProposerSlashing{ - {ProposerIndex: 1, Header_1: hdr, Header_2: hdr}, - {ProposerIndex: 2, Header_1: hdr, Header_2: hdr}, - {ProposerIndex: 3, Header_1: hdr, Header_2: hdr}, + { + Header_1: ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{ProposerIndex: 1}}, + Header_2: ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{ProposerIndex: 1}}, + }, + { + Header_1: ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{ProposerIndex: 2}}, + Header_2: ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{ProposerIndex: 2}}, + }, + { + Header_1: ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{ProposerIndex: 3}}, + Header_2: ðpb.SignedBeaconBlockHeader{Header: ðpb.BeaconBlockHeader{ProposerIndex: 3}}, + }, } err := db.SaveProposerSlashings(ctx, types.Active, ps) if err != nil { @@ -166,7 +182,7 @@ func TestStore_SaveProposerSlashings(t *testing.T) { t.Fatalf("Failed to get proposer slashings: %v", err) } sort.SliceStable(proposerSlashings, func(i, j int) bool { - return proposerSlashings[i].ProposerIndex < proposerSlashings[j].ProposerIndex + return 
proposerSlashings[i].Header_1.Header.ProposerIndex < proposerSlashings[j].Header_1.Header.ProposerIndex }) if proposerSlashings == nil || !reflect.DeepEqual(proposerSlashings, ps) { diff, _ := messagediff.PrettyDiff(proposerSlashings, ps) diff --git a/slasher/db/kv/schema.go b/slasher/db/kv/schema.go index ebea7b2100..afff28ae5a 100644 --- a/slasher/db/kv/schema.go +++ b/slasher/db/kv/schema.go @@ -28,12 +28,12 @@ var ( validatorsMinMaxSpanBucket = []byte("validators-min-max-span-bucket") ) -func encodeEpochValidatorID(epoch uint64, validatorID uint64) []byte { - return append(bytesutil.Bytes8(epoch), bytesutil.Bytes8(validatorID)...) +func encodeSlotValidatorID(slot uint64, validatorID uint64) []byte { + return append(bytesutil.Bytes8(slot), bytesutil.Bytes8(validatorID)...) } -func encodeEpochValidatorIDSig(epoch uint64, validatorID uint64, sig []byte) []byte { - return append(append(bytesutil.Bytes8(epoch), bytesutil.Bytes8(validatorID)...), sig...) +func encodeSlotValidatorIDSig(slot uint64, validatorID uint64, sig []byte) []byte { + return append(append(bytesutil.Bytes8(slot), bytesutil.Bytes8(validatorID)...), sig...) } func encodeEpochSig(targetEpoch uint64, sig []byte) []byte { diff --git a/slasher/detection/BUILD.bazel b/slasher/detection/BUILD.bazel index 951224399a..d7f1ffd911 100644 --- a/slasher/detection/BUILD.bazel +++ b/slasher/detection/BUILD.bazel @@ -27,6 +27,7 @@ go_library( "@com_github_prometheus_client_golang//prometheus:go_default_library", "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library", "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", + "@com_github_prysmaticlabs_go_ssz//:go_default_library", "@com_github_sirupsen_logrus//:go_default_library", "@io_opencensus_go//trace:go_default_library", ], @@ -46,6 +47,8 @@ go_test( "//slasher/db/testing:go_default_library", "//slasher/db/types:go_default_library", "//slasher/detection/attestations:go_default_library", + "//slasher/detection/proposals:go_default_library", + "//slasher/detection/testing:go_default_library", "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", "@com_github_sirupsen_logrus//:go_default_library", "@com_github_sirupsen_logrus//hooks/test:go_default_library", diff --git a/slasher/detection/detect.go b/slasher/detection/detect.go index a3197cfa0e..fac98deae7 100644 --- a/slasher/detection/detect.go +++ b/slasher/detection/detect.go @@ -1,6 +1,7 @@ package detection import ( + "bytes" "context" "github.com/gogo/protobuf/proto" @@ -163,6 +164,15 @@ func (ds *Service) DetectDoubleProposals(ctx context.Context, incomingBlock *eth return ds.proposalsDetector.DetectDoublePropose(ctx, incomingBlock) } +func isDoublePropose( + incomingBlockHeader *ethpb.SignedBeaconBlockHeader, + prevBlockHeader *ethpb.SignedBeaconBlockHeader, +) bool { + return incomingBlockHeader.Header.ProposerIndex == prevBlockHeader.Header.ProposerIndex && + !bytes.Equal(incomingBlockHeader.Signature, prevBlockHeader.Signature) && + incomingBlockHeader.Header.Slot == prevBlockHeader.Header.Slot +} + func isDoubleVote(incomingAtt *ethpb.IndexedAttestation, prevAtt *ethpb.IndexedAttestation) bool { return !proto.Equal(incomingAtt.Data, prevAtt.Data) && incomingAtt.Data.Target.Epoch == prevAtt.Data.Target.Epoch } diff --git a/slasher/detection/detect_test.go b/slasher/detection/detect_test.go index aab8eb6f84..4cb101fd17 100644 --- a/slasher/detection/detect_test.go +++ b/slasher/detection/detect_test.go @@ -2,6 +2,7 @@ package detection import ( "context" + 
"reflect" "testing" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" @@ -9,6 +10,8 @@ import ( testDB "github.com/prysmaticlabs/prysm/slasher/db/testing" status "github.com/prysmaticlabs/prysm/slasher/db/types" "github.com/prysmaticlabs/prysm/slasher/detection/attestations" + "github.com/prysmaticlabs/prysm/slasher/detection/proposals" + testDetect "github.com/prysmaticlabs/prysm/slasher/detection/testing" ) func TestDetect_detectAttesterSlashings_Surround(t *testing.T) { @@ -341,3 +344,80 @@ func TestDetect_detectAttesterSlashings_Double(t *testing.T) { }) } } + +func TestDetect_detectProposerSlashing(t *testing.T) { + type testStruct struct { + name string + blk *ethpb.SignedBeaconBlockHeader + incomingBlk *ethpb.SignedBeaconBlockHeader + slashing *ethpb.ProposerSlashing + } + blk1slot0, err := testDetect.SignedBlockHeader(testDetect.StartSlot(0), 0) + if err != nil { + t.Fatal(err) + } + blk2slot0, err := testDetect.SignedBlockHeader(testDetect.StartSlot(0), 0) + if err != nil { + t.Fatal(err) + } + blk1epoch1, err := testDetect.SignedBlockHeader(testDetect.StartSlot(1), 0) + if err != nil { + t.Fatal(err) + } + tests := []testStruct{ + { + name: "same block sig dont slash", + blk: blk1slot0, + incomingBlk: blk1slot0, + slashing: nil, + }, + { + name: "block from different epoch dont slash", + blk: blk1slot0, + incomingBlk: blk1epoch1, + slashing: nil, + }, + { + name: "different sig from same slot slash", + blk: blk1slot0, + incomingBlk: blk2slot0, + slashing: ðpb.ProposerSlashing{Header_1: blk2slot0, Header_2: blk1slot0}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db := testDB.SetupSlasherDB(t, false) + defer testDB.TeardownSlasherDB(t, db) + ctx := context.Background() + ds := Service{ + ctx: ctx, + slasherDB: db, + proposalsDetector: proposals.NewProposeDetector(db), + } + if err := db.SaveBlockHeader(ctx, tt.blk); err != nil { + t.Fatal(err) + } + + slashing, err := ds.proposalsDetector.DetectDoublePropose(ctx, tt.incomingBlk) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(slashing, tt.slashing) { + t.Errorf("Wanted: %v, received %v", tt.slashing, slashing) + } + savedSlashings, err := db.ProposalSlashingsByStatus(ctx, status.Active) + if tt.slashing != nil && len(savedSlashings) != 1 { + t.Fatalf("Did not save slashing to db") + } + + if slashing != nil && !isDoublePropose(slashing.Header_1, slashing.Header_2) { + t.Fatalf( + "Expected slashing to be valid, received atts with target epoch %v and %v but not valid", + slashing.Header_1, + slashing.Header_2, + ) + } + + }) + } +} diff --git a/slasher/detection/listeners.go b/slasher/detection/listeners.go index 81b99e821f..89a519300f 100644 --- a/slasher/detection/listeners.go +++ b/slasher/detection/listeners.go @@ -9,7 +9,9 @@ package detection import ( "context" + "github.com/pkg/errors" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" + "github.com/prysmaticlabs/go-ssz" "go.opencensus.io/trace" ) @@ -24,9 +26,14 @@ func (ds *Service) detectIncomingBlocks(ctx context.Context, ch chan *ethpb.Sign defer sub.Unsubscribe() for { select { - case <-ch: + case sblk := <-ch: log.Debug("Running detection on block...") - // TODO(#4836): Run detection function for proposer slashings. 
+ sbh, err := signedBeaconBlockHeaderFromBlock(sblk) + if err != nil { + log.WithError(err) + } + slashing, err := ds.proposalsDetector.DetectDoublePropose(ctx, sbh) + ds.submitProposerSlashing(ctx, slashing) case <-sub.Err(): log.Error("Subscriber closed, exiting goroutine") return @@ -69,3 +76,20 @@ func (ds *Service) detectIncomingAttestations(ctx context.Context, ch chan *ethp } } } + +func signedBeaconBlockHeaderFromBlock(block *ethpb.SignedBeaconBlock) (*ethpb.SignedBeaconBlockHeader, error) { + bodyRoot, err := ssz.HashTreeRoot(block.Block.Body) + if err != nil { + return nil, errors.Wrap(err, "Failed to get signing root of block") + } + return ðpb.SignedBeaconBlockHeader{ + Header: ðpb.BeaconBlockHeader{ + Slot: block.Block.Slot, + ProposerIndex: block.Block.ProposerIndex, + ParentRoot: block.Block.ParentRoot, + StateRoot: block.Block.StateRoot, + BodyRoot: bodyRoot[:], + }, + Signature: block.Signature, + }, nil +} diff --git a/slasher/detection/listeners_test.go b/slasher/detection/listeners_test.go index 219b959d48..33378ea2df 100644 --- a/slasher/detection/listeners_test.go +++ b/slasher/detection/listeners_test.go @@ -8,7 +8,9 @@ import ( ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/prysm/shared/event" "github.com/prysmaticlabs/prysm/shared/testutil" + testDB "github.com/prysmaticlabs/prysm/slasher/db/testing" "github.com/prysmaticlabs/prysm/slasher/detection/attestations" + "github.com/prysmaticlabs/prysm/slasher/detection/proposals" "github.com/sirupsen/logrus" logTest "github.com/sirupsen/logrus/hooks/test" ) @@ -34,8 +36,11 @@ func (m *mockNotifier) ClientReadyFeed() *event.Feed { func TestService_DetectIncomingBlocks(t *testing.T) { hook := logTest.NewGlobal() + db := testDB.SetupSlasherDB(t, false) + defer testDB.TeardownSlasherDB(t, db) ds := Service{ - notifier: &mockNotifier{}, + notifier: &mockNotifier{}, + proposalsDetector: proposals.NewProposeDetector(db), } blk := ðpb.SignedBeaconBlock{ Block: ðpb.BeaconBlock{Slot: 1}, diff --git a/slasher/detection/proposals/BUILD.bazel b/slasher/detection/proposals/BUILD.bazel index 6eb9550290..1f4e5239fb 100644 --- a/slasher/detection/proposals/BUILD.bazel +++ b/slasher/detection/proposals/BUILD.bazel @@ -6,7 +6,6 @@ go_library( importpath = "github.com/prysmaticlabs/prysm/slasher/detection/proposals", visibility = ["//visibility:public"], deps = [ - "//beacon-chain/core/helpers:go_default_library", "//slasher/db:go_default_library", "//slasher/db/types:go_default_library", "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", @@ -19,9 +18,9 @@ go_test( srcs = ["detector_test.go"], embed = [":go_default_library"], deps = [ - "//shared/params:go_default_library", "//slasher/db/testing:go_default_library", "//slasher/detection/proposals/iface:go_default_library", + "//slasher/detection/testing:go_default_library", "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", ], ) diff --git a/slasher/detection/proposals/detector.go b/slasher/detection/proposals/detector.go index 8db0747fb8..5253006d59 100644 --- a/slasher/detection/proposals/detector.go +++ b/slasher/detection/proposals/detector.go @@ -5,7 +5,6 @@ import ( "context" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/slasher/db" status "github.com/prysmaticlabs/prysm/slasher/db/types" "go.opencensus.io/trace" @@ -31,11 +30,7 @@ func (dd *ProposeDetector) DetectDoublePropose( ) 
(*ethpb.ProposerSlashing, error) { ctx, span := trace.StartSpan(ctx, "detector.DetectDoublePropose") defer span.End() - epoch := helpers.SlotToEpoch(incomingBlk.Header.Slot) - //TODO(#5119) remove constand and use input from block header. - //validatorIdx:=blk.Header.ProposerIndex - proposerIdx := uint64(0) - bha, err := dd.slasherDB.BlockHeaders(ctx, epoch, proposerIdx) + bha, err := dd.slasherDB.BlockHeaders(ctx, incomingBlk.Header.Slot, incomingBlk.Header.ProposerIndex) if err != nil { return nil, err } @@ -43,7 +38,7 @@ func (dd *ProposeDetector) DetectDoublePropose( if bytes.Equal(bh.Signature, incomingBlk.Signature) { continue } - ps := ðpb.ProposerSlashing{ProposerIndex: proposerIdx, Header_1: incomingBlk, Header_2: bh} + ps := ðpb.ProposerSlashing{Header_1: incomingBlk, Header_2: bh} err := dd.slasherDB.SaveProposerSlashing(ctx, status.Active, ps) if err != nil { return nil, err diff --git a/slasher/detection/proposals/detector_test.go b/slasher/detection/proposals/detector_test.go index d6c89882f6..741ec18b24 100644 --- a/slasher/detection/proposals/detector_test.go +++ b/slasher/detection/proposals/detector_test.go @@ -2,14 +2,13 @@ package proposals import ( "context" - "crypto/rand" "reflect" "testing" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/prysm/shared/params" testDB "github.com/prysmaticlabs/prysm/slasher/db/testing" "github.com/prysmaticlabs/prysm/slasher/detection/proposals/iface" + testDetect "github.com/prysmaticlabs/prysm/slasher/detection/testing" ) var _ = iface.ProposalsDetector(&ProposeDetector{}) @@ -21,40 +20,46 @@ func TestProposalsDetector_DetectSlashingsForBlockHeaders(t *testing.T) { incomingBlk *ethpb.SignedBeaconBlockHeader slashing *ethpb.ProposerSlashing } - blk1epoch0, err := signedBlockHeader(startSlot(0), 0) + blk1slot0, err := testDetect.SignedBlockHeader(testDetect.StartSlot(0), 0) if err != nil { t.Fatal(err) } - blk2epoch0, err := signedBlockHeader(startSlot(0)+1, 0) + blk2slot0, err := testDetect.SignedBlockHeader(testDetect.StartSlot(0), 0) if err != nil { t.Fatal(err) } - blk1epoch1, err := signedBlockHeader(startSlot(1), 0) + blk1slot1, err := testDetect.SignedBlockHeader(testDetect.StartSlot(0)+1, 0) + if err != nil { + t.Fatal(err) + } + blk1epoch1, err := testDetect.SignedBlockHeader(testDetect.StartSlot(1), 0) if err != nil { t.Fatal(err) } - //blk1epoch3, err := signedBlockHeader(startSlot(3), 0) - //if err != nil { - // t.Fatal(err) - //} tests := []testStruct{ { name: "same block sig dont slash", - blk: blk1epoch0, - incomingBlk: blk1epoch0, + blk: blk1slot0, + incomingBlk: blk1slot0, slashing: nil, }, { name: "block from different epoch dont slash", - blk: blk1epoch0, + blk: blk1slot0, incomingBlk: blk1epoch1, slashing: nil, }, { - name: "different sig from same epoch slash", - blk: blk1epoch0, - incomingBlk: blk2epoch0, - slashing: ðpb.ProposerSlashing{ProposerIndex: 0, Header_1: blk2epoch0, Header_2: blk1epoch0}, + name: "different sig from different slot dont slash", + blk: blk1slot0, + incomingBlk: blk1slot1, + slashing: nil, + }, + { + name: "different sig from same slot slash", + blk: blk1slot0, + incomingBlk: blk2slot0, + slashing: ðpb.ProposerSlashing{Header_1: blk2slot0, Header_2: blk1slot0}, }, } @@ -68,7 +73,7 @@ func TestProposalsDetector_DetectSlashingsForBlockHeaders(t *testing.T) { slasherDB: db, } - if err := sd.slasherDB.SaveBlockHeader(ctx, 0, tt.blk); err != nil { + if err := sd.slasherDB.SaveBlockHeader(ctx, tt.blk); err != nil { t.Fatal(err) } @@ -84,31 +89,3 @@ func 
TestProposalsDetector_DetectSlashingsForBlockHeaders(t *testing.T) { }) } } - -func signedBlockHeader(slot uint64, proposerIdx uint64) (*ethpb.SignedBeaconBlockHeader, error) { - sig, err := genRandomSig() - if err != nil { - return nil, err - } - root := [32]byte{1, 2, 3} - return ðpb.SignedBeaconBlockHeader{ - Header: ðpb.BeaconBlockHeader{ - //ProposerIndex proposerIndex, - Slot: slot, - ParentRoot: root[:], - StateRoot: root[:], - BodyRoot: root[:], - }, - Signature: sig, - }, nil -} - -func genRandomSig() (blk []byte, err error) { - blk = make([]byte, 96) - _, err = rand.Read(blk) - return -} - -func startSlot(epoch uint64) uint64 { - return epoch * params.BeaconConfig().SlotsPerEpoch -} diff --git a/slasher/detection/service.go b/slasher/detection/service.go index 987592649a..5bc2ee14ab 100644 --- a/slasher/detection/service.go +++ b/slasher/detection/service.go @@ -93,6 +93,7 @@ func (ds *Service) Start() { // our gRPC client to keep detecting slashable offenses. go ds.detectIncomingBlocks(ds.ctx, ds.blocksChan) go ds.detectIncomingAttestations(ds.ctx, ds.attsChan) + go ds.detectHistoricalChainData(ds.ctx) } func (ds *Service) detectHistoricalChainData(ctx context.Context) { @@ -161,3 +162,17 @@ func (ds *Service) submitAttesterSlashings(ctx context.Context, slashings []*eth } } } + +func (ds *Service) submitProposerSlashing(ctx context.Context, slashing *ethpb.ProposerSlashing) { + ctx, span := trace.StartSpan(ctx, "detection.submitProposerSlashing") + defer span.End() + if slashing != nil && slashing.Header_1 != nil && slashing.Header_2 != nil { + log.WithFields(logrus.Fields{ + "header1Slot": slashing.Header_1.Header.Slot, + "header2Slot": slashing.Header_2.Header.Slot, + "proposerIdxHeader1": slashing.Header_1.Header.ProposerIndex, + "proposerIdxHeader2": slashing.Header_2.Header.ProposerIndex, + }).Info("Found a proposer slashing! Submitting to beacon node") + ds.proposerSlashingsFeed.Send(slashing) + } +} diff --git a/slasher/detection/testing/BUILD.bazel b/slasher/detection/testing/BUILD.bazel new file mode 100644 index 0000000000..73aacd54ed --- /dev/null +++ b/slasher/detection/testing/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["utils.go"], + importpath = "github.com/prysmaticlabs/prysm/slasher/detection/testing", + visibility = ["//visibility:public"], + deps = [ + "//shared/params:go_default_library", + "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", + ], +) diff --git a/slasher/detection/testing/utils.go b/slasher/detection/testing/utils.go new file mode 100644 index 0000000000..843f7b121c --- /dev/null +++ b/slasher/detection/testing/utils.go @@ -0,0 +1,39 @@ +package testing + +import ( + "crypto/rand" + + ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" + "github.com/prysmaticlabs/prysm/shared/params" +) + +// SignedBlockHeader given slot, proposer index this function generates signed block header. +// with random bytes as its signature. 
+func SignedBlockHeader(slot uint64, proposerIdx uint64) (*ethpb.SignedBeaconBlockHeader, error) { + sig, err := genRandomSig() + if err != nil { + return nil, err + } + root := [32]byte{1, 2, 3} + return ðpb.SignedBeaconBlockHeader{ + Header: ðpb.BeaconBlockHeader{ + ProposerIndex: proposerIdx, + Slot: slot, + ParentRoot: root[:], + StateRoot: root[:], + BodyRoot: root[:], + }, + Signature: sig, + }, nil +} + +func genRandomSig() ([]byte, error) { + blk := make([]byte, 96) + _, err := rand.Read(blk) + return blk, err +} + +// StartSlot returns the first slot of a given epoch. +func StartSlot(epoch uint64) uint64 { + return epoch * params.BeaconConfig().SlotsPerEpoch +} diff --git a/third_party/com_github_prysmaticlabs_ethereumapis-tags.patch b/third_party/com_github_prysmaticlabs_ethereumapis-tags.patch index 56a67c2420..aacc057e99 100644 --- a/third_party/com_github_prysmaticlabs_ethereumapis-tags.patch +++ b/third_party/com_github_prysmaticlabs_ethereumapis-tags.patch @@ -87,7 +87,7 @@ index c0fbe31..1211829 100644 protoc_gen_swagger( diff --git a/eth/v1alpha1/attestation.proto b/eth/v1alpha1/attestation.proto -index b177b76..28b4b46 100644 +index 3eee6f2..afbca04 100644 --- a/eth/v1alpha1/attestation.proto +++ b/eth/v1alpha1/attestation.proto @@ -15,6 +15,8 @@ syntax = "proto3"; @@ -122,8 +122,17 @@ index b177b76..28b4b46 100644 + bytes selection_proof = 2 [(gogoproto.moretags) = "ssz-size:\"96\""]; } + message SignedAggregateAttestationAndProof { +@@ -49,7 +51,7 @@ message SignedAggregateAttestationAndProof { + AggregateAttestationAndProof message = 1; + + // 96 byte BLS aggregate signature signed by the aggregator over the message. +- bytes signature = 2; ++ bytes signature = 2 [(gogoproto.moretags) = "ssz-size:\"96\""]; + } + message AttestationData { -@@ -55,7 +57,7 @@ message AttestationData { +@@ -63,7 +65,7 @@ message AttestationData { uint64 committee_index = 2; // 32 byte root of the LMD GHOST block vote. @@ -132,7 +141,7 @@ index b177b76..28b4b46 100644 // The most recent justified checkpoint in the beacon state Checkpoint source = 4; -@@ -91,5 +93,5 @@ message Checkpoint { +@@ -99,5 +101,5 @@ message Checkpoint { uint64 epoch = 1; // Block root of the checkpoint references. @@ -140,7 +149,7 @@ index b177b76..28b4b46 100644 + bytes root = 2 [(gogoproto.moretags) = "ssz-size:\"32\""]; } diff --git a/eth/v1alpha1/beacon_block.proto b/eth/v1alpha1/beacon_block.proto -index 2ce5c34..4cbb276 100644 +index ece6613..f396a77 100644 --- a/eth/v1alpha1/beacon_block.proto +++ b/eth/v1alpha1/beacon_block.proto @@ -15,6 +15,7 @@ syntax = "proto3"; @@ -151,20 +160,20 @@ index 2ce5c34..4cbb276 100644 import "eth/v1alpha1/attestation.proto"; option csharp_namespace = "Ethereum.Eth.v1alpha1"; -@@ -30,10 +31,10 @@ message BeaconBlock { - uint64 slot = 1; +@@ -33,10 +34,10 @@ message BeaconBlock { + uint64 proposer_index = 2; // 32 byte root of the parent block. -- bytes parent_root = 2; -+ bytes parent_root = 2 [(gogoproto.moretags) = "ssz-size:\"32\""]; +- bytes parent_root = 3; ++ bytes parent_root = 3 [(gogoproto.moretags) = "ssz-size:\"32\""]; // 32 byte root of the resulting state after processing this block. -- bytes state_root = 3; -+ bytes state_root = 3 [(gogoproto.moretags) = "ssz-size:\"32\""]; +- bytes state_root = 4; ++ bytes state_root = 4 [(gogoproto.moretags) = "ssz-size:\"32\""]; // The block body itself. 
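Editor's note on the slasher/detection/testing helpers introduced just above (SignedBlockHeader, StartSlot): they generate headers with random 96-byte signatures, so two calls with the same slot and proposer index yield exactly the conflicting pair the double-propose detector is meant to flag. A short usage sketch, not taken from the PR (package and function names here are illustrative):

```go
package example

import (
	"bytes"
	"fmt"

	testDetect "github.com/prysmaticlabs/prysm/slasher/detection/testing"
)

// conflictingHeaders builds two headers for the same slot and proposer.
// Because SignedBlockHeader fills the signature with random bytes, the two
// signatures differ, which is the double-propose condition the detector checks.
func conflictingHeaders() error {
	slot := testDetect.StartSlot(1) // first slot of epoch 1
	a, err := testDetect.SignedBlockHeader(slot, 7)
	if err != nil {
		return err
	}
	b, err := testDetect.SignedBlockHeader(slot, 7)
	if err != nil {
		return err
	}
	conflict := a.Header.Slot == b.Header.Slot &&
		a.Header.ProposerIndex == b.Header.ProposerIndex &&
		!bytes.Equal(a.Signature, b.Signature)
	fmt.Println(conflict) // true (with overwhelming probability)
	return nil
}
```
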
- BeaconBlockBody body = 4; -@@ -45,38 +46,38 @@ message SignedBeaconBlock { + BeaconBlockBody body = 5; +@@ -48,38 +49,38 @@ message SignedBeaconBlock { BeaconBlock block = 1; // 96 byte BLS signature from the validator that produced this block. @@ -183,7 +192,7 @@ index 2ce5c34..4cbb276 100644 // 32 byte field of arbitrary data. This field may contain any data and // is not used for anything other than a fun message. -- bytes graffiti = 3; +- bytes graffiti = 3; + bytes graffiti = 3 [(gogoproto.moretags) = "ssz-size:\"32\""]; // Block operations @@ -211,7 +220,7 @@ index 2ce5c34..4cbb276 100644 } // Proposer slashings are proofs that a slashable offense has been committed by -@@ -107,20 +108,20 @@ message AttesterSlashing { +@@ -106,20 +107,20 @@ message AttesterSlashing { message Deposit { message Data { // 48 byte BLS public key of the validator. @@ -236,7 +245,7 @@ index 2ce5c34..4cbb276 100644 Data data = 2; } -@@ -142,14 +143,14 @@ message SignedVoluntaryExit { +@@ -141,14 +142,14 @@ message SignedVoluntaryExit { VoluntaryExit exit = 1; // Validator's 96 byte signature @@ -253,7 +262,7 @@ index 2ce5c34..4cbb276 100644 // The total number of deposits included in the beacon chain since genesis // including the deposits in this block. -@@ -157,7 +158,7 @@ message Eth1Data { +@@ -156,7 +157,7 @@ message Eth1Data { // The 32 byte block hash of the Ethereum 1.x block considered for deposit // inclusion. @@ -262,24 +271,24 @@ index 2ce5c34..4cbb276 100644 } // A beacon block header is essentially a beacon block with only a reference to -@@ -169,13 +170,13 @@ message BeaconBlockHeader { - uint64 slot = 1; +@@ -171,13 +172,13 @@ message BeaconBlockHeader { + uint64 proposer_index = 2; // 32 byte merkle tree root of the parent ssz encoded block. -- bytes parent_root = 2; -+ bytes parent_root = 2 [(gogoproto.moretags) = "ssz-size:\"32\""]; +- bytes parent_root = 3; ++ bytes parent_root = 3 [(gogoproto.moretags) = "ssz-size:\"32\""]; // 32 byte merkle tree root of the resulting ssz encoded state after processing this block. -- bytes state_root = 3; -+ bytes state_root = 3 [(gogoproto.moretags) = "ssz-size:\"32\""]; +- bytes state_root = 4; ++ bytes state_root = 4 [(gogoproto.moretags) = "ssz-size:\"32\""]; // 32 byte merkle tree root of the ssz encoded block body. -- bytes body_root = 4; -+ bytes body_root = 4 [(gogoproto.moretags) = "ssz-size:\"32\""]; +- bytes body_root = 5; ++ bytes body_root = 5 [(gogoproto.moretags) = "ssz-size:\"32\""]; } message SignedBeaconBlockHeader { -@@ -183,14 +184,14 @@ message SignedBeaconBlockHeader { +@@ -185,14 +186,14 @@ message SignedBeaconBlockHeader { BeaconBlockHeader header = 1; // 96 byte BLS signature from the validator that produced this block header. @@ -298,7 +307,7 @@ index 2ce5c34..4cbb276 100644 + bytes signature = 3 [(gogoproto.moretags) = "ssz-size:\"96\""]; } diff --git a/eth/v1alpha1/beacon_chain.proto b/eth/v1alpha1/beacon_chain.proto -index 8ee263b..60607de 100644 +index 6a7940d..b44835e 100644 --- a/eth/v1alpha1/beacon_chain.proto +++ b/eth/v1alpha1/beacon_chain.proto @@ -15,6 +15,7 @@ syntax = "proto3"; @@ -320,7 +329,7 @@ index 8ee263b..60607de 100644 uint64 finalized_slot = 4; @@ -407,7 +408,7 @@ message ChainHead { uint64 finalized_epoch = 5; - + // Most recent 32 byte finalized block root. 
- bytes finalized_block_root = 6; + bytes finalized_block_root = 6 [(gogoproto.moretags) = "ssz-size:\"32\""]; @@ -329,7 +338,7 @@ index 8ee263b..60607de 100644 uint64 justified_slot = 7; @@ -416,7 +417,7 @@ message ChainHead { uint64 justified_epoch = 8; - + // Most recent 32 byte justified block root. - bytes justified_block_root = 9; + bytes justified_block_root = 9 [(gogoproto.moretags) = "ssz-size:\"32\""]; @@ -345,16 +354,17 @@ index 8ee263b..60607de 100644 } message ListCommitteesRequest { -@@ -470,7 +471,7 @@ message ListValidatorBalancesRequest { +@@ -470,8 +471,7 @@ message ListValidatorBalancesRequest { // Validator 48 byte BLS public keys to filter validators for the given // epoch. - repeated bytes public_keys = 3; +- + repeated bytes public_keys = 3 [(gogoproto.moretags) = "ssz-size:\"?,48\""]; - // Validator indices to filter validators for the given epoch. repeated uint64 indices = 4; -@@ -491,7 +492,7 @@ message ValidatorBalances { + +@@ -491,7 +491,7 @@ message ValidatorBalances { message Balance { // Validator's 48 byte BLS public key. @@ -363,7 +373,7 @@ index 8ee263b..60607de 100644 // Validator's index in the validator set. uint64 index = 2; -@@ -548,7 +549,7 @@ message GetValidatorRequest { +@@ -548,7 +548,7 @@ message GetValidatorRequest { uint64 index = 1; // 48 byte validator public key. @@ -372,7 +382,7 @@ index 8ee263b..60607de 100644 } } -@@ -590,26 +591,25 @@ message ActiveSetChanges { +@@ -590,26 +590,25 @@ message ActiveSetChanges { uint64 epoch = 1; // 48 byte validator public keys that have been activated in the given epoch. @@ -405,7 +415,7 @@ index 8ee263b..60607de 100644 // Indices of validators ejected in the given epoch. repeated uint64 ejected_indices = 9; -@@ -659,11 +659,11 @@ message ValidatorQueue { +@@ -659,11 +658,11 @@ message ValidatorQueue { // Ordered list of 48 byte public keys awaiting activation. 0th index is the // next key to be processed. @@ -419,17 +429,18 @@ index 8ee263b..60607de 100644 } message ListValidatorAssignmentsRequest { -@@ -675,7 +675,7 @@ message ListValidatorAssignmentsRequest { +@@ -675,8 +674,7 @@ message ListValidatorAssignmentsRequest { bool genesis = 2; } // 48 byte validator public keys to filter assignments for the given epoch. - repeated bytes public_keys = 3; +- + repeated bytes public_keys = 3 [(gogoproto.moretags) = "ssz-size:\"?,48\""]; - // Validator indicies to filter assignments for the given epoch. repeated uint64 indices = 4; -@@ -710,7 +710,7 @@ message ValidatorAssignments { - uint64 proposer_slot = 4; + +@@ -710,7 +708,7 @@ message ValidatorAssignments { + repeated uint64 proposer_slots = 4; // 48 byte BLS public key. - bytes public_key = 5; @@ -438,7 +449,7 @@ index 8ee263b..60607de 100644 // The epoch for which this set of validator assignments is valid. diff --git a/eth/v1alpha1/validator.proto b/eth/v1alpha1/validator.proto -index 47203c1..9e72b10 100644 +index 19fc019..2db4a55 100644 --- a/eth/v1alpha1/validator.proto +++ b/eth/v1alpha1/validator.proto @@ -15,6 +15,7 @@ syntax = "proto3"; @@ -449,7 +460,7 @@ index 47203c1..9e72b10 100644 import "google/api/annotations.proto"; import "google/protobuf/empty.proto"; import "eth/v1alpha1/beacon_block.proto"; -@@ -191,7 +192,7 @@ message DomainResponse { +@@ -221,7 +222,7 @@ message DomainResponse { message ValidatorActivationRequest { // A list of 48 byte validator public keys. 
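Editor's note on the recurring edit in this patch: each `(gogoproto.moretags) = "ssz-size:\"N\""` annotation makes the generated Go field carry an `ssz-size` struct tag, which go-ssz uses to encode the byte slice as a fixed-length vector instead of a variable-length list. A minimal sketch of the effect with a hand-written struct rather than the generated ethereumapis types (type and field names are illustrative, assuming go-ssz's standard tag handling):

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-ssz"
)

// signedThing mimics what the gogoproto codegen produces for a field annotated
// with ssz-size:"96": a byte slice whose tag pins it to a 96-byte vector.
type signedThing struct {
	Slot      uint64
	Signature []byte `ssz-size:"96"`
}

func main() {
	s := &signedThing{Slot: 1, Signature: make([]byte, 96)}
	root, err := ssz.HashTreeRoot(s) // hashes Signature as a fixed 96-byte vector
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#x\n", root)
}
```
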
@@ -458,7 +469,7 @@ index 47203c1..9e72b10 100644 } message ValidatorActivationResponse { -@@ -217,7 +218,7 @@ message ChainStartResponse { +@@ -257,7 +258,7 @@ message SyncedResponse { message ValidatorIndexRequest { // A 48 byte validator public key. @@ -467,7 +478,7 @@ index 47203c1..9e72b10 100644 } message ValidatorIndexResponse { -@@ -227,7 +228,7 @@ message ValidatorIndexResponse { +@@ -267,7 +268,7 @@ message ValidatorIndexResponse { message ValidatorStatusRequest { // A 48 byte validator public key. @@ -476,7 +487,7 @@ index 47203c1..9e72b10 100644 } enum ValidatorStatus { -@@ -265,7 +266,7 @@ message DutiesRequest { +@@ -305,7 +306,7 @@ message DutiesRequest { uint64 epoch = 1; // Array of byte encoded BLS public keys. @@ -485,8 +496,8 @@ index 47203c1..9e72b10 100644 } message DutiesResponse { -@@ -284,7 +285,7 @@ message DutiesResponse { - uint64 proposer_slot = 4; +@@ -324,7 +325,7 @@ message DutiesResponse { + repeated uint64 proposer_slots = 4; // 48 byte BLS public key for the validator who's assigned to perform a duty. - bytes public_key = 5; @@ -494,7 +505,7 @@ index 47203c1..9e72b10 100644 // The current status of the validator assigned to perform the duty. ValidatorStatus status = 6; -@@ -299,15 +300,16 @@ message BlockRequest { +@@ -339,15 +340,16 @@ message BlockRequest { uint64 slot = 1; // Validator's 32 byte randao reveal secret of the current epoch. @@ -514,7 +525,7 @@ index 47203c1..9e72b10 100644 } message AttestationDataRequest { -@@ -320,7 +322,7 @@ message AttestationDataRequest { +@@ -360,7 +362,7 @@ message AttestationDataRequest { message AttestResponse { // The root of the attestation data successfully submitted to the beacon node. @@ -522,8 +533,30 @@ index 47203c1..9e72b10 100644 + bytes attestation_data_root = 1 [(gogoproto.moretags) = "ssz-size:\"32\""]; } - message AggregationRequest { -@@ -343,10 +345,10 @@ message AggregationResponse { + message AggregateSelectionRequest { +@@ -369,10 +371,10 @@ message AggregateSelectionRequest { + // Committee index of the validator at the given slot. + uint64 committee_index = 2; + // 48 byte public key of the validator. +- bytes public_key = 3; ++ bytes public_key = 3 [(gogoproto.moretags) = "ssz-size:\"48\" spec-name:\"pubkey\""]; + // 96 byte signature of the validator on the slot. This is used as proof that the validator is + // an aggregator for the given slot. +- bytes slot_signature = 4; ++ bytes slot_signature = 4 [(gogoproto.moretags) = "ssz-size:\"96\""]; + } + + message AggregateSelectionResponse { +@@ -387,7 +389,7 @@ message SignedAggregateSubmitRequest { + + message SignedAggregateSubmitResponse { + // The 32 byte hash tree root of the aggregated attestation data. +- bytes attestation_data_root = 1; ++ bytes attestation_data_root = 1 [(gogoproto.moretags) = "ssz-size:\"32\""]; + } + + message CommitteeSubnetsSubscribeRequest { +@@ -405,10 +407,10 @@ message CommitteeSubnetsSubscribeRequest { // An Ethereum 2.0 validator. message Validator { // 48 byte BLS public key used for the validator's activities. diff --git a/third_party/herumi/bls_eth_go_binary.BUILD b/third_party/herumi/bls_eth_go_binary.BUILD index be8738ccb9..6e2625123b 100644 --- a/third_party/herumi/bls_eth_go_binary.BUILD +++ b/third_party/herumi/bls_eth_go_binary.BUILD @@ -28,12 +28,10 @@ OPTS = [ }) + select({ ":use_openssl": [], "//conditions:default": [ - "-DMCL_DONT_USE_OPENSSL", + "-DMCL_DONT_USE_OPENSSL", ], }) - - genrule( name = "base64_ll", outs = ["src/base64.ll"], # llvm assembly language file. 
@@ -81,10 +79,10 @@ cc_library( }) + select({ ":use_openssl": [ "-lssl", - "-lcrypto" + "-lcrypto", ], "//conditions:default": [], - }) + }), ) cc_library( @@ -145,5 +143,8 @@ go_library( }), copts = OPTS, cgo = True, - visibility = ["//visibility:public"], + visibility = [ + # Additional access will require security approval. + "@prysm//shared/bls:__pkg__", + ], ) diff --git a/third_party/herumi/bls_eth_go_binary_serialization_alloc_fix.patch b/third_party/herumi/bls_eth_go_binary_serialization_alloc_fix.patch deleted file mode 100644 index 202fcdc854..0000000000 --- a/third_party/herumi/bls_eth_go_binary_serialization_alloc_fix.patch +++ /dev/null @@ -1,49 +0,0 @@ -diff --git a/bls/bls.go b/bls/bls.go -index bc3b607..f6fa95f 100644 ---- a/bls/bls.go -+++ b/bls/bls.go -@@ -157,7 +157,7 @@ type SecretKey struct { - - // Serialize -- - func (sec *SecretKey) Serialize() []byte { -- buf := make([]byte, 2048) -+ buf := make([]byte, 32) - // #nosec - n := C.blsSecretKeySerialize(unsafe.Pointer(&buf[0]), C.mclSize(len(buf)), &sec.v) - if n == 0 { -@@ -354,7 +354,7 @@ func (keys PublicKeys) JSON() string { - - // Serialize -- - func (pub *PublicKey) Serialize() []byte { -- buf := make([]byte, 2048) -+ buf := make([]byte, 48) - // #nosec - n := C.blsPublicKeySerialize(unsafe.Pointer(&buf[0]), C.mclSize(len(buf)), &pub.v) - if n == 0 { -@@ -452,7 +452,7 @@ type Sign struct { - - // Serialize -- - func (sig *Sign) Serialize() []byte { -- buf := make([]byte, 2048) -+ buf := make([]byte, 96) - // #nosec - n := C.blsSignatureSerialize(unsafe.Pointer(&buf[0]), C.mclSize(len(buf)), &sig.v) - if n == 0 { -@@ -665,7 +665,7 @@ func (sig *Sign) VerifyHashWithDomain(pub *PublicKey, hashWithDomain []byte) boo - - // SerializeUncompressed -- - func (pub *PublicKey) SerializeUncompressed() []byte { -- buf := make([]byte, 2048) -+ buf := make([]byte, 96) - // #nosec - n := C.blsPublicKeySerializeUncompressed(unsafe.Pointer(&buf[0]), C.mclSize(len(buf)), &pub.v) - if n == 0 { -@@ -676,7 +676,7 @@ func (pub *PublicKey) SerializeUncompressed() []byte { - - // SerializeUncompressed -- - func (sig *Sign) SerializeUncompressed() []byte { -- buf := make([]byte, 2048) -+ buf := make([]byte, 192) - // #nosec - n := C.blsSignatureSerializeUncompressed(unsafe.Pointer(&buf[0]), C.mclSize(len(buf)), &sig.v) - if n == 0 { diff --git a/third_party/herumi/herumi.bzl b/third_party/herumi/herumi.bzl index 57994a1ef4..300c0e0c9b 100644 --- a/third_party/herumi/herumi.bzl +++ b/third_party/herumi/herumi.bzl @@ -8,21 +8,15 @@ Herumi's BLS library for go depends on """ def bls_dependencies(): - # TODO(4804): Update herumi_bls_eth_go_binary and herumi_bls to latest supporting v0.10.0. _maybe( http_archive, name = "herumi_bls_eth_go_binary", - strip_prefix = "bls-eth-go-binary-147ed25f233ed0b211e711ed8271606540c58064", + strip_prefix = "bls-eth-go-binary-f58f8b8bd66f1d286d650e9b39b9629411067f82", urls = [ - "https://github.com/herumi/bls-eth-go-binary/archive/147ed25f233ed0b211e711ed8271606540c58064.tar.gz", + "https://github.com/herumi/bls-eth-go-binary/archive/f58f8b8bd66f1d286d650e9b39b9629411067f82.tar.gz", ], - sha256 = "bbd04f3354f12982e4ef32c62eb13ceb183303ada1ee69e2869553ed35134321", + sha256 = "1a61363fdc33018d4334481410082c804f5aa083c4454b7eef3b2395a0df98f1", build_file = "@prysm//third_party/herumi:bls_eth_go_binary.BUILD", - # TODO(4804): Delete this patch after updating this archive to commit 381c62473c28af84f424cfb1521c97e48289174a or later. 
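Editor's note on the serialization-alloc patch deleted above: it trimmed herumi's Serialize buffers from 2048 bytes to the exact BLS12-381 encodings, and is presumably dropped here because the newly pinned bls-eth-go-binary commit already includes that fix (PR #5, per the removed TODO). For reference, the lengths the removed patch pinned:

```go
// Reference only: standard BLS12-381 serialized lengths, matching the buffer
// sizes in the deleted bls.go hunks above.
package blssizes

const (
	SecretKeyLen             = 32  // compressed secret key
	PublicKeyLen             = 48  // compressed public key
	SignatureLen             = 96  // compressed signature
	PublicKeyUncompressedLen = 96  // uncompressed public key
	SignatureUncompressedLen = 192 // uncompressed signature
)
```
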
- patches = [ - "@prysm//third_party/herumi:bls_eth_go_binary_serialization_alloc_fix.patch", # Integrates changes from PR #5. - ], - patch_args = ["-p1"], ) _maybe( http_archive, @@ -37,11 +31,11 @@ def bls_dependencies(): _maybe( http_archive, name = "herumi_bls", - strip_prefix = "bls-b0e010004293a7ffd2a626edc2062950abd09938", + strip_prefix = "bls-989e28ede489e5f0e50cfc87e3fd8a8767155b9f", urls = [ - "https://github.com/herumi/bls/archive/b0e010004293a7ffd2a626edc2062950abd09938.tar.gz", + "https://github.com/herumi/bls/archive/989e28ede489e5f0e50cfc87e3fd8a8767155b9f.tar.gz", ], - sha256 = "c7300970c8a639cbbe7465d10f412d6c6ab162b15f2e184b191c9763c2241da4", + sha256 = "14b441cc66ca7e6c4e0542dcfc6d9f83f4472f0e7a43efaa1d3ea93e2e2b7491", build_file = "@prysm//third_party/herumi:bls.BUILD", ) diff --git a/tools/benchmark-files-gen/main.go b/tools/benchmark-files-gen/main.go index 45787fc291..98ed069660 100644 --- a/tools/benchmark-files-gen/main.go +++ b/tools/benchmark-files-gen/main.go @@ -133,12 +133,9 @@ func generateMarshalledFullStateAndBlock() error { return errors.Wrap(err, "could not calculate state root") } block.Block.StateRoot = s[:] - blockRoot, err := ssz.HashTreeRoot(block.Block) - if err != nil { - return errors.Wrap(err, "could not get signing root of block") - } // Temporarily incrementing the beacon state slot here since BeaconProposerIndex is a // function deterministic on beacon state slot. + root := beaconState.GenesisValidatorRoot() if err := beaconState.SetSlot(beaconState.Slot() + 1); err != nil { return err } @@ -146,16 +143,20 @@ func generateMarshalledFullStateAndBlock() error { if err != nil { return err } - domain, err := helpers.Domain(beaconState.Fork(), helpers.CurrentEpoch(beaconState), params.BeaconConfig().DomainBeaconProposer) + domain, err := helpers.Domain(beaconState.Fork(), helpers.CurrentEpoch(beaconState), params.BeaconConfig().DomainBeaconProposer, root) if err != nil { return err } - block.Signature = privs[proposerIdx].Sign(blockRoot[:], domain).Marshal() + blockRoot, err := helpers.ComputeSigningRoot(block.Block, domain) + if err != nil { + return errors.Wrap(err, "could not get signing root of block") + } + block.Signature = privs[proposerIdx].Sign(blockRoot[:]).Marshal() if err := beaconState.SetSlot(beaconState.Slot() - 1); err != nil { return err } - beaconBytes, err := ssz.Marshal(beaconState) + beaconBytes, err := ssz.Marshal(beaconState.InnerStateUnsafe()) if err != nil { return err } @@ -206,7 +207,7 @@ func generate2FullEpochState() error { } } - beaconBytes, err := ssz.Marshal(beaconState) + beaconBytes, err := ssz.Marshal(beaconState.InnerStateUnsafe()) if err != nil { return err } diff --git a/tools/blocktree/BUILD.bazel b/tools/blocktree/BUILD.bazel index 3d385bd467..fb7ad2e7c7 100644 --- a/tools/blocktree/BUILD.bazel +++ b/tools/blocktree/BUILD.bazel @@ -12,7 +12,6 @@ go_library( "//beacon-chain/db/filters:go_default_library", "//shared/attestationutil:go_default_library", "//shared/bytesutil:go_default_library", - "//shared/params:go_default_library", "@com_github_emicklei_dot//:go_default_library", "@com_github_prysmaticlabs_go_ssz//:go_default_library", ], diff --git a/tools/blocktree/main.go b/tools/blocktree/main.go index c55221e49e..f5a73a4ff5 100644 --- a/tools/blocktree/main.go +++ b/tools/blocktree/main.go @@ -23,7 +23,6 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/db/filters" "github.com/prysmaticlabs/prysm/shared/attestationutil" "github.com/prysmaticlabs/prysm/shared/bytesutil" - 
"github.com/prysmaticlabs/prysm/shared/params" ) var ( @@ -41,8 +40,6 @@ type node struct { } func main() { - params.UseDemoBeaconConfig() - flag.Parse() db, err := db.NewDB(*datadir, cache.NewStateSummaryCache()) if err != nil { diff --git a/tools/bootnode/BUILD.bazel b/tools/bootnode/BUILD.bazel index d9adaff5f9..503f179597 100644 --- a/tools/bootnode/BUILD.bazel +++ b/tools/bootnode/BUILD.bazel @@ -9,7 +9,9 @@ go_library( importpath = "github.com/prysmaticlabs/prysm/tools/bootnode", visibility = ["//visibility:private"], deps = [ + "//proto/beacon/p2p/v1:go_default_library", "//shared/logutil:go_default_library", + "//shared/params:go_default_library", "//shared/version:go_default_library", "@com_github_btcsuite_btcd//btcec:go_default_library", "@com_github_ethereum_go_ethereum//log:go_default_library", @@ -25,6 +27,8 @@ go_library( "@com_github_libp2p_go_libp2p_kad_dht//opts:go_default_library", "@com_github_multiformats_go_multiaddr//:go_default_library", "@com_github_pkg_errors//:go_default_library", + "@com_github_prysmaticlabs_go_bitfield//:go_default_library", + "@com_github_prysmaticlabs_go_ssz//:go_default_library", "@com_github_sirupsen_logrus//:go_default_library", "@org_uber_go_automaxprocs//:go_default_library", ], @@ -42,6 +46,8 @@ go_image( tags = ["manual"], visibility = ["//visibility:private"], deps = [ + "//proto/beacon/p2p/v1:go_default_library", + "//shared/params:go_default_library", "//shared/logutil:go_default_library", "//shared/version:go_default_library", "@com_github_btcsuite_btcd//btcec:go_default_library", @@ -59,6 +65,8 @@ go_image( "@com_github_libp2p_go_libp2p_kad_dht//opts:go_default_library", "@com_github_multiformats_go_multiaddr//:go_default_library", "@com_github_pkg_errors//:go_default_library", + "@com_github_prysmaticlabs_go_bitfield//:go_default_library", + "@com_github_prysmaticlabs_go_ssz//:go_default_library", "@com_github_sirupsen_logrus//:go_default_library", "@org_uber_go_automaxprocs//:go_default_library", ], diff --git a/tools/bootnode/bootnode.go b/tools/bootnode/bootnode.go index 182e6643c0..a7b74ae65f 100644 --- a/tools/bootnode/bootnode.go +++ b/tools/bootnode/bootnode.go @@ -35,7 +35,11 @@ import ( dhtopts "github.com/libp2p/go-libp2p-kad-dht/opts" ma "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" + "github.com/prysmaticlabs/go-bitfield" + "github.com/prysmaticlabs/go-ssz" + pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1" "github.com/prysmaticlabs/prysm/shared/logutil" + "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/version" "github.com/sirupsen/logrus" _ "go.uber.org/automaxprocs" @@ -193,6 +197,16 @@ func createLocalNode(privKey *ecdsa.PrivateKey, ipAddr net.IP, port int) (*enode return nil, errors.Wrap(err, "Could not open node's peer database") } + forkID := &pb.ENRForkID{ + CurrentForkDigest: []byte{0, 0, 0, 0}, + NextForkVersion: params.BeaconConfig().NextForkVersion, + NextForkEpoch: params.BeaconConfig().NextForkEpoch, + } + forkEntry, err := ssz.Marshal(forkID) + if err != nil { + return nil, errors.Wrap(err, "Could not marshal fork id") + } + localNode := enode.NewLocalNode(db, privKey) ipEntry := enr.IP(ipAddr) udpEntry := enr.UDP(port) @@ -200,6 +214,8 @@ func createLocalNode(privKey *ecdsa.PrivateKey, ipAddr net.IP, port int) (*enode localNode.SetFallbackUDP(port) localNode.Set(ipEntry) localNode.Set(udpEntry) + localNode.Set(enr.WithEntry("eth2", forkEntry)) + localNode.Set(enr.WithEntry("attnets", bitfield.NewBitvector64())) return localNode, nil } diff 
--git a/tools/cluster-pk-manager/server/BUILD.bazel b/tools/cluster-pk-manager/server/BUILD.bazel index 540c96063c..ea4c8841eb 100644 --- a/tools/cluster-pk-manager/server/BUILD.bazel +++ b/tools/cluster-pk-manager/server/BUILD.bazel @@ -22,7 +22,6 @@ go_library( "//shared/bls:go_default_library", "//shared/bytesutil:go_default_library", "//shared/keystore:go_default_library", - "//shared/params:go_default_library", "//shared/prometheus:go_default_library", "@com_github_ethereum_go_ethereum//:go_default_library", "@com_github_ethereum_go_ethereum//accounts/abi/bind:go_default_library", diff --git a/tools/cluster-pk-manager/server/main.go b/tools/cluster-pk-manager/server/main.go index 597374100e..cc6e285f97 100644 --- a/tools/cluster-pk-manager/server/main.go +++ b/tools/cluster-pk-manager/server/main.go @@ -6,7 +6,6 @@ import ( "net" pb "github.com/prysmaticlabs/prysm/proto/cluster" - "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/prometheus" "github.com/sirupsen/logrus" _ "go.uber.org/automaxprocs" @@ -20,7 +19,7 @@ var ( rpcPath = flag.String("rpc", "https://goerli.prylabs.net", "RPC address of a running ETH1 node") beaconRPCPath = flag.String("beaconRPC", "localhost:4000", "RPC address of Beacon Node") depositContractAddr = flag.String("deposit-contract", "", "Address of the deposit contract") - depositAmount = flag.Int64("deposit-amount", 0, "The amount of wei to deposit into the contract") + depositAmount = flag.String("deposit-amount", "", "The amount of wei to deposit into the contract") dbPath = flag.String("db-path", "", "The file path for database storage") disableWatchtower = flag.Bool("disable-watchtower", false, "Disable kubernetes pod watcher. Useful for local testing") verbose = flag.Bool("verbose", false, "Enable debug logging") @@ -33,8 +32,6 @@ func main() { if *verbose { logrus.SetLevel(logrus.DebugLevel) } - // use demo-config for cluster deployments - params.UseDemoBeaconConfig() if *ensureDeposited { log.Warn("--ensure-deposited: Ensuring all keys are deposited or deleting them from database!") } diff --git a/tools/cluster-pk-manager/server/server.go b/tools/cluster-pk-manager/server/server.go index 2e7e309aa5..2f70277cb9 100644 --- a/tools/cluster-pk-manager/server/server.go +++ b/tools/cluster-pk-manager/server/server.go @@ -43,7 +43,7 @@ func newServer( rpcAddr string, depositContractAddr string, funderPK string, - validatorDepositAmount int64, + validatorDepositAmount string, beaconRPCAddr string, ) *server { rpcClient, err := rpc.Dial(rpcAddr) @@ -62,7 +62,8 @@ func newServer( panic(err) } - depositAmount := big.NewInt(validatorDepositAmount) + depositAmount := big.NewInt(0) + depositAmount.SetString(validatorDepositAmount, 10) conn, err := grpc.DialContext(context.Background(), beaconRPCAddr, grpc.WithInsecure(), grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) if err != nil { diff --git a/tools/faucet/server.go b/tools/faucet/server.go index 89cf758838..aed1730b05 100644 --- a/tools/faucet/server.go +++ b/tools/faucet/server.go @@ -24,7 +24,7 @@ import ( const ipLimit = 5 -var fundingAmount = big.NewInt(3.5 * params.Ether) +var fundingAmount *big.Int var funded = make(map[string]bool) var ipCounter = make(map[string]int) var fundingLock sync.Mutex @@ -38,6 +38,14 @@ type faucetServer struct { minScore float64 } +func init() { + var ok bool + fundingAmount, ok = new(big.Int).SetString("32500000000000000000", 10) + if !ok { + log.Fatal("could not set funding amount") + } +} + func newFaucetServer( r recaptcha.Recaptcha, 
rpcPath string, diff --git a/tools/forkchecker/forkchecker.go b/tools/forkchecker/forkchecker.go index f677336365..b8ea409a23 100644 --- a/tools/forkchecker/forkchecker.go +++ b/tools/forkchecker/forkchecker.go @@ -37,8 +37,6 @@ func (e *endpoint) Set(value string) error { } func main() { - params.UseDemoBeaconConfig() - var endpts endpoint clients := make(map[string]pb.BeaconChainClient) diff --git a/tools/sendDepositTx/sendDeposits.go b/tools/sendDepositTx/sendDeposits.go index 8069567167..64fc9d894e 100644 --- a/tools/sendDepositTx/sendDeposits.go +++ b/tools/sendDepositTx/sendDeposits.go @@ -97,7 +97,7 @@ func main() { }, &cli.Int64Flag{ Name: "depositAmount", - Value: 3200, + Value: int64(params.BeaconConfig().MaxEffectiveBalance), Usage: "Maximum deposit value allowed in contract(in gwei)", Destination: &depositAmount, }, diff --git a/validator/client/BUILD.bazel b/validator/client/BUILD.bazel index 3fd47d56f0..d7d275221f 100644 --- a/validator/client/BUILD.bazel +++ b/validator/client/BUILD.bazel @@ -34,6 +34,7 @@ go_library( "@com_github_grpc_ecosystem_go_grpc_middleware//retry:go_default_library", "@com_github_grpc_ecosystem_go_grpc_middleware//tracing/opentracing:go_default_library", "@com_github_grpc_ecosystem_go_grpc_prometheus//:go_default_library", + "@com_github_hashicorp_golang_lru//:go_default_library", "@com_github_pkg_errors//:go_default_library", "@com_github_prometheus_client_golang//prometheus:go_default_library", "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library", @@ -65,6 +66,7 @@ go_test( ], embed = [":go_default_library"], deps = [ + "//beacon-chain/core/helpers:go_default_library", "//proto/slashing:go_default_library", "//shared:go_default_library", "//shared/bls:go_default_library", @@ -81,10 +83,11 @@ go_test( "//validator/keymanager:go_default_library", "@com_github_gogo_protobuf//types:go_default_library", "@com_github_golang_mock//gomock:go_default_library", + "@com_github_hashicorp_golang_lru//:go_default_library", "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", "@com_github_prysmaticlabs_go_bitfield//:go_default_library", - "@com_github_prysmaticlabs_go_ssz//:go_default_library", "@com_github_sirupsen_logrus//:go_default_library", "@com_github_sirupsen_logrus//hooks/test:go_default_library", + "@in_gopkg_d4l3k_messagediff_v1//:go_default_library", ], ) diff --git a/validator/client/service.go b/validator/client/service.go index 275dcf995e..50d7b75d93 100644 --- a/validator/client/service.go +++ b/validator/client/service.go @@ -9,8 +9,14 @@ import ( grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + lru "github.com/hashicorp/golang-lru" "github.com/pkg/errors" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" + "github.com/prysmaticlabs/go-ssz" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" + "github.com/prysmaticlabs/prysm/shared/bls" + "github.com/prysmaticlabs/prysm/shared/bytesutil" + "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/validator/db" "github.com/prysmaticlabs/prysm/validator/keymanager" "github.com/sirupsen/logrus" @@ -160,18 +166,24 @@ func (v *ValidatorService) Start() { panic(err) } + aggregatedSlotCommitteeIDCache, err := lru.New(int(params.BeaconConfig().MaxCommitteesPerSlot)) + if err != nil { + log.Errorf("Could not initialize cache: %v", err) + return + } 
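Editor's note on the cluster-pk-manager and faucet changes above, which move deposit/funding amounts from numeric flags and float expressions to decimal strings parsed with big.Int.SetString: the new 32.5 ETH funding amount expressed in wei no longer fits in an int64, so `big.NewInt` (and an `Int64Flag`) can no longer carry it. A small illustration of the arithmetic, not code from the PR:

```go
package main

import (
	"fmt"
	"math"
	"math/big"
)

func main() {
	// 32.5 ETH = 32_500_000_000_000_000_000 wei (3.25e19), which exceeds
	// math.MaxInt64 (~9.22e18), hence the switch to string-parsed big.Int.
	amount, ok := new(big.Int).SetString("32500000000000000000", 10)
	if !ok {
		panic("invalid amount")
	}
	maxInt64 := big.NewInt(math.MaxInt64)
	fmt.Println(amount.Cmp(maxInt64) > 0) // true
}
```
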
v.validator = &validator{ - db: valDB, - validatorClient: ethpb.NewBeaconNodeValidatorClient(v.conn), - beaconClient: ethpb.NewBeaconChainClient(v.conn), - node: ethpb.NewNodeClient(v.conn), - keyManager: v.keyManager, - graffiti: v.graffiti, - logValidatorBalances: v.logValidatorBalances, - emitAccountMetrics: v.emitAccountMetrics, - prevBalance: make(map[[48]byte]uint64), - attLogs: make(map[[32]byte]*attSubmitted), - domainDataCache: cache, + db: valDB, + validatorClient: ethpb.NewBeaconNodeValidatorClient(v.conn), + beaconClient: ethpb.NewBeaconChainClient(v.conn), + node: ethpb.NewNodeClient(v.conn), + keyManager: v.keyManager, + graffiti: v.graffiti, + logValidatorBalances: v.logValidatorBalances, + emitAccountMetrics: v.emitAccountMetrics, + prevBalance: make(map[[48]byte]uint64), + attLogs: make(map[[32]byte]*attSubmitted), + domainDataCache: cache, + aggregatedSlotCommitteeIDCache: aggregatedSlotCommitteeIDCache, } go run(v.ctx, v.validator) } @@ -195,3 +207,20 @@ func (v *ValidatorService) Status() error { } return nil } + +// signObject signs a generic object, with protection if available. +func (v *validator) signObject(pubKey [48]byte, object interface{}, domain []byte) (*bls.Signature, error) { + if protectingKeymanager, supported := v.keyManager.(keymanager.ProtectingKeyManager); supported { + root, err := ssz.HashTreeRoot(object) + if err != nil { + return nil, err + } + return protectingKeymanager.SignGeneric(pubKey, root, bytesutil.ToBytes32(domain)) + } + + root, err := helpers.ComputeSigningRoot(object, domain) + if err != nil { + return nil, err + } + return v.keyManager.Sign(pubKey, root) +} diff --git a/validator/client/validator.go b/validator/client/validator.go index b27b32859f..064be101cd 100644 --- a/validator/client/validator.go +++ b/validator/client/validator.go @@ -15,6 +15,7 @@ import ( "github.com/dgraph-io/ristretto" "github.com/gogo/protobuf/proto" ptypes "github.com/gogo/protobuf/types" + lru "github.com/hashicorp/golang-lru" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -41,22 +42,24 @@ const ( ) type validator struct { - genesisTime uint64 - ticker *slotutil.SlotTicker - db *db.Store - duties *ethpb.DutiesResponse - validatorClient ethpb.BeaconNodeValidatorClient - beaconClient ethpb.BeaconChainClient - graffiti []byte - node ethpb.NodeClient - keyManager keymanager.KeyManager - prevBalance map[[48]byte]uint64 - logValidatorBalances bool - emitAccountMetrics bool - attLogs map[[32]byte]*attSubmitted - attLogsLock sync.Mutex - domainDataLock sync.Mutex - domainDataCache *ristretto.Cache + genesisTime uint64 + ticker *slotutil.SlotTicker + db *db.Store + duties *ethpb.DutiesResponse + validatorClient ethpb.BeaconNodeValidatorClient + beaconClient ethpb.BeaconChainClient + graffiti []byte + node ethpb.NodeClient + keyManager keymanager.KeyManager + prevBalance map[[48]byte]uint64 + logValidatorBalances bool + emitAccountMetrics bool + attLogs map[[32]byte]*attSubmitted + attLogsLock sync.Mutex + domainDataLock sync.Mutex + domainDataCache *ristretto.Cache + aggregatedSlotCommitteeIDCache *lru.Cache + aggregatedSlotCommitteeIDCacheLock sync.Mutex } var validatorStatusesGaugeVec = promauto.NewGaugeVec( @@ -286,6 +289,10 @@ func (v *validator) UpdateDuties(ctx context.Context, slot uint64) error { } v.duties = resp + subscribeSlots := make([]uint64, 0, len(validatingKeys)) + subscribeCommitteeIDs := make([]uint64, 0, len(validatingKeys)) + subscribeIsAggregator := 
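The signObject helper above branches on whether the configured key manager also implements the protecting (slashing-aware) interface, and falls back to a plain signature otherwise. A minimal sketch of that optional-capability pattern; BasicSigner and ProtectedSigner are hypothetical stand-ins, not the real keymanager types:

package main

import "fmt"

// BasicSigner and ProtectedSigner are hypothetical stand-ins for the real
// keymanager interfaces; only the type-assertion pattern is the point here.
type BasicSigner interface {
	Sign(root [32]byte) (string, error)
}

type ProtectedSigner interface {
	SignGeneric(root [32]byte, domain [32]byte) (string, error)
}

type plainKM struct{}

func (plainKM) Sign(root [32]byte) (string, error) {
	return fmt.Sprintf("sig(%x)", root[:4]), nil
}

// signObject prefers the protected path when the key manager supports it.
// The real helper derives the root differently on each path; only the
// capability check is shown here.
func signObject(km BasicSigner, root [32]byte, domain [32]byte) (string, error) {
	if protected, ok := km.(ProtectedSigner); ok {
		return protected.SignGeneric(root, domain)
	}
	return km.Sign(root)
}

func main() {
	sig, err := signObject(plainKM{}, [32]byte{1}, [32]byte{2})
	if err != nil {
		panic(err)
	}
	fmt.Println(sig)
}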
make([]bool, 0, len(validatingKeys)) + alreadySubscribed := make(map[[64]byte]bool) for _, duty := range v.duties.Duties { lFields := logrus.Fields{ @@ -302,16 +309,71 @@ func (v *validator) UpdateDuties(ctx context.Context, slot uint64) error { } if duty.Status == ethpb.ValidatorStatus_ACTIVE { - if duty.ProposerSlot > 0 { - lFields["proposerSlot"] = duty.ProposerSlot - } - lFields["attesterSlot"] = duty.AttesterSlot - } + attesterSlot := duty.AttesterSlot + committeeIndex := duty.CommitteeIndex - log.WithFields(lFields).Info("New assignment") + if len(duty.ProposerSlots) > 0 { + lFields["proposerSlots"] = duty.ProposerSlots + } + lFields["attesterSlot"] = attesterSlot + + alreadySubscribedKey := validatorSubscribeKey(attesterSlot, committeeIndex) + if _, ok := alreadySubscribed[alreadySubscribedKey]; ok { + continue + } + + aggregator, err := v.isAggregator(ctx, duty.Committee, attesterSlot, bytesutil.ToBytes48(duty.PublicKey)) + if err != nil { + return errors.Wrap(err, "could not check if a validator is an aggregator") + } + if aggregator { + alreadySubscribed[alreadySubscribedKey] = true + } + subscribeSlots = append(subscribeSlots, attesterSlot) + subscribeCommitteeIDs = append(subscribeCommitteeIDs, committeeIndex) + subscribeIsAggregator = append(subscribeIsAggregator, aggregator) + log.WithFields(lFields).Info("New assignment") + } } - return nil + // Notify beacon node to subscribe to the attester and aggregator subnets for the next epoch. + req.Epoch++ + dutiesNextEpoch, err := v.validatorClient.GetDuties(ctx, req) + if err != nil { + log.Error(err) + return err + } + for _, duty := range dutiesNextEpoch.Duties { + if duty.Status == ethpb.ValidatorStatus_ACTIVE { + attesterSlot := duty.AttesterSlot + committeeIndex := duty.CommitteeIndex + + alreadySubscribedKey := validatorSubscribeKey(attesterSlot, committeeIndex) + if _, ok := alreadySubscribed[alreadySubscribedKey]; ok { + continue + } + + aggregator, err := v.isAggregator(ctx, duty.Committee, attesterSlot, bytesutil.ToBytes48(duty.PublicKey)) + if err != nil { + return errors.Wrap(err, "could not check if a validator is an aggregator") + } + if aggregator { + alreadySubscribed[alreadySubscribedKey] = true + } + + subscribeSlots = append(subscribeSlots, attesterSlot) + subscribeCommitteeIDs = append(subscribeCommitteeIDs, committeeIndex) + subscribeIsAggregator = append(subscribeIsAggregator, aggregator) + } + } + + _, err = v.validatorClient.SubscribeCommitteeSubnets(ctx, ðpb.CommitteeSubnetsSubscribeRequest{ + Slots: subscribeSlots, + CommitteeIds: subscribeCommitteeIDs, + IsAggregator: subscribeIsAggregator, + }) + + return err } // RolesAt slot returns the validator roles at the given slot. Returns nil if the @@ -325,8 +387,13 @@ func (v *validator) RolesAt(ctx context.Context, slot uint64) (map[[48]byte][]va if duty == nil { continue } - if duty.ProposerSlot > 0 && duty.ProposerSlot == slot { - roles = append(roles, roleProposer) + if len(duty.ProposerSlots) > 0 { + for _, proposerSlot := range duty.ProposerSlots { + if proposerSlot != 0 && proposerSlot == slot { + roles = append(roles, roleProposer) + break + } + } } if duty.AttesterSlot == slot { roles = append(roles, roleAttester) @@ -418,3 +485,9 @@ func (v *validator) domainData(ctx context.Context, epoch uint64, domain []byte) return res, nil } + +// This constructs a validator subscribed key, it's used to track +// which subnet has already been pending requested. 
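UpdateDuties above gathers attester slots, committee indices, and aggregator flags into three parallel slices for this epoch and the next, skips (slot, committee) pairs it has already queued, and sends one batched SubscribeCommitteeSubnets request. A simplified sketch of that bookkeeping; subnetRequest is a local stand-in for the gRPC request type, and the 16-byte map key is a shortcut for the repo's 64-byte validatorSubscribeKey:

package main

import (
	"encoding/binary"
	"fmt"
)

// subnetRequest is a local stand-in for the CommitteeSubnetsSubscribeRequest message.
type subnetRequest struct {
	Slots        []uint64
	CommitteeIds []uint64
	IsAggregator []bool
}

// subKey packs (slot, committeeID) into a fixed-size, comparable map key.
func subKey(slot, committeeID uint64) [16]byte {
	var k [16]byte
	binary.LittleEndian.PutUint64(k[:8], slot)
	binary.LittleEndian.PutUint64(k[8:], committeeID)
	return k
}

func main() {
	duties := []struct {
		slot, committee uint64
		aggregator      bool
	}{{8, 3, true}, {8, 3, true}, {9, 1, false}}

	req := subnetRequest{}
	alreadySubscribed := make(map[[16]byte]bool)
	for _, d := range duties {
		k := subKey(d.slot, d.committee)
		if alreadySubscribed[k] {
			continue // already queued as an aggregator for this slot/committee
		}
		if d.aggregator {
			alreadySubscribed[k] = true
		}
		req.Slots = append(req.Slots, d.slot)
		req.CommitteeIds = append(req.CommitteeIds, d.committee)
		req.IsAggregator = append(req.IsAggregator, d.aggregator)
	}
	fmt.Printf("%+v\n", req) // one batched request, duplicate pair suppressed
}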
+func validatorSubscribeKey(slot uint64, committeeID uint64) [64]byte { + return bytesutil.ToBytes64(append(bytesutil.Bytes32(slot), bytesutil.Bytes32(committeeID)...)) +} diff --git a/validator/client/validator_aggregate.go b/validator/client/validator_aggregate.go index db364eb07a..c83140b995 100644 --- a/validator/client/validator_aggregate.go +++ b/validator/client/validator_aggregate.go @@ -9,7 +9,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-ssz" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/roughtime" @@ -60,6 +59,15 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot uint64, pu return } + // Avoid sending beacon node duplicated aggregation requests. + k := validatorSubscribeKey(slot, duty.CommitteeIndex) + v.aggregatedSlotCommitteeIDCacheLock.Lock() + defer v.aggregatedSlotCommitteeIDCacheLock.Unlock() + if v.aggregatedSlotCommitteeIDCache.Contains(k) { + return + } + v.aggregatedSlotCommitteeIDCache.Add(k, true) + slotSig, err := v.signSlot(ctx, pubKey, slot) if err != nil { log.Errorf("Could not sign slot: %v", err) @@ -74,14 +82,39 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot uint64, pu // https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/validator/0_beacon-chain-validator.md#broadcast-aggregate v.waitToSlotTwoThirds(ctx, slot) - _, err = v.validatorClient.SubmitAggregateAndProof(ctx, ðpb.AggregationRequest{ + res, err := v.validatorClient.SubmitAggregateSelectionProof(ctx, ðpb.AggregateSelectionRequest{ Slot: slot, CommitteeIndex: duty.CommitteeIndex, PublicKey: pubKey[:], SlotSignature: slotSig, }) if err != nil { - log.Errorf("Could not submit slot signature to beacon node: %v", err) + log.WithField("slot", slot).Errorf("Could not submit slot signature to beacon node: %v", err) + if v.emitAccountMetrics { + validatorAggFailVec.WithLabelValues(fmtKey).Inc() + } + return + } + + d, err := v.domainData(ctx, helpers.SlotToEpoch(res.AggregateAndProof.Aggregate.Data.Slot), params.BeaconConfig().DomainAggregateAndProof[:]) + if err != nil { + log.Errorf("Could not get domain data to sign aggregate and proof: %v", err) + return + } + signedRoot, err := helpers.ComputeSigningRoot(res.AggregateAndProof, d.SignatureDomain) + if err != nil { + log.Errorf("Could not compute sign root for aggregate and proof: %v", err) + return + } + + _, err = v.validatorClient.SubmitSignedAggregateSelectionProof(ctx, ðpb.SignedAggregateSubmitRequest{ + SignedAggregateAndProof: ðpb.SignedAggregateAttestationAndProof{ + Message: res.AggregateAndProof, + Signature: signedRoot[:], + }, + }) + if err != nil { + log.Errorf("Could not submit signed aggregate and proof to beacon node: %v", err) if v.emitAccountMetrics { validatorAggFailVec.WithLabelValues(fmtKey).Inc() } @@ -104,17 +137,12 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot uint64, pu // This implements selection logic outlined in: // https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/validator/0_beacon-chain-validator.md#aggregation-selection func (v *validator) signSlot(ctx context.Context, pubKey [48]byte, slot uint64) ([]byte, error) { - domain, err := v.domainData(ctx, helpers.SlotToEpoch(slot), params.BeaconConfig().DomainBeaconAttester[:]) + domain, err := v.domainData(ctx, 
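SubmitAggregateAndProof above suppresses duplicate submissions for the same slot and committee by consulting an LRU cache under a mutex; the cache itself is created in the validator service, sized to MaxCommitteesPerSlot. A small self-contained sketch of that guard with github.com/hashicorp/golang-lru, with an assumed cache size and a simplified key:

package main

import (
	"fmt"
	"sync"

	lru "github.com/hashicorp/golang-lru"
)

type aggregateGuard struct {
	mu    sync.Mutex
	cache *lru.Cache
}

// shouldSubmit returns true the first time a (slot, committee) pair is seen
// and false on duplicates, mirroring the Contains/Add check above.
func (g *aggregateGuard) shouldSubmit(slot, committeeIndex uint64) bool {
	key := [2]uint64{slot, committeeIndex}
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.cache.Contains(key) {
		return false
	}
	g.cache.Add(key, true)
	return true
}

func main() {
	cache, err := lru.New(64) // assumed size; the service uses MaxCommitteesPerSlot
	if err != nil {
		panic(err)
	}
	g := &aggregateGuard{cache: cache}
	fmt.Println(g.shouldSubmit(5, 2)) // true
	fmt.Println(g.shouldSubmit(5, 2)) // false, duplicate suppressed
}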
helpers.SlotToEpoch(slot), params.BeaconConfig().DomainSelectionProof[:]) if err != nil { return nil, err } - slotRoot, err := ssz.HashTreeRoot(slot) - if err != nil { - return nil, err - } - - sig, err := v.keyManager.Sign(pubKey, slotRoot, domain.SignatureDomain) + sig, err := v.signObject(pubKey, slot, domain.SignatureDomain) if err != nil { return nil, errors.Wrap(err, "Failed to sign slot") } diff --git a/validator/client/validator_aggregate_test.go b/validator/client/validator_aggregate_test.go index f370eaa851..9a33eb3e98 100644 --- a/validator/client/validator_aggregate_test.go +++ b/validator/client/validator_aggregate_test.go @@ -3,11 +3,11 @@ package client import ( "context" "testing" - "time" "github.com/golang/mock/gomock" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/prysm/shared/params" + "github.com/prysmaticlabs/prysm/shared/roughtime" "github.com/prysmaticlabs/prysm/shared/testutil" logTest "github.com/sirupsen/logrus/hooks/test" ) @@ -39,10 +39,26 @@ func TestSubmitAggregateAndProof_Ok(t *testing.T) { gomock.Any(), // epoch ).Return(ðpb.DomainResponse{}, nil /*err*/) - m.validatorClient.EXPECT().SubmitAggregateAndProof( + m.validatorClient.EXPECT().SubmitAggregateSelectionProof( gomock.Any(), // ctx - gomock.AssignableToTypeOf(ðpb.AggregationRequest{}), - ).Return(ðpb.AggregationResponse{}, nil) + gomock.AssignableToTypeOf(ðpb.AggregateSelectionRequest{}), + ).Return(ðpb.AggregateSelectionResponse{ + AggregateAndProof: ðpb.AggregateAttestationAndProof{ + AggregatorIndex: 0, + Aggregate: ðpb.Attestation{Data: ðpb.AttestationData{}}, + SelectionProof: nil, + }, + }, nil) + + m.validatorClient.EXPECT().DomainData( + gomock.Any(), // ctx + gomock.Any(), // epoch + ).Return(ðpb.DomainResponse{}, nil /*err*/) + + m.validatorClient.EXPECT().SubmitSignedAggregateSelectionProof( + gomock.Any(), // ctx + gomock.AssignableToTypeOf(ðpb.SignedAggregateSubmitRequest{}), + ).Return(ðpb.SignedAggregateSubmitResponse{}, nil) validator.SubmitAggregateAndProof(context.Background(), 0, validatorPubKey) } @@ -50,14 +66,14 @@ func TestSubmitAggregateAndProof_Ok(t *testing.T) { func TestWaitForSlotTwoThird_WaitCorrectly(t *testing.T) { validator, _, finish := setup(t) defer finish() - currentTime := uint64(time.Now().Unix()) + currentTime := uint64(roughtime.Now().Unix()) numOfSlots := uint64(4) validator.genesisTime = currentTime - (numOfSlots * params.BeaconConfig().SecondsPerSlot) timeToSleep := params.BeaconConfig().SecondsPerSlot * 2 / 3 twoThirdTime := currentTime + timeToSleep validator.waitToSlotTwoThirds(context.Background(), numOfSlots) - currentTime = uint64(time.Now().Unix()) + currentTime = uint64(roughtime.Now().Unix()) if currentTime != twoThirdTime { t.Errorf("Wanted %d time for slot two third but got %d", twoThirdTime, currentTime) } diff --git a/validator/client/validator_attest.go b/validator/client/validator_attest.go index 96f8933634..e5046a4b51 100644 --- a/validator/client/validator_attest.go +++ b/validator/client/validator_attest.go @@ -5,18 +5,21 @@ import ( "context" "errors" "fmt" + "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-bitfield" - "github.com/prysmaticlabs/go-ssz" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" slashpb "github.com/prysmaticlabs/prysm/proto/slashing" "github.com/prysmaticlabs/prysm/shared/bls" 
"github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/featureconfig" "github.com/prysmaticlabs/prysm/shared/hashutil" "github.com/prysmaticlabs/prysm/shared/params" + "github.com/prysmaticlabs/prysm/shared/roughtime" + "github.com/prysmaticlabs/prysm/shared/slotutil" "github.com/prysmaticlabs/prysm/validator/keymanager" "github.com/sirupsen/logrus" "go.opencensus.io/trace" @@ -65,6 +68,8 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [ return } + v.waitToSlotOneThird(ctx, slot) + req := ðpb.AttestationDataRequest{ Slot: slot, CommitteeIndex: duty.CommitteeIndex, @@ -78,8 +83,9 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [ return } + var history *slashpb.AttestationHistory if featureconfig.Get().ProtectAttester { - history, err := v.db.AttestationHistory(ctx, pubKey[:]) + history, err = v.db.AttestationHistory(ctx, pubKey[:]) if err != nil { log.Errorf("Could not get attestation history from DB: %v", err) if v.emitAccountMetrics { @@ -143,14 +149,6 @@ func (v *validator) SubmitAttestation(ctx context.Context, slot uint64, pubKey [ } if featureconfig.Get().ProtectAttester { - history, err := v.db.AttestationHistory(ctx, pubKey[:]) - if err != nil { - log.Errorf("Could not get attestation history from DB: %v", err) - if v.emitAccountMetrics { - validatorAttestFailVec.WithLabelValues(fmtKey).Inc() - } - return - } history = markAttestationForTargetEpoch(history, data.Source.Epoch, data.Target.Epoch) if err := v.db.SaveAttestationHistory(ctx, pubKey[:], history); err != nil { log.Errorf("Could not save attestation history to DB: %v", err) @@ -206,16 +204,16 @@ func (v *validator) signAtt(ctx context.Context, pubKey [48]byte, data *ethpb.At return nil, err } - root, err := ssz.HashTreeRoot(data) + root, err := helpers.ComputeSigningRoot(data, domain.SignatureDomain) if err != nil { return nil, err } var sig *bls.Signature if protectingKeymanager, supported := v.keyManager.(keymanager.ProtectingKeyManager); supported { - sig, err = protectingKeymanager.SignAttestation(pubKey, domain.SignatureDomain, data) + sig, err = protectingKeymanager.SignAttestation(pubKey, bytesutil.ToBytes32(domain.SignatureDomain), data) } else { - sig, err = v.keyManager.Sign(pubKey, root, domain.SignatureDomain) + sig, err = v.keyManager.Sign(pubKey, root) } if err != nil { return nil, err @@ -307,3 +305,17 @@ func safeTargetToSource(history *slashpb.AttestationHistory, targetEpoch uint64) } return history.TargetToSource[targetEpoch%wsPeriod] } + +// waitToSlotOneThird waits until one third through the current slot period +// such that head block for beacon node can get updated. 
+func (v *validator) waitToSlotOneThird(ctx context.Context, slot uint64) { + _, span := trace.StartSpan(ctx, "validator.waitToSlotOneThird") + defer span.End() + + twoThird := params.BeaconConfig().SecondsPerSlot * 1 / 3 + delay := time.Duration(twoThird) * time.Second + + startTime := slotutil.SlotStartTime(v.genesisTime, slot) + finalTime := startTime.Add(delay) + time.Sleep(roughtime.Until(finalTime)) +} diff --git a/validator/client/validator_attest_test.go b/validator/client/validator_attest_test.go index c2596aba45..c6cbfbe846 100644 --- a/validator/client/validator_attest_test.go +++ b/validator/client/validator_attest_test.go @@ -11,13 +11,14 @@ import ( "github.com/golang/mock/gomock" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" "github.com/prysmaticlabs/go-bitfield" - "github.com/prysmaticlabs/go-ssz" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" slashpb "github.com/prysmaticlabs/prysm/proto/slashing" "github.com/prysmaticlabs/prysm/shared/featureconfig" "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/roughtime" "github.com/prysmaticlabs/prysm/shared/testutil" logTest "github.com/sirupsen/logrus/hooks/test" + "gopkg.in/d4l3k/messagediff.v1" ) func TestRequestAttestation_ValidatorDutiesRequestFailure(t *testing.T) { @@ -87,7 +88,7 @@ func TestAttestToBlockHead_AttestsCorrectly(t *testing.T) { m.validatorClient.EXPECT().DomainData( gomock.Any(), // ctx gomock.Any(), // epoch - ).Return(ðpb.DomainResponse{}, nil /*err*/) + ).Return(ðpb.DomainResponse{SignatureDomain: []byte{}}, nil /*err*/) var generatedAttestation *ethpb.Attestation m.validatorClient.EXPECT().ProposeAttestation( @@ -110,18 +111,20 @@ func TestAttestToBlockHead_AttestsCorrectly(t *testing.T) { AggregationBits: aggregationBitfield, } - root, err := ssz.HashTreeRoot(expectedAttestation.Data) + root, err := helpers.ComputeSigningRoot(expectedAttestation.Data, []byte{}) if err != nil { t.Fatal(err) } - sig, err := validator.keyManager.Sign(validatorPubKey, root, 0) + sig, err := validator.keyManager.Sign(validatorPubKey, root) if err != nil { t.Fatal(err) } expectedAttestation.Signature = sig.Marshal() if !reflect.DeepEqual(generatedAttestation, expectedAttestation) { t.Errorf("Incorrectly attested head, wanted %v, received %v", expectedAttestation, generatedAttestation) + diff, _ := messagediff.PrettyDiff(expectedAttestation, generatedAttestation) + t.Log(diff) } } diff --git a/validator/client/validator_propose.go b/validator/client/validator_propose.go index 218af5aa9f..336f7c903f 100644 --- a/validator/client/validator_propose.go +++ b/validator/client/validator_propose.go @@ -3,15 +3,15 @@ package client // Validator client proposer functions. 
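waitToSlotOneThird above sleeps until a third of the way into the requested slot so the beacon node has had time to process the new head before the attestation data request. A simplified sketch of the timing math using only the standard time package and an assumed 12-second slot; the real code goes through the shared slotutil and roughtime helpers:

package main

import (
	"fmt"
	"time"
)

// oneThirdMark returns the wall-clock time one third of the way into the
// given slot. secondsPerSlot = 12 is an assumed mainnet value.
func oneThirdMark(genesis time.Time, slot uint64, secondsPerSlot uint64) time.Time {
	slotStart := genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
	return slotStart.Add(time.Duration(secondsPerSlot/3) * time.Second)
}

func main() {
	genesis := time.Now() // pretend genesis is right now
	target := oneThirdMark(genesis, 2, 12)
	// Slot 2 starts 24s after genesis, so the one-third mark is at +28s.
	// The real code then sleeps until this instant before requesting data.
	fmt.Println("would wait", time.Until(target).Round(time.Second))
}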
import ( "context" - "encoding/binary" "fmt" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" + "github.com/prysmaticlabs/go-bitfield" "github.com/prysmaticlabs/go-ssz" - slashpb "github.com/prysmaticlabs/prysm/proto/slashing" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/shared/bls" "github.com/prysmaticlabs/prysm/shared/bytesutil" "github.com/prysmaticlabs/prysm/shared/featureconfig" @@ -79,15 +79,16 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by Graffiti: v.graffiti, }) if err != nil { - log.WithError(err).Error("Failed to request block from beacon node") + log.WithField("blockSlot", slot).WithError(err).Error("Failed to request block from beacon node") if v.emitAccountMetrics { validatorProposeFailVec.WithLabelValues(fmtKey).Inc() } return } + var slotBits bitfield.Bitlist if featureconfig.Get().ProtectProposer { - history, err := v.db.ProposalHistory(ctx, pubKey[:]) + slotBits, err = v.db.ProposalHistoryForEpoch(ctx, pubKey[:], epoch) if err != nil { log.WithError(err).Error("Failed to get proposal history") if v.emitAccountMetrics { @@ -96,7 +97,8 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by return } - if HasProposedForEpoch(history, epoch) { + // If the bit for the current slot is marked, do not propose. + if slotBits.BitAt(slot % params.BeaconConfig().SlotsPerEpoch) { log.WithField("epoch", epoch).Error("Tried to sign a double proposal, rejected") if v.emitAccountMetrics { validatorProposeFailVec.WithLabelValues(fmtKey).Inc() @@ -130,16 +132,8 @@ func (v *validator) ProposeBlock(ctx context.Context, slot uint64, pubKey [48]by } if featureconfig.Get().ProtectProposer { - history, err := v.db.ProposalHistory(ctx, pubKey[:]) - if err != nil { - log.WithError(err).Error("Failed to get proposal history") - if v.emitAccountMetrics { - validatorProposeFailVec.WithLabelValues(fmtKey).Inc() - } - return - } - history = SetProposedForEpoch(history, epoch) - if err := v.db.SaveProposalHistory(ctx, pubKey[:], history); err != nil { + slotBits.SetBitAt(slot%params.BeaconConfig().SlotsPerEpoch, true) + if err := v.db.SaveProposalHistoryForEpoch(ctx, pubKey[:], epoch, slotBits); err != nil { log.WithError(err).Error("Failed to save updated proposal history") if v.emitAccountMetrics { validatorProposeFailVec.WithLabelValues(fmtKey).Inc() @@ -175,13 +169,11 @@ func (v *validator) ProposeExit(ctx context.Context, exit *ethpb.VoluntaryExit) // Sign randao reveal with randao domain and private key. 
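With ProtectProposer enabled, ProposeBlock above now keeps one bitlist per epoch: it refuses to sign when the bit at slot % SlotsPerEpoch is already set, and marks that bit after a successful proposal. A short sketch of the check with github.com/prysmaticlabs/go-bitfield, assuming 32 slots per epoch:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

const slotsPerEpoch = 32 // assumed mainnet value

func main() {
	slotBits := bitfield.NewBitlist(slotsPerEpoch) // fresh history for one epoch

	slot := uint64(70) // an arbitrary slot inside epoch 2
	if slotBits.BitAt(slot % slotsPerEpoch) {
		fmt.Println("double proposal rejected")
		return
	}
	// ... sign and broadcast the block here ...
	slotBits.SetBitAt(slot%slotsPerEpoch, true) // remember this slot was proposed

	fmt.Println("second attempt would be rejected:", slotBits.BitAt(slot%slotsPerEpoch)) // true
}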
func (v *validator) signRandaoReveal(ctx context.Context, pubKey [48]byte, epoch uint64) ([]byte, error) { domain, err := v.domainData(ctx, epoch, params.BeaconConfig().DomainRandao[:]) - if err != nil { return nil, errors.Wrap(err, "could not get domain data") } - var buf [32]byte - binary.LittleEndian.PutUint64(buf[:], epoch) - randaoReveal, err := v.keyManager.Sign(pubKey, buf, domain.SignatureDomain) + + randaoReveal, err := v.signObject(pubKey, epoch, domain.SignatureDomain) if err != nil { return nil, errors.Wrap(err, "could not sign reveal") } @@ -206,61 +198,19 @@ func (v *validator) signBlock(ctx context.Context, pubKey [48]byte, epoch uint64 ParentRoot: b.ParentRoot, BodyRoot: bodyRoot[:], } - sig, err = protectingKeymanager.SignProposal(pubKey, domain.SignatureDomain, blockHeader) + sig, err = protectingKeymanager.SignProposal(pubKey, bytesutil.ToBytes32(domain.SignatureDomain), blockHeader) if err != nil { return nil, errors.Wrap(err, "could not sign block proposal") } } else { - blockRoot, err := ssz.HashTreeRoot(b) + blockRoot, err := helpers.ComputeSigningRoot(b, domain.SignatureDomain) if err != nil { return nil, errors.Wrap(err, "could not get signing root") } - sig, err = v.keyManager.Sign(pubKey, blockRoot, domain.SignatureDomain) + sig, err = v.keyManager.Sign(pubKey, blockRoot) if err != nil { return nil, errors.Wrap(err, "could not sign block proposal") } } return sig.Marshal(), nil } - -// HasProposedForEpoch returns whether a validators proposal history has been marked for the entered epoch. -// If the request is more in the future than what the history contains, it will return false. -// If the request is from the past, and likely previously pruned it will return false. -func HasProposedForEpoch(history *slashpb.ProposalHistory, epoch uint64) bool { - wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod - // Previously pruned, we should return false. - if int(epoch) <= int(history.LatestEpochWritten)-int(wsPeriod) { - return false - } - // Accessing future proposals that haven't been marked yet. Needs to return false. - if epoch > history.LatestEpochWritten { - return false - } - return history.EpochBits.BitAt(epoch % wsPeriod) -} - -// SetProposedForEpoch updates the proposal history to mark the indicated epoch in the bitlist -// and updates the last epoch written if needed. -// Returns the modified proposal history. -func SetProposedForEpoch(history *slashpb.ProposalHistory, epoch uint64) *slashpb.ProposalHistory { - wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod - - if epoch > history.LatestEpochWritten { - // If the history is empty, just update the latest written and mark the epoch. - // This is for the first run of a validator. - if history.EpochBits.Count() < 1 { - history.LatestEpochWritten = epoch - history.EpochBits.SetBitAt(epoch%wsPeriod, true) - return history - } - // If the epoch to mark is ahead of latest written epoch, override the old votes and mark the requested epoch. - // Limit the overwriting to one weak subjectivity period as further is not needed. 
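The repeated replacement of ssz.HashTreeRoot plus a separate domain argument with helpers.ComputeSigningRoot(object, domain) follows the v0.11.1 spec, where the signed message is the hash tree root of a small container wrapping the object root and the domain. A rough sketch of that construction with go-ssz; signingData is a local stand-in and the 32-byte domain width is an assumption taken from the spec, not the repo's exact types:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-ssz"
)

// signingData mirrors the spec's SigningData container: the object root
// combined with the signature domain, hashed together.
type signingData struct {
	ObjectRoot [32]byte
	Domain     [32]byte
}

func computeSigningRoot(object interface{}, domain [32]byte) ([32]byte, error) {
	objRoot, err := ssz.HashTreeRoot(object)
	if err != nil {
		return [32]byte{}, err
	}
	return ssz.HashTreeRoot(signingData{ObjectRoot: objRoot, Domain: domain})
}

func main() {
	slot := uint64(42)
	root, err := computeSigningRoot(slot, [32]byte{0x07}) // e.g. a selection-proof style message
	if err != nil {
		panic(err)
	}
	fmt.Printf("signing root: %x\n", root)
}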
- maxToWrite := history.LatestEpochWritten + wsPeriod - for i := history.LatestEpochWritten + 1; i < epoch && i <= maxToWrite; i++ { - history.EpochBits.SetBitAt(i%wsPeriod, false) - } - history.LatestEpochWritten = epoch - } - history.EpochBits.SetBitAt(epoch%wsPeriod, true) - return history -} diff --git a/validator/client/validator_propose_test.go b/validator/client/validator_propose_test.go index 046d5580a5..7e04200d44 100644 --- a/validator/client/validator_propose_test.go +++ b/validator/client/validator_propose_test.go @@ -6,9 +6,8 @@ import ( "testing" "github.com/golang/mock/gomock" + lru "github.com/hashicorp/golang-lru" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" - "github.com/prysmaticlabs/go-bitfield" - slashpb "github.com/prysmaticlabs/prysm/proto/slashing" "github.com/prysmaticlabs/prysm/shared/featureconfig" "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/shared/testutil" @@ -27,12 +26,19 @@ func setup(t *testing.T) (*validator, *mocks, func()) { m := &mocks{ validatorClient: internal.NewMockBeaconNodeValidatorClient(ctrl), } + + aggregatedSlotCommitteeIDCache, err := lru.New(int(params.BeaconConfig().MaxCommitteesPerSlot)) + if err != nil { + t.Fatal(err) + } + validator := &validator{ - db: valDB, - validatorClient: m.validatorClient, - keyManager: testKeyManager, - graffiti: []byte{}, - attLogs: make(map[[32]byte]*attSubmitted), + db: valDB, + validatorClient: m.validatorClient, + keyManager: testKeyManager, + graffiti: []byte{}, + attLogs: make(map[[32]byte]*attSubmitted), + aggregatedSlotCommitteeIDCache: aggregatedSlotCommitteeIDCache, } return validator, m, ctrl.Finish @@ -223,6 +229,45 @@ func TestProposeBlock_AllowsPastProposals(t *testing.T) { testutil.AssertLogsDoNotContain(t, hook, "Tried to sign a double proposal") } +func TestProposeBlock_AllowsSameEpoch(t *testing.T) { + cfg := &featureconfig.Flags{ + ProtectProposer: true, + } + featureconfig.Init(cfg) + hook := logTest.NewGlobal() + validator, m, finish := setup(t) + defer finish() + defer db.TeardownDB(t, validator.db) + + m.validatorClient.EXPECT().DomainData( + gomock.Any(), // ctx + gomock.Any(), //epoch + ).Times(2).Return(ðpb.DomainResponse{}, nil /*err*/) + + m.validatorClient.EXPECT().GetBlock( + gomock.Any(), // ctx + gomock.Any(), + ).Times(2).Return(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}, nil /*err*/) + + m.validatorClient.EXPECT().DomainData( + gomock.Any(), // ctx + gomock.Any(), //epoch + ).Times(2).Return(ðpb.DomainResponse{}, nil /*err*/) + + m.validatorClient.EXPECT().ProposeBlock( + gomock.Any(), // ctx + gomock.AssignableToTypeOf(ðpb.SignedBeaconBlock{}), + ).Times(2).Return(ðpb.ProposeResponse{}, nil /*error*/) + + pubKey := validatorPubKey + farAhead := (params.BeaconConfig().WeakSubjectivityPeriod + 9) * params.BeaconConfig().SlotsPerEpoch + validator.ProposeBlock(context.Background(), farAhead, pubKey) + testutil.AssertLogsDoNotContain(t, hook, "Tried to sign a double proposal") + + validator.ProposeBlock(context.Background(), farAhead-4, pubKey) + testutil.AssertLogsDoNotContain(t, hook, "Tried to sign a double proposal") +} + func TestProposeBlock_BroadcastsBlock(t *testing.T) { validator, m, finish := setup(t) defer finish() @@ -287,118 +332,3 @@ func TestProposeBlock_BroadcastsBlock_WithGraffiti(t *testing.T) { t.Errorf("Block was broadcast with the wrong graffiti field, wanted \"%v\", got \"%v\"", string(validator.graffiti), string(sentBlock.Block.Body.Graffiti)) } } - -func TestSetProposedForEpoch_SetsBit(t *testing.T) { - 
wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod - proposals := &slashpb.ProposalHistory{ - EpochBits: bitfield.NewBitlist(wsPeriod), - LatestEpochWritten: 0, - } - epoch := uint64(4) - proposals = SetProposedForEpoch(proposals, epoch) - proposed := HasProposedForEpoch(proposals, epoch) - if !proposed { - t.Fatal("Expected epoch 4 to be marked as proposed") - } - // Make sure no other bits are changed. - for i := uint64(1); i <= wsPeriod; i++ { - if i == epoch { - continue - } - if HasProposedForEpoch(proposals, i) { - t.Fatalf("Expected epoch %d to not be marked as proposed", i) - } - } -} - -func TestSetProposedForEpoch_PrunesOverWSPeriod(t *testing.T) { - wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod - proposals := &slashpb.ProposalHistory{ - EpochBits: bitfield.NewBitlist(wsPeriod), - LatestEpochWritten: 0, - } - prunedEpoch := uint64(3) - proposals = SetProposedForEpoch(proposals, prunedEpoch) - - if proposals.LatestEpochWritten != prunedEpoch { - t.Fatalf("Expected latest epoch written to be %d, received %d", prunedEpoch, proposals.LatestEpochWritten) - } - - epoch := wsPeriod + 4 - proposals = SetProposedForEpoch(proposals, epoch) - if !HasProposedForEpoch(proposals, epoch) { - t.Fatalf("Expected to be marked as proposed for epoch %d", epoch) - } - if proposals.LatestEpochWritten != epoch { - t.Fatalf("Expected latest written epoch to be %d, received %d", epoch, proposals.LatestEpochWritten) - } - - if HasProposedForEpoch(proposals, epoch-wsPeriod+prunedEpoch) { - t.Fatalf("Expected the bit of pruned epoch %d to not be marked as proposed", epoch) - } - // Make sure no other bits are changed. - for i := epoch - wsPeriod + 1; i <= epoch; i++ { - if i == epoch { - continue - } - if HasProposedForEpoch(proposals, i) { - t.Fatalf("Expected epoch %d to not be marked as proposed", i) - } - } -} - -func TestSetProposedForEpoch_KeepsHistory(t *testing.T) { - wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod - proposals := &slashpb.ProposalHistory{ - EpochBits: bitfield.NewBitlist(wsPeriod), - LatestEpochWritten: 0, - } - randomIndexes := []uint64{23, 423, 8900, 11347, 25033, 52225, 53999} - for i := 0; i < len(randomIndexes); i++ { - proposals = SetProposedForEpoch(proposals, randomIndexes[i]) - } - if proposals.LatestEpochWritten != 53999 { - t.Fatalf("Expected latest epoch written to be %d, received %d", 53999, proposals.LatestEpochWritten) - } - - // Make sure no other bits are changed. - for i := uint64(0); i < wsPeriod; i++ { - setIndex := false - for r := 0; r < len(randomIndexes); r++ { - if i == randomIndexes[r] { - setIndex = true - break - } - } - - if setIndex != HasProposedForEpoch(proposals, i) { - t.Fatalf("Expected epoch %d to be marked as %t", i, setIndex) - } - } - - // Set a past epoch as proposed, and make sure the recent data isn't changed. - proposals = SetProposedForEpoch(proposals, randomIndexes[1]+5) - if proposals.LatestEpochWritten != 53999 { - t.Fatalf("Expected last epoch written to not change after writing a past epoch, received %d", proposals.LatestEpochWritten) - } - // Proposal just marked should be true. - if !HasProposedForEpoch(proposals, randomIndexes[1]+5) { - t.Fatal("Expected marked past epoch to be true, received false") - } - // Previously marked proposal should stay true. 
- if !HasProposedForEpoch(proposals, randomIndexes[1]) { - t.Fatal("Expected marked past epoch to be true, received false") - } -} - -func TestSetProposedForEpoch_PreventsProposingFutureEpochs(t *testing.T) { - wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod - proposals := &slashpb.ProposalHistory{ - EpochBits: bitfield.NewBitlist(wsPeriod), - LatestEpochWritten: 0, - } - proposals = SetProposedForEpoch(proposals, 200) - if HasProposedForEpoch(proposals, wsPeriod+200) { - t.Fatalf("Expected epoch %d to not be marked as proposed", wsPeriod+200) - } -} diff --git a/validator/client/validator_test.go b/validator/client/validator_test.go index db0cd9481d..e6ae9e705b 100644 --- a/validator/client/validator_test.go +++ b/validator/client/validator_test.go @@ -520,7 +520,7 @@ func TestUpdateDuties_OK(t *testing.T) { CommitteeIndex: 100, Committee: []uint64{0, 1, 2, 3}, PublicKey: []byte("testPubKey_1"), - ProposerSlot: params.BeaconConfig().SlotsPerEpoch + 1, + ProposerSlots: []uint64{params.BeaconConfig().SlotsPerEpoch + 1}, }, }, } @@ -533,14 +533,24 @@ func TestUpdateDuties_OK(t *testing.T) { gomock.Any(), ).Return(resp, nil) + client.EXPECT().GetDuties( + gomock.Any(), + gomock.Any(), + ).Return(resp, nil) + + client.EXPECT().SubscribeCommitteeSubnets( + gomock.Any(), + gomock.Any(), + ).Return(nil, nil) + if err := v.UpdateDuties(context.Background(), slot); err != nil { t.Fatalf("Could not update assignments: %v", err) } - if v.duties.Duties[0].ProposerSlot != params.BeaconConfig().SlotsPerEpoch+1 { + if v.duties.Duties[0].ProposerSlots[0] != params.BeaconConfig().SlotsPerEpoch+1 { t.Errorf( "Unexpected validator assignments. want=%v got=%v", params.BeaconConfig().SlotsPerEpoch+1, - v.duties.Duties[0].ProposerSlot, + v.duties.Duties[0].ProposerSlots[0], ) } if v.duties.Duties[0].AttesterSlot != params.BeaconConfig().SlotsPerEpoch { @@ -585,7 +595,7 @@ func TestRolesAt_OK(t *testing.T) { }, { CommitteeIndex: 2, - ProposerSlot: 1, + ProposerSlots: []uint64{1}, PublicKey: sks[1].PublicKey().Marshal(), }, { @@ -596,7 +606,7 @@ func TestRolesAt_OK(t *testing.T) { { CommitteeIndex: 2, AttesterSlot: 1, - ProposerSlot: 1, + ProposerSlots: []uint64{1, 5}, PublicKey: sks[3].PublicKey().Marshal(), }, }, @@ -606,6 +616,7 @@ func TestRolesAt_OK(t *testing.T) { gomock.Any(), // ctx gomock.Any(), // epoch ).Return(ðpb.DomainResponse{}, nil /*err*/) + m.validatorClient.EXPECT().DomainData( gomock.Any(), // ctx gomock.Any(), // epoch @@ -650,19 +661,19 @@ func TestRolesAt_DoesNotAssignProposer_Slot0(t *testing.T) { { CommitteeIndex: 1, AttesterSlot: 0, - ProposerSlot: 0, + ProposerSlots: []uint64{0}, PublicKey: sks[0].PublicKey().Marshal(), }, { CommitteeIndex: 2, AttesterSlot: 4, - ProposerSlot: 0, + ProposerSlots: nil, PublicKey: sks[1].PublicKey().Marshal(), }, { CommitteeIndex: 1, AttesterSlot: 3, - ProposerSlot: 0, + ProposerSlots: nil, PublicKey: sks[2].PublicKey().Marshal(), }, }, diff --git a/validator/db/BUILD.bazel b/validator/db/BUILD.bazel index c224068810..b9fc03bc3d 100644 --- a/validator/db/BUILD.bazel +++ b/validator/db/BUILD.bazel @@ -19,6 +19,7 @@ go_library( "@com_github_pkg_errors//:go_default_library", "@com_github_prysmaticlabs_go_bitfield//:go_default_library", "@com_github_sirupsen_logrus//:go_default_library", + "@com_github_wealdtech_go_bytesutil//:go_default_library", "@io_etcd_go_bbolt//:go_default_library", "@io_opencensus_go//trace:go_default_library", ], @@ -33,6 +34,7 @@ go_test( ], embed = [":go_default_library"], deps = [ + 
"//beacon-chain/core/helpers:go_default_library", "//proto/slashing:go_default_library", "//shared/params:go_default_library", "@com_github_prysmaticlabs_go_bitfield//:go_default_library", diff --git a/validator/db/attestation_history.go b/validator/db/attestation_history.go index 572b531058..e9e657d053 100644 --- a/validator/db/attestation_history.go +++ b/validator/db/attestation_history.go @@ -6,6 +6,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/pkg/errors" slashpb "github.com/prysmaticlabs/prysm/proto/slashing" + "github.com/prysmaticlabs/prysm/shared/params" bolt "go.etcd.io/bbolt" "go.opencensus.io/trace" ) @@ -31,6 +32,11 @@ func (db *Store) AttestationHistory(ctx context.Context, publicKey []byte) (*sla bucket := tx.Bucket(historicAttestationsBucket) enc := bucket.Get(publicKey) if enc == nil { + newMap := make(map[uint64]uint64) + newMap[0] = params.BeaconConfig().FarFutureEpoch + attestationHistory = &slashpb.AttestationHistory{ + TargetToSource: newMap, + } return nil } attestationHistory, err = unmarshalAttestationHistory(enc) diff --git a/validator/db/attestation_history_test.go b/validator/db/attestation_history_test.go index 9747edbf71..006060edde 100644 --- a/validator/db/attestation_history_test.go +++ b/validator/db/attestation_history_test.go @@ -9,7 +9,7 @@ import ( "github.com/prysmaticlabs/prysm/shared/params" ) -func TestAttestationHistory_InitializesNewPubKeys(t *testing.T) { +func TestAttestationHistory_EmptyVal(t *testing.T) { pubkeys := [][48]byte{{30}, {25}, {20}} db := SetupDB(t, pubkeys) defer TeardownDB(t, db) @@ -31,22 +31,6 @@ func TestAttestationHistory_InitializesNewPubKeys(t *testing.T) { } } -func TestAttestationHistory_NilDB(t *testing.T) { - db := SetupDB(t, [][48]byte{}) - defer TeardownDB(t, db) - - valPubkey := []byte{1, 2, 3} - - attestationHistory, err := db.AttestationHistory(context.Background(), valPubkey) - if err != nil { - t.Fatal(err) - } - - if attestationHistory != nil { - t.Fatalf("Expected attestation history to be nil, received: %v", attestationHistory) - } -} - func TestSaveAttestationHistory_OK(t *testing.T) { db := SetupDB(t, [][48]byte{}) defer TeardownDB(t, db) @@ -187,7 +171,12 @@ func TestDeleteAttestationHistory_OK(t *testing.T) { if err != nil { t.Fatalf("Failed to get attestation history: %v", err) } - if savedHistory != nil { - t.Fatalf("Expected attestation history to be nil, received %v", savedHistory) + cleanMap := make(map[uint64]uint64) + cleanMap[0] = params.BeaconConfig().FarFutureEpoch + clean := &slashpb.AttestationHistory{ + TargetToSource: cleanMap, + } + if !reflect.DeepEqual(savedHistory, clean) { + t.Fatalf("Expected attestation history to be %v, received %v", clean, savedHistory) } } diff --git a/validator/db/db.go b/validator/db/db.go index e8318c84ed..e043d9cac8 100644 --- a/validator/db/db.go +++ b/validator/db/db.go @@ -1,15 +1,11 @@ package db import ( - "context" "os" "path/filepath" "time" "github.com/pkg/errors" - "github.com/prysmaticlabs/go-bitfield" - slashpb "github.com/prysmaticlabs/prysm/proto/slashing" - "github.com/prysmaticlabs/prysm/shared/params" "github.com/prysmaticlabs/prysm/validator/db/iface" "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" @@ -68,7 +64,7 @@ func createBuckets(tx *bolt.Tx, buckets ...[]byte) error { // NewKVStore initializes a new boltDB key-value store at the directory // path specified, creates the kv-buckets based on the schema, and stores // an open connection db object as a property of the Store struct. 
-func NewKVStore(dirPath string, pubkeys [][48]byte) (*Store, error) { +func NewKVStore(dirPath string, pubKeys [][48]byte) (*Store, error) { if err := os.MkdirAll(dirPath, 0700); err != nil { return nil, err } @@ -93,36 +89,9 @@ func NewKVStore(dirPath string, pubkeys [][48]byte) (*Store, error) { return nil, err } - // Initialize the required pubkeys into the DB to ensure they're not empty. - for _, pubkey := range pubkeys { - proHistory, err := kv.ProposalHistory(context.Background(), pubkey[:]) - if err != nil { - return nil, err - } - if proHistory == nil { - cleanHistory := &slashpb.ProposalHistory{ - EpochBits: bitfield.NewBitlist(params.BeaconConfig().WeakSubjectivityPeriod), - } - if err := kv.SaveProposalHistory(context.Background(), pubkey[:], cleanHistory); err != nil { - return nil, err - } - } - - attHistory, err := kv.AttestationHistory(context.Background(), pubkey[:]) - if err != nil { - return nil, err - } - if attHistory == nil { - newMap := make(map[uint64]uint64) - newMap[0] = params.BeaconConfig().FarFutureEpoch - cleanHistory := &slashpb.AttestationHistory{ - TargetToSource: newMap, - } - if err := kv.SaveAttestationHistory(context.Background(), pubkey[:], cleanHistory); err != nil { - return nil, err - } - } - + // Initialize the required public keys into the DB to ensure they're not empty. + if err := kv.initializeSubBuckets(pubKeys); err != nil { + return nil, err } return kv, err diff --git a/validator/db/iface/BUILD.bazel b/validator/db/iface/BUILD.bazel index 987dc83627..d999fb9acc 100644 --- a/validator/db/iface/BUILD.bazel +++ b/validator/db/iface/BUILD.bazel @@ -6,5 +6,8 @@ go_library( importpath = "github.com/prysmaticlabs/prysm/validator/db/iface", # Other packages must use github.com/prysmaticlabs/prysm/validator/db.Database alias. visibility = ["//validator/db:__subpackages__"], - deps = ["//proto/slashing:go_default_library"], + deps = [ + "//proto/slashing:go_default_library", + "@com_github_prysmaticlabs_go_bitfield//:go_default_library", + ], ) diff --git a/validator/db/iface/interface.go b/validator/db/iface/interface.go index 6bf8ad131e..0b4d1b3af0 100644 --- a/validator/db/iface/interface.go +++ b/validator/db/iface/interface.go @@ -5,6 +5,7 @@ import ( "context" "io" + "github.com/prysmaticlabs/go-bitfield" slashpb "github.com/prysmaticlabs/prysm/proto/slashing" ) @@ -14,8 +15,8 @@ type ValidatorDB interface { DatabasePath() string ClearDB() error // Proposer protection related methods. - ProposalHistory(ctx context.Context, publicKey []byte) (*slashpb.ProposalHistory, error) - SaveProposalHistory(ctx context.Context, publicKey []byte, history *slashpb.ProposalHistory) error + ProposalHistoryForEpoch(ctx context.Context, publicKey []byte, epoch uint64) (bitfield.Bitlist, error) + SaveProposalHistoryForEpoch(ctx context.Context, publicKey []byte, epoch uint64, history bitfield.Bitlist) error DeleteProposalHistory(ctx context.Context, publicKey []byte) error // Attester protection related methods. 
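AttestationHistory above no longer returns nil for a fresh key; an empty history is a target-to-source map whose entry defaults to FarFutureEpoch, used as a "nothing recorded yet" sentinel by the attester protection checks. A tiny sketch of that convention; farFutureEpoch here is an assumed stand-in for params.BeaconConfig().FarFutureEpoch:

package main

import "fmt"

// farFutureEpoch stands in for params.BeaconConfig().FarFutureEpoch, the
// sentinel meaning "no source epoch recorded for this target".
const farFutureEpoch = ^uint64(0)

func main() {
	// Clean history for a brand-new validator key, as returned above.
	targetToSource := map[uint64]uint64{0: farFutureEpoch}

	target := uint64(0)
	if src, ok := targetToSource[target]; !ok || src == farFutureEpoch {
		fmt.Println("no prior attestation for target", target)
	} else {
		fmt.Println("already attested with source", src)
	}
}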
AttestationHistory(ctx context.Context, publicKey []byte) (*slashpb.AttestationHistory, error) diff --git a/validator/db/proposal_history.go b/validator/db/proposal_history.go index 4d127dc030..c02b20f0b8 100644 --- a/validator/db/proposal_history.go +++ b/validator/db/proposal_history.go @@ -2,56 +2,61 @@ package db import ( "context" + "encoding/binary" + "fmt" - "github.com/gogo/protobuf/proto" "github.com/pkg/errors" - slashpb "github.com/prysmaticlabs/prysm/proto/slashing" + "github.com/prysmaticlabs/go-bitfield" + "github.com/prysmaticlabs/prysm/shared/params" + "github.com/wealdtech/go-bytesutil" bolt "go.etcd.io/bbolt" "go.opencensus.io/trace" ) -func unmarshalProposalHistory(enc []byte) (*slashpb.ProposalHistory, error) { - history := &slashpb.ProposalHistory{} - err := proto.Unmarshal(enc, history) - if err != nil { - return nil, errors.Wrap(err, "failed to unmarshal encoding") - } - return history, nil -} - -// ProposalHistory accepts a validator public key and returns the corresponding proposal history. +// ProposalHistoryForEpoch accepts a validator public key and returns the corresponding proposal history. // Returns nil if there is no proposal history for the validator. -func (db *Store) ProposalHistory(ctx context.Context, publicKey []byte) (*slashpb.ProposalHistory, error) { - ctx, span := trace.StartSpan(ctx, "Validator.ProposalHistory") +func (db *Store) ProposalHistoryForEpoch(ctx context.Context, publicKey []byte, epoch uint64) (bitfield.Bitlist, error) { + ctx, span := trace.StartSpan(ctx, "Validator.ProposalHistoryForEpoch") defer span.End() var err error - var proposalHistory *slashpb.ProposalHistory + // Using 5 here since a bitfield length of 32 is always 5 bytes long. + slotBitlist := make(bitfield.Bitlist, 5) err = db.view(func(tx *bolt.Tx) error { bucket := tx.Bucket(historicProposalsBucket) - enc := bucket.Get(publicKey) - if enc == nil { + valBucket := bucket.Bucket(publicKey) + if valBucket == nil { + return fmt.Errorf("validator history empty for public key %#x", publicKey) + } + slotBits := valBucket.Get(bytesutil.Bytes8(epoch)) + if slotBits == nil || len(slotBits) == 0 { + slotBitlist = bitfield.NewBitlist(params.BeaconConfig().SlotsPerEpoch) return nil } - proposalHistory, err = unmarshalProposalHistory(enc) - return err + copy(slotBitlist, slotBits) + return nil }) - return proposalHistory, err + return slotBitlist, err } -// SaveProposalHistory returns the proposal history for the requested validator public key. -func (db *Store) SaveProposalHistory(ctx context.Context, pubKey []byte, proposalHistory *slashpb.ProposalHistory) error { - ctx, span := trace.StartSpan(ctx, "Validator.SaveProposalHistory") +// SaveProposalHistoryForEpoch returns the proposal history for the requested validator public key. 
+func (db *Store) SaveProposalHistoryForEpoch(ctx context.Context, pubKey []byte, epoch uint64, slotBits bitfield.Bitlist) error { + ctx, span := trace.StartSpan(ctx, "Validator.SaveProposalHistoryForEpoch") defer span.End() - enc, err := proto.Marshal(proposalHistory) - if err != nil { - return errors.Wrap(err, "failed to encode proposal history") - } - - err = db.update(func(tx *bolt.Tx) error { + err := db.update(func(tx *bolt.Tx) error { bucket := tx.Bucket(historicProposalsBucket) - return bucket.Put(pubKey, enc) + valBucket := bucket.Bucket(pubKey) + if valBucket == nil { + return fmt.Errorf("validator history is empty for validator %#x", pubKey) + } + if err := valBucket.Put(bytesutil.Bytes8(epoch), slotBits); err != nil { + return err + } + if err := pruneProposalHistory(valBucket, epoch); err != nil { + return err + } + return nil }) return err } @@ -63,9 +68,38 @@ func (db *Store) DeleteProposalHistory(ctx context.Context, pubkey []byte) error return db.update(func(tx *bolt.Tx) error { bucket := tx.Bucket(historicProposalsBucket) - if err := bucket.Delete(pubkey); err != nil { + if err := bucket.DeleteBucket(pubkey); err != nil { return errors.Wrap(err, "failed to delete the proposal history") } return nil }) } + +func pruneProposalHistory(valBucket *bolt.Bucket, newestEpoch uint64) error { + c := valBucket.Cursor() + for k, _ := c.First(); k != nil; k, _ = c.First() { + epoch := binary.LittleEndian.Uint64(k) + // Only delete epochs that are older than the weak subjectivity period. + if epoch+params.BeaconConfig().WeakSubjectivityPeriod <= newestEpoch { + if err := c.Delete(); err != nil { + return errors.Wrapf(err, "could not prune epoch %d in proposal history", epoch) + } + } else { + // If starting from the oldest, we stop finding anything prunable, stop pruning. 
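Proposal history now lives in one bolt sub-bucket per public key, keyed by an 8-byte epoch and holding that epoch's slot bitlist, which is what SaveProposalHistoryForEpoch and the pruner above operate on. A compact sketch of that layout with go.etcd.io/bbolt; the bucket name, the short stand-in public key, and the temp-directory setup are assumptions for the example, while the little-endian epoch encoding mirrors the Uint64 read in pruneProposalHistory:

package main

import (
	"encoding/binary"
	"fmt"
	"os"
	"path/filepath"

	bolt "go.etcd.io/bbolt"
)

// Assumed bucket name; the repo defines its own historicProposalsBucket value.
var historicProposalsBucket = []byte("proposal-history-bucket")

func epochKey(epoch uint64) []byte {
	k := make([]byte, 8)
	binary.LittleEndian.PutUint64(k, epoch)
	return k
}

func main() {
	dir, err := os.MkdirTemp("", "proposal-history")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	db, err := bolt.Open(filepath.Join(dir, "validator.db"), 0600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	pubKey := []byte{0xaa, 0xbb} // stand-in for a 48-byte BLS public key
	slotBits := []byte{0x04, 0x00, 0x00, 0x00, 0x04}

	// Save: parent bucket -> per-pubkey sub-bucket -> epoch key -> slot bitlist.
	if err := db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists(historicProposalsBucket)
		if err != nil {
			return err
		}
		valBucket, err := bucket.CreateBucketIfNotExists(pubKey)
		if err != nil {
			return err
		}
		return valBucket.Put(epochKey(2), slotBits)
	}); err != nil {
		panic(err)
	}

	// Read back the bitlist for the same key and epoch.
	if err := db.View(func(tx *bolt.Tx) error {
		valBucket := tx.Bucket(historicProposalsBucket).Bucket(pubKey)
		fmt.Printf("epoch 2 bits: %#x\n", valBucket.Get(epochKey(2)))
		return nil
	}); err != nil {
		panic(err)
	}
}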
+ break + } + } + return nil +} + +func (db *Store) initializeSubBuckets(pubKeys [][48]byte) error { + return db.update(func(tx *bolt.Tx) error { + bucket := tx.Bucket(historicProposalsBucket) + for _, pubKey := range pubKeys { + if _, err := bucket.CreateBucketIfNotExists(pubKey[:]); err != nil { + return errors.Wrap(err, "failed to create proposal history bucket") + } + } + return nil + }) +} diff --git a/validator/db/proposal_history_test.go b/validator/db/proposal_history_test.go index f5d08c7846..2af711732e 100644 --- a/validator/db/proposal_history_test.go +++ b/validator/db/proposal_history_test.go @@ -1,173 +1,277 @@ package db import ( + "bytes" "context" "reflect" + "strings" "testing" "github.com/prysmaticlabs/go-bitfield" - slashpb "github.com/prysmaticlabs/prysm/proto/slashing" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/shared/params" ) -func TestProposalHistory_InitializesNewPubKeys(t *testing.T) { +func TestProposalHistoryForEpoch_InitializesNewPubKeys(t *testing.T) { pubkeys := [][48]byte{{30}, {25}, {20}} db := SetupDB(t, pubkeys) defer TeardownDB(t, db) for _, pub := range pubkeys { - proposalHistory, err := db.ProposalHistory(context.Background(), pub[:]) + slotBits, err := db.ProposalHistoryForEpoch(context.Background(), pub[:], 0) if err != nil { t.Fatal(err) } - clean := &slashpb.ProposalHistory{ - EpochBits: bitfield.NewBitlist(params.BeaconConfig().WeakSubjectivityPeriod), - } - if !reflect.DeepEqual(proposalHistory, clean) { - t.Fatalf("Expected proposal history epoch bits to be empty, received %v", proposalHistory) + cleanBits := bitfield.NewBitlist(params.BeaconConfig().SlotsPerEpoch) + if !bytes.Equal(slotBits.Bytes(), cleanBits.Bytes()) { + t.Fatalf("Expected proposal history slot bits to be empty, received %v", slotBits.Bytes()) } } } -func TestProposalHistory_NilDB(t *testing.T) { +func TestProposalHistoryForEpoch_NilDB(t *testing.T) { + valPubkey := [48]byte{1, 2, 3} db := SetupDB(t, [][48]byte{}) defer TeardownDB(t, db) - valPubkey := []byte{1, 2, 3} - - proposalHistory, err := db.ProposalHistory(context.Background(), valPubkey) - if err != nil { - t.Fatal(err) + _, err := db.ProposalHistoryForEpoch(context.Background(), valPubkey[:], 0) + if err == nil { + t.Fatal("unexpected non-error") } - if proposalHistory != nil { - t.Fatalf("Expected proposal history to be nil, received: %v", proposalHistory) + if !strings.Contains(err.Error(), "validator history empty for public key") { + t.Fatalf("Unexpected error for nil DB, received: %v", err) } } -func TestSaveProposalHistory_OK(t *testing.T) { - db := SetupDB(t, [][48]byte{}) +func TestSaveProposalHistoryForEpoch_OK(t *testing.T) { + pubkey := [48]byte{3} + db := SetupDB(t, [][48]byte{pubkey}) defer TeardownDB(t, db) - pubkey := []byte{3} epoch := uint64(2) - history := &slashpb.ProposalHistory{ - EpochBits: bitfield.Bitlist{0x04, 0x04}, - LatestEpochWritten: 2, - } + slot := uint64(2) + slotBits := bitfield.Bitlist{0x04, 0x00, 0x00, 0x00, 0x04} - if err := db.SaveProposalHistory(context.Background(), pubkey, history); err != nil { + if err := db.SaveProposalHistoryForEpoch(context.Background(), pubkey[:], epoch, slotBits); err != nil { t.Fatalf("Saving proposal history failed: %v", err) } - savedHistory, err := db.ProposalHistory(context.Background(), pubkey) + savedBits, err := db.ProposalHistoryForEpoch(context.Background(), pubkey[:], epoch) if err != nil { t.Fatalf("Failed to get proposal history: %v", err) } - if savedHistory == nil || 
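pruneProposalHistory above walks the per-validator bucket from its oldest epoch key and deletes an entry once epoch + WeakSubjectivityPeriod <= newestEpoch, stopping at the first entry still in range. A one-loop check of that condition, using the 54000-epoch weak subjectivity period implied by the prune tests further below:

package main

import "fmt"

func main() {
	const wsPeriod = uint64(54000) // implied by the stored/removed epochs in the prune tests

	newest := uint64(54003)
	for _, epoch := range []uint64{0, 5} {
		fmt.Printf("epoch %d prunable: %v\n", epoch, epoch+wsPeriod <= newest)
	}
	// Epoch 0 is pruned while epoch 5 survives, matching the first test case.
}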
!reflect.DeepEqual(history, savedHistory) { - t.Fatalf("Expected DB to keep object the same, received: %v", history) + if savedBits == nil || !bytes.Equal(slotBits, savedBits) { + t.Fatalf("Expected DB to keep object the same, received: %v", savedBits) } - if !savedHistory.EpochBits.BitAt(epoch) { - t.Fatalf("Expected epoch %d to be marked as proposed", history.EpochBits.Count()) + if !savedBits.BitAt(slot) { + t.Fatalf("Expected slot %d to be marked as proposed", slot) } - if savedHistory.EpochBits.BitAt(epoch + 1) { - t.Fatalf("Expected epoch %d to not be marked as proposed", epoch+1) + if savedBits.BitAt(slot + 1) { + t.Fatalf("Expected slot %d to not be marked as proposed", slot+1) } - if savedHistory.EpochBits.BitAt(epoch - 1) { - t.Fatalf("Expected epoch %d to not be marked as proposed", epoch-1) + if savedBits.BitAt(slot - 1) { + t.Fatalf("Expected slot %d to not be marked as proposed", slot-1) } } -func TestSaveProposalHistory_Overwrites(t *testing.T) { - db := SetupDB(t, [][48]byte{}) - defer TeardownDB(t, db) +func TestSaveProposalHistoryForEpoch_Overwrites(t *testing.T) { + pubkey := [48]byte{0} tests := []struct { - pubkey []byte - epoch uint64 - history *slashpb.ProposalHistory + slot uint64 + slotBits bitfield.Bitlist }{ { - pubkey: []byte{0}, - epoch: uint64(1), - history: &slashpb.ProposalHistory{ - EpochBits: bitfield.Bitlist{0x02, 0x02}, - LatestEpochWritten: 1, + slot: uint64(1), + slotBits: bitfield.Bitlist{0x02, 0x00, 0x00, 0x00, 0x02}, + }, + { + slot: uint64(2), + slotBits: bitfield.Bitlist{0x04, 0x00, 0x00, 0x00, 0x04}, + }, + { + slot: uint64(3), + slotBits: bitfield.Bitlist{0x08, 0x00, 0x00, 0x00, 0x08}, + }, + } + + for _, tt := range tests { + db := SetupDB(t, [][48]byte{pubkey}) + defer TeardownDB(t, db) + if err := db.SaveProposalHistoryForEpoch(context.Background(), pubkey[:], 0, tt.slotBits); err != nil { + t.Fatalf("Saving proposal history failed: %v", err) + } + savedBits, err := db.ProposalHistoryForEpoch(context.Background(), pubkey[:], 0) + if err != nil { + t.Fatalf("Failed to get proposal history: %v", err) + } + + if savedBits == nil || !reflect.DeepEqual(savedBits, tt.slotBits) { + t.Fatalf("Expected DB to keep object the same, received: %v, expected %v", savedBits, tt.slotBits) + } + if !savedBits.BitAt(tt.slot) { + t.Fatalf("Expected slot %d to be marked as proposed", tt.slot) + } + if savedBits.BitAt(tt.slot + 1) { + t.Fatalf("Expected slot %d to not be marked as proposed", tt.slot+1) + } + if savedBits.BitAt(tt.slot - 1) { + t.Fatalf("Expected slot %d to not be marked as proposed", tt.slot-1) + } + } +} + +func TestProposalHistoryForEpoch_MultipleEpochs(t *testing.T) { + pubKey := [48]byte{0} + tests := []struct { + slots []uint64 + expectedBits []bitfield.Bitlist + }{ + { + slots: []uint64{1, 2, 8, 31}, + expectedBits: []bitfield.Bitlist{{0b00000110, 0b00000001, 0b00000000, 0b10000000, 0b00000001}}, + }, + { + slots: []uint64{1, 33, 8}, + expectedBits: []bitfield.Bitlist{ + {0b00000010, 0b00000001, 0b00000000, 0b00000000, 0b00000001}, + {0b00000010, 0b00000000, 0b00000000, 0b00000000, 0b00000001}, }, }, { - pubkey: []byte{0}, - epoch: uint64(2), - history: &slashpb.ProposalHistory{ - EpochBits: bitfield.Bitlist{0x04, 0x04}, - LatestEpochWritten: 2, + slots: []uint64{2, 34, 36}, + expectedBits: []bitfield.Bitlist{ + {0b00000100, 0b00000000, 0b00000000, 0b00000000, 0b00000001}, + {0b00010100, 0b00000000, 0b00000000, 0b00000000, 0b00000001}, }, }, { - pubkey: []byte{0}, - epoch: uint64(3), - history: &slashpb.ProposalHistory{ - EpochBits: 
bitfield.Bitlist{0x08, 0x08}, - LatestEpochWritten: 3, + slots: []uint64{32, 33, 34}, + expectedBits: []bitfield.Bitlist{ + {0, 0, 0, 0, 1}, + {0b00000111, 0b00000000, 0b00000000, 0b00000000, 0b00000001}, }, }, } for _, tt := range tests { - if err := db.SaveProposalHistory(context.Background(), tt.pubkey, tt.history); err != nil { - t.Fatalf("Saving proposal history failed: %v", err) - } - history, err := db.ProposalHistory(context.Background(), tt.pubkey) - if err != nil { - t.Fatalf("Failed to get proposal history: %v", err) + db := SetupDB(t, [][48]byte{pubKey}) + defer TeardownDB(t, db) + for _, slot := range tt.slots { + slotBits, err := db.ProposalHistoryForEpoch(context.Background(), pubKey[:], helpers.SlotToEpoch(slot)) + if err != nil { + t.Fatalf("Failed to get proposal history: %v", err) + } + slotBits.SetBitAt(slot%params.BeaconConfig().SlotsPerEpoch, true) + if err := db.SaveProposalHistoryForEpoch(context.Background(), pubKey[:], helpers.SlotToEpoch(slot), slotBits); err != nil { + t.Fatalf("Saving proposal history failed: %v", err) + } } - if history == nil || !reflect.DeepEqual(history, tt.history) { - t.Fatalf("Expected DB to keep object the same, received: %v", history) + for i, slotBits := range tt.expectedBits { + savedBits, err := db.ProposalHistoryForEpoch(context.Background(), pubKey[:], uint64(i)) + if err != nil { + t.Fatalf("Failed to get proposal history: %v", err) + } + if !bytes.Equal(slotBits, savedBits) { + t.Fatalf("unexpected difference in bytes for slots %v, expected %v vs received %v", tt.slots, slotBits, savedBits) + } } - if !history.EpochBits.BitAt(tt.epoch) { - t.Fatalf("Expected epoch %d to be marked as proposed", history.EpochBits.Count()) + } +} + +func TestPruneProposalHistory_OK(t *testing.T) { + slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch + wsPeriod := params.BeaconConfig().WeakSubjectivityPeriod + pubKey := [48]byte{0} + tests := []struct { + slots []uint64 + storedEpochs []uint64 + removedEpochs []uint64 + }{ + { + // Go 2 epochs past pruning point. + slots: []uint64{slotsPerEpoch / 2, slotsPerEpoch*5 + 6, (wsPeriod+3)*slotsPerEpoch + 8}, + storedEpochs: []uint64{5, 54003}, + removedEpochs: []uint64{0}, + }, + { + // Go 10 epochs past pruning point. + slots: []uint64{slotsPerEpoch + 4, slotsPerEpoch * 2, slotsPerEpoch * 3, slotsPerEpoch * 4, slotsPerEpoch * 5, (wsPeriod+10)*slotsPerEpoch + 8}, + storedEpochs: []uint64{54010}, + removedEpochs: []uint64{1, 2, 3, 4}, + }, + { + // Prune none. 
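The expected byte patterns in these tests follow go-bitfield's length-delimited encoding: a 32-slot bitlist occupies five bytes, and the lowest set bit of the final byte marks the length. A quick sketch reproducing the first expected value above (slots 1, 2, 8, and 31 of one epoch):

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	// A bitlist of length 32 occupies 5 bytes: 4 bytes of slot bits plus a
	// final byte whose lowest set bit encodes the length.
	bits := bitfield.NewBitlist(32)
	for _, slot := range []uint64{1, 2, 8, 31} {
		bits.SetBitAt(slot, true)
	}
	fmt.Printf("%08b\n", []byte(bits))
	// [00000110 00000001 00000000 10000000 00000001], the first expectedBits case above.
}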
+ slots: []uint64{slotsPerEpoch + 4, slotsPerEpoch*2 + 3, slotsPerEpoch*3 + 4, slotsPerEpoch*4 + 3, slotsPerEpoch*5 + 3}, + storedEpochs: []uint64{1, 2, 3, 4, 5}, + }, + } + + for _, tt := range tests { + db := SetupDB(t, [][48]byte{pubKey}) + defer TeardownDB(t, db) + for _, slot := range tt.slots { + slotBits, err := db.ProposalHistoryForEpoch(context.Background(), pubKey[:], helpers.SlotToEpoch(slot)) + if err != nil { + t.Fatalf("Failed to get proposal history: %v", err) + } + slotBits.SetBitAt(slot%params.BeaconConfig().SlotsPerEpoch, true) + if err := db.SaveProposalHistoryForEpoch(context.Background(), pubKey[:], helpers.SlotToEpoch(slot), slotBits); err != nil { + t.Fatalf("Saving proposal history failed: %v", err) + } } - if history.EpochBits.BitAt(tt.epoch + 1) { - t.Fatalf("Expected epoch %d to not be marked as proposed", tt.epoch+1) + + for _, epoch := range tt.removedEpochs { + savedBits, err := db.ProposalHistoryForEpoch(context.Background(), pubKey[:], epoch) + if err != nil { + t.Fatalf("Failed to get proposal history: %v", err) + } + if !bytes.Equal(bitfield.NewBitlist(slotsPerEpoch), savedBits) { + t.Fatalf("unexpected difference in bytes for epoch %d, expected %#x vs received %v", epoch, bitfield.NewBitlist(slotsPerEpoch), savedBits) + } } - if history.EpochBits.BitAt(tt.epoch - 1) { - t.Fatalf("Expected epoch %d to not be marked as proposed", tt.epoch-1) + for _, epoch := range tt.storedEpochs { + savedBits, err := db.ProposalHistoryForEpoch(context.Background(), pubKey[:], epoch) + if err != nil { + t.Fatalf("Failed to get proposal history: %v", err) + } + if bytes.Equal(bitfield.NewBitlist(slotsPerEpoch), savedBits) { + t.Fatalf("unexpected difference in bytes for epoch %d, expected %v vs received %v", epoch, bitfield.NewBitlist(slotsPerEpoch), savedBits) + } } } } func TestDeleteProposalHistory_OK(t *testing.T) { - db := SetupDB(t, [][48]byte{}) + pubkey := [48]byte{2} + db := SetupDB(t, [][48]byte{pubkey}) defer TeardownDB(t, db) - pubkey := []byte{2} - history := &slashpb.ProposalHistory{ - EpochBits: bitfield.Bitlist{0x01, 0x02}, - LatestEpochWritten: 1, - } + slotBits := bitfield.Bitlist{0x01, 0x00, 0x00, 0x00, 0x02} - if err := db.SaveProposalHistory(context.Background(), pubkey, history); err != nil { + if err := db.SaveProposalHistoryForEpoch(context.Background(), pubkey[:], 0, slotBits); err != nil { t.Fatalf("Save proposal history failed: %v", err) } // Making sure everything is saved. - savedHistory, err := db.ProposalHistory(context.Background(), pubkey) + savedHistory, err := db.ProposalHistoryForEpoch(context.Background(), pubkey[:], 0) if err != nil { t.Fatalf("Failed to get proposal history: %v", err) } - if savedHistory == nil || !reflect.DeepEqual(savedHistory, history) { - t.Fatalf("Expected DB to keep object the same, received: %v, expected %v", savedHistory, history) + if savedHistory == nil || !bytes.Equal(savedHistory, slotBits) { + t.Fatalf("Expected DB to keep object the same, received: %v, expected %v", savedHistory, slotBits) } - if err := db.DeleteProposalHistory(context.Background(), pubkey); err != nil { + if err := db.DeleteProposalHistory(context.Background(), pubkey[:]); err != nil { t.Fatal(err) } // Check after deleting from DB. 
- savedHistory, err = db.ProposalHistory(context.Background(), pubkey) - if err != nil { - t.Fatalf("Failed to get proposal history: %v", err) + _, err = db.ProposalHistoryForEpoch(context.Background(), pubkey[:], 0) + if err == nil { + t.Fatalf("Unexpected success in deleting history: %v", err) } - if savedHistory != nil { - t.Fatalf("Expected proposal history to be nil, received %v", savedHistory) + if !strings.Contains(err.Error(), "validator history empty for public key ") { + t.Fatalf("Unexpected error, received %v", err) } } diff --git a/validator/flags/flags.go b/validator/flags/flags.go index a9fb22b655..4f4ab7f65c 100644 --- a/validator/flags/flags.go +++ b/validator/flags/flags.go @@ -5,11 +5,6 @@ import ( ) var ( - // NoCustomConfigFlag determines whether to launch a beacon chain using real parameters or demo parameters. - NoCustomConfigFlag = &cli.BoolFlag{ - Name: "no-custom-config", - Usage: "Run the beacon chain with the real parameters from phase 0.", - } // BeaconRPCProviderFlag defines a beacon node RPC endpoint. BeaconRPCProviderFlag = &cli.StringFlag{ Name: "beacon-rpc-provider", diff --git a/validator/internal/beacon_node_validator_service_mock.go b/validator/internal/beacon_node_validator_service_mock.go index f1447a82b9..1de7ac2034 100644 --- a/validator/internal/beacon_node_validator_service_mock.go +++ b/validator/internal/beacon_node_validator_service_mock.go @@ -10,6 +10,7 @@ import ( ptypes "github.com/gogo/protobuf/types" gomock "github.com/golang/mock/gomock" + eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1" grpc "google.golang.org/grpc" metadata "google.golang.org/grpc/metadata" @@ -238,24 +239,44 @@ func (mr *MockBeaconNodeValidatorClientMockRecorder) ProposeAttestation(ctx, in return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProposeAttestation", reflect.TypeOf((*MockBeaconNodeValidatorClient)(nil).ProposeAttestation), varargs...) } -// SubmitAggregateAndProof mocks base method -func (m *MockBeaconNodeValidatorClient) SubmitAggregateAndProof(ctx context.Context, in *ethpb.AggregationRequest, opts ...grpc.CallOption) (*ethpb.AggregationResponse, error) { +// SubmitAggregateSelectionProof mocks base method +func (m *MockBeaconNodeValidatorClient) SubmitAggregateSelectionProof(arg0 context.Context, arg1 *ethpb.AggregateSelectionRequest, arg2 ...grpc.CallOption) (*ethpb.AggregateSelectionResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "SubmitAggregateAndProof", varargs...) - ret0, _ := ret[0].(*ethpb.AggregationResponse) + ret := m.ctrl.Call(m, "SubmitAggregateSelectionProof", varargs...) + ret0, _ := ret[0].(*ethpb.AggregateSelectionResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// SubmitAggregateAndProof indicates an expected call of SubmitAggregateAndProof -func (mr *MockBeaconNodeValidatorClientMockRecorder) SubmitAggregateAndProof(ctx, in interface{}, opts ...interface{}) *gomock.Call { +// SubmitAggregateSelectionProof indicates an expected call of SubmitAggregateSelectionProof +func (mr *MockBeaconNodeValidatorClientMockRecorder) SubmitAggregateSelectionProof(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitAggregateAndProof", reflect.TypeOf((*MockBeaconNodeValidatorClient)(nil).SubmitAggregateAndProof), varargs...) + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitAggregateSelectionProof", reflect.TypeOf((*MockBeaconNodeValidatorClient)(nil).SubmitAggregateSelectionProof), varargs...) +} + +// SubmitSignedAggregateSelectionProof mocks base method +func (m *MockBeaconNodeValidatorClient) SubmitSignedAggregateSelectionProof(arg0 context.Context, arg1 *ethpb.SignedAggregateSubmitRequest, arg2 ...grpc.CallOption) (*ethpb.SignedAggregateSubmitResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "SubmitSignedAggregateSelectionProof", varargs...) + ret0, _ := ret[0].(*ethpb.SignedAggregateSubmitResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SubmitSignedAggregateSelectionProof indicates an expected call of SubmitSignedAggregateSelectionProof +func (mr *MockBeaconNodeValidatorClientMockRecorder) SubmitSignedAggregateSelectionProof(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitSignedAggregateSelectionProof", reflect.TypeOf((*MockBeaconNodeValidatorClient)(nil).SubmitSignedAggregateSelectionProof), varargs...) } // ProposeExit mocks base method @@ -695,21 +716,6 @@ func (mr *MockBeaconNodeValidatorServerMockRecorder) ProposeAttestation(arg0, ar return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProposeAttestation", reflect.TypeOf((*MockBeaconNodeValidatorServer)(nil).ProposeAttestation), arg0, arg1) } -// SubmitAggregateAndProof mocks base method -func (m *MockBeaconNodeValidatorServer) SubmitAggregateAndProof(arg0 context.Context, arg1 *ethpb.AggregationRequest) (*ethpb.AggregationResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SubmitAggregateAndProof", arg0, arg1) - ret0, _ := ret[0].(*ethpb.AggregationResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SubmitAggregateAndProof indicates an expected call of SubmitAggregateAndProof -func (mr *MockBeaconNodeValidatorServerMockRecorder) SubmitAggregateAndProof(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitAggregateAndProof", reflect.TypeOf((*MockBeaconNodeValidatorServer)(nil).SubmitAggregateAndProof), arg0, arg1) -} - // ProposeExit mocks base method func (m *MockBeaconNodeValidatorServer) ProposeExit(arg0 context.Context, arg1 *ethpb.SignedVoluntaryExit) (*ptypes.Empty, error) { m.ctrl.T.Helper() @@ -962,3 +968,43 @@ func (mr *MockBeaconNodeValidator_WaitForChainStartServerMockRecorder) RecvMsg(m mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockBeaconNodeValidator_WaitForChainStartServer)(nil).RecvMsg), m) } + +// SubscribeCommitteeSubnets mocks base method +func (m *MockBeaconNodeValidatorClient) SubscribeCommitteeSubnets(arg0 context.Context, arg1 *eth.CommitteeSubnetsSubscribeRequest, arg2 ...grpc.CallOption) (*ptypes.Empty, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "SubscribeCommitteeSubnets", varargs...) 
+ ret0, _ := ret[0].(*ptypes.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SubscribeCommitteeSubnets indicates an expected call of SubscribeCommitteeSubnets +func (mr *MockBeaconNodeValidatorClientMockRecorder) SubscribeCommitteeSubnets(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeCommitteeSubnets", reflect.TypeOf((*MockBeaconNodeValidatorClient)(nil).SubscribeCommitteeSubnets), varargs...) +} + +// WaitForSynced mocks base method +func (m *MockBeaconNodeValidatorClient) WaitForSynced(arg0 context.Context, arg1 *ptypes.Empty, arg2 ...grpc.CallOption) (eth.BeaconNodeValidator_WaitForSyncedClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WaitForSynced", varargs...) + ret0, _ := ret[0].(eth.BeaconNodeValidator_WaitForSyncedClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WaitForSynced indicates an expected call of WaitForSynced +func (mr *MockBeaconNodeValidatorClientMockRecorder) WaitForSynced(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForSynced", reflect.TypeOf((*MockBeaconNodeValidatorClient)(nil).WaitForSynced), varargs...) +} diff --git a/validator/keymanager/BUILD.bazel b/validator/keymanager/BUILD.bazel index 4db6b5eb7a..65cdb2b70d 100644 --- a/validator/keymanager/BUILD.bazel +++ b/validator/keymanager/BUILD.bazel @@ -10,6 +10,7 @@ go_library( "keymanager.go", "log.go", "opts.go", + "remote.go", "wallet.go", ], importpath = "github.com/prysmaticlabs/prysm/validator/keymanager", @@ -19,11 +20,15 @@ go_library( "//shared/bytesutil:go_default_library", "//shared/interop:go_default_library", "//validator/accounts:go_default_library", + "@com_github_pkg_errors//:go_default_library", "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library", "@com_github_sirupsen_logrus//:go_default_library", + "@com_github_wealdtech_eth2_signer_api//pb/v1:go_default_library", "@com_github_wealdtech_go_eth2_wallet//:go_default_library", "@com_github_wealdtech_go_eth2_wallet_store_filesystem//:go_default_library", "@com_github_wealdtech_go_eth2_wallet_types//:go_default_library", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//credentials:go_default_library", "@org_golang_x_crypto//ssh/terminal:go_default_library", ], ) @@ -34,12 +39,14 @@ go_test( "direct_interop_test.go", "direct_test.go", "opts_test.go", + "remote_test.go", "wallet_test.go", ], embed = [":go_default_library"], deps = [ "//shared/bls:go_default_library", "//shared/bytesutil:go_default_library", + "//shared/testutil:go_default_library", "@com_github_wealdtech_go_eth2_wallet_encryptor_keystorev4//:go_default_library", "@com_github_wealdtech_go_eth2_wallet_nd//:go_default_library", "@com_github_wealdtech_go_eth2_wallet_store_filesystem//:go_default_library", diff --git a/validator/keymanager/direct.go b/validator/keymanager/direct.go index ee43c2de24..d59e68ebe0 100644 --- a/validator/keymanager/direct.go +++ b/validator/keymanager/direct.go @@ -38,9 +38,9 @@ func (km *Direct) FetchValidatingKeys() ([][48]byte, error) { } // Sign signs a message for the validator to broadcast. 
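Stepping back to the regenerated gomock stubs earlier in this diff: tests can stub the renamed aggregation RPCs in the usual gomock fashion. The sketch below assumes the generated file also provides the standard NewMockBeaconNodeValidatorClient constructor and EXPECT helpers (not shown in this hunk), and it would need to live alongside validator/internal because of Go's internal-package rule.

```go
package internal_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/validator/internal"
)

func TestAggregateRPCs_sketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	client := internal.NewMockBeaconNodeValidatorClient(ctrl)
	// Expect one call to each of the renamed RPCs.
	client.EXPECT().
		SubmitAggregateSelectionProof(gomock.Any(), gomock.Any()).
		Return(&ethpb.AggregateSelectionResponse{}, nil)
	client.EXPECT().
		SubmitSignedAggregateSelectionProof(gomock.Any(), gomock.Any()).
		Return(&ethpb.SignedAggregateSubmitResponse{}, nil)

	// Code under test would normally make these calls; invoke them directly here.
	if _, err := client.SubmitAggregateSelectionProof(context.Background(), &ethpb.AggregateSelectionRequest{}); err != nil {
		t.Fatal(err)
	}
	if _, err := client.SubmitSignedAggregateSelectionProof(context.Background(), &ethpb.SignedAggregateSubmitRequest{}); err != nil {
		t.Fatal(err)
	}
}
```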
-func (km *Direct) Sign(pubKey [48]byte, root [32]byte, domain uint64) (*bls.Signature, error) {
+func (km *Direct) Sign(pubKey [48]byte, root [32]byte) (*bls.Signature, error) {
 	if secretKey, exists := km.secretKeys[pubKey]; exists {
-		return secretKey.Sign(root[:], domain), nil
+		return secretKey.Sign(root[:]), nil
 	}
 	return nil, ErrNoSuchKey
 }
diff --git a/validator/keymanager/direct_test.go b/validator/keymanager/direct_test.go
index 8085be25da..9e368af9d2 100644
--- a/validator/keymanager/direct_test.go
+++ b/validator/keymanager/direct_test.go
@@ -54,7 +54,7 @@ func TestSignNoSuchKey(t *testing.T) {
 	//	sks = append(sks, bls.RandKey())
 	direct := keymanager.NewDirect(sks)
 
-	sig, err := direct.Sign([48]byte{}, [32]byte{}, 0)
+	sig, err := direct.Sign([48]byte{}, [32]byte{})
 	if err != keymanager.ErrNoSuchKey {
 		t.Fatalf("Incorrect error: expected %v, received %v", keymanager.ErrNoSuchKey, err)
 	}
@@ -68,11 +68,11 @@ func TestSign(t *testing.T) {
 	pubKey := bytesutil.ToBytes48(sks[0].PublicKey().Marshal())
 	msg := [32]byte{}
 
-	sig, err := direct.Sign(pubKey, msg, 0)
+	sig, err := direct.Sign(pubKey, msg)
 	if err != nil {
 		t.Fatalf("Unexpected error: %v", err)
 	}
-	if !sig.Verify(bytesutil.FromBytes32(msg), sks[0].PublicKey(), 0) {
+	if !sig.Verify(bytesutil.FromBytes32(msg), sks[0].PublicKey()) {
 		t.Fatal("Failed to verify generated signature")
 	}
 }
diff --git a/validator/keymanager/keymanager.go b/validator/keymanager/keymanager.go
index 62c56855b7..f803f84386 100644
--- a/validator/keymanager/keymanager.go
+++ b/validator/keymanager/keymanager.go
@@ -13,22 +13,27 @@ var ErrNoSuchKey = errors.New("no such key")
 // ErrCannotSign is returned whenever a signing attempt fails.
 var ErrCannotSign = errors.New("cannot sign")
 
-// ErrCouldSlash is returned whenever a signing attempt is refused due to a potential slashing event.
-var ErrCouldSlash = errors.New("could result in a slashing event")
+// ErrDenied is returned whenever a signing attempt is denied.
+var ErrDenied = errors.New("signing attempt denied")
 
 // KeyManager controls access to private keys by the validator.
 type KeyManager interface {
 	// FetchValidatingKeys fetches the list of public keys that should be used to validate with.
 	FetchValidatingKeys() ([][48]byte, error)
 	// Sign signs a message for the validator to broadcast.
-	Sign(pubKey [48]byte, root [32]byte, domain uint64) (*bls.Signature, error)
+	// Note that the domain should already be part of the root.
+	Sign(pubKey [48]byte, root [32]byte) (*bls.Signature, error)
 }
 
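The revised KeyManager contract above drops the explicit domain argument; callers now pass a 32-byte signing root that already commits to the domain. A minimal in-memory implementation of that contract might look like the sketch below. This is an illustration rather than code from this PR; the *bls.SecretKey map simply mirrors what the Direct keymanager holds, and only shared/bls and shared/bytesutil are repository packages.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/prysmaticlabs/prysm/shared/bls"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
)

var errNoSuchKey = errors.New("no such key")

// memoryKM keeps secret keys in memory, mirroring the shape of the Direct keymanager.
type memoryKM struct {
	secretKeys map[[48]byte]*bls.SecretKey
}

// FetchValidatingKeys fetches the list of public keys that should be used to validate with.
func (km *memoryKM) FetchValidatingKeys() ([][48]byte, error) {
	keys := make([][48]byte, 0, len(km.secretKeys))
	for k := range km.secretKeys {
		keys = append(keys, k)
	}
	return keys, nil
}

// Sign signs a root that already has the domain mixed in, per the revised interface.
func (km *memoryKM) Sign(pubKey [48]byte, root [32]byte) (*bls.Signature, error) {
	secretKey, exists := km.secretKeys[pubKey]
	if !exists {
		return nil, errNoSuchKey
	}
	return secretKey.Sign(root[:]), nil
}

func main() {
	sk := bls.RandKey()
	pub := bytesutil.ToBytes48(sk.PublicKey().Marshal())
	km := &memoryKM{secretKeys: map[[48]byte]*bls.SecretKey{pub: sk}}

	root := [32]byte{1, 2, 3}
	sig, err := km.Sign(pub, root)
	if err != nil {
		panic(err)
	}
	fmt.Println("verified:", sig.Verify(root[:], sk.PublicKey()))
}
```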
 // ProtectingKeyManager provides access to a keymanager that protects its clients from slashing events.
 type ProtectingKeyManager interface {
+	// SignGeneric signs a generic root.
+	// Note that the domain should already be part of the root, but it is provided for authorisation purposes.
+	SignGeneric(pubKey [48]byte, root [32]byte, domain [32]byte) (*bls.Signature, error)
+
 	// SignProposal signs a block proposal for the validator to broadcast.
-	SignProposal(pubKey [48]byte, domain uint64, data *ethpb.BeaconBlockHeader) (*bls.Signature, error)
+	SignProposal(pubKey [48]byte, domain [32]byte, data *ethpb.BeaconBlockHeader) (*bls.Signature, error)
 
 	// SignAttestation signs an attestation for the validator to broadcast.
-	SignAttestation(pubKey [48]byte, domain uint64, data *ethpb.AttestationData) (*bls.Signature, error)
+	SignAttestation(pubKey [48]byte, domain [32]byte, data *ethpb.AttestationData) (*bls.Signature, error)
 }
diff --git a/validator/keymanager/remote.go b/validator/keymanager/remote.go
new file mode 100644
index 0000000000..675a0acbb0
--- /dev/null
+++ b/validator/keymanager/remote.go
@@ -0,0 +1,267 @@
+package keymanager
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/json"
+	"io/ioutil"
+
+	"github.com/pkg/errors"
+	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
+	"github.com/prysmaticlabs/prysm/shared/bls"
+	"github.com/prysmaticlabs/prysm/shared/bytesutil"
+	pb "github.com/wealdtech/eth2-signer-api/pb/v1"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+)
+
+// Remote is a key manager that accesses a remote wallet daemon.
+type Remote struct {
+	paths               []string
+	conn                *grpc.ClientConn
+	accounts            map[[48]byte]*accountInfo
+	signClientInitiator func(*grpc.ClientConn)
+}
+
+type accountInfo struct {
+	Name   string `json:"name"`
+	PubKey []byte `json:"pubkey"`
+}
+
+type remoteOpts struct {
+	Location     string                 `json:"location"`
+	Accounts     []string               `json:"accounts"`
+	Certificates *remoteCertificateOpts `json:"certificates"`
+}
+
+type remoteCertificateOpts struct {
+	CACert     string `json:"ca_cert"`
+	ClientCert string `json:"client_cert"`
+	ClientKey  string `json:"client_key"`
+}
+
+var remoteOptsHelp = `The remote key manager connects to a walletd instance. The options are:
+  - location This is the host and port of the walletd instance to connect to,
+    for example host.example.com:12345.
+  - accounts This is a list of account specifiers. An account specifier is of
+    the form <wallet name>/[account name], where the account name can be a
+    regular expression. If the account specifier is just <wallet name> all
+    accounts in that wallet will be used. Multiple account specifiers can be
+    supplied if required.
+  - certificates This provides paths to certificates:
+    - ca_cert This is the path to the server's certificate authority certificate file
+    - client_cert This is the path to the client's certificate file
+    - client_key This is the path to the client's key file
+
+A sample keymanager options file (with annotations; these should be removed if
+using this as a template) is:
+
+  {
+    "location": "host.example.com:12345",     // Connect to walletd at host.example.com on port 12345
+    "accounts": ["Validators/Account.*"],     // Use all accounts in the 'Validators' wallet starting with 'Account'
+    "certificates": {
+      "ca_cert": "/home/eth2/certs/ca.crt",         // Certificate file for the CA that signed the server's certificate
+      "client_cert": "/home/eth2/certs/client.crt", // Certificate file for this client
+      "client_key": "/home/eth2/certs/client.key"   // Key file for this client
+    }
+  }`
+
+// NewRemoteWallet creates a key manager populated with the keys from walletd.
+func NewRemoteWallet(input string) (KeyManager, string, error) {
+	opts := &remoteOpts{}
+	err := json.Unmarshal([]byte(input), opts)
+	if err != nil {
+		return nil, remoteOptsHelp, err
+	}
+
+	if len(opts.Accounts) == 0 {
+		return nil, remoteOptsHelp, errors.New("at least one account specifier is required")
+	}
+
+	// Load the client certificates.
+	if opts.Certificates == nil {
+		return nil, remoteOptsHelp, errors.New("certificates are required")
+	}
+	if opts.Certificates.ClientCert == "" {
+		return nil, remoteOptsHelp, errors.New("client certificate is required")
+	}
+	if opts.Certificates.ClientKey == "" {
+		return nil, remoteOptsHelp, errors.New("client key is required")
+	}
+	clientPair, err := tls.LoadX509KeyPair(opts.Certificates.ClientCert, opts.Certificates.ClientKey)
+	if err != nil {
+		return nil, remoteOptsHelp, errors.Wrap(err, "failed to obtain client's certificate and/or key")
+	}
+
+	// Load the CA for the server certificate if present.
+	cp := x509.NewCertPool()
+	if opts.Certificates.CACert != "" {
+		serverCA, err := ioutil.ReadFile(opts.Certificates.CACert)
+		if err != nil {
+			return nil, remoteOptsHelp, errors.Wrap(err, "failed to obtain server's CA certificate")
+		}
+		if !cp.AppendCertsFromPEM(serverCA) {
+			return nil, remoteOptsHelp, errors.New("failed to add server's CA certificate to pool")
+		}
+	}
+
+	tlsCfg := &tls.Config{
+		Certificates: []tls.Certificate{clientPair},
+		RootCAs:      cp,
+	}
+	clientCreds := credentials.NewTLS(tlsCfg)
+
+	grpcOpts := []grpc.DialOption{
+		// Require TLS with client certificate.
+		grpc.WithTransportCredentials(clientCreds),
+	}
+
+	conn, err := grpc.Dial(opts.Location, grpcOpts...)
+	if err != nil {
+		return nil, remoteOptsHelp, errors.New("failed to connect to remote wallet")
+	}
+
+	km := &Remote{
+		conn:  conn,
+		paths: opts.Accounts,
+	}
+
+	err = km.RefreshValidatingKeys()
+	if err != nil {
+		return nil, remoteOptsHelp, errors.New("failed to fetch accounts from remote wallet")
+	}
+
+	return km, remoteOptsHelp, nil
+}
+
+// FetchValidatingKeys fetches the list of public keys that should be used to validate with.
+func (km *Remote) FetchValidatingKeys() ([][48]byte, error) {
+	res := make([][48]byte, 0, len(km.accounts))
+	for _, accountInfo := range km.accounts {
+		res = append(res, bytesutil.ToBytes48(accountInfo.PubKey))
+	}
+	return res, nil
+}
+
+// Sign without protection is not supported by remote keymanagers.
+func (km *Remote) Sign(pubKey [48]byte, root [32]byte) (*bls.Signature, error) {
+	return nil, errors.New("remote keymanager does not support unprotected signing")
+}
+
+// SignGeneric signs a generic message for the validator to broadcast.
+func (km *Remote) SignGeneric(pubKey [48]byte, root [32]byte, domain [32]byte) (*bls.Signature, error) {
+	accountInfo, exists := km.accounts[pubKey]
+	if !exists {
+		return nil, ErrNoSuchKey
+	}
+
+	client := pb.NewSignerClient(km.conn)
+	req := &pb.SignRequest{
+		Id:     &pb.SignRequest_Account{Account: accountInfo.Name},
+		Data:   root[:],
+		Domain: domain[:],
+	}
+	resp, err := client.Sign(context.Background(), req)
+	if err != nil {
+		return nil, err
+	}
+	switch resp.State {
+	case pb.SignState_DENIED:
+		return nil, ErrDenied
+	case pb.SignState_FAILED:
+		return nil, ErrCannotSign
+	}
+	return bls.SignatureFromBytes(resp.Signature)
+}
+
+// SignProposal signs a block proposal for the validator to broadcast.
+func (km *Remote) SignProposal(pubKey [48]byte, domain [32]byte, data *ethpb.BeaconBlockHeader) (*bls.Signature, error) { + accountInfo, exists := km.accounts[pubKey] + if !exists { + return nil, ErrNoSuchKey + } + + client := pb.NewSignerClient(km.conn) + req := &pb.SignBeaconProposalRequest{ + Id: &pb.SignBeaconProposalRequest_Account{Account: accountInfo.Name}, + Domain: domain[:], + Data: &pb.BeaconBlockHeader{ + Slot: data.Slot, + ParentRoot: data.ParentRoot, + StateRoot: data.StateRoot, + BodyRoot: data.BodyRoot, + }, + } + resp, err := client.SignBeaconProposal(context.Background(), req) + if err != nil { + return nil, err + } + switch resp.State { + case pb.SignState_DENIED: + return nil, ErrDenied + case pb.SignState_FAILED: + return nil, ErrCannotSign + } + return bls.SignatureFromBytes(resp.Signature) +} + +// SignAttestation signs an attestation for the validator to broadcast. +func (km *Remote) SignAttestation(pubKey [48]byte, domain [32]byte, data *ethpb.AttestationData) (*bls.Signature, error) { + accountInfo, exists := km.accounts[pubKey] + if !exists { + return nil, ErrNoSuchKey + } + + client := pb.NewSignerClient(km.conn) + req := &pb.SignBeaconAttestationRequest{ + Id: &pb.SignBeaconAttestationRequest_Account{Account: accountInfo.Name}, + Domain: domain[:], + Data: &pb.AttestationData{ + Slot: data.Slot, + CommitteeIndex: data.CommitteeIndex, + BeaconBlockRoot: data.BeaconBlockRoot, + Source: &pb.Checkpoint{ + Epoch: data.Source.Epoch, + Root: data.Source.Root, + }, + Target: &pb.Checkpoint{ + Epoch: data.Target.Epoch, + Root: data.Target.Root, + }, + }, + } + resp, err := client.SignBeaconAttestation(context.Background(), req) + if err != nil { + return nil, err + } + switch resp.State { + case pb.SignState_DENIED: + return nil, ErrDenied + case pb.SignState_FAILED: + return nil, ErrCannotSign + } + return bls.SignatureFromBytes(resp.Signature) +} + +// RefreshValidatingKeys refreshes the list of validating keys from the remote signer. 
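Because ProtectingKeyManager is an optional interface, callers can type-assert it and fall back to plain Sign when a keymanager (such as Direct) does not provide slashing protection. A hedged sketch of that pattern follows; signBlockHeader is illustrative, not validator-client code from this PR, and the argument type of NewDirect is assumed from the direct keymanager tests earlier in this diff.

```go
package main

import (
	"fmt"
	"log"

	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bls"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/validator/keymanager"
)

// signBlockHeader prefers the slashing-protected path when available and otherwise
// falls back to Sign with a root that already includes the domain.
func signBlockHeader(
	km keymanager.KeyManager,
	pubKey [48]byte,
	domain [32]byte,
	root [32]byte,
	header *ethpb.BeaconBlockHeader,
) (*bls.Signature, error) {
	if protected, ok := km.(keymanager.ProtectingKeyManager); ok {
		return protected.SignProposal(pubKey, domain, header)
	}
	return km.Sign(pubKey, root)
}

func main() {
	sk := bls.RandKey()
	km := keymanager.NewDirect([]*bls.SecretKey{sk}) // assumed constructor argument type
	pubKey := bytesutil.ToBytes48(sk.PublicKey().Marshal())

	root := [32]byte{0xaa}
	header := &ethpb.BeaconBlockHeader{Slot: 1}

	sig, err := signBlockHeader(km, pubKey, [32]byte{}, root, header)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signature verified:", sig.Verify(root[:], sk.PublicKey()))
}
```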
+func (km *Remote) RefreshValidatingKeys() error {
+	listerClient := pb.NewListerClient(km.conn)
+	listAccountsReq := &pb.ListAccountsRequest{
+		Paths: km.paths,
+	}
+	accountsResp, err := listerClient.ListAccounts(context.Background(), listAccountsReq)
+	if err != nil {
+		return err
+	}
+	accounts := make(map[[48]byte]*accountInfo, len(accountsResp.Accounts))
+	for _, account := range accountsResp.Accounts {
+		account := &accountInfo{
+			Name:   account.Name,
+			PubKey: account.PublicKey,
+		}
+		accounts[bytesutil.ToBytes48(account.PubKey)] = account
+	}
+	km.accounts = accounts
+	return nil
+}
diff --git a/validator/keymanager/remote_test.go b/validator/keymanager/remote_test.go
new file mode 100644
index 0000000000..8b62a71916
--- /dev/null
+++ b/validator/keymanager/remote_test.go
@@ -0,0 +1,204 @@
+package keymanager_test
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/prysmaticlabs/prysm/shared/testutil"
+	"github.com/prysmaticlabs/prysm/validator/keymanager"
+)
+
+var validClientCert = `-----BEGIN CERTIFICATE-----
+MIIEITCCAgmgAwIBAgIQXUJWQZgVO4IX+zlWGI1/mTANBgkqhkiG9w0BAQsFADAU
+MRIwEAYDVQQDEwlBdHRlc3RhbnQwHhcNMjAwMzE3MDgwNjU3WhcNMjEwOTE3MDc1
+OTUyWjASMRAwDgYDVQQDEwdjbGllbnQxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAsc977g16Tan2j7YuA+zQOlDntb4Bkfs4sDOznOEvnozHwRZOgfcP
+jVcA9AS5eZOGIRrsTssptrgVNDPoIHWoKk7LAKyyLM3dGp5PWeyMBoQA5cq+yPAT
+4JkJpDnBFfwxXB99osJH0z3jSTRa62CSVvPRBisK4B9AlLQfcleEQlKJugy9tOAj
+G7zodwEi+J4AYQHmOiwL38ZsKq9We5y4HMQ0E7de0FoU5QHrtuPNrTuwVwrq825l
+cEAAFey6Btngx+sziysPHWHYOq4xOZ1UPBApeaAFLguzusc/4VwM7kzRNr4VOD8a
+eC3CtKLhBBVVxHI5ZlaHS+YylNGYD4+FxQIDAQABo3EwbzAOBgNVHQ8BAf8EBAMC
+A7gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBQDGCE0
+3k4rHzB+Ycf3pt1MzeDPgzAfBgNVHSMEGDAWgBScIYZa4dQBIW/gVwR0ctGCuHhe
+9jANBgkqhkiG9w0BAQsFAAOCAgEAHG/EfvqIwbhYfci+zRCYC7aQPuvhivJblBwN
+mbXo2qsxvje1hcKm0ptJLOy/cjJzeLJYREhQlXDPRJC/xgELnbXRjgag82r35+pf
+wVJwP6Yw53VCM3o0QKsUrKyMm4sAijOBrJyqpB5untAieZsry5Bfj0S4YobbtdJa
+VsEioU07fVVczf5lYN0XrLgRnXq3LMkTiZ6drFiqLkwmXQZVxNujmcaFSm7yCALl
+EdhYNmaqedS5me5UOGxwPacrsZwWF9dvMsl3OswgTcaGdsUtx2/q+S2vbZUAM/Gw
+qaTanDfvVtVTF7KzVN9hiqKe4mO0HHHK2HWJYBLdRJjInOgRW+53hCmUhLxD+Dq+
+31jLKxn/Y4hyH9E+55b1sJHCFpsbEtVD53fojiH2C/uLbhq4Wr1PXgOoxzf2KeSQ
+B3ENu8C4b6AlNhqOnz5zeDcx8Ug0vMfVDAwf6RAYMG5b/MoWNKcLNXhk8H1nbVkt
+16ppjh6I27JqfNqfP2J/p3BF++ZugZuWfN9DRaJ6UPz+yyF7eW8fyDAQNl7LS0Kh
+8PlF5cYvyIIKVHe38Mn8ZAWboKUs0xNv2vhA9V/4Q1ZzAEkXjmbk8H26sjGvJnvg
+Lgm/+6LVWR4EnUlU8aEWASEpTWq2lSRF3ZOvNstHnufyiDfcwDcl/IKKQiVQQ3mX
+tw8Jf74=
+-----END CERTIFICATE-----`
+var validClientKey = `-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAsc977g16Tan2j7YuA+zQOlDntb4Bkfs4sDOznOEvnozHwRZO
+gfcPjVcA9AS5eZOGIRrsTssptrgVNDPoIHWoKk7LAKyyLM3dGp5PWeyMBoQA5cq+
+yPAT4JkJpDnBFfwxXB99osJH0z3jSTRa62CSVvPRBisK4B9AlLQfcleEQlKJugy9
+tOAjG7zodwEi+J4AYQHmOiwL38ZsKq9We5y4HMQ0E7de0FoU5QHrtuPNrTuwVwrq
+825lcEAAFey6Btngx+sziysPHWHYOq4xOZ1UPBApeaAFLguzusc/4VwM7kzRNr4V
+OD8aeC3CtKLhBBVVxHI5ZlaHS+YylNGYD4+FxQIDAQABAoIBAQCjV2MVcDQmHDhw
+FH95A5bVu3TgM8flfs64rwYU25iPIexuqDs+kOMsh/xMLfrkgGz7BGyIhYGwZLK1
+3ekjyHHPS8qYuAyFtCelSEDE7tRDOAhLEFDq7gCUloGQ561EsQP3CMa1OZwZpgSh
+PwM2ruRAFIK0E95NvOfqsv0gYN0Svo7hYjNsvW6ok/ZGMyN2ikcRR04wGOFOGjfT
+xTmfURc9ejnOjHAOqLTpToPwM1/gWWR2iMQefC4njy4MO2BXqOPUmHxmmR4PYhu2
+8EcKbyRs+/fvL3GgD3VAlOe5vnkfBzssQhHmexgSk5lHZrcSxUGXYGrYKPAeV2mk
+5HRBWp0RAoGBAOUn5w+NCAugcTGP0hfNlyGXsXqUZvnMyFWvUcxgzgPlJyEyDnKn
+aIb1DFOF2HckCfLZdrHqqgaF6K3TDvW9BgSKIsvISpo1S95ZPD6DKUo6YQ10CQRW
+q/ZZVbxtFksVgFRGYpCVmPNULmx7CiXDT1b/suwNMAwCZwiNPTSvKQVLAoGBAMaj
+zDo1/eepRslqnz5s8hh7dGEjfG/ZJcLgAJAxCyAgnIP4Tls7QkNhCVp9LcN6i1bc +CnT6AIuZRXSJWEdp4k2QnVFUmh9Q5MGgwrKYSY5M/1puTISlF1yQ8J6FX8BlDVmy +4dyaSyC0RIvgBzF9/KBDxxmJcHgGQ0awLeeyl4cvAoGBAN83FS3itLmOmXQrofyp +uNNyDeFXeU9OmL5OPqGUkljc+Favib9JLtp3DIC3WfoD0uUJy0LXULN18QaRFnts +mtYFMIvMGE9KJxL5XWOPI8M4Rp1yL+5X9r3Km2cl45dT5GMzBIPOFOTBVU86MtJC +A6C9Bi5FUk4AcRi1a69MB+stAoGAWNiwoyS9IV38dGCFQ4W1LzAg2MXnhZuJoUVR +2yykfkU33Gs2mOXDeKGxblDpJDLumfYnkzSzA72VbE92NdLtTqYtR1Bg8zraZqTC +EOG+nLBh0o/dF8ND1LpbdXvQXRyVwRYaofI9Qi5/LlUQwplIYmKObiSkMnsSok5w +6d5emi8CgYBjtUihOFaAmgqkTHOn4j4eKS1O7/H8QQSVe5M0bocmAIbgJ4At3GnI +E1JcIY2SZtSwAWs6aQPGE42gwsNCCsQWdJNtViO23JbCwlcPToC4aDfc0JJNaYqp +oVV7C5jmJh9VRd2tXIXIZMMNOfThfNf2qDQuJ1S2t5KugozFiRsHUg== +-----END RSA PRIVATE KEY-----` +var validCACert = `-----BEGIN CERTIFICATE----- +MIIE6DCCAtCgAwIBAgIBATANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDEwlBdHRl +c3RhbnQwHhcNMjAwMzE3MDc1OTU4WhcNMjEwOTE3MDc1OTUzWjAUMRIwEAYDVQQD +EwlBdHRlc3RhbnQwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC85Ecg +rLGpidO9yrpXk2mJmahqou+NY3YmaD/h5c4S8OCJrkvbgUKqM6+pZtPJ3P3Dblba +mBsuDJ2TCFU4CBamuSwuxS15HyI9n5rUHGn7NLXbUVkNQRFsYqT4mwgc0wkwhzIm +ZceinUXlEUUVUTcoWoaZnRR5+bk0Dj0nuF2PCTwdMq2UqAUSE+rz1v2/KezWOTae +XUbDpqQ0b6F2dTjg72qPZJXV2J48qJBAxx42q+Bm8eeCFRPG7cdWn35BUa6Ri+S0 +aNPRpV6HqxYel/vnIbgZQ7ukWYeGCaKmOfaQoBGTmjKJ4jZrfKY8u06bIjMAYx6v +lTFBGKf43Sg8Z353dmAXqahSOjbFYMyTFQWOMy5t7elVOr/ZPXfZFquBd5Kb1s1H +6Ef8cd/TZAl7/9bAq8F7cYg4I9JUyy3kbLjI05qfiQGpd/0+zHFraP4WTMbU4g+k +bdWfkTQ4xAz1KY1trhUK7Ur6Bwf9QzbY6xZfDMftSnFzd8oWlspPO3KA23zQoVHH +18TXcM0efLY/xyEArctco2Rx/SNA3z0nY7tLaV1vB3P28y06XvHoUBu482YVbS4E +IMF48ddWSUfbChNZMPa4h8BSQVyrjvdU9R8LwRcaDAIFGWlqUqTIBJEiNoaFoIHK +Xyz3LZmcyZ7S547DDIWl5TcsJtl84GPWILzVowIDAQABo0UwQzAOBgNVHQ8BAf8E +BAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQUnCGGWuHUASFv4FcE +dHLRgrh4XvYwDQYJKoZIhvcNAQELBQADggIBADXJjjhLrOrdsQLNQgHGL1cXtvmO +uZGbxZXAUPw/rFoL73gPhPm9+c9XSpQETYxNKJk6m4sZSMNNZ3vlrr9TlaIGev3r +06u0/suIj01YM6NAIlHPpflOONQscpEJDvCJEQg5kw/V5AT0OCqsnNholAyhlsjI +4nTAFmR3LY2oMPxgwZY+PrCskvgcNhtR5zh+WxB17TnnKz7yhphWlJHwLfJro928 +nB4thhwYNt3C5Z7tGiX/rA3MW7Sh/H34xC9ISs8ybkwVj+EKjf20FADzjRDF49++ +hqVDxOJw+W0ahQYHBQ/sTkn9S+Cp6PaAw9+efbPG1YuAkCFOWMDKM/yG5tLIpft+ +zASk+VL2WO9oNiJN0rtVwNU//TtENYLS+9p4XTpwEuEIH4ZZApVlEXf0GHKF1x+n +MVH80sXFC9siv5xW76FzvxKv/RZ6fKm8T+uizt8U+jwhL1flS4Ahj7zWUV6cwdWH +57O6FnN+VVNYbV4Ze8SzHS09eS1gBtityJVJttUJk70J/LtMPkaun/+VuMvAha7T +0tPn7P3RbGj8QYVUm+c8Z3arWaJ4K20n3v3rSYtLwV1PpI2T8nL8is7P1AUI4da+ +JW5Xg09Yct1izRb64SylduQC9a1bbjoMU0iABaDzCl7AHzK0RlkjALQ4sIt24nKL +Geq0WUbSP2OuDkAf +-----END CERTIFICATE-----` + +func TestNewRemoteWallet(t *testing.T) { + tests := []struct { + name string + opts string + clientCert string + clientKey string + caCert string + err string + }{ + { + name: "Empty", + opts: ``, + err: "unexpected end of JSON input", + }, + { + name: "NoAccounts", + opts: `{}`, + err: "at least one account specifier is required", + }, + { + name: "NoCertificates", + opts: `{"accounts":["foo"]}`, + err: "certificates are required", + }, + { + name: "NoClientCertificate", + opts: `{"accounts":["foo"],"certificates":{}}`, + err: "client certificate is required", + }, + { + name: "NoClientKey", + opts: `{"accounts":["foo"],"certificates":{"client_cert":"foo"}}`, + err: "client key is required", + }, + { + name: "MissingClientKey", + opts: `{"accounts":["foo"],"certificates":{"client_cert":"foo","client_key":"bar"}}`, + err: "failed to obtain client's certificate and/or key: open foo: no such file or directory", + }, + { + name: "BadClientCert", + clientCert: `bad`, + clientKey: 
validClientKey,
+			opts: `{"accounts":["foo"],"certificates":{"client_cert":"<>","client_key":"<>"}}`,
+			err: "failed to obtain client's certificate and/or key: tls: failed to find any PEM data in certificate input",
+		},
+		{
+			name: "BadClientKey",
+			clientCert: validClientCert,
+			clientKey: `bad`,
+			opts: `{"accounts":["foo"],"certificates":{"client_cert":"<>","client_key":"<>"}}`,
+			err: "failed to obtain client's certificate and/or key: tls: failed to find any PEM data in key input",
+		},
+		{
+			name: "MissingCACert",
+			clientCert: validClientCert,
+			clientKey: validClientKey,
+			opts: `{"accounts":["foo"],"certificates":{"client_cert":"<>","client_key":"<>","ca_cert":"bad"}}`,
+			err: "failed to obtain server's CA certificate: open bad: no such file or directory",
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+
+			if test.caCert != "" || test.clientCert != "" || test.clientKey != "" {
+				dir := fmt.Sprintf("%s/%s", testutil.TempDir(), test.name)
+				if err := os.MkdirAll(dir, 0777); err != nil {
+					t.Fatal(err)
+				}
+				if test.caCert != "" {
+					caCertPath := fmt.Sprintf("%s/ca.crt", dir)
+					if err := ioutil.WriteFile(caCertPath, []byte(test.caCert), 0666); err != nil {
+						t.Fatalf("Failed to write CA certificate: %v", err)
+					}
+					test.opts = strings.Replace(test.opts, "<>", caCertPath, 1)
+				}
+				if test.clientCert != "" {
+					clientCertPath := fmt.Sprintf("%s/client.crt", dir)
+					if err := ioutil.WriteFile(clientCertPath, []byte(test.clientCert), 0666); err != nil {
+						t.Fatalf("Failed to write client certificate: %v", err)
+					}
+					test.opts = strings.Replace(test.opts, "<>", clientCertPath, 1)
+				}
+				if test.clientKey != "" {
+					clientKeyPath := fmt.Sprintf("%s/client.key", dir)
+					if err := ioutil.WriteFile(clientKeyPath, []byte(test.clientKey), 0666); err != nil {
+						t.Fatalf("Failed to write client key: %v", err)
+					}
+					test.opts = strings.Replace(test.opts, "<>", clientKeyPath, 1)
+				}
+			}
+
+			_, _, err := keymanager.NewRemoteWallet(test.opts)
+			if test.err == "" {
+				if err != nil {
+					t.Fatalf("Received unexpected error: %v", err.Error())
+				}
+			} else {
+				if err == nil {
+					t.Fatal("Did not receive an error")
+				}
+				if err.Error() != test.err {
+					t.Fatalf("Did not receive expected error: expected %v, received %v", test.err, err.Error())
+				}
+			}
+		})
+	}
+}
diff --git a/validator/keymanager/wallet.go b/validator/keymanager/wallet.go
index 4d40b73975..471cfdcc2e 100644
--- a/validator/keymanager/wallet.go
+++ b/validator/keymanager/wallet.go
@@ -122,12 +122,13 @@ func (km *Wallet) FetchValidatingKeys() ([][48]byte, error) {
 }
 
 // Sign signs a message for the validator to broadcast.
-func (km *Wallet) Sign(pubKey [48]byte, root [32]byte, domain uint64) (*bls.Signature, error) {
+func (km *Wallet) Sign(pubKey [48]byte, root [32]byte) (*bls.Signature, error) {
 	account, exists := km.accounts[pubKey]
 	if !exists {
 		return nil, ErrNoSuchKey
 	}
-	sig, err := account.Sign(root[:], domain)
+	// TODO(#4817) Update with new library to remove domain here.
+ sig, err := account.Sign(root[:], 0) if err != nil { return nil, err } diff --git a/validator/main.go b/validator/main.go index 40928823d3..d38f12585a 100644 --- a/validator/main.go +++ b/validator/main.go @@ -35,7 +35,6 @@ func startNode(ctx *cli.Context) error { } var appFlags = []cli.Flag{ - flags.NoCustomConfigFlag, flags.BeaconRPCProviderFlag, flags.CertFlag, flags.GraffitiFlag, @@ -99,16 +98,9 @@ contract in order to activate the validator client`, }, Action: func(ctx *cli.Context) error { featureconfig.ConfigureValidator(ctx) - // Use custom config values if the --no-custom-config flag is set. - if !ctx.Bool(flags.NoCustomConfigFlag.Name) { - log.Info("Using custom parameter configuration") - if featureconfig.Get().MinimalConfig { - log.Warn("Using Minimal Config") - params.UseMinimalConfig() - } else { - log.Warn("Using Demo Config") - params.UseDemoBeaconConfig() - } + if featureconfig.Get().MinimalConfig { + log.Warn("Using Minimal Config") + params.UseMinimalConfig() } if keystoreDir, _, err := accounts.CreateValidatorAccount(ctx.String(flags.KeystorePathFlag.Name), ctx.String(flags.PasswordFlag.Name)); err != nil { diff --git a/validator/node/node.go b/validator/node/node.go index eb0dbdef3f..be34e4023d 100644 --- a/validator/node/node.go +++ b/validator/node/node.go @@ -248,6 +248,8 @@ func selectKeyManager(ctx *cli.Context) (keymanager.KeyManager, error) { km, help, err = keymanager.NewKeystore(opts) case "wallet": km, help, err = keymanager.NewWallet(opts) + case "remote": + km, help, err = keymanager.NewRemoteWallet(opts) default: return nil, fmt.Errorf("unknown keymanager %q", manager) } diff --git a/validator/usage.go b/validator/usage.go index 31472d6d49..244ce9b586 100644 --- a/validator/usage.go +++ b/validator/usage.go @@ -72,7 +72,6 @@ var appHelpFlagGroups = []flagGroup{ { Name: "validator", Flags: []cli.Flag{ - flags.NoCustomConfigFlag, flags.BeaconRPCProviderFlag, flags.CertFlag, flags.KeyManager,
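Putting the new pieces together, selecting the remote keymanager programmatically looks roughly like the sketch below. NewRemoteWallet and FetchValidatingKeys are the functions added in this diff; the endpoint, account pattern, and certificate paths are placeholders taken from remoteOptsHelp.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/validator/keymanager"
)

func main() {
	// Placeholder walletd endpoint and certificate paths; see remoteOptsHelp above.
	opts := `{
		"location": "host.example.com:12345",
		"accounts": ["Validators/Account.*"],
		"certificates": {
			"ca_cert": "/home/eth2/certs/ca.crt",
			"client_cert": "/home/eth2/certs/client.crt",
			"client_key": "/home/eth2/certs/client.key"
		}
	}`

	km, help, err := keymanager.NewRemoteWallet(opts)
	if err != nil {
		// On failure the returned help text explains the expected options format.
		log.Fatalf("%v\n%s", err, help)
	}

	keys, err := km.FetchValidatingKeys()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("remote signer exposes %d validating keys\n", len(keys))
}
```

In the validator binary the same path is reached through the new `remote` case added to selectKeyManager above.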