mirror of
https://github.com/OffchainLabs/prysm.git
synced 2026-01-10 07:58:22 -05:00
Rename NewService to New (#8337)
* Hide beacon operation field if it's 0
* Rename NewSerivce to New
* Revert "Hide beacon operation field if it's 0"
This reverts commit 896fa11a0b.
* Fix NewServiceRegistry
* Update slasher/detection/service.go
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
This commit is contained in:
@@ -32,7 +32,7 @@ func TestService_TreeHandler(t *testing.T) {
|
||||
),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
s, err := NewService(ctx, cfg)
|
||||
s, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.forkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
|
||||
require.NoError(t, s.forkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
|
||||
|
||||
@@ -29,7 +29,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = blockTree1(beaconDB, []byte{'g'})
|
||||
@@ -133,7 +133,7 @@ func TestStore_OnAttestation_Ok(t *testing.T) {
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
genesisState, pks := testutil.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, genesisState.SetGenesisTime(uint64(timeutils.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
|
||||
@@ -157,7 +157,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := testutil.NewBeaconState()
|
||||
@@ -228,7 +228,7 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
epoch := uint64(1)
|
||||
@@ -266,7 +266,7 @@ func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
nowTime := params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot
|
||||
@@ -278,7 +278,7 @@ func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
nowTime := params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot
|
||||
@@ -290,7 +290,7 @@ func TestAttEpoch_NotMatch(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
nowTime := 2 * params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot
|
||||
@@ -303,7 +303,7 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
d := testutil.HydrateAttestationData(ðpb.AttestationData{})
|
||||
@@ -315,7 +315,7 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
@@ -333,7 +333,7 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
@@ -351,7 +351,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
@@ -378,7 +378,7 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
@@ -405,7 +405,7 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
|
||||
@@ -35,7 +35,7 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
StateGen: stategen.New(beaconDB),
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -129,7 +129,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
@@ -179,7 +179,7 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
service.genesisTime = time.Now()
|
||||
|
||||
@@ -213,7 +213,7 @@ func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
lastJustifiedBlk := testutil.NewBeaconBlock()
|
||||
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
|
||||
@@ -243,7 +243,7 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
s, err := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
|
||||
@@ -276,7 +276,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
@@ -310,7 +310,7 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
signedBlock := testutil.NewBeaconBlock()
|
||||
@@ -341,7 +341,7 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Root: make([]byte, 32)}
|
||||
@@ -378,7 +378,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Root: make([]byte, 32)}
|
||||
@@ -418,7 +418,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
|
||||
// Set finalized epoch to 1.
|
||||
@@ -558,7 +558,7 @@ func TestCurrentSlot_HandlesOverflow(t *testing.T) {
|
||||
}
|
||||
func TestAncestorByDB_CtxErr(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
service, err := NewService(ctx, &Config{})
|
||||
service, err := New(ctx, &Config{})
|
||||
require.NoError(t, err)
|
||||
|
||||
cancel()
|
||||
@@ -571,7 +571,7 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b1 := testutil.NewBeaconBlock()
|
||||
@@ -614,7 +614,7 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
|
||||
func TestAncestor_CanUseForkchoice(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b1 := testutil.NewBeaconBlock()
|
||||
@@ -653,7 +653,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b1 := testutil.NewBeaconBlock()
|
||||
@@ -690,7 +690,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
|
||||
func TestEnsureRootNotZeroHashes(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &Config{}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
service.genesisRoot = [32]byte{'a'}
|
||||
|
||||
@@ -743,7 +743,7 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
|
||||
for _, test := range tests {
|
||||
beaconState := testutil.NewBeaconState()
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(test.args.stateCheckPoint))
|
||||
service, err := NewService(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
|
||||
service, err := New(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
|
||||
require.NoError(t, err)
|
||||
service.justifiedCheckpt = test.args.cachedCheckPoint
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
|
||||
@@ -834,7 +834,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
service, err := NewService(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
|
||||
service, err := New(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
|
||||
require.NoError(t, err)
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{
|
||||
Root: tt.args.finalizedRoot[:],
|
||||
@@ -852,7 +852,7 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
gBlk := testutil.NewBeaconBlock()
|
||||
@@ -880,7 +880,7 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
|
||||
func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &Config{}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := testutil.NewBeaconState()
|
||||
@@ -892,7 +892,7 @@ func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
|
||||
func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &Config{}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
s, _ := testutil.DeterministicGenesisState(t, 1024)
|
||||
@@ -913,7 +913,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
DepositCache: depositCache,
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
gs, keys := testutil.DeterministicGenesisState(t, 32)
|
||||
|
||||
@@ -37,7 +37,7 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
@@ -65,7 +65,7 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
@@ -99,7 +99,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
|
||||
StateGen: stategen.New(beaconDB),
|
||||
AttPool: attestations.NewPool(),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := New(ctx, cfg)
|
||||
service.genesisTime = timeutils.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
require.NoError(t, err)
|
||||
genesisState, pks := testutil.DeterministicGenesisState(t, 64)
|
||||
|
||||
@@ -133,7 +133,7 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
s, err := NewService(ctx, cfg)
|
||||
s, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.saveGenesisData(ctx, genesis))
|
||||
gBlk, err := s.beaconDB.GenesisBlock(ctx)
|
||||
@@ -174,7 +174,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
s, err := NewService(ctx, cfg)
|
||||
s, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.saveGenesisData(ctx, genesis))
|
||||
gBlk, err := s.beaconDB.GenesisBlock(ctx)
|
||||
@@ -255,7 +255,7 @@ func TestService_ReceiveBlockInitialSync(t *testing.T) {
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
s, err := NewService(ctx, cfg)
|
||||
s, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
err = s.saveGenesisData(ctx, genesis)
|
||||
require.NoError(t, err)
|
||||
@@ -336,7 +336,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
s, err := NewService(ctx, cfg)
|
||||
s, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
err = s.saveGenesisData(ctx, genesis)
|
||||
require.NoError(t, err)
|
||||
@@ -362,7 +362,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_HasInitSyncBlock(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{StateNotifier: &blockchainTesting.MockStateNotifier{}})
|
||||
s, err := New(context.Background(), &Config{StateNotifier: &blockchainTesting.MockStateNotifier{}})
|
||||
require.NoError(t, err)
|
||||
r := [32]byte{'a'}
|
||||
if s.HasInitSyncBlock(r) {
|
||||
@@ -377,7 +377,7 @@ func TestService_HasInitSyncBlock(t *testing.T) {
|
||||
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
|
||||
s, err := New(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
|
||||
require.NoError(t, err)
|
||||
st := params.BeaconConfig().SlotsPerEpoch * uint64(epochsSinceFinalitySaveHotStateDB)
|
||||
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
@@ -390,7 +390,7 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
|
||||
s, err := New(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{}
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
@@ -403,7 +403,7 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
|
||||
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
|
||||
s, err := New(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{Epoch: 10000000}
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
@@ -99,9 +99,9 @@ type Config struct {
|
||||
WspEpoch uint64
|
||||
}
|
||||
|
||||
// NewService instantiates a new block service instance that will
|
||||
// New instantiates a new block service instance that will
|
||||
// be registered into a running beacon node.
|
||||
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
func New(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &Service{
|
||||
ctx: ctx,
|
||||
|
||||
@@ -84,14 +84,14 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
DepositContainers: []*protodb.DepositContainer{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
web3Service, err = powchain.NewService(ctx, &powchain.Web3ServiceConfig{
|
||||
web3Service, err = powchain.New(ctx, &powchain.Web3ServiceConfig{
|
||||
BeaconDB: beaconDB,
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
DepositContract: common.Address{},
|
||||
})
|
||||
require.NoError(t, err, "Unable to set up web3 service")
|
||||
|
||||
opsService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
|
||||
opsService, err := attestations.New(ctx, &attestations.Config{Pool: attestations.NewPool()})
|
||||
require.NoError(t, err)
|
||||
|
||||
depositCache, err := depositcache.New()
|
||||
@@ -113,7 +113,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
// Safe a state in stategen to purposes of testing a service stop / shutdown.
|
||||
require.NoError(t, cfg.StateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))
|
||||
|
||||
chainService, err := NewService(ctx, cfg)
|
||||
chainService, err := New(ctx, cfg)
|
||||
require.NoError(t, err, "Unable to setup chain service")
|
||||
chainService.genesisTime = time.Unix(1, 0) // non-zero time
|
||||
|
||||
|
||||
@@ -48,10 +48,10 @@ type Config struct {
|
||||
GenesisPath string
|
||||
}
|
||||
|
||||
// NewService is an interoperability testing service to inject a deterministically generated genesis state
|
||||
// New is an interoperability testing service to inject a deterministically generated genesis state
|
||||
// into the beacon chain database and running services at start up. This service should not be used in production
|
||||
// as it does not have any value other than ease of use for testing purposes.
|
||||
func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
func New(ctx context.Context, cfg *Config) *Service {
|
||||
log.Warn("Saving generated genesis state in database for interop testing")
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
|
||||
@@ -385,7 +385,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
svc, err := p2p.NewService(b.ctx, &p2p.Config{
|
||||
svc, err := p2p.New(b.ctx, &p2p.Config{
|
||||
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
|
||||
StaticPeers: sliceutil.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
|
||||
BootstrapNodeAddr: bootnodeAddrs,
|
||||
@@ -420,7 +420,7 @@ func (b *BeaconNode) fetchP2P() p2p.P2P {
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerAttestationPool() error {
|
||||
s, err := attestations.NewService(b.ctx, &attestations.Config{
|
||||
s, err := attestations.New(b.ctx, &attestations.Config{
|
||||
Pool: b.attestationPool,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -447,7 +447,7 @@ func (b *BeaconNode) registerBlockchainService() error {
|
||||
}
|
||||
|
||||
maxRoutines := b.cliCtx.Int(cmd.MaxGoroutines.Name)
|
||||
blockchainService, err := blockchain.NewService(b.ctx, &blockchain.Config{
|
||||
blockchainService, err := blockchain.New(b.ctx, &blockchain.Config{
|
||||
BeaconDB: b.db,
|
||||
DepositCache: b.depositCache,
|
||||
ChainStartFetcher: web3Service,
|
||||
@@ -498,7 +498,7 @@ func (b *BeaconNode) registerPOWChainService() error {
|
||||
StateGen: b.stateGen,
|
||||
Eth1HeaderReqLimit: b.cliCtx.Uint64(flags.Eth1HeaderReqLimit.Name),
|
||||
}
|
||||
web3Service, err := powchain.NewService(b.ctx, cfg)
|
||||
web3Service, err := powchain.New(b.ctx, cfg)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not register proof-of-work chain web3Service")
|
||||
}
|
||||
@@ -538,7 +538,7 @@ func (b *BeaconNode) registerSyncService() error {
|
||||
return err
|
||||
}
|
||||
|
||||
rs := regularsync.NewService(b.ctx, ®ularsync.Config{
|
||||
rs := regularsync.New(b.ctx, ®ularsync.Config{
|
||||
DB: b.db,
|
||||
P2P: b.fetchP2P(),
|
||||
Chain: chainService,
|
||||
@@ -561,7 +561,7 @@ func (b *BeaconNode) registerInitialSyncService() error {
|
||||
return err
|
||||
}
|
||||
|
||||
is := initialsync.NewService(b.ctx, &initialsync.Config{
|
||||
is := initialsync.New(b.ctx, &initialsync.Config{
|
||||
DB: b.db,
|
||||
Chain: chainService,
|
||||
P2P: b.fetchP2P(),
|
||||
@@ -613,7 +613,7 @@ func (b *BeaconNode) registerRPCService() error {
|
||||
enableDebugRPCEndpoints := b.cliCtx.Bool(flags.EnableDebugRPCEndpoints.Name)
|
||||
maxMsgSize := b.cliCtx.Int(cmd.GrpcMaxCallRecvMsgSizeFlag.Name)
|
||||
p2pService := b.fetchP2P()
|
||||
rpcService := rpc.NewService(b.ctx, &rpc.Config{
|
||||
rpcService := rpc.New(b.ctx, &rpc.Config{
|
||||
Host: host,
|
||||
Port: port,
|
||||
BeaconMonitoringHost: beaconMonitoringHost,
|
||||
@@ -679,7 +679,7 @@ func (b *BeaconNode) registerPrometheusService(cliCtx *cli.Context) error {
|
||||
|
||||
additionalHandlers = append(additionalHandlers, prometheus.Handler{Path: "/tree", Handler: c.TreeHandler})
|
||||
|
||||
service := prometheus.NewService(
|
||||
service := prometheus.New(
|
||||
fmt.Sprintf("%s:%d", b.cliCtx.String(cmd.MonitoringHostFlag.Name), b.cliCtx.Int(flags.MonitoringPortFlag.Name)),
|
||||
b.services,
|
||||
additionalHandlers...,
|
||||
@@ -719,7 +719,7 @@ func (b *BeaconNode) registerInteropServices() error {
|
||||
genesisStatePath := b.cliCtx.String(flags.InteropGenesisStateFlag.Name)
|
||||
|
||||
if genesisValidators > 0 || genesisStatePath != "" {
|
||||
svc := interopcoldstart.NewService(b.ctx, &interopcoldstart.Config{
|
||||
svc := interopcoldstart.New(b.ctx, &interopcoldstart.Config{
|
||||
GenesisTime: genesisTime,
|
||||
NumValidators: genesisValidators,
|
||||
BeaconDB: b.db,
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
)
|
||||
|
||||
func TestBatchAttestations_Multiple(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
|
||||
s, err := New(context.Background(), &Config{Pool: NewPool()})
|
||||
require.NoError(t, err)
|
||||
|
||||
priv, err := bls.RandKey()
|
||||
@@ -114,7 +114,7 @@ func TestBatchAttestations_Multiple(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBatchAttestations_Single(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
|
||||
s, err := New(context.Background(), &Config{Pool: NewPool()})
|
||||
require.NoError(t, err)
|
||||
|
||||
priv, err := bls.RandKey()
|
||||
@@ -156,7 +156,7 @@ func TestBatchAttestations_Single(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAggregateAndSaveForkChoiceAtts_Single(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
|
||||
s, err := New(context.Background(), &Config{Pool: NewPool()})
|
||||
require.NoError(t, err)
|
||||
|
||||
priv, err := bls.RandKey()
|
||||
@@ -180,7 +180,7 @@ func TestAggregateAndSaveForkChoiceAtts_Single(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAggregateAndSaveForkChoiceAtts_Multiple(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
|
||||
s, err := New(context.Background(), &Config{Pool: NewPool()})
|
||||
require.NoError(t, err)
|
||||
|
||||
priv, err := bls.RandKey()
|
||||
@@ -231,7 +231,7 @@ func TestAggregateAndSaveForkChoiceAtts_Multiple(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSeenAttestations_PresentInCache(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
|
||||
s, err := New(context.Background(), &Config{Pool: NewPool()})
|
||||
require.NoError(t, err)
|
||||
|
||||
ad1 := testutil.HydrateAttestationData(ðpb.AttestationData{})
|
||||
@@ -305,7 +305,7 @@ func TestService_seen(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
|
||||
s, err := New(context.Background(), &Config{Pool: NewPool()})
|
||||
require.NoError(t, err)
|
||||
|
||||
for i, tt := range tests {
|
||||
|
||||
@@ -19,7 +19,7 @@ func TestPruneExpired_Ticker(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
|
||||
s, err := NewService(ctx, &Config{
|
||||
s, err := New(ctx, &Config{
|
||||
Pool: NewPool(),
|
||||
pruneInterval: 250 * time.Millisecond,
|
||||
})
|
||||
@@ -81,7 +81,7 @@ func TestPruneExpired_Ticker(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPruneExpired_PruneExpiredAtts(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
|
||||
s, err := New(context.Background(), &Config{Pool: NewPool()})
|
||||
require.NoError(t, err)
|
||||
|
||||
ad1 := testutil.HydrateAttestationData(ðpb.AttestationData{})
|
||||
@@ -114,7 +114,7 @@ func TestPruneExpired_PruneExpiredAtts(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPruneExpired_Expired(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
|
||||
s, err := New(context.Background(), &Config{Pool: NewPool()})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Rewind back one epoch worth of time.
|
||||
|
||||
@@ -30,9 +30,9 @@ type Config struct {
|
||||
pruneInterval time.Duration
|
||||
}
|
||||
|
||||
// NewService instantiates a new attestation pool service instance that will
|
||||
// New instantiates a new attestation pool service instance that will
|
||||
// be registered into a running beacon node.
|
||||
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
func New(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
cache, err := lru.New(forkChoiceProcessedRootsSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
)
|
||||
|
||||
func TestStop_OK(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{})
|
||||
s, err := New(context.Background(), &Config{})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.Stop(), "Unable to stop attestation pool service")
|
||||
assert.ErrorContains(t, context.Canceled.Error(), s.ctx.Err(), "Context was not canceled")
|
||||
|
||||
@@ -192,7 +192,7 @@ func TestStaticPeering_PeersAreAdded(t *testing.T) {
|
||||
cfg.StaticPeers = staticPeers
|
||||
cfg.StateNotifier = &mock.MockStateNotifier{}
|
||||
cfg.NoDiscovery = true
|
||||
s, err := NewService(context.Background(), cfg)
|
||||
s, err := New(context.Background(), cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
exitRoutine := make(chan bool)
|
||||
|
||||
@@ -91,7 +91,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
|
||||
cfg.UDPPort = 14000
|
||||
cfg.TCPPort = 14001
|
||||
cfg.MaxPeers = 30
|
||||
s, err = NewService(context.Background(), cfg)
|
||||
s, err = New(context.Background(), cfg)
|
||||
require.NoError(t, err)
|
||||
s.genesisTime = genesisTime
|
||||
s.genesisValidatorsRoot = make([]byte, 32)
|
||||
@@ -182,7 +182,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
|
||||
cfg.TCPPort = 14001
|
||||
cfg.MaxPeers = 30
|
||||
cfg.StateNotifier = &mock.MockStateNotifier{}
|
||||
s, err = NewService(context.Background(), cfg)
|
||||
s, err = New(context.Background(), cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
s.genesisTime = genesisTime
|
||||
|
||||
@@ -46,8 +46,8 @@ type Config struct {
|
||||
GossipScorerConfig *GossipScorerConfig
|
||||
}
|
||||
|
||||
// NewService provides fully initialized peer scoring service.
|
||||
func NewService(ctx context.Context, store *peerdata.Store, config *Config) *Service {
|
||||
// New provides fully initialized peer scoring service.
|
||||
func New(ctx context.Context, store *peerdata.Store, config *Config) *Service {
|
||||
s := &Service{
|
||||
store: store,
|
||||
weights: make(map[Scorer]float64),
|
||||
|
||||
@@ -88,7 +88,7 @@ func NewStatus(ctx context.Context, config *StatusConfig) *Status {
|
||||
return &Status{
|
||||
ctx: ctx,
|
||||
store: store,
|
||||
scorers: scorers.NewService(ctx, store, config.ScorerParams),
|
||||
scorers: scorers.New(ctx, store, config.ScorerParams),
|
||||
ipTracker: map[string]uint64{},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -325,7 +325,7 @@ func TestService_MonitorsStateForkUpdates(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
notifier := &mock.MockStateNotifier{}
|
||||
s, err := NewService(ctx, &Config{
|
||||
s, err := New(ctx, &Config{
|
||||
StateNotifier: notifier,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
)
|
||||
|
||||
func TestService_PublishToTopicConcurrentMapWrite(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{
|
||||
s, err := New(context.Background(), &Config{
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -82,9 +82,9 @@ type Service struct {
|
||||
genesisValidatorsRoot []byte
|
||||
}
|
||||
|
||||
// NewService initializes a new p2p service compatible with shared.Service interface. No
|
||||
// New initializes a new p2p service compatible with shared.Service interface. No
|
||||
// connections are made until the Start function is called during the service registry startup.
|
||||
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
func New(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
var err error
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
_ = cancel // govet fix for lost cancel. Cancel is handled in service.Stop().
|
||||
|
||||
@@ -81,7 +81,7 @@ func createHost(t *testing.T, port int) (host.Host, *ecdsa.PrivateKey, net.IP) {
|
||||
}
|
||||
|
||||
func TestService_Stop_SetsStartedToFalse(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{StateNotifier: &mock.MockStateNotifier{}})
|
||||
s, err := New(context.Background(), &Config{StateNotifier: &mock.MockStateNotifier{}})
|
||||
require.NoError(t, err)
|
||||
s.started = true
|
||||
s.dv5Listener = &mockListener{}
|
||||
@@ -90,7 +90,7 @@ func TestService_Stop_SetsStartedToFalse(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_Stop_DontPanicIfDv5ListenerIsNotInited(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{StateNotifier: &mock.MockStateNotifier{}})
|
||||
s, err := New(context.Background(), &Config{StateNotifier: &mock.MockStateNotifier{}})
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, s.Stop())
|
||||
}
|
||||
@@ -103,7 +103,7 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
|
||||
UDPPort: 2000,
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
}
|
||||
s, err := NewService(context.Background(), cfg)
|
||||
s, err := New(context.Background(), cfg)
|
||||
require.NoError(t, err)
|
||||
s.stateNotifier = &mock.MockStateNotifier{}
|
||||
s.dv5Listener = &mockListener{}
|
||||
@@ -206,7 +206,7 @@ func TestListenForNewNodes(t *testing.T) {
|
||||
cfg.UDPPort = 14000
|
||||
cfg.TCPPort = 14001
|
||||
|
||||
s, err = NewService(context.Background(), cfg)
|
||||
s, err = New(context.Background(), cfg)
|
||||
require.NoError(t, err)
|
||||
exitRoutine := make(chan bool)
|
||||
go func() {
|
||||
@@ -263,7 +263,7 @@ func TestPeer_Disconnect(t *testing.T) {
|
||||
func TestService_JoinLeaveTopic(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}})
|
||||
s, err := New(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}})
|
||||
require.NoError(t, err)
|
||||
|
||||
go s.awaitStateInitialized()
|
||||
|
||||
@@ -82,7 +82,7 @@ func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) {
|
||||
UDPPort: uint(port),
|
||||
}
|
||||
cfg.StateNotifier = &mock.MockStateNotifier{}
|
||||
s, err = NewService(context.Background(), cfg)
|
||||
s, err = New(context.Background(), cfg)
|
||||
require.NoError(t, err)
|
||||
exitRoutine := make(chan bool)
|
||||
go func() {
|
||||
|
||||
@@ -31,7 +31,7 @@ func TestLatestMainchainInfo_OK(t *testing.T) {
|
||||
require.NoError(t, err, "Unable to set up simulated backend")
|
||||
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
@@ -69,7 +69,7 @@ func TestLatestMainchainInfo_OK(t *testing.T) {
|
||||
|
||||
func TestBlockHashByHeight_ReturnsHash(t *testing.T) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -96,7 +96,7 @@ func TestBlockHashByHeight_ReturnsHash(t *testing.T) {
|
||||
|
||||
func TestBlockHashByHeight_ReturnsError_WhenNoEth1Client(t *testing.T) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -112,7 +112,7 @@ func TestBlockHashByHeight_ReturnsError_WhenNoEth1Client(t *testing.T) {
|
||||
|
||||
func TestBlockExists_ValidHash(t *testing.T) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -143,7 +143,7 @@ func TestBlockExists_ValidHash(t *testing.T) {
|
||||
|
||||
func TestBlockExists_InvalidHash(t *testing.T) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -157,7 +157,7 @@ func TestBlockExists_InvalidHash(t *testing.T) {
|
||||
|
||||
func TestBlockExists_UsesCachedBlockInfo(t *testing.T) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -180,7 +180,7 @@ func TestBlockExists_UsesCachedBlockInfo(t *testing.T) {
|
||||
|
||||
func TestBlockExistsWithCache_UsesCachedHeaderInfo(t *testing.T) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -201,7 +201,7 @@ func TestBlockExistsWithCache_UsesCachedHeaderInfo(t *testing.T) {
|
||||
|
||||
func TestBlockExistsWithCache_HeaderNotCached(t *testing.T) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -217,7 +217,7 @@ func TestService_BlockNumberByTimestamp(t *testing.T) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
testAcc, err := contracts.Setup()
|
||||
require.NoError(t, err, "Unable to set up simulated backend")
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -244,7 +244,7 @@ func TestService_BlockNumberByTimestampLessTargetTime(t *testing.T) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
testAcc, err := contracts.Setup()
|
||||
require.NoError(t, err, "Unable to set up simulated backend")
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -277,7 +277,7 @@ func TestService_BlockNumberByTimestampMoreTargetTime(t *testing.T) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
testAcc, err := contracts.Setup()
|
||||
require.NoError(t, err, "Unable to set up simulated backend")
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -308,7 +308,7 @@ func TestService_BlockNumberByTimestampMoreTargetTime(t *testing.T) {
|
||||
|
||||
func TestService_BlockTimeByHeight_ReturnsError_WhenNoEth1Client(t *testing.T) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
|
||||
@@ -22,7 +22,7 @@ const pubKeyErr = "could not convert bytes to public key"
|
||||
|
||||
func TestProcessDeposit_OK(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -46,7 +46,7 @@ func TestProcessDeposit_OK(t *testing.T) {
|
||||
|
||||
func TestProcessDeposit_InvalidMerkleBranch(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -72,7 +72,7 @@ func TestProcessDeposit_InvalidMerkleBranch(t *testing.T) {
|
||||
func TestProcessDeposit_InvalidPublicKey(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -108,7 +108,7 @@ func TestProcessDeposit_InvalidPublicKey(t *testing.T) {
|
||||
func TestProcessDeposit_InvalidSignature(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -143,7 +143,7 @@ func TestProcessDeposit_InvalidSignature(t *testing.T) {
|
||||
func TestProcessDeposit_UnableToVerify(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -176,7 +176,7 @@ func TestProcessDeposit_UnableToVerify(t *testing.T) {
|
||||
|
||||
func TestProcessDeposit_IncompleteDeposit(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -237,7 +237,7 @@ func TestProcessDeposit_IncompleteDeposit(t *testing.T) {
|
||||
|
||||
func TestProcessDeposit_AllDepositedSuccessfully(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
|
||||
@@ -39,7 +39,7 @@ func TestProcessDepositLog_OK(t *testing.T) {
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
@@ -103,7 +103,7 @@ func TestProcessDepositLog_InsertsPendingDeposit(t *testing.T) {
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
@@ -160,7 +160,7 @@ func TestUnpackDepositLogData_OK(t *testing.T) {
|
||||
testAcc, err := contracts.Setup()
|
||||
require.NoError(t, err, "Unable to set up simulated backend")
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
@@ -211,7 +211,7 @@ func TestProcessETH2GenesisLog_8DuplicatePubkeys(t *testing.T) {
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
@@ -282,7 +282,7 @@ func TestProcessETH2GenesisLog(t *testing.T) {
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
@@ -369,7 +369,7 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: kvStore,
|
||||
@@ -462,7 +462,7 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: kvStore,
|
||||
@@ -565,7 +565,7 @@ func TestWeb3ServiceProcessDepositLog_RequestMissedDeposits(t *testing.T) {
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
@@ -717,7 +717,7 @@ func newPowchainService(t *testing.T, eth1Backend *contracts.TestAccount, beacon
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
DepositContract: eth1Backend.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
|
||||
@@ -166,9 +166,9 @@ type Web3ServiceConfig struct {
|
||||
Eth1HeaderReqLimit uint64
|
||||
}
|
||||
|
||||
// NewService sets up a new instance with an ethclient when
|
||||
// New sets up a new instance with an ethclient when
|
||||
// given a web3 endpoint as a string in the config.
|
||||
func NewService(ctx context.Context, config *Web3ServiceConfig) (*Service, error) {
|
||||
func New(ctx context.Context, config *Web3ServiceConfig) (*Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
_ = cancel // govet fix for lost cancel. Cancel is handled in service.Stop()
|
||||
depositTrie, err := trieutil.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
|
||||
|
||||
@@ -123,7 +123,7 @@ func TestStart_OK(t *testing.T) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
testAcc, err := contracts.Setup()
|
||||
require.NoError(t, err, "Unable to set up simulated backend")
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
@@ -152,7 +152,7 @@ func TestStart_NoHTTPEndpointDefinedFails_WithoutChainStarted(t *testing.T) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
testAcc, err := contracts.Setup()
|
||||
require.NoError(t, err, "Unable to set up simulated backend")
|
||||
s, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
s, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{""}, // No endpoint defined!
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
@@ -193,7 +193,7 @@ func TestStart_NoHTTPEndpointDefinedSucceeds_WithGenesisState(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(context.Background(), st, genRoot))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(context.Background(), genRoot))
|
||||
s, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
s, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{""}, // No endpoint defined!
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
@@ -223,7 +223,7 @@ func TestStart_NoHTTPEndpointDefinedSucceeds_WithChainStarted(t *testing.T) {
|
||||
ChainstartData: &protodb.ChainStartData{Chainstarted: true},
|
||||
Trie: &protodb.SparseMerkleTrie{},
|
||||
}))
|
||||
s, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
s, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{""}, // No endpoint defined!
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
@@ -240,7 +240,7 @@ func TestStop_OK(t *testing.T) {
|
||||
testAcc, err := contracts.Setup()
|
||||
require.NoError(t, err, "Unable to set up simulated backend")
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
@@ -265,7 +265,7 @@ func TestService_Eth1Synced(t *testing.T) {
|
||||
testAcc, err := contracts.Setup()
|
||||
require.NoError(t, err, "Unable to set up simulated backend")
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
@@ -286,7 +286,7 @@ func TestFollowBlock_OK(t *testing.T) {
|
||||
testAcc, err := contracts.Setup()
|
||||
require.NoError(t, err, "Unable to set up simulated backend")
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
@@ -359,7 +359,7 @@ func TestStatus(t *testing.T) {
|
||||
func TestHandlePanic_OK(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
@@ -398,7 +398,7 @@ func TestLogTillGenesis_OK(t *testing.T) {
|
||||
testAcc, err := contracts.Setup()
|
||||
require.NoError(t, err, "Unable to set up simulated backend")
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
web3Service, err := New(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
@@ -451,11 +451,11 @@ func TestInitDepositCache_OK(t *testing.T) {
|
||||
require.Equal(t, 3, len(s.depositCache.PendingContainers(context.Background(), nil)))
|
||||
}
|
func TestNewService_EarliestVotingBlock(t *testing.T) {
func TestNew_EarliestVotingBlock(t *testing.T) {
testAcc, err := contracts.Setup()
require.NoError(t, err, "Unable to set up simulated backend")
beaconDB := dbutil.SetupDB(t)
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
web3Service, err := New(context.Background(), &Web3ServiceConfig{
HTTPEndpoints: []string{endpoint},
DepositContract: testAcc.ContractAddr,
BeaconDB: beaconDB,
@@ -501,12 +501,12 @@ func TestNewService_EarliestVotingBlock(t *testing.T) {

}

func TestNewService_Eth1HeaderRequLimit(t *testing.T) {
func TestNew_Eth1HeaderRequLimit(t *testing.T) {
testAcc, err := contracts.Setup()
require.NoError(t, err, "Unable to set up simulated backend")
beaconDB := dbutil.SetupDB(t)

s1, err := NewService(context.Background(), &Web3ServiceConfig{
s1, err := New(context.Background(), &Web3ServiceConfig{
HTTPEndpoints: []string{endpoint},
DepositContract: testAcc.ContractAddr,
BeaconDB: beaconDB,
@@ -514,7 +514,7 @@ func TestNewService_Eth1HeaderRequLimit(t *testing.T) {
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
assert.Equal(t, defaultEth1HeaderReqLimit, s1.eth1HeaderReqLimit, "default eth1 header request limit not set")

s2, err := NewService(context.Background(), &Web3ServiceConfig{
s2, err := New(context.Background(), &Web3ServiceConfig{
HTTPEndpoints: []string{endpoint},
DepositContract: testAcc.ContractAddr,
BeaconDB: beaconDB,
@@ -532,7 +532,7 @@ func TestServiceFallbackCorrectly(t *testing.T) {
require.NoError(t, err, "Unable to set up simulated backend")
beaconDB := dbutil.SetupDB(t)

s1, err := NewService(context.Background(), &Web3ServiceConfig{
s1, err := New(context.Background(), &Web3ServiceConfig{
HTTPEndpoints: []string{firstEndpoint},
DepositContract: testAcc.ContractAddr,
BeaconDB: beaconDB,

@@ -138,9 +138,9 @@ type Config struct {
MaxMsgSize int
}

// NewService instantiates a new RPC service instance that will
// New instantiates a new RPC service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, cfg *Config) *Service {
func New(ctx context.Context, cfg *Config) *Service {
ctx, cancel := context.WithCancel(ctx)
return &Service{
ctx: ctx,

@@ -26,7 +26,7 @@ func TestLifecycle_OK(t *testing.T) {
chainService := &mock.ChainService{
Genesis: time.Now(),
}
rpcService := NewService(context.Background(), &Config{
rpcService := New(context.Background(), &Config{
Port: "7348",
SyncService: &mockSync.Sync{IsSyncing: false},
BlockReceiver: chainService,
@@ -53,7 +53,7 @@ func TestStatus_CredentialError(t *testing.T) {
func TestRPC_InsecureEndpoint(t *testing.T) {
hook := logTest.NewGlobal()
chainService := &mock.ChainService{Genesis: time.Now()}
rpcService := NewService(context.Background(), &Config{
rpcService := New(context.Background(), &Config{
Port: "7777",
SyncService: &mockSync.Sync{IsSyncing: false},
BlockReceiver: chainService,

@@ -325,7 +325,7 @@ func TestService_processBlock(t *testing.T) {
err = beaconDB.SaveBlock(context.Background(), genesisBlk)
require.NoError(t, err)
st := testutil.NewBeaconState()
s := NewService(context.Background(), &Config{
s := New(context.Background(), &Config{
P2P: p2pt.NewTestP2P(t),
DB: beaconDB,
Chain: &mock.ChainService{
@@ -385,7 +385,7 @@ func TestService_processBlockBatch(t *testing.T) {
err = beaconDB.SaveBlock(context.Background(), genesisBlk)
require.NoError(t, err)
st := testutil.NewBeaconState()
s := NewService(context.Background(), &Config{
s := New(context.Background(), &Config{
P2P: p2pt.NewTestP2P(t),
DB: beaconDB,
Chain: &mock.ChainService{

@@ -56,9 +56,9 @@ type Service struct {
genesisChan chan time.Time
}

// NewService configures the initial sync service responsible for bringing the node up to the
// New configures the initial sync service responsible for bringing the node up to the
// latest head of the blockchain.
func NewService(ctx context.Context, cfg *Config) *Service {
func New(ctx context.Context, cfg *Config) *Service {
ctx, cancel := context.WithCancel(ctx)
s := &Service{
ctx: ctx,

@@ -152,7 +152,7 @@ func TestService_InitStartStop(t *testing.T) {
}
// Initialize feed
notifier := &mock.MockStateNotifier{}
s := NewService(ctx, &Config{
s := New(ctx, &Config{
P2P: p,
Chain: mc,
StateNotifier: notifier,
@@ -319,7 +319,7 @@ func TestService_markSynced(t *testing.T) {
mc := &mock.ChainService{}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
s := NewService(ctx, &Config{
s := New(ctx, &Config{
Chain: mc,
StateNotifier: mc.StateNotifier(),
})
@@ -416,7 +416,7 @@ func TestService_Resync(t *testing.T) {
if tt.chainService != nil {
mc = tt.chainService()
}
s := NewService(ctx, &Config{
s := New(ctx, &Config{
DB: beaconDB,
P2P: p,
Chain: mc,
@@ -438,7 +438,7 @@ func TestService_Resync(t *testing.T) {
}

func TestService_Initialized(t *testing.T) {
s := NewService(context.Background(), &Config{})
s := New(context.Background(), &Config{})
s.chainStarted.Set()
assert.Equal(t, true, s.Initialized())
s.chainStarted.UnSet()

@@ -114,8 +114,8 @@ type Service struct {
stateGen *stategen.State
}

// NewService initializes new regular sync service.
func NewService(ctx context.Context, cfg *Config) *Service {
// New initializes new regular sync service.
func New(ctx context.Context, cfg *Config) *Service {
c := gcache.New(pendingBlockExpTime /* exp time */, 2*pendingBlockExpTime /* prune time */)

rLimiter := newRateLimiter(cfg.P2P)

@@ -117,12 +117,12 @@ func BeaconFuzzBlock(b []byte) {
ap := attestations.NewPool()
ep := voluntaryexits.NewPool()
sp := slashings.NewPool()
ops, err := attestations.NewService(context.Background(), &attestations.Config{Pool: ap})
ops, err := attestations.New(context.Background(), &attestations.Config{Pool: ap})
if err != nil {
panic(err)
}

chain, err := blockchain.NewService(context.Background(), &blockchain.Config{
chain, err := blockchain.New(context.Background(), &blockchain.Config{
ChainStartFetcher: nil,
BeaconDB: db1,
DepositCache: nil,

@@ -25,7 +25,7 @@ func init() {
logrus.SetLevel(logrus.PanicLevel)

var err error
p, err = p2p.NewService(context.Background(), &p2p.Config{
p, err = p2p.New(context.Background(), &p2p.Config{
NoDiscovery: true,
})
if err != nil {
@@ -44,7 +44,7 @@ func init() {
if err := p.Connect(info); err != nil {
panic(errors.Wrap(err, "could not connect to peer"))
}
regularsync.NewService(context.Background(), &regularsync.Config{
regularsync.New(context.Background(), &regularsync.Config{
P2P: p,
DB: nil,
AttPool: nil,

@@ -24,7 +24,7 @@ type logger interface {
}

func TestLogrusCollector(t *testing.T) {
service := prometheus.NewService(addr, nil)
service := prometheus.New(addr, nil)
hook := prometheus.NewLogrusCollector()
log.AddHook(hook)
go service.Start()

@@ -34,9 +34,9 @@ type Handler struct {
Handler func(http.ResponseWriter, *http.Request)
}

// NewService sets up a new instance for a given address host:port.
// New sets up a new instance for a given address host:port.
// An empty host will match with any IP so an address like ":2121" is perfectly acceptable.
func NewService(addr string, svcRegistry *shared.ServiceRegistry, additionalHandlers ...Handler) *Service {
func New(addr string, svcRegistry *shared.ServiceRegistry, additionalHandlers ...Handler) *Service {
s := &Service{svcRegistry: svcRegistry}

mux := http.NewServeMux()

@@ -21,7 +21,7 @@ func init() {
}

func TestLifecycle(t *testing.T) {
prometheusService := NewService(":2112", nil)
prometheusService := New(":2112", nil)
prometheusService.Start()
// Give service time to start.
time.Sleep(time.Second)
@@ -60,7 +60,7 @@ func TestHealthz(t *testing.T) {
registry := shared.NewServiceRegistry()
m := &mockService{}
require.NoError(t, registry.RegisterService(m), "Failed to register service")
s := NewService("" /*addr*/, registry)
s := New("" /*addr*/, registry)

req, err := http.NewRequest("GET", "/healthz", nil /*reader*/)
require.NoError(t, err)
@@ -112,7 +112,7 @@ func TestContentNegotiation(t *testing.T) {
registry := shared.NewServiceRegistry()
m := &mockService{}
require.NoError(t, registry.RegisterService(m), "Failed to register service")
s := NewService("", registry)
s := New("", registry)

req, err := http.NewRequest("GET", "/healthz", nil /* body */)
require.NoError(t, err)
@@ -143,7 +143,7 @@ func TestContentNegotiation(t *testing.T) {
m := &mockService{}
m.status = errors.New("something is wrong")
require.NoError(t, registry.RegisterService(m), "Failed to register service")
s := NewService("", registry)
s := New("", registry)

req, err := http.NewRequest("GET", "/healthz", nil /* body */)
require.NoError(t, err)

@@ -72,8 +72,8 @@ type Config struct {
NodeClient ethpb.NodeClient
}

// NewService instantiation.
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
// New instantiation.
func New(ctx context.Context, cfg *Config) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
_ = cancel // govet fix for lost cancel. Cancel is handled in service.Stop()
publicKeyCache, err := cache.NewPublicKeyCache(0, nil)

@@ -74,8 +74,8 @@ type Config struct {
HistoricalDetection bool
}

// NewService instantiation.
func NewService(ctx context.Context, cfg *Config) *Service {
// New instantiates a new service.
func New(ctx context.Context, cfg *Config) *Service {
ctx, cancel := context.WithCancel(ctx)
return &Service{
ctx: ctx,

@@ -169,7 +169,7 @@ func (n *SlasherNode) registerPrometheusService(cliCtx *cli.Context) error {
},
)
}
service := prometheus.NewService(
service := prometheus.New(
fmt.Sprintf("%s:%d", n.cliCtx.String(cmd.MonitoringHostFlag.Name), n.cliCtx.Int(flags.MonitoringPortFlag.Name)),
n.services,
additionalHandlers...,
@@ -226,7 +226,7 @@ func (n *SlasherNode) registerBeaconClientService() error {
beaconProvider = flags.BeaconRPCProviderFlag.Value
}

bs, err := beaconclient.NewService(n.ctx, &beaconclient.Config{
bs, err := beaconclient.New(n.ctx, &beaconclient.Config{
BeaconCert: beaconCert,
SlasherDB: n.db,
BeaconProvider: beaconProvider,
@@ -244,7 +244,7 @@ func (n *SlasherNode) registerDetectionService() error {
if err := n.services.FetchService(&bs); err != nil {
panic(err)
}
ds := detection.NewService(n.ctx, &detection.Config{
ds := detection.New(n.ctx, &detection.Config{
Notifier: bs,
SlasherDB: n.db,
BeaconClient: bs,
@@ -269,7 +269,7 @@ func (n *SlasherNode) registerRPCService() error {
port := n.cliCtx.String(flags.RPCPort.Name)
cert := n.cliCtx.String(flags.CertFlag.Name)
key := n.cliCtx.String(flags.KeyFlag.Name)
rpcService := rpc.NewService(n.ctx, &rpc.Config{
rpcService := rpc.New(n.ctx, &rpc.Config{
Host: host,
Port: port,
CertFlag: cert,

@@ -63,9 +63,9 @@ func TestServer_IsSlashableAttestation(t *testing.T) {
require.NoError(t, err)

bcCfg := &beaconclient.Config{BeaconClient: bClient, NodeClient: nClient, SlasherDB: db}
bs, err := beaconclient.NewService(ctx, bcCfg)
bs, err := beaconclient.New(ctx, bcCfg)
require.NoError(t, err)
ds := detection.NewService(ctx, cfg)
ds := detection.New(ctx, cfg)
server := Server{ctx: ctx, detector: ds, slasherDB: db, beaconClient: bs}
nClient.EXPECT().GetGenesis(gomock.Any(), gomock.Any()).Return(wantedGenesis, nil).AnyTimes()
bClient.EXPECT().ListValidators(
@@ -165,9 +165,9 @@ func TestServer_IsSlashableAttestationNoUpdate(t *testing.T) {
savedAttestation.Signature = marshalledSig

bcCfg := &beaconclient.Config{BeaconClient: bClient, NodeClient: nClient, SlasherDB: db}
bs, err := beaconclient.NewService(ctx, bcCfg)
bs, err := beaconclient.New(ctx, bcCfg)
require.NoError(t, err)
ds := detection.NewService(ctx, cfg)
ds := detection.New(ctx, cfg)
server := Server{ctx: ctx, detector: ds, slasherDB: db, beaconClient: bs}
slashings, err := server.IsSlashableAttestation(ctx, savedAttestation)
require.NoError(t, err, "Got error while trying to detect slashing")
@@ -221,9 +221,9 @@ func TestServer_IsSlashableBlock(t *testing.T) {
require.NoError(t, err)

bcCfg := &beaconclient.Config{BeaconClient: bClient, NodeClient: nClient, SlasherDB: db}
bs, err := beaconclient.NewService(ctx, bcCfg)
bs, err := beaconclient.New(ctx, bcCfg)
require.NoError(t, err)
ds := detection.NewService(ctx, cfg)
ds := detection.New(ctx, cfg)
server := Server{ctx: ctx, detector: ds, slasherDB: db, beaconClient: bs}

wg := sync.WaitGroup{}
@@ -312,9 +312,9 @@ func TestServer_IsSlashableBlockNoUpdate(t *testing.T) {
marshalledSig := blockSig.Marshal()
savedBlock.Signature = marshalledSig
bcCfg := &beaconclient.Config{BeaconClient: bClient, NodeClient: nClient, SlasherDB: db}
bs, err := beaconclient.NewService(ctx, bcCfg)
bs, err := beaconclient.New(ctx, bcCfg)
require.NoError(t, err)
ds := detection.NewService(ctx, cfg)
ds := detection.New(ctx, cfg)
server := Server{ctx: ctx, detector: ds, slasherDB: db, beaconClient: bs}
slashings, err := server.IsSlashableBlock(ctx, savedBlock)
require.NoError(t, err, "Got error while trying to detect slashing")

@@ -51,9 +51,9 @@ type Config struct {
BeaconClient *beaconclient.Service
}

// NewService instantiates a new RPC service instance that will
// New instantiates a new RPC service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, cfg *Config) *Service {
func New(ctx context.Context, cfg *Config) *Service {
ctx, cancel := context.WithCancel(ctx)
return &Service{
ctx: ctx,

@@ -12,7 +12,7 @@ import (

func TestLifecycle_OK(t *testing.T) {
hook := logTest.NewGlobal()
rpcService := NewService(context.Background(), &Config{
rpcService := New(context.Background(), &Config{
Port: "7348",
CertFlag: "alice.crt",
KeyFlag: "alice.key",
@@ -33,7 +33,7 @@ func TestStatus_CredentialError(t *testing.T) {

func TestRPC_InsecureEndpoint(t *testing.T) {
hook := logTest.NewGlobal()
rpcService := NewService(context.Background(), &Config{
rpcService := New(context.Background(), &Config{
Port: "7777",
})


@@ -359,7 +359,7 @@ func (c *ValidatorClient) registerPrometheusService(cliCtx *cli.Context) error {
},
)
}
service := prometheus.NewService(
service := prometheus.New(
fmt.Sprintf("%s:%d", c.cliCtx.String(cmd.MonitoringHostFlag.Name), c.cliCtx.Int(flags.MonitoringPortFlag.Name)),
c.services,
additionalHandlers...,
@@ -431,7 +431,7 @@ func (c *ValidatorClient) registerSlasherClientService() error {
maxCallRecvMsgSize := c.cliCtx.Int(cmd.GrpcMaxCallRecvMsgSizeFlag.Name)
grpcRetries := c.cliCtx.Uint(flags.GrpcRetriesFlag.Name)
grpcRetryDelay := c.cliCtx.Duration(flags.GrpcRetryDelayFlag.Name)
sp, err := slashingprotection.NewService(c.cliCtx.Context, &slashingprotection.Config{
sp, err := slashingprotection.New(c.cliCtx.Context, &slashingprotection.Config{
Endpoint: endpoint,
CertFlag: cert,
GrpcMaxCallRecvMsgSizeFlag: maxCallRecvMsgSize,

@@ -45,9 +45,9 @@ type Config struct {
GrpcHeadersFlag string
}

// NewService creates a new validator service for the service
// New creates a new validator service for the service
// registry.
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
func New(ctx context.Context, cfg *Config) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
return &Service{
ctx: ctx,
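
For downstream callers the migration is mechanical: only the constructor name changes, the arguments and return values stay the same. A minimal sketch of an updated call site for the monitoring service, assuming the shared and shared/prometheus import paths (the paths themselves are not shown in this diff):

package main

import (
	"github.com/prysmaticlabs/prysm/shared"            // assumed import path
	"github.com/prysmaticlabs/prysm/shared/prometheus" // assumed import path
)

func main() {
	// Register the services that the /healthz endpoint will report on.
	registry := shared.NewServiceRegistry()

	// Before this commit: prometheus.NewService(":2112", registry)
	svc := prometheus.New(":2112", registry) // same arguments, only the name changed
	svc.Start()                              // non-blocking, as exercised by TestLifecycle above

	select {} // keep the process alive so the monitoring endpoint stays up
}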