diff --git a/api/client/beacon/health/health_test.go b/api/client/beacon/health/health_test.go index c3ac1f1caa..6d9e3da595 100644 --- a/api/client/beacon/health/health_test.go +++ b/api/client/beacon/health/health_test.go @@ -1,7 +1,6 @@ package health import ( - "context" "sync" "testing" @@ -23,7 +22,7 @@ func TestNodeHealth_IsHealthy(t *testing.T) { isHealthy: &tt.isHealthy, healthChan: make(chan bool, 1), } - if got := n.IsHealthy(context.Background()); got != tt.want { + if got := n.IsHealthy(t.Context()); got != tt.want { t.Errorf("IsHealthy() = %v, want %v", got, tt.want) } }) @@ -54,7 +53,7 @@ func TestNodeHealth_UpdateNodeHealth(t *testing.T) { healthChan: make(chan bool, 1), } - s := n.CheckHealth(context.Background()) + s := n.CheckHealth(t.Context()) // Check if health status was updated if s != tt.newStatus { t.Errorf("UpdateNodeHealth() failed to update isHealthy from %v to %v", tt.initial, tt.newStatus) @@ -93,9 +92,9 @@ func TestNodeHealth_Concurrency(t *testing.T) { go func() { defer wg.Done() client.EXPECT().IsHealthy(gomock.Any()).Return(false).Times(1) - n.CheckHealth(context.Background()) + n.CheckHealth(t.Context()) client.EXPECT().IsHealthy(gomock.Any()).Return(true).Times(1) - n.CheckHealth(context.Background()) + n.CheckHealth(t.Context()) }() } @@ -103,7 +102,7 @@ func TestNodeHealth_Concurrency(t *testing.T) { for i := 0; i < numGoroutines; i++ { go func() { defer wg.Done() - _ = n.IsHealthy(context.Background()) // Just read the value + _ = n.IsHealthy(t.Context()) // Just read the value }() } diff --git a/api/client/builder/client_test.go b/api/client/builder/client_test.go index 0b8c147644..106a0f32e3 100644 --- a/api/client/builder/client_test.go +++ b/api/client/builder/client_test.go @@ -2,7 +2,6 @@ package builder import ( "bytes" - "context" "encoding/json" "fmt" "io" @@ -31,7 +30,7 @@ func (fn roundtrip) RoundTrip(r *http.Request) (*http.Response, error) { } func TestClient_Status(t *testing.T) { - ctx := context.Background() + 
ctx := t.Context() statusPath := "/eth/v1/builder/status" hc := &http.Client{ Transport: roundtrip(func(r *http.Request) (*http.Response, error) { @@ -84,7 +83,7 @@ func TestClient_Status(t *testing.T) { } func TestClient_RegisterValidator(t *testing.T) { - ctx := context.Background() + ctx := t.Context() expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}]` expectedPath := "/eth/v1/builder/validators" t.Run("JSON success", func(t *testing.T) { @@ -168,7 +167,7 @@ func TestClient_RegisterValidator(t *testing.T) { } func TestClient_GetHeader(t *testing.T) { - ctx := context.Background() + ctx := t.Context() expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a" var slot primitives.Slot = 23 parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2") @@ -601,7 +600,7 @@ func TestClient_GetHeader(t *testing.T) { } func TestSubmitBlindedBlock(t *testing.T) { - ctx := context.Background() + ctx := t.Context() t.Run("bellatrix", func(t *testing.T) { hc := &http.Client{ @@ -1559,7 +1558,7 @@ func TestRequestLogger(t *testing.T) { c, err := NewClient("localhost:3500", wo) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() hc := &http.Client{ Transport: roundtrip(func(r *http.Request) (*http.Response, error) { require.Equal(t, getStatus, r.URL.Path) diff --git a/api/client/event/event_stream_test.go b/api/client/event/event_stream_test.go index 10318eba3b..886e4dc13c 100644 
--- a/api/client/event/event_stream_test.go +++ b/api/client/event/event_stream_test.go @@ -1,7 +1,6 @@ package event import ( - "context" "fmt" "net/http" "net/http/httptest" @@ -30,7 +29,7 @@ func TestNewEventStream(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - _, err := NewEventStream(context.Background(), &http.Client{}, tt.host, tt.topics) + _, err := NewEventStream(t.Context(), &http.Client{}, tt.host, tt.topics) if (err != nil) != tt.wantErr { t.Errorf("NewEventStream() error = %v, wantErr %v", err, tt.wantErr) } @@ -56,7 +55,7 @@ func TestEventStream(t *testing.T) { topics := []string{"head"} eventsChannel := make(chan *Event, 1) - stream, err := NewEventStream(context.Background(), http.DefaultClient, server.URL, topics) + stream, err := NewEventStream(t.Context(), http.DefaultClient, server.URL, topics) require.NoError(t, err) go stream.Subscribe(eventsChannel) diff --git a/api/grpc/grpcutils_test.go b/api/grpc/grpcutils_test.go index 8fb9ca67cb..c9d49b6957 100644 --- a/api/grpc/grpcutils_test.go +++ b/api/grpc/grpcutils_test.go @@ -1,7 +1,6 @@ package grpc import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/testing/assert" @@ -16,7 +15,7 @@ type customErrorData struct { func TestAppendHeaders(t *testing.T) { t.Run("one_header", func(t *testing.T) { - ctx := AppendHeaders(context.Background(), []string{"first=value1"}) + ctx := AppendHeaders(t.Context(), []string{"first=value1"}) md, ok := metadata.FromOutgoingContext(ctx) require.Equal(t, true, ok, "Failed to read context metadata") require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values") @@ -24,7 +23,7 @@ func TestAppendHeaders(t *testing.T) { }) t.Run("multiple_headers", func(t *testing.T) { - ctx := AppendHeaders(context.Background(), []string{"first=value1", "second=value2"}) + ctx := AppendHeaders(t.Context(), []string{"first=value1", "second=value2"}) md, ok := metadata.FromOutgoingContext(ctx) require.Equal(t, true, ok, "Failed to 
read context metadata") require.Equal(t, 2, md.Len(), "MetadataV0 contains wrong number of values") @@ -33,7 +32,7 @@ func TestAppendHeaders(t *testing.T) { }) t.Run("one_empty_header", func(t *testing.T) { - ctx := AppendHeaders(context.Background(), []string{"first=value1", ""}) + ctx := AppendHeaders(t.Context(), []string{"first=value1", ""}) md, ok := metadata.FromOutgoingContext(ctx) require.Equal(t, true, ok, "Failed to read context metadata") require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values") @@ -42,7 +41,7 @@ func TestAppendHeaders(t *testing.T) { t.Run("incorrect_header", func(t *testing.T) { logHook := logTest.NewGlobal() - ctx := AppendHeaders(context.Background(), []string{"first=value1", "second"}) + ctx := AppendHeaders(t.Context(), []string{"first=value1", "second"}) md, ok := metadata.FromOutgoingContext(ctx) require.Equal(t, true, ok, "Failed to read context metadata") require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values") @@ -51,7 +50,7 @@ func TestAppendHeaders(t *testing.T) { }) t.Run("header_value_with_equal_sign", func(t *testing.T) { - ctx := AppendHeaders(context.Background(), []string{"first=value=1"}) + ctx := AppendHeaders(t.Context(), []string{"first=value=1"}) md, ok := metadata.FromOutgoingContext(ctx) require.Equal(t, true, ok, "Failed to read context metadata") require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values") diff --git a/api/server/httprest/server_test.go b/api/server/httprest/server_test.go index 22d789ad96..960a733c78 100644 --- a/api/server/httprest/server_test.go +++ b/api/server/httprest/server_test.go @@ -1,7 +1,6 @@ package httprest import ( - "context" "flag" "fmt" "net" @@ -34,7 +33,7 @@ func TestServer_StartStop(t *testing.T) { WithRouter(handler), } - g, err := New(context.Background(), opts...) + g, err := New(t.Context(), opts...) 
require.NoError(t, err) g.Start() @@ -62,7 +61,7 @@ func TestServer_NilHandler_NotFoundHandlerRegistered(t *testing.T) { WithRouter(handler), } - g, err := New(context.Background(), opts...) + g, err := New(t.Context(), opts...) require.NoError(t, err) writer := httptest.NewRecorder() diff --git a/async/debounce_test.go b/async/debounce_test.go index 4ebe2faab8..e190c85da2 100644 --- a/async/debounce_test.go +++ b/async/debounce_test.go @@ -15,7 +15,7 @@ import ( func TestDebounce_NoEvents(t *testing.T) { eventsChan := make(chan interface{}, 100) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) interval := time.Second timesHandled := int32(0) wg := &sync.WaitGroup{} @@ -39,7 +39,7 @@ func TestDebounce_NoEvents(t *testing.T) { func TestDebounce_CtxClosing(t *testing.T) { eventsChan := make(chan interface{}, 100) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) interval := time.Second timesHandled := int32(0) wg := &sync.WaitGroup{} @@ -75,7 +75,7 @@ func TestDebounce_CtxClosing(t *testing.T) { func TestDebounce_SingleHandlerInvocation(t *testing.T) { eventsChan := make(chan interface{}, 100) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) interval := time.Second timesHandled := int32(0) go async.Debounce(ctx, interval, eventsChan, func(event interface{}) { @@ -93,7 +93,7 @@ func TestDebounce_SingleHandlerInvocation(t *testing.T) { func TestDebounce_MultipleHandlerInvocation(t *testing.T) { eventsChan := make(chan interface{}, 100) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) interval := time.Second timesHandled := int32(0) go async.Debounce(ctx, interval, eventsChan, func(event interface{}) { diff --git a/async/every_test.go b/async/every_test.go index 2fea2388e7..499cc27cd3 100644 --- a/async/every_test.go +++ b/async/every_test.go 
@@ -10,7 +10,7 @@ import ( ) func TestEveryRuns(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) i := int32(0) async.RunEvery(ctx, 100*time.Millisecond, func() { diff --git a/beacon-chain/blockchain/chain_info_norace_test.go b/beacon-chain/blockchain/chain_info_norace_test.go index 8a6c7b6c91..5a1e5d6a98 100644 --- a/beacon-chain/blockchain/chain_info_norace_test.go +++ b/beacon-chain/blockchain/chain_info_norace_test.go @@ -1,7 +1,6 @@ package blockchain import ( - "context" "testing" testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing" @@ -25,7 +24,7 @@ func TestHeadSlot_DataRace(t *testing.T) { wait := make(chan struct{}) go func() { defer close(wait) - require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st)) + require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st)) }() s.HeadSlot() <-wait @@ -43,10 +42,10 @@ func TestHeadRoot_DataRace(t *testing.T) { st, _ := util.DeterministicGenesisState(t, 1) go func() { defer close(wait) - require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st)) + require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st)) }() - _, err = s.HeadRoot(context.Background()) + _, err = s.HeadRoot(t.Context()) require.NoError(t, err) <-wait } @@ -65,10 +64,10 @@ func TestHeadBlock_DataRace(t *testing.T) { st, _ := util.DeterministicGenesisState(t, 1) go func() { defer close(wait) - require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st)) + require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st)) }() - _, err = s.HeadBlock(context.Background()) + _, err = s.HeadBlock(t.Context()) require.NoError(t, err) <-wait } @@ -83,14 +82,14 @@ func TestHeadState_DataRace(t *testing.T) { wait := make(chan struct{}) st, _ := util.DeterministicGenesisState(t, 1) root := bytesutil.ToBytes32(bytesutil.PadTo([]byte{'s'}, 32)) - require.NoError(t, beaconDB.SaveGenesisBlockRoot(context.Background(), root)) - 
require.NoError(t, beaconDB.SaveState(context.Background(), st, root)) + require.NoError(t, beaconDB.SaveGenesisBlockRoot(t.Context(), root)) + require.NoError(t, beaconDB.SaveState(t.Context(), st, root)) go func() { defer close(wait) - require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st)) + require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st)) }() - _, err = s.HeadState(context.Background()) + _, err = s.HeadState(t.Context()) require.NoError(t, err) <-wait } diff --git a/beacon-chain/blockchain/chain_info_test.go b/beacon-chain/blockchain/chain_info_test.go index 26db5657a5..7243cc7ced 100644 --- a/beacon-chain/blockchain/chain_info_test.go +++ b/beacon-chain/blockchain/chain_info_test.go @@ -84,7 +84,7 @@ func prepareForkchoiceState( func TestHeadRoot_Nil(t *testing.T) { beaconDB := testDB.SetupDB(t) c := setupBeaconChain(t, beaconDB) - headRoot, err := c.HeadRoot(context.Background()) + headRoot, err := c.HeadRoot(t.Context()) require.NoError(t, err) assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], headRoot, "Incorrect pre chain start value") } @@ -137,7 +137,7 @@ func TestFinalizedBlockHash(t *testing.T) { } func TestUnrealizedJustifiedBlockHash(t *testing.T) { - ctx := context.Background() + ctx := t.Context() service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}} ojc := ðpb.Checkpoint{Root: []byte{'j'}} ofc := ðpb.Checkpoint{Root: []byte{'f'}} @@ -203,7 +203,7 @@ func TestHeadBlock_CanRetrieve(t *testing.T) { c := &Service{} c.head = &head{block: wsb, state: s} - received, err := c.HeadBlock(context.Background()) + received, err := c.HeadBlock(t.Context()) require.NoError(t, err) pb, err := received.Proto() require.NoError(t, err) @@ -215,7 +215,7 @@ func TestHeadState_CanRetrieve(t *testing.T) { require.NoError(t, err) c := &Service{} c.head = &head{state: s} - headState, err := c.HeadState(context.Background()) + headState, err := c.HeadState(t.Context()) require.NoError(t, err) assert.DeepEqual(t, 
headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Incorrect head state received") } @@ -277,7 +277,7 @@ func TestHeadETH1Data_CanRetrieve(t *testing.T) { } func TestIsCanonical_Ok(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) c := setupBeaconChain(t, beaconDB) @@ -301,12 +301,12 @@ func TestService_HeadValidatorsIndices(t *testing.T) { c := &Service{} c.head = &head{} - indices, err := c.HeadValidatorsIndices(context.Background(), 0) + indices, err := c.HeadValidatorsIndices(t.Context(), 0) require.NoError(t, err) require.Equal(t, 0, len(indices)) c.head = &head{state: s} - indices, err = c.HeadValidatorsIndices(context.Background(), 0) + indices, err = c.HeadValidatorsIndices(t.Context(), 0) require.NoError(t, err) require.Equal(t, 10, len(indices)) } @@ -331,7 +331,7 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) { // ---------- D func TestService_ChainHeads(t *testing.T) { - ctx := context.Background() + ctx := t.Context() c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}} ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} @@ -399,7 +399,7 @@ func TestService_HeadValidatorIndexToPublicKey(t *testing.T) { c := &Service{} c.head = &head{state: s} - p, err := c.HeadValidatorIndexToPublicKey(context.Background(), 0) + p, err := c.HeadValidatorIndexToPublicKey(t.Context(), 0) require.NoError(t, err) v, err := s.ValidatorAtIndex(0) @@ -412,12 +412,12 @@ func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) { c := &Service{} c.head = nil - p, err := c.HeadValidatorIndexToPublicKey(context.Background(), 0) + p, err := c.HeadValidatorIndexToPublicKey(t.Context(), 0) require.NoError(t, err) require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p) c.head = &head{state: nil} - p, err = c.HeadValidatorIndexToPublicKey(context.Background(), 0) + p, err = c.HeadValidatorIndexToPublicKey(t.Context(), 0) 
require.NoError(t, err) require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p) } @@ -428,7 +428,7 @@ func TestService_IsOptimistic(t *testing.T) { cfg.BellatrixForkEpoch = 0 params.OverrideBeaconConfig(cfg) - ctx := context.Background() + ctx := t.Context() ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}} @@ -456,7 +456,7 @@ func TestService_IsOptimistic(t *testing.T) { } func TestService_IsOptimisticBeforeBellatrix(t *testing.T) { - ctx := context.Background() + ctx := t.Context() c := &Service{genesisTime: time.Now()} opt, err := c.IsOptimistic(ctx) require.NoError(t, err) @@ -464,7 +464,7 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) { } func TestService_IsOptimisticForRoot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}} ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} @@ -482,27 +482,27 @@ func TestService_IsOptimisticForRoot(t *testing.T) { func TestService_IsOptimisticForRoot_DB(t *testing.T) { beaconDB := testDB.SetupDB(t) - ctx := context.Background() + ctx := t.Context() c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}} c.head = &head{root: params.BeaconConfig().ZeroHash} b := util.NewBeaconBlock() b.Block.Slot = 10 br, err := b.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, b) - require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: br[:], Slot: 10})) + util.SaveBlock(t, t.Context(), beaconDB, b) + require.NoError(t, beaconDB.SaveStateSummary(t.Context(), ðpb.StateSummary{Root: br[:], Slot: 10})) 
optimisticBlock := util.NewBeaconBlock() optimisticBlock.Block.Slot = 97 optimisticRoot, err := optimisticBlock.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock) + util.SaveBlock(t, t.Context(), beaconDB, optimisticBlock) validatedBlock := util.NewBeaconBlock() validatedBlock.Block.Slot = 9 validatedRoot, err := validatedBlock.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, validatedBlock) + util.SaveBlock(t, t.Context(), beaconDB, validatedBlock) validatedCheckpoint := ðpb.Checkpoint{Root: br[:]} require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint)) @@ -524,10 +524,10 @@ func TestService_IsOptimisticForRoot_DB(t *testing.T) { // Before the first finalized epoch, finalized root could be zeros. validatedCheckpoint = ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br)) - require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10})) + require.NoError(t, beaconDB.SaveStateSummary(t.Context(), ðpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10})) require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint)) - require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: optimisticRoot[:], Slot: 11})) + require.NoError(t, beaconDB.SaveStateSummary(t.Context(), ðpb.StateSummary{Root: optimisticRoot[:], Slot: 11})) optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot) require.NoError(t, err) require.Equal(t, true, optimistic) @@ -535,37 +535,37 @@ func TestService_IsOptimisticForRoot_DB(t *testing.T) { func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) { beaconDB := testDB.SetupDB(t) - ctx := context.Background() + ctx := t.Context() c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: 
doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}} c.head = &head{root: params.BeaconConfig().ZeroHash} b := util.NewBeaconBlock() b.Block.Slot = 10 br, err := b.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, b) - require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: br[:], Slot: 10})) + util.SaveBlock(t, t.Context(), beaconDB, b) + require.NoError(t, beaconDB.SaveStateSummary(t.Context(), ðpb.StateSummary{Root: br[:], Slot: 10})) optimisticBlock := util.NewBeaconBlock() optimisticBlock.Block.Slot = 97 optimisticRoot, err := optimisticBlock.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock) + util.SaveBlock(t, t.Context(), beaconDB, optimisticBlock) validatedBlock := util.NewBeaconBlock() validatedBlock.Block.Slot = 9 validatedRoot, err := validatedBlock.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, validatedBlock) + util.SaveBlock(t, t.Context(), beaconDB, validatedBlock) validatedCheckpoint := ðpb.Checkpoint{Root: br[:]} require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint)) - require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: optimisticRoot[:], Slot: 11})) + require.NoError(t, beaconDB.SaveStateSummary(t.Context(), ðpb.StateSummary{Root: optimisticRoot[:], Slot: 11})) optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot) require.NoError(t, err) require.Equal(t, true, optimistic) - require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: validatedRoot[:], Slot: 9})) + require.NoError(t, beaconDB.SaveStateSummary(t.Context(), ðpb.StateSummary{Root: validatedRoot[:], Slot: 9})) validated, err := c.IsOptimisticForRoot(ctx, validatedRoot) require.NoError(t, err) require.Equal(t, true, validated) @@ -574,14 +574,14 @@ func 
TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) { func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) { beaconDB := testDB.SetupDB(t) - ctx := context.Background() + ctx := t.Context() c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}} c.head = &head{root: params.BeaconConfig().ZeroHash} b := util.NewBeaconBlock() b.Block.Slot = 10 br, err := b.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, b) + util.SaveBlock(t, t.Context(), beaconDB, b) require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, [32]byte{})) _, err = c.IsOptimisticForRoot(ctx, br) assert.NoError(t, err) @@ -594,7 +594,7 @@ func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) { func TestService_IsFinalized(t *testing.T) { beaconDB := testDB.SetupDB(t) - ctx := context.Background() + ctx := t.Context() c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}} r1 := [32]byte{'a'} require.NoError(t, c.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{ @@ -616,7 +616,7 @@ func TestService_IsFinalized(t *testing.T) { func Test_hashForGenesisRoot(t *testing.T) { beaconDB := testDB.SetupDB(t) - ctx := context.Background() + ctx := t.Context() c := setupBeaconChain(t, beaconDB) st, _ := util.DeterministicGenesisStateElectra(t, 10) require.NoError(t, c.cfg.BeaconDB.SaveGenesisData(ctx, st)) diff --git a/beacon-chain/blockchain/forkchoice_update_execution_test.go b/beacon-chain/blockchain/forkchoice_update_execution_test.go index 94ebf9e9f6..979eb6787e 100644 --- a/beacon-chain/blockchain/forkchoice_update_execution_test.go +++ b/beacon-chain/blockchain/forkchoice_update_execution_test.go @@ -1,7 +1,6 @@ package blockchain import ( - "context" "testing" "time" @@ -36,29 +35,29 @@ func TestService_isNewHead(t *testing.T) { func TestService_getHeadStateAndBlock(t *testing.T) { beaconDB 
:= testDB.SetupDB(t) service := setupBeaconChain(t, beaconDB) - _, _, err := service.getStateAndBlock(context.Background(), [32]byte{}) + _, _, err := service.getStateAndBlock(t.Context(), [32]byte{}) require.ErrorContains(t, "block does not exist", err) blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(ðpb.SignedBeaconBlock{Signature: []byte{1}})) require.NoError(t, err) - require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), blk)) + require.NoError(t, service.cfg.BeaconDB.SaveBlock(t.Context(), blk)) st, _ := util.DeterministicGenesisState(t, 1) r, err := blk.Block().HashTreeRoot() require.NoError(t, err) - require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), st, r)) + require.NoError(t, service.cfg.BeaconDB.SaveState(t.Context(), st, r)) - gotState, err := service.cfg.BeaconDB.State(context.Background(), r) + gotState, err := service.cfg.BeaconDB.State(t.Context(), r) require.NoError(t, err) require.DeepEqual(t, st.ToProto(), gotState.ToProto()) - gotBlk, err := service.cfg.BeaconDB.Block(context.Background(), r) + gotBlk, err := service.cfg.BeaconDB.Block(t.Context(), r) require.NoError(t, err) require.DeepEqual(t, blk, gotBlk) } func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) { - ctx := context.Background() + ctx := t.Context() opts := testServiceOptsWithDB(t) service, err := NewService(ctx, opts...) 
diff --git a/beacon-chain/blockchain/head_sync_committee_info_test.go b/beacon-chain/blockchain/head_sync_committee_info_test.go index 8bfd8d68d7..53f7a1b406 100644 --- a/beacon-chain/blockchain/head_sync_committee_info_test.go +++ b/beacon-chain/blockchain/head_sync_committee_info_test.go @@ -1,7 +1,6 @@ package blockchain import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/cache" @@ -21,18 +20,18 @@ func TestService_HeadSyncCommitteeIndices(t *testing.T) { // Current period slot := 2*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1 - a, err := c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot)) + a, err := c.HeadSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot)) require.NoError(t, err) // Current period where slot-2 across EPOCHS_PER_SYNC_COMMITTEE_PERIOD slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 2 - b, err := c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot)) + b, err := c.HeadSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot)) require.NoError(t, err) require.DeepEqual(t, a, b) // Next period where slot-1 across EPOCHS_PER_SYNC_COMMITTEE_PERIOD slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 1 - b, err = c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot)) + b, err = c.HeadSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot)) require.NoError(t, err) require.DeepNotEqual(t, a, b) } @@ -44,7 +43,7 @@ func TestService_headCurrentSyncCommitteeIndices(t *testing.T) { // Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`. 
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1 - indices, err := c.headCurrentSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot)) + indices, err := c.headCurrentSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot)) require.NoError(t, err) // NextSyncCommittee becomes CurrentSyncCommittee so it should be empty by default. @@ -58,7 +57,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) { // Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`. slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1 - indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot)) + indices, err := c.headNextSyncCommitteeIndices(t.Context(), 0, primitives.Slot(slot)) require.NoError(t, err) // NextSyncCommittee should be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices. @@ -72,7 +71,7 @@ func TestService_HeadSyncCommitteePubKeys(t *testing.T) { // Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice. slot := uint64(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1 - pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), primitives.Slot(slot), 0) + pubkeys, err := c.HeadSyncCommitteePubKeys(t.Context(), primitives.Slot(slot), 0) require.NoError(t, err) // Any subcommittee should match the subcommittee size. 
@@ -88,7 +87,7 @@ func TestService_HeadSyncCommitteeDomain(t *testing.T) { wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot()) require.NoError(t, err) - d, err := c.HeadSyncCommitteeDomain(context.Background(), 0) + d, err := c.HeadSyncCommitteeDomain(t.Context(), 0) require.NoError(t, err) require.DeepEqual(t, wanted, d) @@ -102,7 +101,7 @@ func TestService_HeadSyncContributionProofDomain(t *testing.T) { wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorsRoot()) require.NoError(t, err) - d, err := c.HeadSyncContributionProofDomain(context.Background(), 0) + d, err := c.HeadSyncContributionProofDomain(t.Context(), 0) require.NoError(t, err) require.DeepEqual(t, wanted, d) @@ -116,7 +115,7 @@ func TestService_HeadSyncSelectionProofDomain(t *testing.T) { wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorsRoot()) require.NoError(t, err) - d, err := c.HeadSyncSelectionProofDomain(context.Background(), 0) + d, err := c.HeadSyncSelectionProofDomain(t.Context(), 0) require.NoError(t, err) require.DeepEqual(t, wanted, d) diff --git a/beacon-chain/blockchain/head_test.go b/beacon-chain/blockchain/head_test.go index 6408c6eca6..ba82969830 100644 --- a/beacon-chain/blockchain/head_test.go +++ b/beacon-chain/blockchain/head_test.go @@ -34,17 +34,17 @@ func TestSaveHead_Same(t *testing.T) { b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock()) require.NoError(t, err) st, _ := util.DeterministicGenesisState(t, 1) - require.NoError(t, service.saveHead(context.Background(), r, b, st)) + require.NoError(t, service.saveHead(t.Context(), r, b, st)) assert.Equal(t, primitives.Slot(0), service.headSlot(), "Head did not stay the same") assert.Equal(t, r, service.headRoot(), "Head did not stay the same") } func 
TestSaveHead_Different(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := setupBeaconChain(t, beaconDB) - oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock()) + oldBlock := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, util.NewBeaconBlock()) oldRoot, err := oldBlock.Block().HashTreeRoot() require.NoError(t, err) ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} @@ -61,7 +61,7 @@ func TestSaveHead_Different(t *testing.T) { newHeadSignedBlock.Block.Slot = 1 newHeadBlock := newHeadSignedBlock.Block - wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock) + wsb := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, newHeadSignedBlock) newRoot, err := newHeadBlock.HashTreeRoot() require.NoError(t, err) state, blkRoot, err = prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc) @@ -74,13 +74,13 @@ func TestSaveHead_Different(t *testing.T) { headState, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, headState.SetSlot(1)) - require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Slot: 1, Root: newRoot[:]})) - require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot)) - require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState)) + require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(t.Context(), ðpb.StateSummary{Slot: 1, Root: newRoot[:]})) + require.NoError(t, service.cfg.BeaconDB.SaveState(t.Context(), headState, newRoot)) + require.NoError(t, service.saveHead(t.Context(), newRoot, wsb, headState)) assert.Equal(t, primitives.Slot(1), service.HeadSlot(), "Head did not change") - cachedRoot, err := service.HeadRoot(context.Background()) + cachedRoot, err := service.HeadRoot(t.Context()) 
require.NoError(t, err) assert.DeepEqual(t, cachedRoot, newRoot[:], "Head did not change") headBlock, err := service.headBlock() @@ -92,12 +92,12 @@ func TestSaveHead_Different(t *testing.T) { } func TestSaveHead_Different_Reorg(t *testing.T) { - ctx := context.Background() + ctx := t.Context() hook := logTest.NewGlobal() beaconDB := testDB.SetupDB(t) service := setupBeaconChain(t, beaconDB) - oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock()) + oldBlock := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, util.NewBeaconBlock()) oldRoot, err := oldBlock.Block().HashTreeRoot() require.NoError(t, err) ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} @@ -120,7 +120,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) { newHeadSignedBlock.Block.ParentRoot = reorgChainParent[:] newHeadBlock := newHeadSignedBlock.Block - wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock) + wsb := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, newHeadSignedBlock) newRoot, err := newHeadBlock.HashTreeRoot() require.NoError(t, err) state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc) @@ -129,13 +129,13 @@ func TestSaveHead_Different_Reorg(t *testing.T) { headState, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, headState.SetSlot(1)) - require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Slot: 1, Root: newRoot[:]})) - require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot)) - require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState)) + require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(t.Context(), ðpb.StateSummary{Slot: 1, Root: newRoot[:]})) + require.NoError(t, service.cfg.BeaconDB.SaveState(t.Context(), headState, newRoot)) + require.NoError(t, service.saveHead(t.Context(), 
newRoot, wsb, headState)) assert.Equal(t, primitives.Slot(1), service.HeadSlot(), "Head did not change") - cachedRoot, err := service.HeadRoot(context.Background()) + cachedRoot, err := service.HeadRoot(t.Context()) require.NoError(t, err) if !bytes.Equal(cachedRoot, newRoot[:]) { t.Error("Head did not change") @@ -162,12 +162,12 @@ func Test_notifyNewHeadEvent(t *testing.T) { }, originBlockRoot: [32]byte{1}, } - st, blk, err := prepareForkchoiceState(context.Background(), 0, [32]byte{}, [32]byte{}, [32]byte{}, ðpb.Checkpoint{}, ðpb.Checkpoint{}) + st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, ðpb.Checkpoint{}, ðpb.Checkpoint{}) require.NoError(t, err) - require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(context.Background(), st, blk)) + require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk)) newHeadStateRoot := [32]byte{2} newHeadRoot := [32]byte{3} - require.NoError(t, srv.notifyNewHeadEvent(context.Background(), 1, bState, newHeadStateRoot[:], newHeadRoot[:])) + require.NoError(t, srv.notifyNewHeadEvent(t.Context(), 1, bState, newHeadStateRoot[:], newHeadRoot[:])) events := notifier.ReceivedEvents() require.Equal(t, 1, len(events)) @@ -194,9 +194,9 @@ func Test_notifyNewHeadEvent(t *testing.T) { }, originBlockRoot: genesisRoot, } - st, blk, err := prepareForkchoiceState(context.Background(), 0, [32]byte{}, [32]byte{}, [32]byte{}, ðpb.Checkpoint{}, ðpb.Checkpoint{}) + st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, ðpb.Checkpoint{}, ðpb.Checkpoint{}) require.NoError(t, err) - require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(context.Background(), st, blk)) + require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk)) epoch1Start, err := slots.EpochStart(1) require.NoError(t, err) epoch2Start, err := slots.EpochStart(1) @@ -205,7 +205,7 @@ func Test_notifyNewHeadEvent(t *testing.T) { newHeadStateRoot := [32]byte{2} newHeadRoot := 
[32]byte{3} - err = srv.notifyNewHeadEvent(context.Background(), epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:]) + err = srv.notifyNewHeadEvent(t.Context(), epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:]) require.NoError(t, err) events := notifier.ReceivedEvents() require.Equal(t, 1, len(events)) @@ -225,11 +225,11 @@ func Test_notifyNewHeadEvent(t *testing.T) { } func TestRetrieveHead_ReadOnly(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := setupBeaconChain(t, beaconDB) - oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock()) + oldBlock := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, util.NewBeaconBlock()) oldRoot, err := oldBlock.Block().HashTreeRoot() require.NoError(t, err) service.head = &head{ @@ -243,7 +243,7 @@ func TestRetrieveHead_ReadOnly(t *testing.T) { ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} - wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock) + wsb := util.SaveBlock(t, t.Context(), service.cfg.BeaconDB, newHeadSignedBlock) newRoot, err := newHeadBlock.HashTreeRoot() require.NoError(t, err) state, blkRoot, err := prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc) @@ -256,9 +256,9 @@ func TestRetrieveHead_ReadOnly(t *testing.T) { headState, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, headState.SetSlot(1)) - require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Slot: 1, Root: newRoot[:]})) - require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot)) - require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState)) + require.NoError(t, 
service.cfg.BeaconDB.SaveStateSummary(t.Context(), ðpb.StateSummary{Slot: 1, Root: newRoot[:]})) + require.NoError(t, service.cfg.BeaconDB.SaveState(t.Context(), headState, newRoot)) + require.NoError(t, service.saveHead(t.Context(), newRoot, wsb, headState)) rOnlyState, err := service.HeadStateReadOnly(ctx) require.NoError(t, err) @@ -267,7 +267,7 @@ func TestRetrieveHead_ReadOnly(t *testing.T) { } func TestSaveOrphanedAtts(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := setupBeaconChain(t, beaconDB) service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second) @@ -333,7 +333,7 @@ func TestSaveOrphanedAtts(t *testing.T) { } func TestSaveOrphanedAttsElectra(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := setupBeaconChain(t, beaconDB) service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second) @@ -404,7 +404,7 @@ func TestSaveOrphanedOps(t *testing.T) { config.ShardCommitteePeriod = 0 params.OverrideBeaconConfig(config) - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := setupBeaconChain(t, beaconDB) service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second) @@ -481,7 +481,7 @@ func TestSaveOrphanedOps(t *testing.T) { } func TestSaveOrphanedAtts_CanFilter(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := setupBeaconChain(t, beaconDB) service.cfg.BLSToExecPool = blstoexec.NewPool() @@ -539,7 +539,7 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) { } func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := setupBeaconChain(t, beaconDB) service.genesisTime = 
time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second) @@ -604,7 +604,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) { } func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := setupBeaconChain(t, beaconDB) service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second) diff --git a/beacon-chain/blockchain/init_sync_process_block_test.go b/beacon-chain/blockchain/init_sync_process_block_test.go index 1dfb38101e..aa036d9b76 100644 --- a/beacon-chain/blockchain/init_sync_process_block_test.go +++ b/beacon-chain/blockchain/init_sync_process_block_test.go @@ -1,7 +1,6 @@ package blockchain import ( - "context" "testing" testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing" @@ -11,7 +10,7 @@ import ( ) func TestService_getBlock(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) s := setupBeaconChain(t, beaconDB) b1 := util.NewBeaconBlock() @@ -42,7 +41,7 @@ func TestService_getBlock(t *testing.T) { } func TestService_hasBlockInInitSyncOrDB(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) s := setupBeaconChain(t, beaconDB) b1 := util.NewBeaconBlock() diff --git a/beacon-chain/blockchain/metrics_test.go b/beacon-chain/blockchain/metrics_test.go index 8871472984..ed8c572eb2 100644 --- a/beacon-chain/blockchain/metrics_test.go +++ b/beacon-chain/blockchain/metrics_test.go @@ -1,7 +1,6 @@ package blockchain import ( - "context" "testing" eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1" @@ -15,7 +14,7 @@ func TestReportEpochMetrics_BadHeadState(t *testing.T) { h, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, h.SetValidators(nil)) - err = reportEpochMetrics(context.Background(), 
s, h) + err = reportEpochMetrics(t.Context(), s, h) require.ErrorContains(t, "failed to initialize precompute: state has nil validator slice", err) } @@ -25,7 +24,7 @@ func TestReportEpochMetrics_BadAttestation(t *testing.T) { h, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, h.AppendCurrentEpochAttestations(ð.PendingAttestation{InclusionDelay: 0})) - err = reportEpochMetrics(context.Background(), s, h) + err = reportEpochMetrics(t.Context(), s, h) require.ErrorContains(t, "attestation with inclusion delay of 0", err) } @@ -36,6 +35,6 @@ func TestReportEpochMetrics_SlashedValidatorOutOfBound(t *testing.T) { v.Slashed = true require.NoError(t, h.UpdateValidatorAtIndex(0, v)) require.NoError(t, h.AppendCurrentEpochAttestations(ð.PendingAttestation{InclusionDelay: 1, Data: util.HydrateAttestationData(ð.AttestationData{})})) - err = reportEpochMetrics(context.Background(), h, h) + err = reportEpochMetrics(t.Context(), h, h) require.ErrorContains(t, "slot 0 out of bounds", err) } diff --git a/beacon-chain/blockchain/process_attestation_test.go b/beacon-chain/blockchain/process_attestation_test.go index 76900a8fe7..9cd32d57b5 100644 --- a/beacon-chain/blockchain/process_attestation_test.go +++ b/beacon-chain/blockchain/process_attestation_test.go @@ -161,7 +161,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) { func TestService_GetRecentPreState(t *testing.T) { service, _ := minimalTestService(t) - ctx := context.Background() + ctx := t.Context() s, err := util.NewBeaconState() require.NoError(t, err) @@ -183,7 +183,7 @@ func TestService_GetRecentPreState(t *testing.T) { func TestService_GetAttPreState_Concurrency(t *testing.T) { service, _ := minimalTestService(t) - ctx := context.Background() + ctx := t.Context() s, err := util.NewBeaconState() require.NoError(t, err) @@ -353,21 +353,21 @@ func TestStore_UpdateCheckpointState(t *testing.T) { } func TestAttEpoch_MatchPrevEpoch(t *testing.T) { - ctx := context.Background() + 
ctx := t.Context() nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot require.NoError(t, verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)})) } func TestAttEpoch_MatchCurrentEpoch(t *testing.T) { - ctx := context.Background() + ctx := t.Context() nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot require.NoError(t, verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Epoch: 1})) } func TestAttEpoch_NotMatch(t *testing.T) { - ctx := context.Background() + ctx := t.Context() nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot err := verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)}) @@ -375,7 +375,7 @@ func TestAttEpoch_NotMatch(t *testing.T) { } func TestVerifyBeaconBlock_NoBlock(t *testing.T) { - ctx := context.Background() + ctx := t.Context() opts := testServiceOptsWithDB(t) service, err := NewService(ctx, opts...) require.NoError(t, err) @@ -385,7 +385,7 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) { } func TestVerifyBeaconBlock_futureBlock(t *testing.T) { - ctx := context.Background() + ctx := t.Context() opts := testServiceOptsWithDB(t) service, err := NewService(ctx, opts...) @@ -402,7 +402,7 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) { } func TestVerifyBeaconBlock_OK(t *testing.T) { - ctx := context.Background() + ctx := t.Context() opts := testServiceOptsWithDB(t) service, err := NewService(ctx, opts...) 
diff --git a/beacon-chain/blockchain/process_block_test.go b/beacon-chain/blockchain/process_block_test.go index 2b94c7cdcf..baee94cbb4 100644 --- a/beacon-chain/blockchain/process_block_test.go +++ b/beacon-chain/blockchain/process_block_test.go @@ -51,7 +51,7 @@ import ( ) func Test_pruneAttsFromPool_Electra(t *testing.T) { - ctx := context.Background() + ctx := t.Context() logHook := logTest.NewGlobal() params.SetupTestConfigCleanup(t) @@ -241,7 +241,7 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) { fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0} require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2)) err = service.fillInForkChoiceMissingBlocks( - context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint()) + t.Context(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint()) require.NoError(t, err) // 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8 @@ -284,7 +284,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) { require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2)) err = service.fillInForkChoiceMissingBlocks( - context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint()) + t.Context(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint()) require.NoError(t, err) // 5 nodes from the block tree 1. 
B0 - B3 - B4 - B6 - B8 @@ -294,7 +294,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) { wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]} for i, rt := range wantedRoots { assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i)) - assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt))) + assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(t.Context(), bytesutil.ToBytes32(rt))) } } @@ -340,7 +340,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) { // Set finalized epoch to 2. require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: r64})) err = service.fillInForkChoiceMissingBlocks( - context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint()) + t.Context(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint()) require.NoError(t, err) // There should be 1 node: block 65 @@ -373,7 +373,7 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) { require.NoError(t, err) err = service.fillInForkChoiceMissingBlocks( - context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint()) + t.Context(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint()) require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error()) } @@ -451,20 +451,20 @@ func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byt beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32) wsb, err := consensusblocks.NewSignedBeaconBlock(beaconBlock) require.NoError(t, err) - if err := beaconDB.SaveBlock(context.Background(), wsb); err != nil { + if err := beaconDB.SaveBlock(t.Context(), wsb); err != nil { return nil, err } - if err := beaconDB.SaveState(context.Background(), st.Copy(), 
bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil { + if err := beaconDB.SaveState(t.Context(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil { return nil, errors.Wrap(err, "could not save state") } } - if err := beaconDB.SaveState(context.Background(), st.Copy(), r1); err != nil { + if err := beaconDB.SaveState(t.Context(), st.Copy(), r1); err != nil { return nil, err } - if err := beaconDB.SaveState(context.Background(), st.Copy(), r7); err != nil { + if err := beaconDB.SaveState(t.Context(), st.Copy(), r7); err != nil { return nil, err } - if err := beaconDB.SaveState(context.Background(), st.Copy(), r8); err != nil { + if err := beaconDB.SaveState(t.Context(), st.Copy(), r8); err != nil { return nil, err } return [][]byte{r0[:], r1[:], nil, r3[:], r4[:], r5[:], r6[:], r7[:], r8[:]}, nil @@ -477,7 +477,7 @@ func TestCurrentSlot_HandlesOverflow(t *testing.T) { require.Equal(t, primitives.Slot(0), slot, "Unexpected slot") } func TestAncestorByDB_CtxErr(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) opts := testServiceOptsWithDB(t) service, err := NewService(ctx, opts...) require.NoError(t, err) @@ -510,18 +510,18 @@ func TestAncestor_HandleSkipSlot(t *testing.T) { beaconBlock := util.NewBeaconBlock() beaconBlock.Block.Slot = b.Block.Slot beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32) - util.SaveBlock(t, context.Background(), beaconDB, beaconBlock) + util.SaveBlock(t, t.Context(), beaconDB, beaconBlock) } // Slots 100 to 200 are skip slots. Requesting root at 150 will yield root at 100. The last physical block. - r, err := service.Ancestor(context.Background(), r200[:], 150) + r, err := service.Ancestor(t.Context(), r200[:], 150) require.NoError(t, err) if bytesutil.ToBytes32(r) != r100 { t.Error("Did not get correct root") } // Slots 1 to 100 are skip slots. Requesting root at 50 will yield root at 1. The last physical block. 
- r, err = service.Ancestor(context.Background(), r200[:], 50) + r, err = service.Ancestor(t.Context(), r200[:], 50) require.NoError(t, err) if bytesutil.ToBytes32(r) != r1 { t.Error("Did not get correct root") @@ -529,7 +529,7 @@ func TestAncestor_HandleSkipSlot(t *testing.T) { } func TestAncestor_CanUseForkchoice(t *testing.T) { - ctx := context.Background() + ctx := t.Context() opts := testServiceOptsWithDB(t) service, err := NewService(ctx, opts...) require.NoError(t, err) @@ -557,12 +557,12 @@ func TestAncestor_CanUseForkchoice(t *testing.T) { beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32) r, err := b.Block.HashTreeRoot() require.NoError(t, err) - st, blkRoot, err := prepareForkchoiceState(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, ojc, ofc) + st, blkRoot, err := prepareForkchoiceState(t.Context(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, ojc, ofc) require.NoError(t, err) require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot)) } - r, err := service.Ancestor(context.Background(), r200[:], 150) + r, err := service.Ancestor(t.Context(), r200[:], 150) require.NoError(t, err) if bytesutil.ToBytes32(r) != r100 { t.Error("Did not get correct root") @@ -594,14 +594,14 @@ func TestAncestor_CanUseDB(t *testing.T) { beaconBlock := util.NewBeaconBlock() beaconBlock.Block.Slot = b.Block.Slot beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32) - util.SaveBlock(t, context.Background(), beaconDB, beaconBlock) + util.SaveBlock(t, t.Context(), beaconDB, beaconBlock) } - st, blkRoot, err := prepareForkchoiceState(context.Background(), 200, r200, r200, params.BeaconConfig().ZeroHash, ojc, ofc) + st, blkRoot, err := prepareForkchoiceState(t.Context(), 200, r200, r200, params.BeaconConfig().ZeroHash, ojc, ofc) require.NoError(t, err) require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, 
blkRoot)) - r, err := service.Ancestor(context.Background(), r200[:], 150) + r, err := service.Ancestor(t.Context(), r200[:], 150) require.NoError(t, err) if bytesutil.ToBytes32(r) != r100 { t.Error("Did not get correct root") @@ -609,7 +609,7 @@ func TestAncestor_CanUseDB(t *testing.T) { } func TestEnsureRootNotZeroHashes(t *testing.T) { - ctx := context.Background() + ctx := t.Context() opts := testServiceOptsNoDB() service, err := NewService(ctx, opts...) require.NoError(t, err) @@ -623,7 +623,7 @@ func TestEnsureRootNotZeroHashes(t *testing.T) { } func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() opts := testServiceOptsNoDB() service, err := NewService(ctx, opts...) require.NoError(t, err) @@ -922,7 +922,7 @@ func TestRemoveBlockAttestationsInPool(t *testing.T) { r, err := b.Block.HashTreeRoot() require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := setupBeaconChain(t, beaconDB) require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: r[:]})) @@ -935,7 +935,7 @@ func TestRemoveBlockAttestationsInPool(t *testing.T) { require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts)) wsb, err := consensusblocks.NewSignedBeaconBlock(b) require.NoError(t, err) - service.pruneAttsFromPool(context.Background(), nil /* state not needed pre-Electra */, wsb) + service.pruneAttsFromPool(t.Context(), nil /* state not needed pre-Electra */, wsb) require.LogsDoNotContain(t, logHook, "Could not prune attestations") require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount()) } @@ -2667,7 +2667,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) { require.Equal(t, true, hasState) // Set deadlined context when processing the block - cancCtx, canc := context.WithCancel(context.Background()) + cancCtx, canc := context.WithCancel(t.Context()) canc() roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root) 
require.NoError(t, err) @@ -3315,7 +3315,7 @@ type testIsAvailableParams struct { } func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t) options := append(params.options, WithDataColumnStorage(dataColumnStorage)) diff --git a/beacon-chain/blockchain/receive_attestation_test.go b/beacon-chain/blockchain/receive_attestation_test.go index c16fb69291..b3928c0c1d 100644 --- a/beacon-chain/blockchain/receive_attestation_test.go +++ b/beacon-chain/blockchain/receive_attestation_test.go @@ -1,7 +1,6 @@ package blockchain import ( - "context" "testing" "time" @@ -32,7 +31,7 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) { service.genesisTime = time.Now() e := primitives.Epoch(slots.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1) - _, err := service.AttestationTargetState(context.Background(), ðpb.Checkpoint{Epoch: e}) + _, err := service.AttestationTargetState(t.Context(), ðpb.Checkpoint{Epoch: e}) require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err) } @@ -56,11 +55,11 @@ func TestVerifyLMDFFGConsistent(t *testing.T) { a.Data.Target.Root = []byte{'c'} r33Root := r33.Root() a.Data.BeaconBlockRoot = r33Root[:] - require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(context.Background(), a)) + require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(t.Context(), a)) r32Root := r32.Root() a.Data.Target.Root = r32Root[:] - err = service.VerifyLmdFfgConsistency(context.Background(), a) + err = service.VerifyLmdFfgConsistency(t.Context(), a) require.NoError(t, err, "Could not verify LMD and FFG votes to be consistent") } diff --git a/beacon-chain/blockchain/receive_block_test.go 
b/beacon-chain/blockchain/receive_block_test.go index f40469f656..c0a89b6cb7 100644 --- a/beacon-chain/blockchain/receive_block_test.go +++ b/beacon-chain/blockchain/receive_block_test.go @@ -1,7 +1,6 @@ package blockchain import ( - "context" "sync" "testing" "time" @@ -28,7 +27,7 @@ import ( ) func TestService_ReceiveBlock(t *testing.T) { - ctx := context.Background() + ctx := t.Context() genesis, keys := util.DeterministicGenesisState(t, 64) copiedGen := genesis.Copy() @@ -189,7 +188,7 @@ func TestHandleDA(t *testing.T) { require.NoError(t, err) s, _ := minimalTestService(t) - elapsed, err := s.handleDA(context.Background(), signedBeaconBlock, [fieldparams.RootLength]byte{}, nil) + elapsed, err := s.handleDA(t.Context(), signedBeaconBlock, [fieldparams.RootLength]byte{}, nil) require.NoError(t, err) require.Equal(t, true, elapsed > 0, "Elapsed time should be greater than 0") } @@ -228,7 +227,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) { } func TestService_ReceiveBlockBatch(t *testing.T) { - ctx := context.Background() + ctx := t.Context() genesis, keys := util.DeterministicGenesisState(t, 64) genFullBlock := func(t *testing.T, conf *util.BlockGenConfig, slot primitives.Slot) *ethpb.SignedBeaconBlock { @@ -293,23 +292,23 @@ func TestService_ReceiveBlockBatch(t *testing.T) { func TestService_HasBlock(t *testing.T) { s, _ := minimalTestService(t) r := [32]byte{'a'} - if s.HasBlock(context.Background(), r) { + if s.HasBlock(t.Context(), r) { t.Error("Should not have block") } wsb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock()) require.NoError(t, err) - require.NoError(t, s.saveInitSyncBlock(context.Background(), r, wsb)) - if !s.HasBlock(context.Background(), r) { + require.NoError(t, s.saveInitSyncBlock(t.Context(), r, wsb)) + if !s.HasBlock(t.Context(), r) { t.Error("Should have block") } b := util.NewBeaconBlock() b.Block.Slot = 1 - util.SaveBlock(t, context.Background(), s.cfg.BeaconDB, b) + util.SaveBlock(t, t.Context(), 
s.cfg.BeaconDB, b) r, err = b.Block.HashTreeRoot() require.NoError(t, err) - require.Equal(t, true, s.HasBlock(context.Background(), r)) + require.Equal(t, true, s.HasBlock(t.Context(), r)) s.blockBeingSynced.set(r) - require.Equal(t, false, s.HasBlock(context.Background(), r)) + require.Equal(t, false, s.HasBlock(t.Context(), r)) } func TestCheckSaveHotStateDB_Enabling(t *testing.T) { @@ -318,7 +317,7 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) { st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB)) s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second) - require.NoError(t, s.checkSaveHotStateDB(context.Background())) + require.NoError(t, s.checkSaveHotStateDB(t.Context())) assert.LogsContain(t, hook, "Entering mode to save hot states in DB") } @@ -329,10 +328,10 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) { st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB)) s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second) - require.NoError(t, s.checkSaveHotStateDB(context.Background())) + require.NoError(t, s.checkSaveHotStateDB(t.Context())) s.genesisTime = time.Now() - require.NoError(t, s.checkSaveHotStateDB(context.Background())) + require.NoError(t, s.checkSaveHotStateDB(t.Context())) assert.LogsContain(t, hook, "Exiting mode to save hot states in DB") } @@ -341,7 +340,7 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) { s, _ := minimalTestService(t) s.genesisTime = time.Now() - require.NoError(t, s.checkSaveHotStateDB(context.Background())) + require.NoError(t, s.checkSaveHotStateDB(t.Context())) assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB") } @@ -456,7 +455,7 @@ func Test_executePostFinalizationTasks(t *testing.T) { headState, err := util.NewBeaconStateElectra() require.NoError(t, err) - finalizedStRoot, err := 
headState.HashTreeRoot(context.Background()) + finalizedStRoot, err := headState.HashTreeRoot(t.Context()) require.NoError(t, err) genesis := util.NewBeaconBlock() diff --git a/beacon-chain/blockchain/service_norace_test.go b/beacon-chain/blockchain/service_norace_test.go index 8b1bcae9a2..79413b2804 100644 --- a/beacon-chain/blockchain/service_norace_test.go +++ b/beacon-chain/blockchain/service_norace_test.go @@ -1,7 +1,6 @@ package blockchain import ( - "context" "io" "testing" @@ -26,7 +25,7 @@ func TestChainService_SaveHead_DataRace(t *testing.T) { st, _ := util.DeterministicGenesisState(t, 1) require.NoError(t, err) go func() { - require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st)) + require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st)) }() - require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st)) + require.NoError(t, s.saveHead(t.Context(), [32]byte{}, b, st)) } diff --git a/beacon-chain/blockchain/service_test.go b/beacon-chain/blockchain/service_test.go index 523452c067..04f14f317a 100644 --- a/beacon-chain/blockchain/service_test.go +++ b/beacon-chain/blockchain/service_test.go @@ -42,7 +42,7 @@ import ( ) func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service { - ctx := context.Background() + ctx := t.Context() var web3Service *execution.Service var err error srv, endpoint, err := mockExecution.SetupRPCServer() @@ -115,7 +115,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service { func TestChainStartStop_Initialized(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) chainService := setupBeaconChain(t, beaconDB) @@ -152,7 +152,7 @@ func TestChainStartStop_Initialized(t *testing.T) { func TestChainStartStop_GenesisZeroHashes(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) chainService := setupBeaconChain(t, beaconDB) @@ -184,7 +184,7 
@@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) { func TestChainService_InitializeBeaconChain(t *testing.T) { helpers.ClearCache() beaconDB := testDB.SetupDB(t) - ctx := context.Background() + ctx := t.Context() bc := setupBeaconChain(t, beaconDB) var err error @@ -226,7 +226,7 @@ func TestChainService_InitializeBeaconChain(t *testing.T) { } func TestChainService_CorrectGenesisRoots(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) chainService := setupBeaconChain(t, beaconDB) @@ -295,7 +295,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) { require.NoError(t, err) assert.DeepSSZEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Head state incorrect") assert.Equal(t, c.HeadSlot(), headBlock.Block.Slot, "Head slot incorrect") - r, err := c.HeadRoot(context.Background()) + r, err := c.HeadRoot(t.Context()) require.NoError(t, err) if !bytes.Equal(headRoot[:], r) { t.Error("head slot incorrect") @@ -346,7 +346,7 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) { func TestChainService_SaveHeadNoDB(t *testing.T) { beaconDB := testDB.SetupDB(t) - ctx := context.Background() + ctx := t.Context() fc := doublylinkedtree.New() s := &Service{ cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, fc), ForkChoiceStore: fc}, @@ -370,7 +370,7 @@ func TestChainService_SaveHeadNoDB(t *testing.T) { } func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) s := &Service{ cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB}, @@ -391,7 +391,7 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) { } func TestServiceStop_SaveCachedBlocks(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) beaconDB := testDB.SetupDB(t) s := &Service{ cfg: &config{BeaconDB: beaconDB, StateGen: 
stategen.New(beaconDB, doublylinkedtree.New())}, @@ -410,13 +410,13 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) { } func TestProcessChainStartTime_ReceivedFeed(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := setupBeaconChain(t, beaconDB) mgs := &MockClockSetter{} service.clockSetter = mgs gt := time.Now() - service.onExecutionChainStart(context.Background(), gt) + service.onExecutionChainStart(t.Context(), gt) gs, err := beaconDB.GenesisState(ctx) require.NoError(t, err) require.NotEqual(t, nil, gs) @@ -429,7 +429,7 @@ func TestProcessChainStartTime_ReceivedFeed(t *testing.T) { func BenchmarkHasBlockDB(b *testing.B) { beaconDB := testDB.SetupDB(b) - ctx := context.Background() + ctx := b.Context() s := &Service{ cfg: &config{BeaconDB: beaconDB}, } @@ -447,7 +447,7 @@ func BenchmarkHasBlockDB(b *testing.B) { } func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) { - ctx := context.Background() + ctx := b.Context() beaconDB := testDB.SetupDB(b) s := &Service{ cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB}, diff --git a/beacon-chain/blockchain/setup_test.go b/beacon-chain/blockchain/setup_test.go index d0041ecb80..bf172bf0f9 100644 --- a/beacon-chain/blockchain/setup_test.go +++ b/beacon-chain/blockchain/setup_test.go @@ -108,7 +108,7 @@ type testServiceRequirements struct { } func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceRequirements) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) fcs := doublylinkedtree.New() sg := stategen.New(beaconDB, fcs) diff --git a/beacon-chain/blockchain/weak_subjectivity_checks_test.go b/beacon-chain/blockchain/weak_subjectivity_checks_test.go index 0ac61c9043..a1207ee713 100644 --- a/beacon-chain/blockchain/weak_subjectivity_checks_test.go +++ b/beacon-chain/blockchain/weak_subjectivity_checks_test.go @@ -1,7 +1,6 @@ package blockchain import ( - "context" 
"testing" testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing" @@ -22,7 +21,7 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) { b := util.NewBeaconBlock() b.Block.Slot = 1792480 - util.SaveBlock(t, context.Background(), beaconDB, b) + util.SaveBlock(t, t.Context(), beaconDB, b) r, err := b.Block.HashTreeRoot() require.NoError(t, err) @@ -79,7 +78,7 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) { } require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch})) cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint() - err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), cp.Epoch) + err = s.wsVerifier.VerifyWeakSubjectivity(t.Context(), cp.Epoch) if tt.wantErr == nil { require.NoError(t, err) } else { diff --git a/beacon-chain/builder/service_test.go b/beacon-chain/builder/service_test.go index 3151202e31..426e19b6f7 100644 --- a/beacon-chain/builder/service_test.go +++ b/beacon-chain/builder/service_test.go @@ -1,7 +1,6 @@ package builder import ( - "context" "testing" "time" @@ -15,19 +14,19 @@ import ( ) func Test_NewServiceWithBuilder(t *testing.T) { - s, err := NewService(context.Background(), WithBuilderClient(&buildertesting.MockClient{})) + s, err := NewService(t.Context(), WithBuilderClient(&buildertesting.MockClient{})) require.NoError(t, err) assert.Equal(t, true, s.Configured()) } func Test_NewServiceWithoutBuilder(t *testing.T) { - s, err := NewService(context.Background()) + s, err := NewService(t.Context()) require.NoError(t, err) assert.Equal(t, false, s.Configured()) } func Test_RegisterValidator(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := dbtesting.SetupDB(t) headFetcher := &blockchainTesting.ChainService{} builder := buildertesting.NewClient() @@ -40,7 +39,7 @@ func Test_RegisterValidator(t *testing.T) { } func Test_RegisterValidator_WithCache(t *testing.T) { - ctx := context.Background() + ctx := t.Context() headFetcher := 
&blockchainTesting.ChainService{} builder := buildertesting.NewClient() s, err := NewService(ctx, WithRegistrationCache(), WithHeadFetcher(headFetcher), WithBuilderClient(&builder)) @@ -55,16 +54,16 @@ func Test_RegisterValidator_WithCache(t *testing.T) { } func Test_BuilderMethodsWithouClient(t *testing.T) { - s, err := NewService(context.Background()) + s, err := NewService(t.Context()) require.NoError(t, err) assert.Equal(t, false, s.Configured()) - _, err = s.GetHeader(context.Background(), 0, [32]byte{}, [48]byte{}) + _, err = s.GetHeader(t.Context(), 0, [32]byte{}, [48]byte{}) assert.ErrorContains(t, ErrNoBuilder.Error(), err) - _, _, err = s.SubmitBlindedBlock(context.Background(), nil) + _, _, err = s.SubmitBlindedBlock(t.Context(), nil) assert.ErrorContains(t, ErrNoBuilder.Error(), err) - err = s.RegisterValidator(context.Background(), nil) + err = s.RegisterValidator(t.Context(), nil) assert.ErrorContains(t, ErrNoBuilder.Error(), err) } diff --git a/beacon-chain/cache/committee_fuzz_test.go b/beacon-chain/cache/committee_fuzz_test.go index 533a628b44..cd2fcf459d 100644 --- a/beacon-chain/cache/committee_fuzz_test.go +++ b/beacon-chain/cache/committee_fuzz_test.go @@ -3,7 +3,6 @@ package cache import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/testing/assert" @@ -30,8 +29,8 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) { for i := 0; i < 100000; i++ { fuzzer.Fuzz(c) - require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), c)) - _, err := cache.Committee(context.Background(), 0, c.Seed, 0) + require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c)) + _, err := cache.Committee(t.Context(), 0, c.Seed, 0) require.NoError(t, err) } @@ -45,9 +44,9 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) { for i := 0; i < 100000; i++ { fuzzer.Fuzz(c) - require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), c)) + require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c)) - indices, 
err := cache.ActiveIndices(context.Background(), c.Seed) + indices, err := cache.ActiveIndices(t.Context(), c.Seed) require.NoError(t, err) assert.DeepEqual(t, c.SortedIndices, indices) } diff --git a/beacon-chain/cache/committee_test.go b/beacon-chain/cache/committee_test.go index d5ea45b6a1..ab3dfa7ac4 100644 --- a/beacon-chain/cache/committee_test.go +++ b/beacon-chain/cache/committee_test.go @@ -44,15 +44,15 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) { slot := params.BeaconConfig().SlotsPerEpoch committeeIndex := primitives.CommitteeIndex(1) - indices, err := cache.Committee(context.Background(), slot, item.Seed, committeeIndex) + indices, err := cache.Committee(t.Context(), slot, item.Seed, committeeIndex) require.NoError(t, err) if indices != nil { t.Error("Expected committee not to exist in empty cache") } - require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item)) + require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), item)) wantedIndex := primitives.CommitteeIndex(0) - indices, err = cache.Committee(context.Background(), slot, item.Seed, wantedIndex) + indices, err = cache.Committee(t.Context(), slot, item.Seed, wantedIndex) require.NoError(t, err) start, end := startEndIndices(item, uint64(wantedIndex)) @@ -63,15 +63,15 @@ func TestCommitteeCache_ActiveIndices(t *testing.T) { cache := NewCommitteesCache() item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6}} - indices, err := cache.ActiveIndices(context.Background(), item.Seed) + indices, err := cache.ActiveIndices(t.Context(), item.Seed) require.NoError(t, err) if indices != nil { t.Error("Expected committee not to exist in empty cache") } - require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item)) + require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), item)) - indices, err = cache.ActiveIndices(context.Background(), item.Seed) + indices, err = cache.ActiveIndices(t.Context(), 
item.Seed) require.NoError(t, err) assert.DeepEqual(t, item.SortedIndices, indices) } @@ -80,13 +80,13 @@ func TestCommitteeCache_ActiveCount(t *testing.T) { cache := NewCommitteesCache() item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6}} - count, err := cache.ActiveIndicesCount(context.Background(), item.Seed) + count, err := cache.ActiveIndicesCount(t.Context(), item.Seed) require.NoError(t, err) assert.Equal(t, 0, count, "Expected active count not to exist in empty cache") - require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item)) + require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), item)) - count, err = cache.ActiveIndicesCount(context.Background(), item.Seed) + count, err = cache.ActiveIndicesCount(t.Context(), item.Seed) require.NoError(t, err) assert.Equal(t, len(item.SortedIndices), count) } @@ -100,7 +100,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) { for i := start; i < end; i++ { s := []byte(strconv.Itoa(i)) item := &Committees{Seed: bytesutil.ToBytes32(s)} - require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item)) + require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), item)) } k := cache.CommitteeCache.Keys() @@ -130,7 +130,7 @@ func TestCommitteeCacheOutOfRange(t *testing.T) { assert.NoError(t, err) _ = cache.CommitteeCache.Add(key, comms) - _, err = cache.Committee(context.Background(), 0, seed, math.MaxUint64) // Overflow! + _, err = cache.Committee(t.Context(), 0, seed, math.MaxUint64) // Overflow! 
require.NotNil(t, err, "Did not fail as expected") } @@ -138,15 +138,15 @@ func TestCommitteeCache_DoesNothingWhenCancelledContext(t *testing.T) { cache := NewCommitteesCache() item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6}} - count, err := cache.ActiveIndicesCount(context.Background(), item.Seed) + count, err := cache.ActiveIndicesCount(t.Context(), item.Seed) require.NoError(t, err) assert.Equal(t, 0, count, "Expected active count not to exist in empty cache") - cancelled, cancel := context.WithCancel(context.Background()) + cancelled, cancel := context.WithCancel(t.Context()) cancel() require.ErrorIs(t, cache.AddCommitteeShuffledList(cancelled, item), context.Canceled) - count, err = cache.ActiveIndicesCount(context.Background(), item.Seed) + count, err = cache.ActiveIndicesCount(t.Context(), item.Seed) require.NoError(t, err) assert.Equal(t, 0, count) } diff --git a/beacon-chain/cache/depositsnapshot/deposit_cache_test.go b/beacon-chain/cache/depositsnapshot/deposit_cache_test.go index 05d01d074c..f354753241 100644 --- a/beacon-chain/cache/depositsnapshot/deposit_cache_test.go +++ b/beacon-chain/cache/depositsnapshot/deposit_cache_test.go @@ -2,7 +2,6 @@ package depositsnapshot import ( "bytes" - "context" "fmt" "math/big" "testing" @@ -55,7 +54,7 @@ func TestAllDeposits_ReturnsAllDeposits(t *testing.T) { } dc.deposits = deposits - d := dc.AllDeposits(context.Background(), nil) + d := dc.AllDeposits(t.Context(), nil) assert.Equal(t, len(deposits), len(d)) } @@ -95,7 +94,7 @@ func TestAllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testing.T) { } dc.deposits = deposits - d := dc.AllDeposits(context.Background(), big.NewInt(11)) + d := dc.AllDeposits(t.Context(), big.NewInt(11)) assert.Equal(t, 5, len(d)) } @@ -127,7 +126,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) { DepositRoot: wantedRoot, }, } - n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(13)) + n, 
root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(13)) assert.Equal(t, 4, int(n)) require.DeepEqual(t, wantedRoot, root[:]) }) @@ -143,7 +142,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) { DepositRoot: wantedRoot, }, } - n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(10)) + n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(10)) assert.Equal(t, 1, int(n)) require.DeepEqual(t, wantedRoot, root[:]) }) @@ -169,7 +168,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) { Deposit: ðpb.Deposit{}, }, } - n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(10)) + n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(10)) assert.Equal(t, 2, int(n)) require.DeepEqual(t, wantedRoot, root[:]) }) @@ -185,7 +184,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) { DepositRoot: wantedRoot, }, } - n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(7)) + n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(7)) assert.Equal(t, 0, int(n)) require.DeepEqual(t, params.BeaconConfig().ZeroHash, root) }) @@ -201,7 +200,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) { DepositRoot: wantedRoot, }, } - n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(10)) + n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(10)) assert.Equal(t, 1, int(n)) require.DeepEqual(t, wantedRoot, root[:]) }) @@ -237,7 +236,7 @@ func TestDepositsNumberAndRootAtHeight(t *testing.T) { Deposit: ðpb.Deposit{}, }, } - n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(9)) + n, root := dc.DepositsNumberAndRootAtHeight(t.Context(), big.NewInt(9)) assert.Equal(t, 3, int(n)) require.DeepEqual(t, wantedRoot, root[:]) }) @@ -288,10 +287,10 @@ func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) { }, }, } - dc.InsertDepositContainers(context.Background(), ctrs) + 
dc.InsertDepositContainers(t.Context(), ctrs) pk1 := bytesutil.PadTo([]byte("pk1"), 48) - dep, blkNum := dc.DepositByPubkey(context.Background(), pk1) + dep, blkNum := dc.DepositByPubkey(t.Context(), pk1) if dep == nil || !bytes.Equal(dep.Data.PublicKey, pk1) { t.Error("Returned wrong deposit") @@ -303,7 +302,7 @@ func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) { func TestInsertDepositContainers_NotNil(t *testing.T) { dc, err := New() require.NoError(t, err) - dc.InsertDepositContainers(context.Background(), nil) + dc.InsertDepositContainers(t.Context(), nil) assert.DeepEqual(t, []*ethpb.DepositContainer{}, dc.deposits) } @@ -359,10 +358,10 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) { err = dc.finalizedDeposits.depositTree.pushLeaf(root) require.NoError(t, err) } - err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0) require.NoError(t, err) - cachedDeposits, err := dc.FinalizedDeposits(context.Background()) + cachedDeposits, err := dc.FinalizedDeposits(t.Context()) require.NoError(t, err) require.NotNil(t, cachedDeposits, "Deposits not cached") assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex()) @@ -425,15 +424,15 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) { err = dc.finalizedDeposits.Deposits().Insert(root[:], 0) require.NoError(t, err) } - err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 1, [32]byte{}, 0) require.NoError(t, err) - err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0) require.NoError(t, err) dc.deposits = append(dc.deposits, []*ethpb.DepositContainer{newFinalizedDeposit}...) 
- cachedDeposits, err := dc.FinalizedDeposits(context.Background()) + cachedDeposits, err := dc.FinalizedDeposits(t.Context()) require.NoError(t, err) require.NotNil(t, cachedDeposits, "Deposits not cached") require.Equal(t, int64(1), cachedDeposits.MerkleTrieIndex()) @@ -459,10 +458,10 @@ func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) { dc, err := New() require.NoError(t, err) - err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0) require.NoError(t, err) - cachedDeposits, err := dc.FinalizedDeposits(context.Background()) + cachedDeposits, err := dc.FinalizedDeposits(t.Context()) require.NoError(t, err) require.NotNil(t, cachedDeposits, "Deposits not cached") assert.Equal(t, int64(-1), cachedDeposits.MerkleTrieIndex()) @@ -509,10 +508,10 @@ func TestFinalizedDeposits_HandleSmallerThanExpectedDeposits(t *testing.T) { } dc.deposits = finalizedDeposits - err = dc.InsertFinalizedDeposits(context.Background(), 5, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 5, [32]byte{}, 0) require.NoError(t, err) - cachedDeposits, err := dc.FinalizedDeposits(context.Background()) + cachedDeposits, err := dc.FinalizedDeposits(t.Context()) require.NoError(t, err) require.NotNil(t, cachedDeposits, "Deposits not cached") assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex()) @@ -592,14 +591,14 @@ func TestFinalizedDeposits_HandleLowerEth1DepositIndex(t *testing.T) { } dc.deposits = finalizedDeposits - err = dc.InsertFinalizedDeposits(context.Background(), 5, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 5, [32]byte{}, 0) require.NoError(t, err) // Reinsert finalized deposits with a lower index. 
- err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0) require.NoError(t, err) - cachedDeposits, err := dc.FinalizedDeposits(context.Background()) + cachedDeposits, err := dc.FinalizedDeposits(t.Context()) require.NoError(t, err) require.NotNil(t, cachedDeposits, "Deposits not cached") assert.Equal(t, int64(5), cachedDeposits.MerkleTrieIndex()) @@ -670,10 +669,10 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) { Index: 3, DepositRoot: rootCreator('D'), }) - err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 1, [32]byte{}, 0) require.NoError(t, err) - deps := dc.NonFinalizedDeposits(context.Background(), 1, nil) + deps := dc.NonFinalizedDeposits(t.Context(), 1, nil) assert.Equal(t, 2, len(deps)) } @@ -681,7 +680,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits_Nil(t *testing.T) { dc, err := New() require.NoError(t, err) - deps := dc.NonFinalizedDeposits(context.Background(), 0, nil) + deps := dc.NonFinalizedDeposits(t.Context(), 0, nil) assert.Equal(t, 0, len(deps)) } @@ -740,10 +739,10 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test Index: 3, DepositRoot: rootCreator('D'), }) - err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 1, [32]byte{}, 0) require.NoError(t, err) - deps := dc.NonFinalizedDeposits(context.Background(), 1, big.NewInt(10)) + deps := dc.NonFinalizedDeposits(t.Context(), 1, big.NewInt(10)) assert.Equal(t, 1, len(deps)) } @@ -785,21 +784,21 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) { assert.NoError(t, err) // Perform this in a nonsensical ordering - err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 1, [32]byte{}, 0) require.NoError(t, err) - err = 
dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 2, [32]byte{}, 0) require.NoError(t, err) - err = dc.InsertFinalizedDeposits(context.Background(), 3, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 3, [32]byte{}, 0) require.NoError(t, err) - err = dc.InsertFinalizedDeposits(context.Background(), 4, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 4, [32]byte{}, 0) require.NoError(t, err) - err = dc.InsertFinalizedDeposits(context.Background(), 4, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 4, [32]byte{}, 0) require.NoError(t, err) // Mimic finalized deposit trie fetch. - fd, err := dc.FinalizedDeposits(context.Background()) + fd, err := dc.FinalizedDeposits(t.Context()) require.NoError(t, err) - deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex(), nil) + deps := dc.NonFinalizedDeposits(t.Context(), fd.MerkleTrieIndex(), nil) insertIndex := fd.MerkleTrieIndex() + 1 for _, dep := range deps { @@ -810,24 +809,24 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) { } insertIndex++ } - err = dc.InsertFinalizedDeposits(context.Background(), 5, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 5, [32]byte{}, 0) require.NoError(t, err) - err = dc.InsertFinalizedDeposits(context.Background(), 6, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 6, [32]byte{}, 0) require.NoError(t, err) - err = dc.InsertFinalizedDeposits(context.Background(), 9, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 9, [32]byte{}, 0) require.NoError(t, err) - err = dc.InsertFinalizedDeposits(context.Background(), 12, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 12, [32]byte{}, 0) require.NoError(t, err) - err = dc.InsertFinalizedDeposits(context.Background(), 15, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 15, [32]byte{}, 0) require.NoError(t, err) - err = 
dc.InsertFinalizedDeposits(context.Background(), 15, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 15, [32]byte{}, 0) require.NoError(t, err) - err = dc.InsertFinalizedDeposits(context.Background(), 14, [32]byte{}, 0) + err = dc.InsertFinalizedDeposits(t.Context(), 14, [32]byte{}, 0) require.NoError(t, err) - fd, err = dc.FinalizedDeposits(context.Background()) + fd, err = dc.FinalizedDeposits(t.Context()) require.NoError(t, err) - deps = dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex(), nil) + deps = dc.NonFinalizedDeposits(t.Context(), fd.MerkleTrieIndex(), nil) insertIndex = fd.MerkleTrieIndex() + 1 for _, dep := range dc.deposits { @@ -888,9 +887,9 @@ func TestMin(t *testing.T) { } dc.deposits = finalizedDeposits - fd, err := dc.FinalizedDeposits(context.Background()) + fd, err := dc.FinalizedDeposits(t.Context()) require.NoError(t, err) - deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex(), big.NewInt(16)) + deps := dc.NonFinalizedDeposits(t.Context(), fd.MerkleTrieIndex(), big.NewInt(16)) insertIndex := fd.MerkleTrieIndex() + 1 for _, dep := range deps { depHash, err := dep.Data.HashTreeRoot() @@ -908,28 +907,28 @@ func TestDepositMap_WorksCorrectly(t *testing.T) { require.NoError(t, err) pk0 := bytesutil.PadTo([]byte("pk0"), 48) - dep, _ := dc.DepositByPubkey(context.Background(), pk0) + dep, _ := dc.DepositByPubkey(t.Context(), pk0) var nilDep *ethpb.Deposit assert.DeepEqual(t, nilDep, dep) dep = ðpb.Deposit{Proof: makeDepositProof(), Data: ðpb.Deposit_Data{PublicKey: pk0, Amount: 1000}} - assert.NoError(t, dc.InsertDeposit(context.Background(), dep, 1000, 0, [32]byte{})) + assert.NoError(t, dc.InsertDeposit(t.Context(), dep, 1000, 0, [32]byte{})) - dep, _ = dc.DepositByPubkey(context.Background(), pk0) + dep, _ = dc.DepositByPubkey(t.Context(), pk0) assert.NotEqual(t, nilDep, dep) assert.Equal(t, uint64(1000), dep.Data.Amount) dep = ðpb.Deposit{Proof: makeDepositProof(), Data: 
ðpb.Deposit_Data{PublicKey: pk0, Amount: 10000}} - assert.NoError(t, dc.InsertDeposit(context.Background(), dep, 1000, 1, [32]byte{})) + assert.NoError(t, dc.InsertDeposit(t.Context(), dep, 1000, 1, [32]byte{})) // Make sure we have the same deposit returned over here. - dep, _ = dc.DepositByPubkey(context.Background(), pk0) + dep, _ = dc.DepositByPubkey(t.Context(), pk0) assert.NotEqual(t, nilDep, dep) assert.Equal(t, uint64(1000), dep.Data.Amount) // Make sure another key doesn't work. pk1 := bytesutil.PadTo([]byte("pk1"), 48) - dep, _ = dc.DepositByPubkey(context.Background(), pk1) + dep, _ = dc.DepositByPubkey(t.Context(), pk1) assert.DeepEqual(t, nilDep, dep) } diff --git a/beacon-chain/cache/depositsnapshot/deposit_fetcher_test.go b/beacon-chain/cache/depositsnapshot/deposit_fetcher_test.go index 0dcd3e7961..a559698ddd 100644 --- a/beacon-chain/cache/depositsnapshot/deposit_fetcher_test.go +++ b/beacon-chain/cache/depositsnapshot/deposit_fetcher_test.go @@ -1,7 +1,6 @@ package depositsnapshot import ( - "context" "math/big" "testing" @@ -13,14 +12,14 @@ var _ PendingDepositsFetcher = (*Cache)(nil) func TestInsertPendingDeposit_OK(t *testing.T) { dc := Cache{} - dc.InsertPendingDeposit(context.Background(), ðpb.Deposit{}, 111, 100, [32]byte{}) + dc.InsertPendingDeposit(t.Context(), ðpb.Deposit{}, 111, 100, [32]byte{}) assert.Equal(t, 1, len(dc.pendingDeposits), "deposit not inserted") } func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) { dc := Cache{} - dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{}) + dc.InsertPendingDeposit(t.Context(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{}) assert.Equal(t, 0, len(dc.pendingDeposits)) } @@ -34,13 +33,13 @@ func TestPendingDeposits_OK(t *testing.T) { {Eth1BlockHeight: 6, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("c")}}}, } - deposits := dc.PendingDeposits(context.Background(), big.NewInt(4)) + deposits := dc.PendingDeposits(t.Context(), big.NewInt(4)) 
expected := []*ethpb.Deposit{ {Proof: [][]byte{[]byte("A")}}, {Proof: [][]byte{[]byte("B")}}, } assert.DeepSSZEqual(t, expected, deposits) - all := dc.PendingDeposits(context.Background(), nil) + all := dc.PendingDeposits(t.Context(), nil) assert.Equal(t, len(dc.pendingDeposits), len(all), "PendingDeposits(ctx, nil) did not return all deposits") } diff --git a/beacon-chain/cache/depositsnapshot/deposit_pruner_test.go b/beacon-chain/cache/depositsnapshot/deposit_pruner_test.go index 99810b2aa9..e4eef928c7 100644 --- a/beacon-chain/cache/depositsnapshot/deposit_pruner_test.go +++ b/beacon-chain/cache/depositsnapshot/deposit_pruner_test.go @@ -1,7 +1,6 @@ package depositsnapshot import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/encoding/bytesutil" @@ -22,7 +21,7 @@ func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) { {Eth1BlockHeight: 12, Index: 12}, } - dc.PrunePendingDeposits(context.Background(), 0) + dc.PrunePendingDeposits(t.Context(), 0) expected := []*ethpb.DepositContainer{ {Eth1BlockHeight: 2, Index: 2}, {Eth1BlockHeight: 4, Index: 4}, @@ -46,7 +45,7 @@ func TestPrunePendingDeposits_OK(t *testing.T) { {Eth1BlockHeight: 12, Index: 12}, } - dc.PrunePendingDeposits(context.Background(), 6) + dc.PrunePendingDeposits(t.Context(), 6) expected := []*ethpb.DepositContainer{ {Eth1BlockHeight: 6, Index: 6}, {Eth1BlockHeight: 8, Index: 8}, @@ -65,7 +64,7 @@ func TestPrunePendingDeposits_OK(t *testing.T) { {Eth1BlockHeight: 12, Index: 12}, } - dc.PrunePendingDeposits(context.Background(), 10) + dc.PrunePendingDeposits(t.Context(), 10) expected = []*ethpb.DepositContainer{ {Eth1BlockHeight: 10, Index: 10}, {Eth1BlockHeight: 12, Index: 12}, @@ -86,7 +85,7 @@ func TestPruneAllPendingDeposits(t *testing.T) { {Eth1BlockHeight: 12, Index: 12}, } - dc.PruneAllPendingDeposits(context.Background()) + dc.PruneAllPendingDeposits(t.Context()) expected := []*ethpb.DepositContainer{} assert.DeepEqual(t, expected, dc.pendingDeposits) @@ -128,10 +127,10 @@ func 
TestPruneProofs_Ok(t *testing.T) { } for _, ins := range deposits { - assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})) + assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{})) } - require.NoError(t, dc.PruneProofs(context.Background(), 1)) + require.NoError(t, dc.PruneProofs(t.Context(), 1)) assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof) assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof) @@ -173,10 +172,10 @@ func TestPruneProofs_SomeAlreadyPruned(t *testing.T) { } for _, ins := range deposits { - assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})) + assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{})) } - require.NoError(t, dc.PruneProofs(context.Background(), 2)) + require.NoError(t, dc.PruneProofs(t.Context(), 2)) assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof) } @@ -217,10 +216,10 @@ func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) { } for _, ins := range deposits { - assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})) + assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{})) } - require.NoError(t, dc.PruneProofs(context.Background(), 99)) + require.NoError(t, dc.PruneProofs(t.Context(), 99)) assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof) assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof) @@ -264,10 +263,10 @@ func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) { } for _, ins := range deposits { - assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})) + assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{})) } - require.NoError(t, dc.PruneProofs(context.Background(), 4)) + 
require.NoError(t, dc.PruneProofs(t.Context(), 4)) assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof) assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof) @@ -311,10 +310,10 @@ func TestPruneAllProofs(t *testing.T) { } for _, ins := range deposits { - assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})) + assert.NoError(t, dc.InsertDeposit(t.Context(), ins.deposit, ins.blkNum, ins.index, [32]byte{})) } - dc.PruneAllProofs(context.Background()) + dc.PruneAllProofs(t.Context()) assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof) assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof) diff --git a/beacon-chain/cache/registration_test.go b/beacon-chain/cache/registration_test.go index 0819e5249f..6efc5f1037 100644 --- a/beacon-chain/cache/registration_test.go +++ b/beacon-chain/cache/registration_test.go @@ -1,7 +1,6 @@ package cache import ( - "context" "testing" "time" @@ -24,7 +23,7 @@ func TestRegistrationCache(t *testing.T) { Timestamp: uint64(time.Now().Unix()), Pubkey: pubkey, } - cache.UpdateIndexToRegisteredMap(context.Background(), m) + cache.UpdateIndexToRegisteredMap(t.Context(), m) reg, err := cache.RegistrationByIndex(validatorIndex) require.NoError(t, err) require.Equal(t, string(reg.Pubkey), string(pubkey)) @@ -38,7 +37,7 @@ func TestRegistrationCache(t *testing.T) { Timestamp: uint64(time.Now().Unix()), Pubkey: pubkey, } - cache.UpdateIndexToRegisteredMap(context.Background(), m) + cache.UpdateIndexToRegisteredMap(t.Context(), m) reg, err := cache.RegistrationByIndex(validatorIndex2) require.NoError(t, err) require.Equal(t, string(reg.Pubkey), string(pubkey)) diff --git a/beacon-chain/cache/skip_slot_cache_test.go b/beacon-chain/cache/skip_slot_cache_test.go index ad73fb5bf3..9ec18d08b2 100644 --- a/beacon-chain/cache/skip_slot_cache_test.go +++ b/beacon-chain/cache/skip_slot_cache_test.go @@ -1,7 +1,6 @@ package cache_test import ( - "context" "sync" 
"testing" @@ -14,7 +13,7 @@ import ( ) func TestSkipSlotCache_RoundTrip(t *testing.T) { - ctx := context.Background() + ctx := t.Context() c := cache.NewSkipSlotCache() r := [32]byte{'a'} @@ -38,7 +37,7 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) { } func TestSkipSlotCache_DisabledAndEnabled(t *testing.T) { - ctx := context.Background() + ctx := t.Context() c := cache.NewSkipSlotCache() r := [32]byte{'a'} diff --git a/beacon-chain/core/altair/attestation_test.go b/beacon-chain/core/altair/attestation_test.go index 8eb6907176..7aa93e0142 100644 --- a/beacon-chain/core/altair/attestation_test.go +++ b/beacon-chain/core/altair/attestation_test.go @@ -1,7 +1,6 @@ package altair_test import ( - "context" "fmt" "testing" @@ -51,7 +50,7 @@ func TestProcessAttestations_InclusionDelayFailure(t *testing.T) { ) wsb, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block()) + _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block()) require.ErrorContains(t, want, err) } @@ -82,7 +81,7 @@ func TestProcessAttestations_NeitherCurrentNorPrevEpoch(t *testing.T) { ) wsb, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block()) + _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block()) require.ErrorContains(t, want, err) } @@ -111,13 +110,13 @@ func TestProcessAttestations_CurrentEpochFFGDataMismatches(t *testing.T) { want := "source check point not equal to current justified checkpoint" wsb, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block()) + _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block()) require.ErrorContains(t, want, err) 
b.Block.Body.Attestations[0].Data.Source.Epoch = time.CurrentEpoch(beaconState) b.Block.Body.Attestations[0].Data.Source.Root = []byte{} wsb, err = blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block()) + _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block()) require.ErrorContains(t, want, err) } @@ -152,14 +151,14 @@ func TestProcessAttestations_PrevEpochFFGDataMismatches(t *testing.T) { want := "source check point not equal to previous justified checkpoint" wsb, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block()) + _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block()) require.ErrorContains(t, want, err) b.Block.Body.Attestations[0].Data.Source.Epoch = time.PrevEpoch(beaconState) b.Block.Body.Attestations[0].Data.Target.Epoch = time.PrevEpoch(beaconState) b.Block.Body.Attestations[0].Data.Source.Root = []byte{} wsb, err = blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block()) + _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block()) require.ErrorContains(t, want, err) } @@ -191,7 +190,7 @@ func TestProcessAttestations_InvalidAggregationBitsLength(t *testing.T) { expected := "failed to verify aggregation bitfield: wanted participants bitfield length 3, got: 4" wsb, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block()) + _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block()) require.ErrorContains(t, expected, err) } @@ -215,7 +214,7 @@ func TestProcessAttestations_OK(t *testing.T) { cfc.Root = 
mockRoot[:] require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc)) - committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, 0) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, 0) require.NoError(t, err) attestingIndices, err := attestation.AttestingIndices(att, committee) require.NoError(t, err) @@ -236,7 +235,7 @@ func TestProcessAttestations_OK(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block()) + _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block()) require.NoError(t, err) }) t.Run("post-Electra", func(t *testing.T) { @@ -261,7 +260,7 @@ func TestProcessAttestations_OK(t *testing.T) { cfc.Root = mockRoot[:] require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc)) - committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, 0) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, 0) require.NoError(t, err) attestingIndices, err := attestation.AttestingIndices(att, committee) require.NoError(t, err) @@ -282,7 +281,7 @@ func TestProcessAttestations_OK(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - _, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block()) + _, err = altair.ProcessAttestationsNoVerifySignature(t.Context(), beaconState, wsb.Block()) require.NoError(t, err) }) } @@ -314,13 +313,13 @@ func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) { b, err := helpers.TotalActiveBalance(beaconState) require.NoError(t, err) - beaconState, err = altair.ProcessAttestationNoVerifySignature(context.Background(), beaconState, att, b) + beaconState, err = 
altair.ProcessAttestationNoVerifySignature(t.Context(), beaconState, att, b) require.NoError(t, err) p, err := beaconState.CurrentEpochParticipation() require.NoError(t, err) - committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, att.Data.CommitteeIndex) require.NoError(t, err) indices, err := attestation.AttestingIndices(att, committee) require.NoError(t, err) @@ -475,7 +474,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) { } wsb, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - r, err := altair.ProcessAttestationsNoVerifySignature(context.Background(), s, wsb.Block()) + r, err := altair.ProcessAttestationsNoVerifySignature(t.Context(), s, wsb.Block()) if err != nil && r != nil { t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, s, b) } @@ -557,10 +556,10 @@ func TestSetParticipationAndRewardProposer(t *testing.T) { b, err := helpers.TotalActiveBalance(beaconState) require.NoError(t, err) - st, err := altair.SetParticipationAndRewardProposer(context.Background(), beaconState, test.epoch, test.indices, test.participatedFlags, b) + st, err := altair.SetParticipationAndRewardProposer(t.Context(), beaconState, test.epoch, test.indices, test.participatedFlags, b) require.NoError(t, err) - i, err := helpers.BeaconProposerIndex(context.Background(), st) + i, err := helpers.BeaconProposerIndex(t.Context(), st) require.NoError(t, err) b, err = beaconState.BalanceAtIndex(i) require.NoError(t, err) @@ -663,8 +662,8 @@ func TestRewardProposer(t *testing.T) { {rewardNumerator: 1000000000000, want: 34234377253}, } for _, test := range tests { - require.NoError(t, altair.RewardProposer(context.Background(), beaconState, test.rewardNumerator)) - i, err := helpers.BeaconProposerIndex(context.Background(), beaconState) + 
require.NoError(t, altair.RewardProposer(t.Context(), beaconState, test.rewardNumerator)) + i, err := helpers.BeaconProposerIndex(t.Context(), beaconState) require.NoError(t, err) b, err := beaconState.BalanceAtIndex(i) require.NoError(t, err) diff --git a/beacon-chain/core/altair/block_test.go b/beacon-chain/core/altair/block_test.go index 3e95acc482..9877c34f00 100644 --- a/beacon-chain/core/altair/block_test.go +++ b/beacon-chain/core/altair/block_test.go @@ -1,7 +1,6 @@ package altair_test import ( - "context" "math" "testing" @@ -26,7 +25,7 @@ import ( func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) { beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee) require.NoError(t, beaconState.SetSlot(1)) - committee, err := altair.NextSyncCommittee(context.Background(), beaconState) + committee, err := altair.NextSyncCommittee(t.Context(), beaconState) require.NoError(t, err) require.NoError(t, beaconState.SetCurrentSyncCommittee(committee)) @@ -34,7 +33,7 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) { for i := range syncBits { syncBits[i] = 0xff } - indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState) + indices, err := altair.NextSyncCommitteeIndices(t.Context(), beaconState) require.NoError(t, err) ps := slots.PrevSlot(beaconState.Slot()) pbr, err := helpers.BlockRootAtSlot(beaconState, ps) @@ -55,7 +54,7 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) { } var reward uint64 - beaconState, reward, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate) + beaconState, reward, err = altair.ProcessSyncAggregate(t.Context(), beaconState, syncAggregate) require.NoError(t, err) assert.Equal(t, uint64(72192), reward) @@ -77,7 +76,7 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) { require.Equal(t, true, balances[indices[0]] > balances[nonSyncIndex]) // Proposer should be more 
profitable than rest of the sync committee - proposerIndex, err := helpers.BeaconProposerIndex(context.Background(), beaconState) + proposerIndex, err := helpers.BeaconProposerIndex(t.Context(), beaconState) require.NoError(t, err) require.Equal(t, true, balances[proposerIndex] > balances[indices[0]]) @@ -102,7 +101,7 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) { func TestProcessSyncCommittee_MixParticipation_BadSignature(t *testing.T) { beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee) require.NoError(t, beaconState.SetSlot(1)) - committee, err := altair.NextSyncCommittee(context.Background(), beaconState) + committee, err := altair.NextSyncCommittee(t.Context(), beaconState) require.NoError(t, err) require.NoError(t, beaconState.SetCurrentSyncCommittee(committee)) @@ -110,7 +109,7 @@ func TestProcessSyncCommittee_MixParticipation_BadSignature(t *testing.T) { for i := range syncBits { syncBits[i] = 0xAA } - indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState) + indices, err := altair.NextSyncCommitteeIndices(t.Context(), beaconState) require.NoError(t, err) ps := slots.PrevSlot(beaconState.Slot()) pbr, err := helpers.BlockRootAtSlot(beaconState, ps) @@ -130,14 +129,14 @@ func TestProcessSyncCommittee_MixParticipation_BadSignature(t *testing.T) { SyncCommitteeSignature: aggregatedSig, } - _, _, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate) + _, _, err = altair.ProcessSyncAggregate(t.Context(), beaconState, syncAggregate) require.ErrorContains(t, "invalid sync committee signature", err) } func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) { beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee) require.NoError(t, beaconState.SetSlot(1)) - committee, err := altair.NextSyncCommittee(context.Background(), beaconState) + committee, err 
:= altair.NextSyncCommittee(t.Context(), beaconState) require.NoError(t, err) require.NoError(t, beaconState.SetCurrentSyncCommittee(committee)) @@ -145,7 +144,7 @@ func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) { for i := range syncBits { syncBits[i] = 0xAA } - indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState) + indices, err := altair.NextSyncCommitteeIndices(t.Context(), beaconState) require.NoError(t, err) ps := slots.PrevSlot(beaconState.Slot()) pbr, err := helpers.BlockRootAtSlot(beaconState, ps) @@ -167,7 +166,7 @@ func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) { SyncCommitteeSignature: aggregatedSig, } - _, _, err = altair.ProcessSyncAggregate(context.Background(), beaconState, syncAggregate) + _, _, err = altair.ProcessSyncAggregate(t.Context(), beaconState, syncAggregate) require.NoError(t, err) } @@ -175,7 +174,7 @@ func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) { func TestProcessSyncCommittee_DontPrecompute(t *testing.T) { beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee) require.NoError(t, beaconState.SetSlot(1)) - committee, err := altair.NextSyncCommittee(context.Background(), beaconState) + committee, err := altair.NextSyncCommittee(t.Context(), beaconState) require.NoError(t, err) committeeKeys := committee.Pubkeys committeeKeys[1] = committeeKeys[0] @@ -192,7 +191,7 @@ func TestProcessSyncCommittee_DontPrecompute(t *testing.T) { SyncCommitteeBits: syncBits, } require.NoError(t, beaconState.UpdateBalancesAtIndex(idx, 0)) - st, votedKeys, _, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate) + st, votedKeys, _, err := altair.ProcessSyncAggregateEported(t.Context(), beaconState, syncAggregate) require.NoError(t, err) require.Equal(t, 511, len(votedKeys)) require.DeepEqual(t, committeeKeys[0], votedKeys[0].Marshal()) @@ -203,7 +202,7 @@ func 
TestProcessSyncCommittee_DontPrecompute(t *testing.T) { func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) { beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee) require.NoError(t, beaconState.SetSlot(1)) - committee, err := altair.NextSyncCommittee(context.Background(), beaconState) + committee, err := altair.NextSyncCommittee(t.Context(), beaconState) require.NoError(t, err) require.NoError(t, beaconState.SetCurrentSyncCommittee(committee)) @@ -215,7 +214,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) { SyncCommitteeBits: syncBits, } - st, votedKeys, _, err := altair.ProcessSyncAggregateEported(context.Background(), beaconState, syncAggregate) + st, votedKeys, _, err := altair.ProcessSyncAggregateEported(t.Context(), beaconState, syncAggregate) require.NoError(t, err) votedMap := make(map[[fieldparams.BLSPubkeyLength]byte]bool) for _, key := range votedKeys { @@ -228,7 +227,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) { committeeKeys := currentSyncCommittee.Pubkeys balances := st.Balances() - proposerIndex, err := helpers.BeaconProposerIndex(context.Background(), beaconState) + proposerIndex, err := helpers.BeaconProposerIndex(t.Context(), beaconState) require.NoError(t, err) for i := 0; i < len(syncBits); i++ { @@ -254,7 +253,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) { func Test_VerifySyncCommitteeSig(t *testing.T) { beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee) require.NoError(t, beaconState.SetSlot(1)) - committee, err := altair.NextSyncCommittee(context.Background(), beaconState) + committee, err := altair.NextSyncCommittee(t.Context(), beaconState) require.NoError(t, err) require.NoError(t, beaconState.SetCurrentSyncCommittee(committee)) @@ -262,7 +261,7 @@ func Test_VerifySyncCommitteeSig(t *testing.T) { for i := range syncBits { syncBits[i] = 0xff 
} - indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState) + indices, err := altair.NextSyncCommitteeIndices(t.Context(), beaconState) require.NoError(t, err) ps := slots.PrevSlot(beaconState.Slot()) pbr, err := helpers.BlockRootAtSlot(beaconState, ps) diff --git a/beacon-chain/core/altair/deposit_fuzz_test.go b/beacon-chain/core/altair/deposit_fuzz_test.go index 2261b35037..a70074bb63 100644 --- a/beacon-chain/core/altair/deposit_fuzz_test.go +++ b/beacon-chain/core/altair/deposit_fuzz_test.go @@ -1,7 +1,6 @@ package altair_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair" @@ -16,7 +15,7 @@ func TestFuzzProcessDeposits_10000(t *testing.T) { fuzzer := gofuzz.NewWithSeed(0) state := ðpb.BeaconStateAltair{} deposits := make([]*ethpb.Deposit, 100) - ctx := context.Background() + ctx := t.Context() for i := 0; i < 10000; i++ { fuzzer.Fuzz(state) for i := range deposits { @@ -36,7 +35,7 @@ func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) { fuzzer := gofuzz.NewWithSeed(0) state := ðpb.BeaconStateAltair{} deposit := ðpb.Deposit{} - ctx := context.Background() + ctx := t.Context() for i := 0; i < 10000; i++ { fuzzer.Fuzz(state) @@ -55,7 +54,7 @@ func TestFuzzProcessPreGenesisDeposit_Phase0_10000(t *testing.T) { fuzzer := gofuzz.NewWithSeed(0) state := ðpb.BeaconState{} deposit := ðpb.Deposit{} - ctx := context.Background() + ctx := t.Context() for i := 0; i < 10000; i++ { fuzzer.Fuzz(state) diff --git a/beacon-chain/core/altair/deposit_test.go b/beacon-chain/core/altair/deposit_test.go index ae0063a702..c35132d103 100644 --- a/beacon-chain/core/altair/deposit_test.go +++ b/beacon-chain/core/altair/deposit_test.go @@ -1,7 +1,6 @@ package altair_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair" @@ -42,7 +41,7 @@ func TestProcessDeposits_SameValidatorMultipleDepositsSameBlock(t *testing.T) { }, }) require.NoError(t, err) - newState, err := 
altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{dep[0], dep[1], dep[2]}) + newState, err := altair.ProcessDeposits(t.Context(), beaconState, []*ethpb.Deposit{dep[0], dep[1], dep[2]}) require.NoError(t, err, "Expected block deposits to process correctly") require.Equal(t, 2, len(newState.Validators()), "Incorrect validator count") } @@ -70,7 +69,7 @@ func TestProcessDeposits_AddsNewValidatorDeposit(t *testing.T) { }, }) require.NoError(t, err) - newState, err := altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{dep[0]}) + newState, err := altair.ProcessDeposits(t.Context(), beaconState, []*ethpb.Deposit{dep[0]}) require.NoError(t, err, "Expected block deposits to process correctly") if newState.Balances()[1] != dep[0].Data.Amount { t.Errorf( @@ -127,7 +126,7 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T) }, }) require.NoError(t, err) - newState, err := altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{deposit}) + newState, err := altair.ProcessDeposits(t.Context(), beaconState, []*ethpb.Deposit{deposit}) require.NoError(t, err, "Process deposit failed") require.Equal(t, uint64(1000+50), newState.Balances()[1], "Expected balance at index 1 to be 1050") } @@ -256,7 +255,7 @@ func TestPreGenesisDeposits_SkipInvalidDeposit(t *testing.T) { }, }) require.NoError(t, err) - newState, err := altair.ProcessPreGenesisDeposits(context.Background(), beaconState, dep) + newState, err := altair.ProcessPreGenesisDeposits(t.Context(), beaconState, dep) require.NoError(t, err, "Expected invalid block deposit to be ignored without error") _, ok := newState.ValidatorIndexByPubkey(bytesutil.ToBytes48(dep[0].Data.PublicKey)) @@ -370,6 +369,6 @@ func TestProcessDeposits_MerkleBranchFailsVerification(t *testing.T) { }) require.NoError(t, err) want := "deposit root did not verify" - _, err = altair.ProcessDeposits(context.Background(), beaconState, b.Block.Body.Deposits) + _, 
err = altair.ProcessDeposits(t.Context(), beaconState, b.Block.Body.Deposits) assert.ErrorContains(t, want, err) } diff --git a/beacon-chain/core/altair/epoch_precompute_test.go b/beacon-chain/core/altair/epoch_precompute_test.go index b9a670ba8a..87b491f8d9 100644 --- a/beacon-chain/core/altair/epoch_precompute_test.go +++ b/beacon-chain/core/altair/epoch_precompute_test.go @@ -1,7 +1,6 @@ package altair import ( - "context" "math" "testing" @@ -32,7 +31,7 @@ func TestInitializeEpochValidators_Ok(t *testing.T) { InactivityScores: []uint64{0, 1, 2, 3}, }) require.NoError(t, err) - v, b, err := InitializePrecomputeValidators(context.Background(), s) + v, b, err := InitializePrecomputeValidators(t.Context(), s) require.NoError(t, err) assert.DeepEqual(t, &precompute.Validator{ IsSlashed: true, @@ -74,7 +73,7 @@ func TestInitializeEpochValidators_Overflow(t *testing.T) { InactivityScores: []uint64{0, 1}, }) require.NoError(t, err) - _, _, err = InitializePrecomputeValidators(context.Background(), s) + _, _, err = InitializePrecomputeValidators(t.Context(), s) require.ErrorContains(t, "could not read every validator: addition overflows", err) } @@ -84,16 +83,16 @@ func TestInitializeEpochValidators_BadState(t *testing.T) { InactivityScores: []uint64{}, }) require.NoError(t, err) - _, _, err = InitializePrecomputeValidators(context.Background(), s) + _, _, err = InitializePrecomputeValidators(t.Context(), s) require.ErrorContains(t, "num of validators is different than num of inactivity scores", err) } func TestProcessEpochParticipation(t *testing.T) { s, err := testState() require.NoError(t, err) - validators, balance, err := InitializePrecomputeValidators(context.Background(), s) + validators, balance, err := InitializePrecomputeValidators(t.Context(), s) require.NoError(t, err) - validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators) + validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators) 
require.NoError(t, err) require.DeepEqual(t, &precompute.Validator{ IsActiveCurrentEpoch: true, @@ -169,9 +168,9 @@ func TestProcessEpochParticipation_InactiveValidator(t *testing.T) { InactivityScores: []uint64{0, 0, 0}, }) require.NoError(t, err) - validators, balance, err := InitializePrecomputeValidators(context.Background(), st) + validators, balance, err := InitializePrecomputeValidators(t.Context(), st) require.NoError(t, err) - validators, balance, err = ProcessEpochParticipation(context.Background(), st, balance, validators) + validators, balance, err = ProcessEpochParticipation(t.Context(), st, balance, validators) require.NoError(t, err) require.DeepEqual(t, &precompute.Validator{ IsActiveCurrentEpoch: false, @@ -209,9 +208,9 @@ func TestProcessEpochParticipation_InactiveValidator(t *testing.T) { func TestAttestationsDelta(t *testing.T) { s, err := testState() require.NoError(t, err) - validators, balance, err := InitializePrecomputeValidators(context.Background(), s) + validators, balance, err := InitializePrecomputeValidators(t.Context(), s) require.NoError(t, err) - validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators) + validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators) require.NoError(t, err) deltas, err := AttestationsDelta(s, balance, validators) require.NoError(t, err) @@ -247,9 +246,9 @@ func TestAttestationsDelta(t *testing.T) { func TestAttestationsDeltaBellatrix(t *testing.T) { s, err := testStateBellatrix() require.NoError(t, err) - validators, balance, err := InitializePrecomputeValidators(context.Background(), s) + validators, balance, err := InitializePrecomputeValidators(t.Context(), s) require.NoError(t, err) - validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators) + validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators) require.NoError(t, err) deltas, err := 
AttestationsDelta(s, balance, validators) require.NoError(t, err) @@ -285,9 +284,9 @@ func TestAttestationsDeltaBellatrix(t *testing.T) { func TestProcessRewardsAndPenaltiesPrecompute_Ok(t *testing.T) { s, err := testState() require.NoError(t, err) - validators, balance, err := InitializePrecomputeValidators(context.Background(), s) + validators, balance, err := InitializePrecomputeValidators(t.Context(), s) require.NoError(t, err) - validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators) + validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators) require.NoError(t, err) s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators) require.NoError(t, err) @@ -324,9 +323,9 @@ func TestProcessRewardsAndPenaltiesPrecompute_Ok(t *testing.T) { func TestProcessRewardsAndPenaltiesPrecompute_InactivityLeak(t *testing.T) { s, err := testState() require.NoError(t, err) - validators, balance, err := InitializePrecomputeValidators(context.Background(), s) + validators, balance, err := InitializePrecomputeValidators(t.Context(), s) require.NoError(t, err) - validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators) + validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators) require.NoError(t, err) sCopy := s.Copy() s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators) @@ -352,11 +351,11 @@ func TestProcessInactivityScores_CanProcessInactivityLeak(t *testing.T) { defaultScore := uint64(5) require.NoError(t, s.SetInactivityScores([]uint64{defaultScore, defaultScore, defaultScore, defaultScore})) require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(params.BeaconConfig().MinEpochsToInactivityPenalty+2))) - validators, balance, err := InitializePrecomputeValidators(context.Background(), s) + validators, balance, err := InitializePrecomputeValidators(t.Context(), s) require.NoError(t, 
err) - validators, _, err = ProcessEpochParticipation(context.Background(), s, balance, validators) + validators, _, err = ProcessEpochParticipation(t.Context(), s, balance, validators) require.NoError(t, err) - s, _, err = ProcessInactivityScores(context.Background(), s, validators) + s, _, err = ProcessInactivityScores(t.Context(), s, validators) require.NoError(t, err) inactivityScores, err := s.InactivityScores() require.NoError(t, err) @@ -373,11 +372,11 @@ func TestProcessInactivityScores_GenesisEpoch(t *testing.T) { defaultScore := uint64(10) require.NoError(t, s.SetInactivityScores([]uint64{defaultScore, defaultScore, defaultScore, defaultScore})) require.NoError(t, s.SetSlot(params.BeaconConfig().GenesisSlot)) - validators, balance, err := InitializePrecomputeValidators(context.Background(), s) + validators, balance, err := InitializePrecomputeValidators(t.Context(), s) require.NoError(t, err) - validators, _, err = ProcessEpochParticipation(context.Background(), s, balance, validators) + validators, _, err = ProcessEpochParticipation(t.Context(), s, balance, validators) require.NoError(t, err) - s, _, err = ProcessInactivityScores(context.Background(), s, validators) + s, _, err = ProcessInactivityScores(t.Context(), s, validators) require.NoError(t, err) inactivityScores, err := s.InactivityScores() require.NoError(t, err) @@ -392,11 +391,11 @@ func TestProcessInactivityScores_CanProcessNonInactivityLeak(t *testing.T) { require.NoError(t, err) defaultScore := uint64(5) require.NoError(t, s.SetInactivityScores([]uint64{defaultScore, defaultScore, defaultScore, defaultScore})) - validators, balance, err := InitializePrecomputeValidators(context.Background(), s) + validators, balance, err := InitializePrecomputeValidators(t.Context(), s) require.NoError(t, err) - validators, _, err = ProcessEpochParticipation(context.Background(), s, balance, validators) + validators, _, err = ProcessEpochParticipation(t.Context(), s, balance, validators) require.NoError(t, 
err) - s, _, err = ProcessInactivityScores(context.Background(), s, validators) + s, _, err = ProcessInactivityScores(t.Context(), s, validators) require.NoError(t, err) inactivityScores, err := s.InactivityScores() require.NoError(t, err) @@ -410,9 +409,9 @@ func TestProcessInactivityScores_CanProcessNonInactivityLeak(t *testing.T) { func TestProcessRewardsAndPenaltiesPrecompute_GenesisEpoch(t *testing.T) { s, err := testState() require.NoError(t, err) - validators, balance, err := InitializePrecomputeValidators(context.Background(), s) + validators, balance, err := InitializePrecomputeValidators(t.Context(), s) require.NoError(t, err) - validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators) + validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators) require.NoError(t, err) require.NoError(t, s.SetSlot(0)) s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators) @@ -429,9 +428,9 @@ func TestProcessRewardsAndPenaltiesPrecompute_GenesisEpoch(t *testing.T) { func TestProcessRewardsAndPenaltiesPrecompute_BadState(t *testing.T) { s, err := testState() require.NoError(t, err) - validators, balance, err := InitializePrecomputeValidators(context.Background(), s) + validators, balance, err := InitializePrecomputeValidators(t.Context(), s) require.NoError(t, err) - _, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators) + _, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators) require.NoError(t, err) _, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, []*precompute.Validator{}) require.ErrorContains(t, "validator registries not the same length as state's validator registries", err) @@ -442,7 +441,7 @@ func TestProcessInactivityScores_NonEligibleValidator(t *testing.T) { require.NoError(t, err) defaultScore := uint64(5) require.NoError(t, s.SetInactivityScores([]uint64{defaultScore, defaultScore, defaultScore, 
defaultScore})) - validators, balance, err := InitializePrecomputeValidators(context.Background(), s) + validators, balance, err := InitializePrecomputeValidators(t.Context(), s) require.NoError(t, err) // v0 is eligible (not active previous epoch, slashed and not withdrawable) @@ -463,9 +462,9 @@ func TestProcessInactivityScores_NonEligibleValidator(t *testing.T) { // v3 is eligible (active previous epoch) validators[3].IsActivePrevEpoch = true - validators, _, err = ProcessEpochParticipation(context.Background(), s, balance, validators) + validators, _, err = ProcessEpochParticipation(t.Context(), s, balance, validators) require.NoError(t, err) - s, _, err = ProcessInactivityScores(context.Background(), s, validators) + s, _, err = ProcessInactivityScores(t.Context(), s, validators) require.NoError(t, err) inactivityScores, err := s.InactivityScores() require.NoError(t, err) diff --git a/beacon-chain/core/altair/epoch_spec_test.go b/beacon-chain/core/altair/epoch_spec_test.go index 9115367b2b..5de4fc3817 100644 --- a/beacon-chain/core/altair/epoch_spec_test.go +++ b/beacon-chain/core/altair/epoch_spec_test.go @@ -1,7 +1,6 @@ package altair_test import ( - "context" "fmt" "math" "testing" @@ -29,7 +28,7 @@ func TestProcessSyncCommitteeUpdates_CanRotate(t *testing.T) { BodyRoot: bytesutil.PadTo([]byte{'c'}, 32), } require.NoError(t, s.SetLatestBlockHeader(h)) - postState, err := altair.ProcessSyncCommitteeUpdates(context.Background(), s) + postState, err := altair.ProcessSyncCommitteeUpdates(t.Context(), s) require.NoError(t, err) current, err := postState.CurrentSyncCommittee() require.NoError(t, err) @@ -38,7 +37,7 @@ func TestProcessSyncCommitteeUpdates_CanRotate(t *testing.T) { require.DeepEqual(t, current, next) require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch)) - postState, err = altair.ProcessSyncCommitteeUpdates(context.Background(), s) + postState, err = altair.ProcessSyncCommitteeUpdates(t.Context(), s) require.NoError(t, err) c, err := 
postState.CurrentSyncCommittee() require.NoError(t, err) @@ -48,7 +47,7 @@ func TestProcessSyncCommitteeUpdates_CanRotate(t *testing.T) { require.DeepEqual(t, next, n) require.NoError(t, s.SetSlot(primitives.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch-1)) - postState, err = altair.ProcessSyncCommitteeUpdates(context.Background(), s) + postState, err = altair.ProcessSyncCommitteeUpdates(t.Context(), s) require.NoError(t, err) c, err = postState.CurrentSyncCommittee() require.NoError(t, err) @@ -61,7 +60,7 @@ func TestProcessSyncCommitteeUpdates_CanRotate(t *testing.T) { // Test boundary condition. slot := params.BeaconConfig().SlotsPerEpoch * primitives.Slot(time.CurrentEpoch(s)+params.BeaconConfig().EpochsPerSyncCommitteePeriod) require.NoError(t, s.SetSlot(slot)) - boundaryCommittee, err := altair.NextSyncCommittee(context.Background(), s) + boundaryCommittee, err := altair.NextSyncCommittee(t.Context(), s) require.NoError(t, err) require.DeepNotEqual(t, boundaryCommittee, n) } diff --git a/beacon-chain/core/altair/sync_committee_test.go b/beacon-chain/core/altair/sync_committee_test.go index d3b9fe3a5a..991653bf6f 100644 --- a/beacon-chain/core/altair/sync_committee_test.go +++ b/beacon-chain/core/altair/sync_committee_test.go @@ -1,7 +1,6 @@ package altair_test import ( - "context" "testing" "time" @@ -97,7 +96,7 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) { t.Run(version.String(v), func(t *testing.T) { helpers.ClearCache() st := getState(t, tt.args.validatorCount, v) - got, err := altair.NextSyncCommitteeIndices(context.Background(), st) + got, err := altair.NextSyncCommitteeIndices(t.Context(), st) if tt.wantErr { require.ErrorContains(t, tt.errString, err) } else { @@ -129,18 +128,18 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) { } st := getState(t, params.BeaconConfig().MaxValidatorsPerCommittee) - got1, err := altair.NextSyncCommitteeIndices(context.Background(), st) + got1, err 
:= altair.NextSyncCommitteeIndices(t.Context(), st) require.NoError(t, err) require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch)) - got2, err := altair.NextSyncCommitteeIndices(context.Background(), st) + got2, err := altair.NextSyncCommitteeIndices(t.Context(), st) require.NoError(t, err) require.DeepNotEqual(t, got1, got2) require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod))) - got2, err = altair.NextSyncCommitteeIndices(context.Background(), st) + got2, err = altair.NextSyncCommitteeIndices(t.Context(), st) require.NoError(t, err) require.DeepNotEqual(t, got1, got2) require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod))) - got2, err = altair.NextSyncCommitteeIndices(context.Background(), st) + got2, err = altair.NextSyncCommitteeIndices(t.Context(), st) require.NoError(t, err) require.DeepNotEqual(t, got1, got2) } @@ -206,7 +205,7 @@ func TestSyncCommittee_CanGet(t *testing.T) { if !tt.wantErr { require.NoError(t, tt.args.state.SetSlot(primitives.Slot(tt.args.epoch)*params.BeaconConfig().SlotsPerEpoch)) } - got, err := altair.NextSyncCommittee(context.Background(), tt.args.state) + got, err := altair.NextSyncCommittee(t.Context(), tt.args.state) if tt.wantErr { require.ErrorContains(t, tt.errString, err) } else { @@ -270,7 +269,7 @@ func TestValidateNilSyncContribution(t *testing.T) { func TestSyncSubCommitteePubkeys_CanGet(t *testing.T) { helpers.ClearCache() st := getState(t, params.BeaconConfig().MaxValidatorsPerCommittee) - com, err := altair.NextSyncCommittee(context.Background(), st) + com, err := altair.NextSyncCommittee(t.Context(), st) require.NoError(t, err) sub, err := altair.SyncSubCommitteePubkeys(com, 0) require.NoError(t, err) diff --git a/beacon-chain/core/altair/transition_test.go b/beacon-chain/core/altair/transition_test.go index dbb4ca8d6b..d41e842a8a 100644 --- 
a/beacon-chain/core/altair/transition_test.go +++ b/beacon-chain/core/altair/transition_test.go @@ -1,7 +1,6 @@ package altair_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair" @@ -13,7 +12,7 @@ import ( func TestProcessEpoch_CanProcess(t *testing.T) { st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee) require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch)) - err := altair.ProcessEpoch(context.Background(), st) + err := altair.ProcessEpoch(t.Context(), st) require.NoError(t, err) require.Equal(t, uint64(0), st.Slashings()[2], "Unexpected slashed balance") @@ -45,7 +44,7 @@ func TestProcessEpoch_CanProcess(t *testing.T) { func TestProcessEpoch_CanProcessBellatrix(t *testing.T) { st, _ := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().MaxValidatorsPerCommittee) require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch)) - err := altair.ProcessEpoch(context.Background(), st) + err := altair.ProcessEpoch(t.Context(), st) require.NoError(t, err) require.Equal(t, uint64(0), st.Slashings()[2], "Unexpected slashed balance") diff --git a/beacon-chain/core/altair/upgrade_test.go b/beacon-chain/core/altair/upgrade_test.go index ddc7316bb9..15117ac696 100644 --- a/beacon-chain/core/altair/upgrade_test.go +++ b/beacon-chain/core/altair/upgrade_test.go @@ -1,7 +1,6 @@ package altair_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair" @@ -17,7 +16,7 @@ import ( ) func TestTranslateParticipation(t *testing.T) { - ctx := context.Background() + ctx := t.Context() s, _ := util.DeterministicGenesisStateAltair(t, 64) require.NoError(t, s.SetSlot(s.Slot()+params.BeaconConfig().MinAttestationInclusionDelay)) @@ -73,7 +72,7 @@ func TestTranslateParticipation(t *testing.T) { func TestUpgradeToAltair(t *testing.T) { st, _ := util.DeterministicGenesisState(t, params.BeaconConfig().MaxValidatorsPerCommittee) 
preForkState := st.Copy() - aState, err := altair.UpgradeToAltair(context.Background(), st) + aState, err := altair.UpgradeToAltair(t.Context(), st) require.NoError(t, err) require.Equal(t, preForkState.GenesisTime(), aState.GenesisTime()) diff --git a/beacon-chain/core/blocks/attestation_regression_test.go b/beacon-chain/core/blocks/attestation_regression_test.go index f88b293602..cc3c367beb 100644 --- a/beacon-chain/core/blocks/attestation_regression_test.go +++ b/beacon-chain/core/blocks/attestation_regression_test.go @@ -41,7 +41,7 @@ func TestProcessAttestationNoVerifySignature_BeaconFuzzIssue78(t *testing.T) { t.Fatal(err) } - ctx := context.Background() + ctx := t.Context() _, err = blocks.ProcessAttestationNoVerifySignature(ctx, st, att) require.ErrorContains(t, "committee index 1 >= committee count 1", err) } diff --git a/beacon-chain/core/blocks/attestation_test.go b/beacon-chain/core/blocks/attestation_test.go index fd1d716807..916c31c256 100644 --- a/beacon-chain/core/blocks/attestation_test.go +++ b/beacon-chain/core/blocks/attestation_test.go @@ -42,7 +42,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) { require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc)) require.NoError(t, beaconState.AppendCurrentEpochAttestations(ðpb.PendingAttestation{})) - committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att1.Data.Slot, att1.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att1.Data.Slot, att1.Data.CommitteeIndex) require.NoError(t, err) attestingIndices1, err := attestation.AttestingIndices(att1, committee) require.NoError(t, err) @@ -64,7 +64,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) { AggregationBits: aggBits2, } - committee, err = helpers.BeaconCommitteeFromState(context.Background(), beaconState, att2.Data.Slot, att2.Data.CommitteeIndex) + committee, err = helpers.BeaconCommitteeFromState(t.Context(), 
beaconState, att2.Data.Slot, att2.Data.CommitteeIndex) require.NoError(t, err) attestingIndices2, err := attestation.AttestingIndices(att2, committee) require.NoError(t, err) @@ -137,7 +137,7 @@ func TestProcessAttestationsNoVerify_OlderThanSlotsPerEpoch(t *testing.T) { }, AggregationBits: aggBits, } - ctx := context.Background() + ctx := t.Context() t.Run("attestation older than slots per epoch", func(t *testing.T) { beaconState, _ := util.DeterministicGenesisState(t, 100) @@ -360,9 +360,9 @@ func TestConvertToIndexed_OK(t *testing.T) { Signature: att.Signature, } - committee, err := helpers.BeaconCommitteeFromState(context.Background(), state, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), state, att.Data.Slot, att.Data.CommitteeIndex) require.NoError(t, err) - ia, err := attestation.ConvertToIndexed(context.Background(), att, committee) + ia, err := attestation.ConvertToIndexed(t.Context(), att, committee) require.NoError(t, err) assert.DeepEqual(t, wanted, ia, "Convert attestation to indexed attestation didn't result as wanted") } @@ -448,7 +448,7 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) { tt.attestation.Signature = marshalledSig - err = blocks.VerifyIndexedAttestation(context.Background(), state, tt.attestation) + err = blocks.VerifyIndexedAttestation(t.Context(), state, tt.attestation) assert.NoError(t, err, "Failed to verify indexed attestation") } } @@ -471,7 +471,7 @@ func TestValidateIndexedAttestation_AboveMaxLength(t *testing.T) { want := "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE" st, err := state_native.InitializeFromProtoUnsafePhase0(ðpb.BeaconState{}) require.NoError(t, err) - err = blocks.VerifyIndexedAttestation(context.Background(), st, indexedAtt1) + err = blocks.VerifyIndexedAttestation(t.Context(), st, indexedAtt1) assert.ErrorContains(t, want, err) } @@ -493,7 +493,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) { } 
want := "nil or missing indexed attestation data" - _, err := blocks.AttestationSignatureBatch(context.Background(), beaconState, atts) + _, err := blocks.AttestationSignatureBatch(t.Context(), beaconState, atts) assert.ErrorContains(t, want, err) atts = []ethpb.Att{} @@ -514,7 +514,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) { } want = "expected non-empty attesting indices" - _, err = blocks.AttestationSignatureBatch(context.Background(), beaconState, atts) + _, err = blocks.AttestationSignatureBatch(t.Context(), beaconState, atts) assert.ErrorContains(t, want, err) } @@ -542,7 +542,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) { PreviousVersion: params.BeaconConfig().GenesisForkVersion, })) - comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/) + comm1, err := helpers.BeaconCommitteeFromState(t.Context(), st, 1 /*slot*/, 0 /*committeeIndex*/) require.NoError(t, err) att1 := util.HydrateAttestation(ðpb.Attestation{ AggregationBits: bitfield.NewBitlist(uint64(len(comm1))), @@ -561,7 +561,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) { } att1.Signature = bls.AggregateSignatures(sigs).Marshal() - comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1*params.BeaconConfig().SlotsPerEpoch+1 /*slot*/, 1 /*committeeIndex*/) + comm2, err := helpers.BeaconCommitteeFromState(t.Context(), st, 1*params.BeaconConfig().SlotsPerEpoch+1 /*slot*/, 1 /*committeeIndex*/) require.NoError(t, err) att2 := util.HydrateAttestation(ðpb.Attestation{ AggregationBits: bitfield.NewBitlist(uint64(len(comm2))), @@ -583,7 +583,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) { } func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numOfValidators := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(4)) validators := make([]*ethpb.Validator, 
numOfValidators) _, keys, err := util.DeterministicDepositsAndKeys(numOfValidators) @@ -602,7 +602,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing require.NoError(t, st.SetSlot(5)) require.NoError(t, st.SetValidators(validators)) - comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/) + comm1, err := helpers.BeaconCommitteeFromState(t.Context(), st, 1 /*slot*/, 0 /*committeeIndex*/) require.NoError(t, err) att1 := util.HydrateAttestation(ðpb.Attestation{ AggregationBits: bitfield.NewBitlist(uint64(len(comm1))), @@ -621,7 +621,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing } att1.Signature = bls.AggregateSignatures(sigs).Marshal() - comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/) + comm2, err := helpers.BeaconCommitteeFromState(t.Context(), st, 1 /*slot*/, 1 /*committeeIndex*/) require.NoError(t, err) att2 := util.HydrateAttestation(ðpb.Attestation{ AggregationBits: bitfield.NewBitlist(uint64(len(comm2))), @@ -651,7 +651,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing require.NoError(t, st.SetSlot(5)) require.NoError(t, st.SetValidators(validators)) - comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/) + comm1, err := helpers.BeaconCommitteeFromState(t.Context(), st, 1 /*slot*/, 0 /*committeeIndex*/) require.NoError(t, err) commBits1 := primitives.NewAttestationCommitteeBits() commBits1.SetBitAt(0, true) @@ -673,7 +673,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing } att1.Signature = bls.AggregateSignatures(sigs).Marshal() - comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/) + comm2, err := helpers.BeaconCommitteeFromState(t.Context(), st, 1 /*slot*/, 1 /*committeeIndex*/) require.NoError(t, err) 
commBits2 := primitives.NewAttestationCommitteeBits() commBits2.SetBitAt(1, true) @@ -702,7 +702,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing } func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numOfValidators := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(4)) validators := make([]*ethpb.Validator, numOfValidators) _, keys, err := util.DeterministicDepositsAndKeys(numOfValidators) diff --git a/beacon-chain/core/blocks/attester_slashing_test.go b/beacon-chain/core/blocks/attester_slashing_test.go index 50e95af05b..14b6795bc3 100644 --- a/beacon-chain/core/blocks/attester_slashing_test.go +++ b/beacon-chain/core/blocks/attester_slashing_test.go @@ -1,7 +1,6 @@ package blocks_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks" @@ -63,7 +62,7 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) { for i, s := range b.Block.Body.AttesterSlashings { ss[i] = s } - _, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator) + _, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator) assert.ErrorContains(t, "attestations are not slashable", err) } @@ -102,7 +101,7 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T) for i, s := range b.Block.Body.AttesterSlashings { ss[i] = s } - _, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator) + _, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator) assert.ErrorContains(t, "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE", err) } @@ -244,7 +243,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) { currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch require.NoError(t, tc.st.SetSlot(currentSlot)) - newState, err := 
blocks.ProcessAttesterSlashings(context.Background(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.SlashValidator) + newState, err := blocks.ProcessAttesterSlashings(t.Context(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.SlashValidator) require.NoError(t, err) newRegistry := newState.Validators() diff --git a/beacon-chain/core/blocks/block_operations_fuzz_test.go b/beacon-chain/core/blocks/block_operations_fuzz_test.go index 43ad410c01..d06278d486 100644 --- a/beacon-chain/core/blocks/block_operations_fuzz_test.go +++ b/beacon-chain/core/blocks/block_operations_fuzz_test.go @@ -1,7 +1,6 @@ package blocks import ( - "context" "testing" v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators" @@ -18,7 +17,7 @@ import ( func TestFuzzProcessAttestationNoVerify_10000(t *testing.T) { fuzzer := gofuzz.NewWithSeed(0) - ctx := context.Background() + ctx := t.Context() state := ðpb.BeaconState{} att := ðpb.Attestation{} @@ -49,7 +48,7 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) { } wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - _, err = ProcessBlockHeader(context.Background(), s, wsb) + _, err = ProcessBlockHeader(t.Context(), s, wsb) _ = err fuzz.FreeMemory(i) } @@ -87,7 +86,7 @@ func TestFuzzProcessEth1DataInBlock_10000(t *testing.T) { for i := 0; i < 10000; i++ { fuzzer.Fuzz(state) fuzzer.Fuzz(e) - s, err := ProcessEth1DataInBlock(context.Background(), state, e) + s, err := ProcessEth1DataInBlock(t.Context(), state, e) if err != nil && s != nil { t.Fatalf("state should be nil on err. 
found: %v on error: %v for state: %v and eth1data: %v", s, err, state, e) } @@ -134,7 +133,7 @@ func TestFuzzProcessBlockHeaderNoVerify_10000(t *testing.T) { fuzzer.Fuzz(block) s, err := state_native.InitializeFromProtoUnsafePhase0(state) require.NoError(t, err) - _, err = ProcessBlockHeaderNoVerify(context.Background(), s, block.Slot, block.ProposerIndex, block.ParentRoot, []byte{}) + _, err = ProcessBlockHeaderNoVerify(t.Context(), s, block.Slot, block.ProposerIndex, block.ParentRoot, []byte{}) _ = err fuzz.FreeMemory(i) } @@ -155,7 +154,7 @@ func TestFuzzProcessRandao_10000(t *testing.T) { } wsb, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - r, err := ProcessRandao(context.Background(), s, wsb) + r, err := ProcessRandao(t.Context(), s, wsb) if err != nil && r != nil { t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b) } @@ -185,7 +184,7 @@ func TestFuzzProcessProposerSlashings_10000(t *testing.T) { fuzzer := gofuzz.NewWithSeed(0) state := ðpb.BeaconState{} p := ðpb.ProposerSlashing{} - ctx := context.Background() + ctx := t.Context() for i := 0; i < 10000; i++ { fuzzer.Fuzz(state) fuzzer.Fuzz(p) @@ -218,7 +217,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) { fuzzer := gofuzz.NewWithSeed(0) state := ðpb.BeaconState{} a := ðpb.AttesterSlashing{} - ctx := context.Background() + ctx := t.Context() for i := 0; i < 10000; i++ { fuzzer.Fuzz(state) fuzzer.Fuzz(a) @@ -236,7 +235,7 @@ func TestFuzzVerifyAttesterSlashing_10000(t *testing.T) { fuzzer := gofuzz.NewWithSeed(0) state := ðpb.BeaconState{} attesterSlashing := ðpb.AttesterSlashing{} - ctx := context.Background() + ctx := t.Context() for i := 0; i < 10000; i++ { fuzzer.Fuzz(state) fuzzer.Fuzz(attesterSlashing) @@ -274,7 +273,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) { fuzzer := gofuzz.NewWithSeed(0) state := ðpb.BeaconState{} b := ðpb.SignedBeaconBlock{} - ctx := context.Background() + ctx := 
t.Context() for i := 0; i < 10000; i++ { fuzzer.Fuzz(state) fuzzer.Fuzz(b) @@ -297,7 +296,7 @@ func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) { fuzzer := gofuzz.NewWithSeed(0) state := ðpb.BeaconState{} idxAttestation := ðpb.IndexedAttestation{} - ctx := context.Background() + ctx := t.Context() for i := 0; i < 10000; i++ { fuzzer.Fuzz(state) fuzzer.Fuzz(idxAttestation) @@ -327,7 +326,7 @@ func TestFuzzProcessVoluntaryExits_10000(t *testing.T) { fuzzer := gofuzz.NewWithSeed(0) state := ðpb.BeaconState{} e := ðpb.SignedVoluntaryExit{} - ctx := context.Background() + ctx := t.Context() for i := 0; i < 10000; i++ { fuzzer.Fuzz(state) fuzzer.Fuzz(e) @@ -350,7 +349,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) { fuzzer.Fuzz(e) s, err := state_native.InitializeFromProtoUnsafePhase0(state) require.NoError(t, err) - r, err := ProcessVoluntaryExits(context.Background(), s, []*ethpb.SignedVoluntaryExit{e}) + r, err := ProcessVoluntaryExits(t.Context(), s, []*ethpb.SignedVoluntaryExit{e}) if err != nil && r != nil { t.Fatalf("return value should be nil on err. 
found: %v on error: %v for state: %v and block: %v", r, err, state, e) } diff --git a/beacon-chain/core/blocks/block_regression_test.go b/beacon-chain/core/blocks/block_regression_test.go index 9b159dcdd0..d6519f0062 100644 --- a/beacon-chain/core/blocks/block_regression_test.go +++ b/beacon-chain/core/blocks/block_regression_test.go @@ -1,7 +1,6 @@ package blocks_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks" @@ -95,7 +94,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) { for i, s := range b.Block.Body.AttesterSlashings { ss[i] = s } - newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator) + newState, err := blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator) require.NoError(t, err) newRegistry := newState.Validators() if !newRegistry[expectedSlashedVal].Slashed { diff --git a/beacon-chain/core/blocks/deposit_test.go b/beacon-chain/core/blocks/deposit_test.go index 0abebf7f5a..872f5b7f47 100644 --- a/beacon-chain/core/blocks/deposit_test.go +++ b/beacon-chain/core/blocks/deposit_test.go @@ -1,7 +1,6 @@ package blocks_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks" @@ -46,7 +45,7 @@ func TestBatchVerifyDepositsSignatures_Ok(t *testing.T) { deposit.Proof = proof require.NoError(t, err) - verified, err := blocks.BatchVerifyDepositsSignatures(context.Background(), []*ethpb.Deposit{deposit}) + verified, err := blocks.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit}) require.NoError(t, err) require.Equal(t, true, verified) } @@ -69,7 +68,7 @@ func TestBatchVerifyDepositsSignatures_InvalidSignature(t *testing.T) { deposit.Proof = proof require.NoError(t, err) - verified, err := blocks.BatchVerifyDepositsSignatures(context.Background(), []*ethpb.Deposit{deposit}) + verified, err := blocks.BatchVerifyDepositsSignatures(t.Context(), 
[]*ethpb.Deposit{deposit}) require.NoError(t, err) require.Equal(t, false, verified) } @@ -164,7 +163,7 @@ func TestBatchVerifyPendingDepositsSignatures_Ok(t *testing.T) { sig2 := sk2.Sign(sr2[:]) pendingDeposit2.Signature = sig2.Marshal() - verified, err := blocks.BatchVerifyPendingDepositsSignatures(context.Background(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2}) + verified, err := blocks.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2}) require.NoError(t, err) require.Equal(t, true, verified) } @@ -175,7 +174,7 @@ func TestBatchVerifyPendingDepositsSignatures_InvalidSignature(t *testing.T) { WithdrawalCredentials: make([]byte, 32), Signature: make([]byte, 96), } - verified, err := blocks.BatchVerifyPendingDepositsSignatures(context.Background(), []*ethpb.PendingDeposit{pendingDeposit}) + verified, err := blocks.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit}) require.NoError(t, err) require.Equal(t, false, verified) } diff --git a/beacon-chain/core/blocks/eth1_data_test.go b/beacon-chain/core/blocks/eth1_data_test.go index a20319aaa0..b3a6d387ff 100644 --- a/beacon-chain/core/blocks/eth1_data_test.go +++ b/beacon-chain/core/blocks/eth1_data_test.go @@ -1,7 +1,6 @@ package blocks_test import ( - "context" "fmt" "testing" @@ -177,7 +176,7 @@ func TestProcessEth1Data_SetsCorrectly(t *testing.T) { period := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod))) for i := uint64(0); i < period; i++ { - processedState, err := blocks.ProcessEth1DataInBlock(context.Background(), beaconState, b.Block.Body.Eth1Data) + processedState, err := blocks.ProcessEth1DataInBlock(t.Context(), beaconState, b.Block.Body.Eth1Data) require.NoError(t, err) require.Equal(t, true, processedState.Version() == version.Phase0) } diff --git a/beacon-chain/core/blocks/exit_test.go b/beacon-chain/core/blocks/exit_test.go index 
cd3758f16f..d9d4add8ec 100644 --- a/beacon-chain/core/blocks/exit_test.go +++ b/beacon-chain/core/blocks/exit_test.go @@ -1,7 +1,6 @@ package blocks_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks" @@ -47,7 +46,7 @@ func TestProcessVoluntaryExits_NotActiveLongEnoughToExit(t *testing.T) { } want := "validator has not been active long enough to exit" - _, err = blocks.ProcessVoluntaryExits(context.Background(), state, b.Block.Body.VoluntaryExits) + _, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits) assert.ErrorContains(t, want, err) } @@ -77,7 +76,7 @@ func TestProcessVoluntaryExits_ExitAlreadySubmitted(t *testing.T) { } want := "validator with index 0 has already submitted an exit, which will take place at epoch: 10" - _, err = blocks.ProcessVoluntaryExits(context.Background(), state, b.Block.Body.VoluntaryExits) + _, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits) assert.ErrorContains(t, want, err) } @@ -125,7 +124,7 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) { }, } - newState, err := blocks.ProcessVoluntaryExits(context.Background(), state, b.Block.Body.VoluntaryExits) + newState, err := blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits) require.NoError(t, err, "Could not process exits") newRegistry := newState.Validators() if newRegistry[0].ExitEpoch != helpers.ActivationExitEpoch(primitives.Epoch(state.Slot()/params.BeaconConfig().SlotsPerEpoch)) { diff --git a/beacon-chain/core/blocks/header_test.go b/beacon-chain/core/blocks/header_test.go index c23b87b202..a1da62f6fd 100644 --- a/beacon-chain/core/blocks/header_test.go +++ b/beacon-chain/core/blocks/header_test.go @@ -1,7 +1,6 @@ package blocks_test import ( - "context" "io" "testing" @@ -51,7 +50,7 @@ func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) { currentEpoch := time.CurrentEpoch(state) priv, err := bls.RandKey() 
require.NoError(t, err) - pID, err := helpers.BeaconProposerIndex(context.Background(), state) + pID, err := helpers.BeaconProposerIndex(t.Context(), state) require.NoError(t, err) block := util.NewBeaconBlock() block.Block.ProposerIndex = pID @@ -61,7 +60,7 @@ func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) { block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv) require.NoError(t, err) - proposerIdx, err := helpers.BeaconProposerIndex(context.Background(), state) + proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state) require.NoError(t, err) validators[proposerIdx].Slashed = false validators[proposerIdx].PublicKey = priv.PublicKey().Marshal() @@ -70,7 +69,7 @@ func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) { wsb, err := consensusblocks.NewSignedBeaconBlock(block) require.NoError(t, err) - _, err = blocks.ProcessBlockHeader(context.Background(), state, wsb) + _, err = blocks.ProcessBlockHeader(t.Context(), state, wsb) assert.ErrorContains(t, "block.Slot 10 must be greater than state.LatestBlockHeader.Slot 10", err) } @@ -85,7 +84,7 @@ func TestProcessBlockHeader_WrongProposerSig(t *testing.T) { lbhdr, err := beaconState.LatestBlockHeader().HashTreeRoot() require.NoError(t, err) - proposerIdx, err := helpers.BeaconProposerIndex(context.Background(), beaconState) + proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), beaconState) require.NoError(t, err) block := util.NewBeaconBlock() @@ -98,7 +97,7 @@ func TestProcessBlockHeader_WrongProposerSig(t *testing.T) { wsb, err := consensusblocks.NewSignedBeaconBlock(block) require.NoError(t, err) - _, err = blocks.ProcessBlockHeader(context.Background(), beaconState, wsb) + _, err = blocks.ProcessBlockHeader(t.Context(), beaconState, wsb) want := "signature did not verify" assert.ErrorContains(t, want, err) } @@ -142,7 +141,7 @@ func TestProcessBlockHeader_DifferentSlots(t *testing.T) { wsb, err 
:= consensusblocks.NewSignedBeaconBlock(block) require.NoError(t, err) - _, err = blocks.ProcessBlockHeader(context.Background(), state, wsb) + _, err = blocks.ProcessBlockHeader(t.Context(), state, wsb) want := "is different than block slot" assert.ErrorContains(t, want, err) } @@ -172,7 +171,7 @@ func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) { blockSig, err := signing.ComputeDomainAndSign(state, currentEpoch, &sszBytes, params.BeaconConfig().DomainBeaconProposer, priv) require.NoError(t, err) validators[5896].PublicKey = priv.PublicKey().Marshal() - pID, err := helpers.BeaconProposerIndex(context.Background(), state) + pID, err := helpers.BeaconProposerIndex(t.Context(), state) require.NoError(t, err) block := util.NewBeaconBlock() block.Block.Slot = 10 @@ -183,7 +182,7 @@ func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) { wsb, err := consensusblocks.NewSignedBeaconBlock(block) require.NoError(t, err) - _, err = blocks.ProcessBlockHeader(context.Background(), state, wsb) + _, err = blocks.ProcessBlockHeader(t.Context(), state, wsb) want := "does not match" assert.ErrorContains(t, want, err) } @@ -216,7 +215,7 @@ func TestProcessBlockHeader_SlashedProposer(t *testing.T) { require.NoError(t, err) validators[12683].PublicKey = priv.PublicKey().Marshal() - pID, err := helpers.BeaconProposerIndex(context.Background(), state) + pID, err := helpers.BeaconProposerIndex(t.Context(), state) require.NoError(t, err) block := util.NewBeaconBlock() block.Block.Slot = 10 @@ -227,7 +226,7 @@ func TestProcessBlockHeader_SlashedProposer(t *testing.T) { wsb, err := consensusblocks.NewSignedBeaconBlock(block) require.NoError(t, err) - _, err = blocks.ProcessBlockHeader(context.Background(), state, wsb) + _, err = blocks.ProcessBlockHeader(t.Context(), state, wsb) want := "was previously slashed" assert.ErrorContains(t, want, err) } @@ -257,7 +256,7 @@ func TestProcessBlockHeader_OK(t *testing.T) { currentEpoch := 
time.CurrentEpoch(state) priv, err := bls.RandKey() require.NoError(t, err) - pID, err := helpers.BeaconProposerIndex(context.Background(), state) + pID, err := helpers.BeaconProposerIndex(t.Context(), state) require.NoError(t, err) block := util.NewBeaconBlock() block.Block.ProposerIndex = pID @@ -269,7 +268,7 @@ func TestProcessBlockHeader_OK(t *testing.T) { bodyRoot, err := block.Block.Body.HashTreeRoot() require.NoError(t, err, "Failed to hash block bytes got") - proposerIdx, err := helpers.BeaconProposerIndex(context.Background(), state) + proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state) require.NoError(t, err) validators[proposerIdx].Slashed = false validators[proposerIdx].PublicKey = priv.PublicKey().Marshal() @@ -278,7 +277,7 @@ func TestProcessBlockHeader_OK(t *testing.T) { wsb, err := consensusblocks.NewSignedBeaconBlock(block) require.NoError(t, err) - newState, err := blocks.ProcessBlockHeader(context.Background(), state, wsb) + newState, err := blocks.ProcessBlockHeader(t.Context(), state, wsb) require.NoError(t, err, "Failed to process block header got") var zeroHash [32]byte nsh := newState.LatestBlockHeader() @@ -318,7 +317,7 @@ func TestBlockSignatureSet_OK(t *testing.T) { currentEpoch := time.CurrentEpoch(state) priv, err := bls.RandKey() require.NoError(t, err) - pID, err := helpers.BeaconProposerIndex(context.Background(), state) + pID, err := helpers.BeaconProposerIndex(t.Context(), state) require.NoError(t, err) block := util.NewBeaconBlock() block.Block.Slot = 10 @@ -327,7 +326,7 @@ func TestBlockSignatureSet_OK(t *testing.T) { block.Block.ParentRoot = latestBlockSignedRoot[:] block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv) require.NoError(t, err) - proposerIdx, err := helpers.BeaconProposerIndex(context.Background(), state) + proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state) require.NoError(t, err) 
validators[proposerIdx].Slashed = false validators[proposerIdx].PublicKey = priv.PublicKey().Marshal() diff --git a/beacon-chain/core/blocks/proposer_slashing_test.go b/beacon-chain/core/blocks/proposer_slashing_test.go index 402b346697..7fe013a397 100644 --- a/beacon-chain/core/blocks/proposer_slashing_test.go +++ b/beacon-chain/core/blocks/proposer_slashing_test.go @@ -1,7 +1,6 @@ package blocks_test import ( - "context" "fmt" "testing" @@ -51,7 +50,7 @@ func TestProcessProposerSlashings_UnmatchedHeaderSlots(t *testing.T) { }, } want := "mismatched header slots" - _, err := blocks.ProcessProposerSlashings(context.Background(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator) + _, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator) assert.ErrorContains(t, want, err) } @@ -84,7 +83,7 @@ func TestProcessProposerSlashings_SameHeaders(t *testing.T) { }, } want := "expected slashing headers to differ" - _, err := blocks.ProcessProposerSlashings(context.Background(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator) + _, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator) assert.ErrorContains(t, want, err) } @@ -134,7 +133,7 @@ func TestProcessProposerSlashings_ValidatorNotSlashable(t *testing.T) { "validator with key %#x is not slashable", bytesutil.ToBytes48(beaconState.Validators()[0].PublicKey), ) - _, err = blocks.ProcessProposerSlashings(context.Background(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator) + _, err = blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator) assert.ErrorContains(t, want, err) } @@ -173,7 +172,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatus(t *testing.T) { block := util.NewBeaconBlock() block.Block.Body.ProposerSlashings = slashings - newState, err := blocks.ProcessProposerSlashings(context.Background(), 
beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator) + newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator) require.NoError(t, err) newStateVals := newState.Validators() @@ -221,7 +220,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusAltair(t *testing.T) { block := util.NewBeaconBlock() block.Block.Body.ProposerSlashings = slashings - newState, err := blocks.ProcessProposerSlashings(context.Background(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator) + newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator) require.NoError(t, err) newStateVals := newState.Validators() @@ -269,7 +268,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusBellatrix(t *testing.T) { block := util.NewBeaconBlock() block.Block.Body.ProposerSlashings = slashings - newState, err := blocks.ProcessProposerSlashings(context.Background(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator) + newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator) require.NoError(t, err) newStateVals := newState.Validators() @@ -317,7 +316,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusCapella(t *testing.T) { block := util.NewBeaconBlock() block.Block.Body.ProposerSlashings = slashings - newState, err := blocks.ProcessProposerSlashings(context.Background(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator) + newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator) require.NoError(t, err) newStateVals := newState.Validators() diff --git a/beacon-chain/core/blocks/randao_test.go b/beacon-chain/core/blocks/randao_test.go index 0abf06035a..aa0c824a08 100644 --- a/beacon-chain/core/blocks/randao_test.go +++ 
b/beacon-chain/core/blocks/randao_test.go @@ -1,7 +1,6 @@ package blocks_test import ( - "context" "encoding/binary" "testing" @@ -21,7 +20,7 @@ import ( func TestProcessRandao_IncorrectProposerFailsVerification(t *testing.T) { beaconState, privKeys := util.DeterministicGenesisState(t, 100) // We fetch the proposer's index as that is whom the RANDAO will be verified against. - proposerIdx, err := helpers.BeaconProposerIndex(context.Background(), beaconState) + proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), beaconState) require.NoError(t, err) epoch := primitives.Epoch(0) buf := make([]byte, 32) @@ -42,7 +41,7 @@ func TestProcessRandao_IncorrectProposerFailsVerification(t *testing.T) { want := "block randao: signature did not verify" wsb, err := consensusblocks.NewSignedBeaconBlock(b) require.NoError(t, err) - _, err = blocks.ProcessRandao(context.Background(), beaconState, wsb) + _, err = blocks.ProcessRandao(t.Context(), beaconState, wsb) assert.ErrorContains(t, want, err) } @@ -62,7 +61,7 @@ func TestProcessRandao_SignatureVerifiesAndUpdatesLatestStateMixes(t *testing.T) wsb, err := consensusblocks.NewSignedBeaconBlock(b) require.NoError(t, err) newState, err := blocks.ProcessRandao( - context.Background(), + t.Context(), beaconState, wsb, ) @@ -85,7 +84,7 @@ func TestRandaoSignatureSet_OK(t *testing.T) { }, } - set, err := blocks.RandaoSignatureBatch(context.Background(), beaconState, block.Body.RandaoReveal) + set, err := blocks.RandaoSignatureBatch(t.Context(), beaconState, block.Body.RandaoReveal) require.NoError(t, err) verified, err := set.Verify() require.NoError(t, err) diff --git a/beacon-chain/core/electra/deposit_fuzz_test.go b/beacon-chain/core/electra/deposit_fuzz_test.go index 8addf57173..435b0e0748 100644 --- a/beacon-chain/core/electra/deposit_fuzz_test.go +++ b/beacon-chain/core/electra/deposit_fuzz_test.go @@ -1,7 +1,6 @@ package electra_test import ( - "context" "testing" 
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra" @@ -16,7 +15,7 @@ func TestFuzzProcessDeposits_10000(t *testing.T) { fuzzer := gofuzz.NewWithSeed(0) state := ðpb.BeaconStateElectra{} deposits := make([]*ethpb.Deposit, 100) - ctx := context.Background() + ctx := t.Context() for i := 0; i < 10000; i++ { fuzzer.Fuzz(state) for i := range deposits { diff --git a/beacon-chain/core/electra/deposits_test.go b/beacon-chain/core/electra/deposits_test.go index a502ad9b2b..77fda66086 100644 --- a/beacon-chain/core/electra/deposits_test.go +++ b/beacon-chain/core/electra/deposits_test.go @@ -319,7 +319,7 @@ func TestBatchProcessNewPendingDeposits(t *testing.T) { validDep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0) invalidDep := ð.PendingDeposit{PublicKey: make([]byte, 48)} deps := []*eth.PendingDeposit{validDep, invalidDep} - require.NoError(t, electra.BatchProcessNewPendingDeposits(context.Background(), st, deps)) + require.NoError(t, electra.BatchProcessNewPendingDeposits(t.Context(), st, deps)) require.Equal(t, 1, len(st.Validators())) require.Equal(t, 1, len(st.Balances())) }) @@ -335,7 +335,7 @@ func TestBatchProcessNewPendingDeposits(t *testing.T) { wc[31] = byte(0) validDep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0) deps := []*eth.PendingDeposit{validDep, validDep} - require.NoError(t, electra.BatchProcessNewPendingDeposits(context.Background(), st, deps)) + require.NoError(t, electra.BatchProcessNewPendingDeposits(t.Context(), st, deps)) require.Equal(t, 1, len(st.Validators())) require.Equal(t, 1, len(st.Balances())) require.Equal(t, params.BeaconConfig().MinActivationBalance*2, st.Balances()[0]) @@ -354,7 +354,7 @@ func TestBatchProcessNewPendingDeposits(t *testing.T) { invalidSigDep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0) 
invalidSigDep.Signature = make([]byte, 96) deps := []*eth.PendingDeposit{validDep, invalidSigDep} - require.NoError(t, electra.BatchProcessNewPendingDeposits(context.Background(), st, deps)) + require.NoError(t, electra.BatchProcessNewPendingDeposits(t.Context(), st, deps)) require.Equal(t, 1, len(st.Validators())) require.Equal(t, 1, len(st.Balances())) require.Equal(t, 2*params.BeaconConfig().MinActivationBalance, st.Balances()[0]) @@ -368,12 +368,12 @@ func TestProcessDepositRequests(t *testing.T) { require.NoError(t, st.SetDepositRequestsStartIndex(1)) t.Run("empty requests continues", func(t *testing.T) { - newSt, err := electra.ProcessDepositRequests(context.Background(), st, []*enginev1.DepositRequest{}) + newSt, err := electra.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{}) require.NoError(t, err) require.DeepEqual(t, newSt, st) }) t.Run("nil request errors", func(t *testing.T) { - _, err = electra.ProcessDepositRequests(context.Background(), st, []*enginev1.DepositRequest{nil}) + _, err = electra.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{nil}) require.ErrorContains(t, "nil deposit request", err) }) @@ -406,7 +406,7 @@ func TestProcessDepositRequests(t *testing.T) { Signature: sig.Marshal(), }, } - st, err = electra.ProcessDepositRequests(context.Background(), st, requests) + st, err = electra.ProcessDepositRequests(t.Context(), st, requests) require.NoError(t, err) pbd, err := st.PendingDeposits() @@ -437,7 +437,7 @@ func TestProcessDeposit_Electra_Simple(t *testing.T) { }, }) require.NoError(t, err) - pdSt, err := electra.ProcessDeposits(context.Background(), st, deps) + pdSt, err := electra.ProcessDeposits(t.Context(), st, deps) require.NoError(t, err) pbd, err := pdSt.PendingDeposits() require.NoError(t, err) @@ -592,7 +592,7 @@ func TestApplyPendingDeposit_TopUp(t *testing.T) { dep := stateTesting.GeneratePendingDeposit(t, sk, excessBalance, bytesutil.ToBytes32(wc), 0) require.NoError(t, 
st.SetValidators(validators)) - require.NoError(t, electra.ApplyPendingDeposit(context.Background(), st, dep)) + require.NoError(t, electra.ApplyPendingDeposit(t.Context(), st, dep)) b, err := st.BalanceAtIndex(0) require.NoError(t, err) @@ -608,7 +608,7 @@ func TestApplyPendingDeposit_UnknownKey(t *testing.T) { wc[31] = byte(0) dep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0) require.Equal(t, 0, len(st.Validators())) - require.NoError(t, electra.ApplyPendingDeposit(context.Background(), st, dep)) + require.NoError(t, electra.ApplyPendingDeposit(t.Context(), st, dep)) // activates new validator require.Equal(t, 1, len(st.Validators())) b, err := st.BalanceAtIndex(0) @@ -630,7 +630,7 @@ func TestApplyPendingDeposit_InvalidSignature(t *testing.T) { Amount: 100, } require.Equal(t, 0, len(st.Validators())) - require.NoError(t, electra.ApplyPendingDeposit(context.Background(), st, dep)) + require.NoError(t, electra.ApplyPendingDeposit(t.Context(), st, dep)) // no validator added require.Equal(t, 0, len(st.Validators())) // no topup either diff --git a/beacon-chain/core/electra/transition_no_verify_sig_test.go b/beacon-chain/core/electra/transition_no_verify_sig_test.go index bed8bc0c63..fb6f30a88a 100644 --- a/beacon-chain/core/electra/transition_no_verify_sig_test.go +++ b/beacon-chain/core/electra/transition_no_verify_sig_test.go @@ -1,7 +1,6 @@ package electra_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra" @@ -54,7 +53,7 @@ func TestProcessOperationsWithNilRequests(t *testing.T) { require.NoError(t, st.SetSlot(1)) - _, err = electra.ProcessOperations(context.Background(), st, b.Block()) + _, err = electra.ProcessOperations(t.Context(), st, b.Block()) require.ErrorContains(t, tc.errMsg, err) }) } diff --git a/beacon-chain/core/electra/transition_test.go b/beacon-chain/core/electra/transition_test.go index f70da2e002..05bf1739f6 100644 --- 
a/beacon-chain/core/electra/transition_test.go +++ b/beacon-chain/core/electra/transition_test.go @@ -1,7 +1,6 @@ package electra_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra" @@ -78,7 +77,7 @@ func TestProcessEpoch_CanProcessElectra(t *testing.T) { TargetIndex: 1, }, })) - err := electra.ProcessEpoch(context.Background(), st) + err := electra.ProcessEpoch(t.Context(), st) require.NoError(t, err) require.Equal(t, uint64(0), st.Slashings()[2], "Unexpected slashed balance") diff --git a/beacon-chain/core/electra/withdrawals_test.go b/beacon-chain/core/electra/withdrawals_test.go index d898814551..429878824a 100644 --- a/beacon-chain/core/electra/withdrawals_test.go +++ b/beacon-chain/core/electra/withdrawals_test.go @@ -1,7 +1,6 @@ package electra_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra" @@ -290,7 +289,7 @@ func TestProcessWithdrawRequests(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := electra.ProcessWithdrawalRequests(context.Background(), tt.args.st, tt.args.wrs) + got, err := electra.ProcessWithdrawalRequests(t.Context(), tt.args.st, tt.args.wrs) if (err != nil) != tt.wantErr { t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/beacon-chain/core/epoch/epoch_processing_test.go b/beacon-chain/core/epoch/epoch_processing_test.go index ef7085002c..59ee6040d1 100644 --- a/beacon-chain/core/epoch/epoch_processing_test.go +++ b/beacon-chain/core/epoch/epoch_processing_test.go @@ -1,7 +1,6 @@ package epoch_test import ( - "context" "fmt" "math" "testing" @@ -170,7 +169,7 @@ func TestProcessRegistryUpdates_NoRotation(t *testing.T) { } beaconState, err := state_native.InitializeFromProtoPhase0(base) require.NoError(t, err) - newState, err := epoch.ProcessRegistryUpdates(context.Background(), beaconState) + newState, err := epoch.ProcessRegistryUpdates(t.Context(), 
beaconState) require.NoError(t, err) for i, validator := range newState.Validators() { assert.Equal(t, params.BeaconConfig().MaxSeedLookahead, validator.ExitEpoch, "Could not update registry %d", i) @@ -194,7 +193,7 @@ func TestProcessRegistryUpdates_EligibleToActivate(t *testing.T) { beaconState, err := state_native.InitializeFromProtoPhase0(base) require.NoError(t, err) currentEpoch := time.CurrentEpoch(beaconState) - newState, err := epoch.ProcessRegistryUpdates(context.Background(), beaconState) + newState, err := epoch.ProcessRegistryUpdates(t.Context(), beaconState) require.NoError(t, err) for i, validator := range newState.Validators() { if uint64(i) < limit && validator.ActivationEpoch != helpers.ActivationExitEpoch(currentEpoch) { @@ -229,7 +228,7 @@ func TestProcessRegistryUpdates_EligibleToActivate_Cancun(t *testing.T) { beaconState, err := state_native.InitializeFromProtoDeneb(base) require.NoError(t, err) currentEpoch := time.CurrentEpoch(beaconState) - newState, err := epoch.ProcessRegistryUpdates(context.Background(), beaconState) + newState, err := epoch.ProcessRegistryUpdates(t.Context(), beaconState) require.NoError(t, err) for i, validator := range newState.Validators() { // Note: In Deneb, only validators indices before `MaxPerEpochActivationChurnLimit` should be activated. 
@@ -257,7 +256,7 @@ func TestProcessRegistryUpdates_ActivationCompletes(t *testing.T) { } beaconState, err := state_native.InitializeFromProtoPhase0(base) require.NoError(t, err) - newState, err := epoch.ProcessRegistryUpdates(context.Background(), beaconState) + newState, err := epoch.ProcessRegistryUpdates(t.Context(), beaconState) require.NoError(t, err) for i, validator := range newState.Validators() { assert.Equal(t, params.BeaconConfig().MaxSeedLookahead, validator.ExitEpoch, "Could not update registry %d, unexpected exit slot", i) @@ -281,7 +280,7 @@ func TestProcessRegistryUpdates_ValidatorsEjected(t *testing.T) { } beaconState, err := state_native.InitializeFromProtoPhase0(base) require.NoError(t, err) - newState, err := epoch.ProcessRegistryUpdates(context.Background(), beaconState) + newState, err := epoch.ProcessRegistryUpdates(t.Context(), beaconState) require.NoError(t, err) for i, validator := range newState.Validators() { assert.Equal(t, params.BeaconConfig().MaxSeedLookahead+1, validator.ExitEpoch, "Could not update registry %d, unexpected exit slot", i) @@ -306,7 +305,7 @@ func TestProcessRegistryUpdates_CanExits(t *testing.T) { } beaconState, err := state_native.InitializeFromProtoPhase0(base) require.NoError(t, err) - newState, err := epoch.ProcessRegistryUpdates(context.Background(), beaconState) + newState, err := epoch.ProcessRegistryUpdates(t.Context(), beaconState) require.NoError(t, err) for i, validator := range newState.Validators() { assert.Equal(t, exitEpoch, validator.ExitEpoch, "Could not update registry %d, unexpected exit slot", i) @@ -386,7 +385,7 @@ func TestProcessHistoricalDataUpdate(t *testing.T) { name: "before capella can process and get historical root", st: func() state.BeaconState { st, _ := util.DeterministicGenesisState(t, 1) - st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerHistoricalRoot-1) + st, err := transition.ProcessSlots(t.Context(), st, 
params.BeaconConfig().SlotsPerHistoricalRoot-1) require.NoError(t, err) return st }, @@ -410,7 +409,7 @@ func TestProcessHistoricalDataUpdate(t *testing.T) { name: "after capella can process and get historical summary", st: func() state.BeaconState { st, _ := util.DeterministicGenesisStateCapella(t, 1) - st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerHistoricalRoot-1) + st, err := transition.ProcessSlots(t.Context(), st, params.BeaconConfig().SlotsPerHistoricalRoot-1) require.NoError(t, err) return st }, diff --git a/beacon-chain/core/epoch/precompute/attestation_test.go b/beacon-chain/core/epoch/precompute/attestation_test.go index fc0eb65dfe..7a45fb7181 100644 --- a/beacon-chain/core/epoch/precompute/attestation_test.go +++ b/beacon-chain/core/epoch/precompute/attestation_test.go @@ -1,7 +1,6 @@ package precompute_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/epoch/precompute" @@ -206,10 +205,10 @@ func TestProcessAttestations(t *testing.T) { for i := 0; i < len(pVals); i++ { pVals[i] = &precompute.Validator{CurrentEpochEffectiveBalance: 100} } - pVals, _, err = precompute.ProcessAttestations(context.Background(), beaconState, pVals, &precompute.Balance{}) + pVals, _, err = precompute.ProcessAttestations(t.Context(), beaconState, pVals, &precompute.Balance{}) require.NoError(t, err) - committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att1.Data.Slot, att1.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att1.Data.Slot, att1.Data.CommitteeIndex) require.NoError(t, err) indices, err := attestation.AttestingIndices(att1, committee) require.NoError(t, err) @@ -218,7 +217,7 @@ func TestProcessAttestations(t *testing.T) { t.Error("Not a prev epoch attester") } } - committee, err = helpers.BeaconCommitteeFromState(context.Background(), beaconState, att2.Data.Slot, att2.Data.CommitteeIndex) + 
committee, err = helpers.BeaconCommitteeFromState(t.Context(), beaconState, att2.Data.Slot, att2.Data.CommitteeIndex) require.NoError(t, err) indices, err = attestation.AttestingIndices(att2, committee) require.NoError(t, err) diff --git a/beacon-chain/core/epoch/precompute/justification_finalization_test.go b/beacon-chain/core/epoch/precompute/justification_finalization_test.go index 9cadae6500..95d7d92d2b 100644 --- a/beacon-chain/core/epoch/precompute/justification_finalization_test.go +++ b/beacon-chain/core/epoch/precompute/justification_finalization_test.go @@ -1,7 +1,6 @@ package precompute_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair" @@ -239,7 +238,7 @@ func TestUnrealizedCheckpoints(t *testing.T) { state, err := state_native.InitializeFromProtoAltair(base) require.NoError(t, err) - _, _, err = altair.InitializePrecomputeValidators(context.Background(), state) + _, _, err = altair.InitializePrecomputeValidators(t.Context(), state) require.NoError(t, err) jc, fc, err := precompute.UnrealizedCheckpoints(state) diff --git a/beacon-chain/core/epoch/precompute/new_test.go b/beacon-chain/core/epoch/precompute/new_test.go index c8ed1836a3..de2468d715 100644 --- a/beacon-chain/core/epoch/precompute/new_test.go +++ b/beacon-chain/core/epoch/precompute/new_test.go @@ -1,7 +1,6 @@ package precompute_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/epoch/precompute" @@ -29,7 +28,7 @@ func TestNew(t *testing.T) { }) require.NoError(t, err) e := params.BeaconConfig().FarFutureSlot - v, b, err := precompute.New(context.Background(), s) + v, b, err := precompute.New(t.Context(), s) require.NoError(t, err) assert.DeepEqual(t, &precompute.Validator{ IsSlashed: true, diff --git a/beacon-chain/core/epoch/precompute/reward_penalty_test.go b/beacon-chain/core/epoch/precompute/reward_penalty_test.go index 21d93ede15..279f819970 100644 --- 
a/beacon-chain/core/epoch/precompute/reward_penalty_test.go +++ b/beacon-chain/core/epoch/precompute/reward_penalty_test.go @@ -1,7 +1,6 @@ package precompute import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers" @@ -40,9 +39,9 @@ func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) { beaconState, err := state_native.InitializeFromProtoPhase0(base) require.NoError(t, err) - vp, bp, err := New(context.Background(), beaconState) + vp, bp, err := New(t.Context(), beaconState) require.NoError(t, err) - vp, bp, err = ProcessAttestations(context.Background(), beaconState, vp, bp) + vp, bp, err = ProcessAttestations(t.Context(), beaconState, vp, bp) require.NoError(t, err) processedState, err := ProcessRewardsAndPenaltiesPrecompute(beaconState, bp, vp, AttestationsDelta, ProposersDelta) @@ -83,9 +82,9 @@ func TestAttestationDeltas_ZeroEpoch(t *testing.T) { beaconState, err := state_native.InitializeFromProtoPhase0(base) require.NoError(t, err) - pVals, pBal, err := New(context.Background(), beaconState) + pVals, pBal, err := New(t.Context(), beaconState) assert.NoError(t, err) - pVals, pBal, err = ProcessAttestations(context.Background(), beaconState, pVals, pBal) + pVals, pBal, err = ProcessAttestations(t.Context(), beaconState, pVals, pBal) require.NoError(t, err) pBal.ActiveCurrentEpoch = 0 // Could cause a divide by zero panic. 
@@ -121,9 +120,9 @@ func TestAttestationDeltas_ZeroInclusionDelay(t *testing.T) { beaconState, err := state_native.InitializeFromProtoPhase0(base) require.NoError(t, err) - pVals, pBal, err := New(context.Background(), beaconState) + pVals, pBal, err := New(t.Context(), beaconState) require.NoError(t, err) - _, _, err = ProcessAttestations(context.Background(), beaconState, pVals, pBal) + _, _, err = ProcessAttestations(t.Context(), beaconState, pVals, pBal) require.ErrorContains(t, "attestation with inclusion delay of 0", err) } @@ -155,9 +154,9 @@ func TestProcessRewardsAndPenaltiesPrecompute_SlashedInactivePenalty(t *testing. require.NoError(t, beaconState.SetValidators(vs)) } - vp, bp, err := New(context.Background(), beaconState) + vp, bp, err := New(t.Context(), beaconState) require.NoError(t, err) - vp, bp, err = ProcessAttestations(context.Background(), beaconState, vp, bp) + vp, bp, err = ProcessAttestations(t.Context(), beaconState, vp, bp) require.NoError(t, err) rewards, penalties, err := AttestationsDelta(beaconState, bp, vp) require.NoError(t, err) diff --git a/beacon-chain/core/fulu/transition_test.go b/beacon-chain/core/fulu/transition_test.go index a61a13aafe..a1f5695c2e 100644 --- a/beacon-chain/core/fulu/transition_test.go +++ b/beacon-chain/core/fulu/transition_test.go @@ -1,7 +1,6 @@ package fulu_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/fulu" @@ -13,11 +12,11 @@ import ( func TestProcessEpoch_CanProcessFulu(t *testing.T) { st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee) require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch)) - st, err := fulu.UpgradeToFulu(context.Background(), st) + st, err := fulu.UpgradeToFulu(t.Context(), st) require.NoError(t, err) preLookahead, err := st.ProposerLookahead() require.NoError(t, err) - err = fulu.ProcessEpoch(context.Background(), st) + err = fulu.ProcessEpoch(t.Context(), st) require.NoError(t, 
err) postLookahead, err := st.ProposerLookahead() require.NoError(t, err) diff --git a/beacon-chain/core/helpers/attestation_test.go b/beacon-chain/core/helpers/attestation_test.go index b098f0e63e..de142262c5 100644 --- a/beacon-chain/core/helpers/attestation_test.go +++ b/beacon-chain/core/helpers/attestation_test.go @@ -1,7 +1,6 @@ package helpers_test import ( - "context" "strconv" "testing" "time" @@ -23,7 +22,7 @@ func TestAttestation_IsAggregator(t *testing.T) { helpers.ClearCache() beaconState, privKeys := util.DeterministicGenesisState(t, 100) - committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, 0, 0) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, 0, 0) require.NoError(t, err) sig := privKeys[0].Sign([]byte{'A'}) agg, err := helpers.IsAggregator(uint64(len(committee)), sig.Marshal()) @@ -38,7 +37,7 @@ func TestAttestation_IsAggregator(t *testing.T) { params.OverrideBeaconConfig(params.MinimalSpecConfig()) beaconState, privKeys := util.DeterministicGenesisState(t, 2048) - committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, 0, 0) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, 0, 0) require.NoError(t, err) sig := privKeys[0].Sign([]byte{'A'}) agg, err := helpers.IsAggregator(uint64(len(committee)), sig.Marshal()) @@ -73,7 +72,7 @@ func TestAttestation_ComputeSubnetForAttestation(t *testing.T) { RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), }) require.NoError(t, err) - valCount, err := helpers.ActiveValidatorCount(context.Background(), state, slots.ToEpoch(34)) + valCount, err := helpers.ActiveValidatorCount(t.Context(), state, slots.ToEpoch(34)) require.NoError(t, err) t.Run("Phase 0", func(t *testing.T) { diff --git a/beacon-chain/core/helpers/beacon_committee_test.go b/beacon-chain/core/helpers/beacon_committee_test.go index 3f2d85b70a..002923177f 100644 --- 
a/beacon-chain/core/helpers/beacon_committee_test.go +++ b/beacon-chain/core/helpers/beacon_committee_test.go @@ -1,7 +1,6 @@ package helpers_test import ( - "context" "fmt" "strconv" "testing" @@ -50,7 +49,7 @@ func TestComputeCommittee_WithoutCache(t *testing.T) { require.NoError(t, err) epoch := time.CurrentEpoch(state) - indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch) + indices, err := helpers.ActiveValidatorIndices(t.Context(), state, epoch) require.NoError(t, err) seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester) require.NoError(t, err) @@ -112,10 +111,10 @@ func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) { Slot: 0, // Epoch 0. }) require.NoError(t, err) - _, err = helpers.CommitteeAssignments(context.Background(), state, epoch+1, nil) + _, err = helpers.CommitteeAssignments(t.Context(), state, epoch+1, nil) assert.ErrorContains(t, "can't be greater than next epoch", err) - _, err = helpers.ProposerAssignments(context.Background(), state, epoch+1) + _, err = helpers.ProposerAssignments(t.Context(), state, epoch+1) assert.ErrorContains(t, "can't be greater than next epoch", err) } @@ -139,7 +138,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) { RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), }) require.NoError(t, err) - assignments, err := helpers.ProposerAssignments(context.Background(), state, 0) + assignments, err := helpers.ProposerAssignments(t.Context(), state, 0) require.NoError(t, err, "Failed to determine Assignments") for _, slots := range assignments { for _, s := range slots { @@ -214,12 +213,12 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { helpers.ClearCache() - assignments, err := helpers.CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot), validatorIndices) + assignments, err := helpers.CommitteeAssignments(t.Context(), 
state, slots.ToEpoch(tt.slot), validatorIndices) require.NoError(t, err, "Failed to determine Assignments") cac := assignments[tt.index] assert.Equal(t, tt.committeeIndex, cac.CommitteeIndex, "Unexpected committeeIndex for validator index %d", tt.index) assert.Equal(t, tt.slot, cac.AttesterSlot, "Unexpected slot for validator index %d", tt.index) - proposerAssignments, err := helpers.ProposerAssignments(context.Background(), state, slots.ToEpoch(tt.slot)) + proposerAssignments, err := helpers.ProposerAssignments(t.Context(), state, slots.ToEpoch(tt.slot)) require.NoError(t, err) if len(proposerAssignments[tt.index]) > 0 && proposerAssignments[tt.index][0] != tt.proposerSlot { t.Errorf("wanted proposer slot %d, got proposer slot %d for validator index %d", @@ -253,11 +252,11 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) { RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), }) require.NoError(t, err) - assignments, err := helpers.ProposerAssignments(context.Background(), state, time.CurrentEpoch(state)) + assignments, err := helpers.ProposerAssignments(t.Context(), state, time.CurrentEpoch(state)) require.NoError(t, err) require.NotEqual(t, 0, len(assignments), "wanted non-zero proposer index set") - assignments, err = helpers.ProposerAssignments(context.Background(), state, time.CurrentEpoch(state)+1) + assignments, err = helpers.ProposerAssignments(t.Context(), state, time.CurrentEpoch(state)+1) require.NoError(t, err) require.NotEqual(t, 0, len(assignments), "wanted non-zero proposer index set") } @@ -279,7 +278,7 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), }) require.NoError(t, err) - _, err = helpers.CommitteeAssignments(context.Background(), state, 0, nil) + _, err = helpers.CommitteeAssignments(t.Context(), state, 0, nil) require.ErrorContains(t, "start slot 0 is smaller than the minimum valid 
start slot 1", err) } @@ -301,7 +300,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) { }) require.NoError(t, err) epoch := primitives.Epoch(1) - assignments, err := helpers.ProposerAssignments(context.Background(), state, epoch) + assignments, err := helpers.ProposerAssignments(t.Context(), state, epoch) require.NoError(t, err, "Failed to determine Assignments") slotsWithProposers := make(map[primitives.Slot]bool) @@ -412,7 +411,7 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) { require.NoError(t, state.SetSlot(tt.stateSlot)) att := tt.attestation // Verify attesting indices are correct. - committee, err := helpers.BeaconCommitteeFromState(context.Background(), state, att.GetData().Slot, att.GetData().CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), state, att.GetData().Slot, att.GetData().CommitteeIndex) require.NoError(t, err) require.NotNil(t, committee) err = helpers.VerifyBitfieldLength(att.GetAggregationBits(), uint64(len(committee))) @@ -442,14 +441,14 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) { RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), }) require.NoError(t, err) - require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state))) + require.NoError(t, helpers.UpdateCommitteeCache(t.Context(), state, time.CurrentEpoch(state))) epoch := primitives.Epoch(0) idx := primitives.CommitteeIndex(1) seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester) require.NoError(t, err) - indices, err = helpers.CommitteeCache().Committee(context.Background(), params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)), seed, idx) + indices, err = helpers.CommitteeCache().Committee(t.Context(), params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)), seed, idx) require.NoError(t, err) assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(indices)), "Did not save correct indices 
lengths") } @@ -473,7 +472,7 @@ func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) { }) require.NoError(t, err) e := time.CurrentEpoch(state) - require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, e)) + require.NoError(t, helpers.UpdateCommitteeCache(t.Context(), state, e)) seed, err := helpers.Seed(state, e, params.BeaconConfig().DomainBeaconAttester) require.NoError(t, err) @@ -483,7 +482,7 @@ func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) { require.NoError(t, err) require.Equal(t, false, helpers.CommitteeCache().HasEntry(string(nextSeed[:]))) - require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, e+1)) + require.NoError(t, helpers.UpdateCommitteeCache(t.Context(), state, e+1)) require.Equal(t, true, helpers.CommitteeCache().HasEntry(string(nextSeed[:]))) } @@ -502,7 +501,7 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) { require.NoError(b, err) epoch := time.CurrentEpoch(state) - indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch) + indices, err := helpers.ActiveValidatorIndices(b.Context(), state, epoch) require.NoError(b, err) seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester) require.NoError(b, err) @@ -536,7 +535,7 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) { require.NoError(b, err) epoch := time.CurrentEpoch(state) - indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch) + indices, err := helpers.ActiveValidatorIndices(b.Context(), state, epoch) require.NoError(b, err) seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester) require.NoError(b, err) @@ -570,7 +569,7 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) { require.NoError(b, err) epoch := time.CurrentEpoch(state) - indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch) + indices, err := 
helpers.ActiveValidatorIndices(b.Context(), state, epoch) require.NoError(b, err) seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester) require.NoError(b, err) @@ -605,7 +604,7 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) { require.NoError(b, err) epoch := time.CurrentEpoch(state) - indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch) + indices, err := helpers.ActiveValidatorIndices(b.Context(), state, epoch) require.NoError(b, err) seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester) require.NoError(b, err) @@ -640,7 +639,7 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) { require.NoError(b, err) epoch := time.CurrentEpoch(state) - indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch) + indices, err := helpers.ActiveValidatorIndices(b.Context(), state, epoch) require.NoError(b, err) seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester) require.NoError(b, err) @@ -676,13 +675,13 @@ func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) { RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), }) require.NoError(t, err) - _, err = helpers.BeaconCommitteeFromState(context.Background(), state, 1 /* previous epoch */, 0) + _, err = helpers.BeaconCommitteeFromState(t.Context(), state, 1 /* previous epoch */, 0) require.NoError(t, err) // Verify previous epoch is cached seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconAttester) require.NoError(t, err) - activeIndices, err := helpers.CommitteeCache().ActiveIndices(context.Background(), seed) + activeIndices, err := helpers.CommitteeCache().ActiveIndices(t.Context(), seed) require.NoError(t, err) assert.NotNil(t, activeIndices, "Did not cache active indices") } @@ -701,7 +700,7 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) { }) require.NoError(t, err) - indices, 
err := helpers.ActiveValidatorIndices(context.Background(), state, 0) + indices, err := helpers.ActiveValidatorIndices(t.Context(), state, 0) require.NoError(t, err) proposerIndices, err := helpers.PrecomputeProposerIndices(state, indices, time.CurrentEpoch(state)) @@ -730,7 +729,7 @@ func TestCommitteeIndices(t *testing.T) { } func TestAttestationCommitteesFromState(t *testing.T) { - ctx := context.Background() + ctx := t.Context() validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize)) for i := 0; i < len(validators); i++ { @@ -766,7 +765,7 @@ func TestAttestationCommitteesFromState(t *testing.T) { } func TestAttestationCommitteesFromCache(t *testing.T) { - ctx := context.Background() + ctx := t.Context() validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize)) for i := 0; i < len(validators); i++ { @@ -814,7 +813,7 @@ func TestAttestationCommitteesFromCache(t *testing.T) { } func TestBeaconCommitteesFromState(t *testing.T) { - ctx := context.Background() + ctx := t.Context() params.SetupTestConfigCleanup(t) c := params.BeaconConfig().Copy() @@ -839,7 +838,7 @@ func TestBeaconCommitteesFromState(t *testing.T) { } func TestBeaconCommitteesFromCache(t *testing.T) { - ctx := context.Background() + ctx := t.Context() params.SetupTestConfigCleanup(t) c := params.BeaconConfig().Copy() @@ -875,7 +874,7 @@ func TestBeaconCommitteesFromCache(t *testing.T) { func TestPrecomputeCommittees_HappyPath(t *testing.T) { cfg := params.BeaconConfig() start := primitives.Slot(100) - ctx := context.Background() + ctx := t.Context() st, _ := util.DeterministicGenesisState(t, 256) got, err := helpers.PrecomputeCommittees(ctx, st, start) diff --git a/beacon-chain/core/helpers/validators_test.go b/beacon-chain/core/helpers/validators_test.go index 1c476f9156..877c85e974 100644 --- a/beacon-chain/core/helpers/validators_test.go +++ 
b/beacon-chain/core/helpers/validators_test.go @@ -1,7 +1,6 @@ package helpers_test import ( - "context" "errors" "testing" @@ -228,7 +227,7 @@ func TestBeaconProposerIndex_OK(t *testing.T) { helpers.ClearCache() require.NoError(t, state.SetSlot(tt.slot)) - result, err := helpers.BeaconProposerIndex(context.Background(), state) + result, err := helpers.BeaconProposerIndex(t.Context(), state) require.NoError(t, err, "Failed to get shard and committees at slot") assert.Equal(t, tt.index, result, "Result index was an unexpected value") } @@ -263,7 +262,7 @@ func TestBeaconProposerIndex_BadState(t *testing.T) { // Set a very high slot, so that retrieved block root will be // non existent for the proposer cache. require.NoError(t, state.SetSlot(100)) - _, err = helpers.BeaconProposerIndex(context.Background(), state) + _, err = helpers.BeaconProposerIndex(t.Context(), state) require.NoError(t, err) } @@ -283,7 +282,7 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) { }) require.NoError(t, err) - indices, err := helpers.ActiveValidatorIndices(context.Background(), state, 0) + indices, err := helpers.ActiveValidatorIndices(t.Context(), state, 0) require.NoError(t, err) var proposerIndices []primitives.ValidatorIndex @@ -338,8 +337,8 @@ func TestActiveValidatorCount_Genesis(t *testing.T) { // Preset cache to a bad count. 
seed, err := helpers.Seed(beaconState, 0, params.BeaconConfig().DomainBeaconAttester) require.NoError(t, err) - require.NoError(t, helpers.CommitteeCache().AddCommitteeShuffledList(context.Background(), &cache.Committees{Seed: seed, ShuffledIndices: []primitives.ValidatorIndex{1, 2, 3}})) - validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState)) + require.NoError(t, helpers.CommitteeCache().AddCommitteeShuffledList(t.Context(), &cache.Committees{Seed: seed, ShuffledIndices: []primitives.ValidatorIndex{1, 2, 3}})) + validatorCount, err := helpers.ActiveValidatorCount(t.Context(), beaconState, time.CurrentEpoch(beaconState)) require.NoError(t, err) assert.Equal(t, uint64(c), validatorCount, "Did not get the correct validator count") } @@ -370,7 +369,7 @@ func TestChurnLimit_OK(t *testing.T) { RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), }) require.NoError(t, err) - validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState)) + validatorCount, err := helpers.ActiveValidatorCount(t.Context(), beaconState, time.CurrentEpoch(beaconState)) require.NoError(t, err) resultChurn := helpers.ValidatorActivationChurnLimit(validatorCount) assert.Equal(t, test.wantedChurn, resultChurn, "ValidatorActivationChurnLimit(%d)", test.validatorCount) @@ -407,7 +406,7 @@ func TestChurnLimitDeneb_OK(t *testing.T) { require.NoError(t, err) // Get active validator count - validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState)) + validatorCount, err := helpers.ActiveValidatorCount(t.Context(), beaconState, time.CurrentEpoch(beaconState)) require.NoError(t, err) // Test churn limit calculation @@ -580,7 +579,7 @@ func TestActiveValidatorIndices(t *testing.T) { s, err := state_native.InitializeFromProtoPhase0(tt.args.state) require.NoError(t, err) - got, err := 
helpers.ActiveValidatorIndices(context.Background(), s, tt.args.epoch) + got, err := helpers.ActiveValidatorIndices(t.Context(), s, tt.args.epoch) if tt.wantedErr != "" { assert.ErrorContains(t, tt.wantedErr, err) return @@ -885,7 +884,7 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) { require.NoError(t, beaconState.SetValidators(validators)) require.NoError(t, beaconState.SetBalances(balances)) - index, err := helpers.LastActivatedValidatorIndex(context.Background(), beaconState) + index, err := helpers.LastActivatedValidatorIndex(t.Context(), beaconState) require.NoError(t, err) require.Equal(t, index, primitives.ValidatorIndex(3)) } diff --git a/beacon-chain/core/helpers/weak_subjectivity_test.go b/beacon-chain/core/helpers/weak_subjectivity_test.go index 087e852b75..43a20d93c5 100644 --- a/beacon-chain/core/helpers/weak_subjectivity_test.go +++ b/beacon-chain/core/helpers/weak_subjectivity_test.go @@ -1,7 +1,6 @@ package helpers_test import ( - "context" "fmt" "testing" @@ -49,7 +48,7 @@ func TestWeakSubjectivity_ComputeWeakSubjectivityPeriod(t *testing.T) { // Reset committee cache - as we need to recalculate active validator set for each test. 
helpers.ClearCache() - got, err := helpers.ComputeWeakSubjectivityPeriod(context.Background(), genState(t, tt.valCount, tt.avgBalance), params.BeaconConfig()) + got, err := helpers.ComputeWeakSubjectivityPeriod(t.Context(), genState(t, tt.valCount, tt.avgBalance), params.BeaconConfig()) require.NoError(t, err) assert.Equal(t, tt.want, got, "valCount: %v, avgBalance: %v", tt.valCount, tt.avgBalance) }) @@ -181,7 +180,7 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) { helpers.ClearCache() sr, _, e := tt.genWsCheckpoint() - got, err := helpers.IsWithinWeakSubjectivityPeriod(context.Background(), tt.epoch, tt.genWsState(), sr, e, params.BeaconConfig()) + got, err := helpers.IsWithinWeakSubjectivityPeriod(t.Context(), tt.epoch, tt.genWsState(), sr, e, params.BeaconConfig()) if tt.wantedErr != "" { assert.Equal(t, false, got) assert.ErrorContains(t, tt.wantedErr, err) diff --git a/beacon-chain/core/signing/signing_root_test.go b/beacon-chain/core/signing/signing_root_test.go index f8c3d631d0..1f2b1639a1 100644 --- a/beacon-chain/core/signing/signing_root_test.go +++ b/beacon-chain/core/signing/signing_root_test.go @@ -2,7 +2,6 @@ package signing_test import ( "bytes" - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers" @@ -83,7 +82,7 @@ func TestSigningRoot_ComputeDomainAndSign(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { beaconState, privKeys := tt.genState(t) - idx, err := helpers.BeaconProposerIndex(context.Background(), beaconState) + idx, err := helpers.BeaconProposerIndex(t.Context(), beaconState) require.NoError(t, err) block := tt.genBlock(t, beaconState, privKeys) got, err := signing.ComputeDomainAndSign( diff --git a/beacon-chain/core/transition/altair_transition_no_verify_sig_test.go b/beacon-chain/core/transition/altair_transition_no_verify_sig_test.go index 6a17d47f26..5e6ec67d0d 100644 --- a/beacon-chain/core/transition/altair_transition_no_verify_sig_test.go +++ 
b/beacon-chain/core/transition/altair_transition_no_verify_sig_test.go @@ -1,7 +1,6 @@ package transition_test import ( - "context" "math" "testing" @@ -27,7 +26,7 @@ import ( func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) { beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 100) - syncCommittee, err := altair.NextSyncCommittee(context.Background(), beaconState) + syncCommittee, err := altair.NextSyncCommittee(t.Context(), beaconState) require.NoError(t, err) require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee)) @@ -51,11 +50,11 @@ func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) { require.NoError(t, err) require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot()))) - nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1) + nextSlotState, err := transition.ProcessSlots(t.Context(), beaconState.Copy(), beaconState.Slot()+1) require.NoError(t, err) parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot() require.NoError(t, err) - proposerIdx, err := helpers.BeaconProposerIndex(context.Background(), nextSlotState) + proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), nextSlotState) require.NoError(t, err) block := util.NewBeaconBlockAltair() block.Block.ProposerIndex = proposerIdx @@ -68,10 +67,10 @@ func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) { for i := range syncBits { syncBits[i] = 0xff } - indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState) + indices, err := altair.NextSyncCommitteeIndices(t.Context(), beaconState) require.NoError(t, err) h := beaconState.LatestBlockHeader().Copy() - prevStateRoot, err := beaconState.HashTreeRoot(context.Background()) + prevStateRoot, err := beaconState.HashTreeRoot(t.Context()) require.NoError(t, err) h.StateRoot = prevStateRoot[:] pbr, err := h.HashTreeRoot() @@ -93,7 +92,7 @@ func 
TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) { block.Block.Body.SyncAggregate = syncAggregate wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb) + stateRoot, err := transition.CalculateStateRoot(t.Context(), beaconState, wsb) require.NoError(t, err) block.Block.StateRoot = stateRoot[:] @@ -104,7 +103,7 @@ func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) { wsb, err = blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb) + set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(t.Context(), beaconState, wsb) require.NoError(t, err) verified, err := set.Verify() require.NoError(t, err) @@ -114,7 +113,7 @@ func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) { func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t *testing.T) { beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 100) - syncCommittee, err := altair.NextSyncCommittee(context.Background(), beaconState) + syncCommittee, err := altair.NextSyncCommittee(t.Context(), beaconState) require.NoError(t, err) require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee)) @@ -138,11 +137,11 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t require.NoError(t, err) require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot()))) - nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1) + nextSlotState, err := transition.ProcessSlots(t.Context(), beaconState.Copy(), beaconState.Slot()+1) require.NoError(t, err) parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot() require.NoError(t, err) - proposerIdx, err := helpers.BeaconProposerIndex(context.Background(), nextSlotState) + 
proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), nextSlotState) require.NoError(t, err) block := util.NewBeaconBlockAltair() block.Block.ProposerIndex = proposerIdx @@ -155,10 +154,10 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t for i := range syncBits { syncBits[i] = 0xff } - indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState) + indices, err := altair.NextSyncCommitteeIndices(t.Context(), beaconState) require.NoError(t, err) h := beaconState.LatestBlockHeader().Copy() - prevStateRoot, err := beaconState.HashTreeRoot(context.Background()) + prevStateRoot, err := beaconState.HashTreeRoot(t.Context()) require.NoError(t, err) h.StateRoot = prevStateRoot[:] pbr, err := h.HashTreeRoot() @@ -181,7 +180,7 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb) + stateRoot, err := transition.CalculateStateRoot(t.Context(), beaconState, wsb) require.NoError(t, err) block.Block.StateRoot = stateRoot[:] @@ -193,7 +192,7 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t block.Block.StateRoot = bytesutil.PadTo([]byte{'a'}, 32) wsb, err = blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - _, _, err = transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb) + _, _, err = transition.ExecuteStateTransitionNoVerifyAnySig(t.Context(), beaconState, wsb) require.ErrorContains(t, "could not validate state root", err) } @@ -201,7 +200,7 @@ func TestExecuteStateTransitionNoVerifyAnySig_PassesProcessingConditions(t *test beaconState, block := createFullAltairBlockWithOperations(t) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, 
wsb) + set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(t.Context(), beaconState, wsb) require.NoError(t, err) // Test Signature set verifies. verified, err := set.Verify() @@ -226,14 +225,14 @@ func TestProcessEpoch_BadBalanceAltair(t *testing.T) { epochParticipation[0] = participation assert.NoError(t, s.SetCurrentParticipationBits(epochParticipation)) assert.NoError(t, s.SetPreviousParticipationBits(epochParticipation)) - err = altair.ProcessEpoch(context.Background(), s) + err = altair.ProcessEpoch(t.Context(), s) assert.ErrorContains(t, "addition overflows", err) } func createFullAltairBlockWithOperations(t *testing.T) (state.BeaconState, *ethpb.SignedBeaconBlockAltair) { beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 32) - sCom, err := altair.NextSyncCommittee(context.Background(), beaconState) + sCom, err := altair.NextSyncCommittee(t.Context(), beaconState) assert.NoError(t, err) assert.NoError(t, beaconState.SetCurrentSyncCommittee(sCom)) tState := beaconState.Copy() diff --git a/beacon-chain/core/transition/bellatrix_transition_no_verify_sig_test.go b/beacon-chain/core/transition/bellatrix_transition_no_verify_sig_test.go index 8f48ac14ee..0f9e47d4dd 100644 --- a/beacon-chain/core/transition/bellatrix_transition_no_verify_sig_test.go +++ b/beacon-chain/core/transition/bellatrix_transition_no_verify_sig_test.go @@ -1,7 +1,6 @@ package transition_test import ( - "context" "math" "testing" @@ -29,7 +28,7 @@ import ( func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) { beaconState, privKeys := util.DeterministicGenesisStateBellatrix(t, 100) - syncCommittee, err := altair.NextSyncCommittee(context.Background(), beaconState) + syncCommittee, err := altair.NextSyncCommittee(t.Context(), beaconState) require.NoError(t, err) require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee)) @@ -53,11 +52,11 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) { require.NoError(t, 
err) require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot()))) - nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1) + nextSlotState, err := transition.ProcessSlots(t.Context(), beaconState.Copy(), beaconState.Slot()+1) require.NoError(t, err) parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot() require.NoError(t, err) - proposerIdx, err := helpers.BeaconProposerIndex(context.Background(), nextSlotState) + proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), nextSlotState) require.NoError(t, err) block := util.NewBeaconBlockBellatrix() block.Block.ProposerIndex = proposerIdx @@ -70,10 +69,10 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) { for i := range syncBits { syncBits[i] = 0xff } - indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState) + indices, err := altair.NextSyncCommitteeIndices(t.Context(), beaconState) require.NoError(t, err) h := beaconState.LatestBlockHeader().Copy() - prevStateRoot, err := beaconState.HashTreeRoot(context.Background()) + prevStateRoot, err := beaconState.HashTreeRoot(t.Context()) require.NoError(t, err) h.StateRoot = prevStateRoot[:] pbr, err := h.HashTreeRoot() @@ -95,7 +94,7 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) { block.Block.Body.SyncAggregate = syncAggregate wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb) + stateRoot, err := transition.CalculateStateRoot(t.Context(), beaconState, wsb) require.NoError(t, err) block.Block.StateRoot = stateRoot[:] @@ -106,7 +105,7 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) { wsb, err = blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb) + set, _, 
err := transition.ExecuteStateTransitionNoVerifyAnySig(t.Context(), beaconState, wsb) require.NoError(t, err) verified, err := set.Verify() require.NoError(t, err) @@ -116,7 +115,7 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) { func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t *testing.T) { beaconState, privKeys := util.DeterministicGenesisStateBellatrix(t, 100) - syncCommittee, err := altair.NextSyncCommittee(context.Background(), beaconState) + syncCommittee, err := altair.NextSyncCommittee(t.Context(), beaconState) require.NoError(t, err) require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee)) @@ -140,11 +139,11 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo require.NoError(t, err) require.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot()))) - nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1) + nextSlotState, err := transition.ProcessSlots(t.Context(), beaconState.Copy(), beaconState.Slot()+1) require.NoError(t, err) parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot() require.NoError(t, err) - proposerIdx, err := helpers.BeaconProposerIndex(context.Background(), nextSlotState) + proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), nextSlotState) require.NoError(t, err) block := util.NewBeaconBlockBellatrix() block.Block.ProposerIndex = proposerIdx @@ -157,10 +156,10 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo for i := range syncBits { syncBits[i] = 0xff } - indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState) + indices, err := altair.NextSyncCommitteeIndices(t.Context(), beaconState) require.NoError(t, err) h := beaconState.LatestBlockHeader().Copy() - prevStateRoot, err := beaconState.HashTreeRoot(context.Background()) + prevStateRoot, err := 
beaconState.HashTreeRoot(t.Context()) require.NoError(t, err) h.StateRoot = prevStateRoot[:] pbr, err := h.HashTreeRoot() @@ -183,7 +182,7 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb) + stateRoot, err := transition.CalculateStateRoot(t.Context(), beaconState, wsb) require.NoError(t, err) block.Block.StateRoot = stateRoot[:] @@ -195,7 +194,7 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo block.Block.StateRoot = bytesutil.PadTo([]byte{'a'}, 32) wsb, err = blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - _, _, err = transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb) + _, _, err = transition.ExecuteStateTransitionNoVerifyAnySig(t.Context(), beaconState, wsb) require.ErrorContains(t, "could not validate state root", err) } @@ -216,7 +215,7 @@ func TestProcessEpoch_BadBalanceBellatrix(t *testing.T) { epochParticipation[0] = participation assert.NoError(t, s.SetCurrentParticipationBits(epochParticipation)) assert.NoError(t, s.SetPreviousParticipationBits(epochParticipation)) - err = altair.ProcessEpoch(context.Background(), s) + err = altair.ProcessEpoch(t.Context(), s) assert.ErrorContains(t, "addition overflows", err) } diff --git a/beacon-chain/core/transition/benchmarks_test.go b/beacon-chain/core/transition/benchmarks_test.go index d49511aa2a..0b70b85436 100644 --- a/beacon-chain/core/transition/benchmarks_test.go +++ b/beacon-chain/core/transition/benchmarks_test.go @@ -1,7 +1,6 @@ package transition_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers" @@ -33,7 +32,7 @@ func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) { for i := 0; i < b.N; i++ { wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(b, err) - _, err = 
coreState.ExecuteStateTransition(context.Background(), cleanStates[i], wsb) + _, err = coreState.ExecuteStateTransition(b.Context(), cleanStates[i], wsb) require.NoError(b, err) } } @@ -53,19 +52,19 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) { // some attestations in block are from previous epoch currentSlot := beaconState.Slot() require.NoError(b, beaconState.SetSlot(beaconState.Slot()-params.BeaconConfig().SlotsPerEpoch)) - require.NoError(b, helpers.UpdateCommitteeCache(context.Background(), beaconState, time.CurrentEpoch(beaconState))) + require.NoError(b, helpers.UpdateCommitteeCache(b.Context(), beaconState, time.CurrentEpoch(beaconState))) require.NoError(b, beaconState.SetSlot(currentSlot)) // Run the state transition once to populate the cache. wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(b, err) - _, err = coreState.ExecuteStateTransition(context.Background(), beaconState, wsb) + _, err = coreState.ExecuteStateTransition(b.Context(), beaconState, wsb) require.NoError(b, err, "Failed to process block, benchmarks will fail") b.ResetTimer() for i := 0; i < b.N; i++ { wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(b, err) - _, err = coreState.ExecuteStateTransition(context.Background(), cleanStates[i], wsb) + _, err = coreState.ExecuteStateTransition(b.Context(), cleanStates[i], wsb) require.NoError(b, err, "Failed to process block, benchmarks will fail") } } @@ -81,14 +80,14 @@ func BenchmarkProcessEpoch_2FullEpochs(b *testing.B) { // some attestations in block are from previous epoch currentSlot := beaconState.Slot() require.NoError(b, beaconState.SetSlot(beaconState.Slot()-params.BeaconConfig().SlotsPerEpoch)) - require.NoError(b, helpers.UpdateCommitteeCache(context.Background(), beaconState, time.CurrentEpoch(beaconState))) + require.NoError(b, helpers.UpdateCommitteeCache(b.Context(), beaconState, time.CurrentEpoch(beaconState))) require.NoError(b, beaconState.SetSlot(currentSlot)) b.ResetTimer() 
for i := 0; i < b.N; i++ { // ProcessEpochPrecompute is the optimized version of process epoch. It's enabled by default // at run time. - _, err := coreState.ProcessEpochPrecompute(context.Background(), beaconState.Copy()) + _, err := coreState.ProcessEpochPrecompute(b.Context(), beaconState.Copy()) require.NoError(b, err) } } @@ -99,7 +98,7 @@ func BenchmarkHashTreeRoot_FullState(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _, err := beaconState.HashTreeRoot(context.Background()) + _, err := beaconState.HashTreeRoot(b.Context()) require.NoError(b, err) } } @@ -108,7 +107,7 @@ func BenchmarkHashTreeRootState_FullState(b *testing.B) { beaconState, err := benchmark.PreGenstateFullEpochs() require.NoError(b, err) - ctx := context.Background() + ctx := b.Context() // Hydrate the HashTreeRootState cache. _, err = beaconState.HashTreeRoot(ctx) diff --git a/beacon-chain/core/transition/skip_slot_cache_test.go b/beacon-chain/core/transition/skip_slot_cache_test.go index 064a675f20..2184b090c2 100644 --- a/beacon-chain/core/transition/skip_slot_cache_test.go +++ b/beacon-chain/core/transition/skip_slot_cache_test.go @@ -1,7 +1,6 @@ package transition_test import ( - "context" "sync" "testing" @@ -34,12 +33,12 @@ func TestSkipSlotCache_OK(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - executedState, err := transition.ExecuteStateTransition(context.Background(), originalState, wsb) + executedState, err := transition.ExecuteStateTransition(t.Context(), originalState, wsb) require.NoError(t, err, "Could not run state transition") require.Equal(t, true, executedState.Version() == version.Phase0) wsb, err = blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - bState, err = transition.ExecuteStateTransition(context.Background(), bState, wsb) + bState, err = transition.ExecuteStateTransition(t.Context(), bState, wsb) require.NoError(t, err, "Could not process state transition") assert.DeepEqual(t, 
originalState.ToProto(), bState.ToProto(), "Skipped slots cache leads to different states") @@ -63,7 +62,7 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - executedState, err := transition.ExecuteStateTransition(context.Background(), originalState, wsb) + executedState, err := transition.ExecuteStateTransition(t.Context(), originalState, wsb) require.NoError(t, err, "Could not run state transition") require.Equal(t, true, executedState.Version() == version.Phase0) @@ -78,7 +77,7 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) { blk.Signature = signature.Marshal() wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - s1, err = transition.ExecuteStateTransition(context.Background(), originalState.Copy(), wsb) + s1, err = transition.ExecuteStateTransition(t.Context(), originalState.Copy(), wsb) require.NoError(t, err, "Could not run state transition") } @@ -91,13 +90,13 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) { blk.Signature = signature.Marshal() wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - s0, err = transition.ExecuteStateTransition(context.Background(), originalState.Copy(), wsb) + s0, err = transition.ExecuteStateTransition(t.Context(), originalState.Copy(), wsb) require.NoError(t, err, "Could not run state transition") } - r1, err := s1.HashTreeRoot(context.Background()) + r1, err := s1.HashTreeRoot(t.Context()) require.NoError(t, err) - r2, err := s0.HashTreeRoot(context.Background()) + r2, err := s0.HashTreeRoot(t.Context()) require.NoError(t, err) if r1 == r2 { t.Fatalf("need different starting states, got: %x", r1) @@ -120,24 +119,24 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) { } problemSlot := s1.Slot() + 2 - expected1, err := transition.ProcessSlots(context.Background(), s1.Copy(), problemSlot) + expected1, err := transition.ProcessSlots(t.Context(), s1.Copy(), problemSlot) 
require.NoError(t, err) - expectedRoot1, err := expected1.HashTreeRoot(context.Background()) + expectedRoot1, err := expected1.HashTreeRoot(t.Context()) require.NoError(t, err) t.Logf("chain 1 (even i) expected root %x at slot %d", expectedRoot1[:], problemSlot) - tmp1, err := transition.ProcessSlots(context.Background(), expected1.Copy(), problemSlot+1) + tmp1, err := transition.ProcessSlots(t.Context(), expected1.Copy(), problemSlot+1) require.NoError(t, err) gotRoot := tmp1.StateRoots()[problemSlot] require.DeepEqual(t, expectedRoot1[:], gotRoot, "State roots for chain 1 are bad, expected root doesn't match") - expected2, err := transition.ProcessSlots(context.Background(), s0.Copy(), problemSlot) + expected2, err := transition.ProcessSlots(t.Context(), s0.Copy(), problemSlot) require.NoError(t, err) - expectedRoot2, err := expected2.HashTreeRoot(context.Background()) + expectedRoot2, err := expected2.HashTreeRoot(t.Context()) require.NoError(t, err) t.Logf("chain 2 (odd i) expected root %x at slot %d", expectedRoot2[:], problemSlot) - tmp2, err := transition.ProcessSlots(context.Background(), expected2.Copy(), problemSlot+1) + tmp2, err := transition.ProcessSlots(t.Context(), expected2.Copy(), problemSlot+1) require.NoError(t, err) gotRoot = tmp2.StateRoots()[problemSlot] require.DeepEqual(t, expectedRoot2[:], gotRoot, "State roots for chain 2 are bad, expected root doesn't match") @@ -147,7 +146,7 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) { step := func(i int, setup state.BeaconState) { // go at least 1 past problemSlot, to ensure problem slot state root is available - outState, err := transition.ProcessSlots(context.Background(), setup, problemSlot.Add(1+uint64(i))) // keep increasing, to hit and extend the cache + outState, err := transition.ProcessSlots(t.Context(), setup, problemSlot.Add(1+uint64(i))) // keep increasing, to hit and extend the cache require.NoError(t, err, "Could not process state transition") roots := outState.StateRoots() 
gotRoot := roots[problemSlot] diff --git a/beacon-chain/core/transition/state_fuzz_test.go b/beacon-chain/core/transition/state_fuzz_test.go index feb4adcd83..7419bf853f 100644 --- a/beacon-chain/core/transition/state_fuzz_test.go +++ b/beacon-chain/core/transition/state_fuzz_test.go @@ -1,7 +1,6 @@ package transition import ( - "context" "testing" state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native" @@ -22,7 +21,7 @@ func TestGenesisBeaconState_1000(t *testing.T) { fuzzer.Fuzz(&deposits) fuzzer.Fuzz(&genesisTime) fuzzer.Fuzz(eth1Data) - gs, err := GenesisBeaconState(context.Background(), deposits, genesisTime, eth1Data) + gs, err := GenesisBeaconState(t.Context(), deposits, genesisTime, eth1Data) if err != nil { if gs != nil { t.Fatalf("Genesis state should be nil on err. found: %v on error: %v for inputs deposit: %v "+ diff --git a/beacon-chain/core/transition/state_test.go b/beacon-chain/core/transition/state_test.go index f0d92d9a51..1420873211 100644 --- a/beacon-chain/core/transition/state_test.go +++ b/beacon-chain/core/transition/state_test.go @@ -1,7 +1,6 @@ package transition_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition" @@ -36,7 +35,7 @@ func TestGenesisBeaconState_OK(t *testing.T) { require.NoError(t, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - newState, err := transition.GenesisBeaconState(context.Background(), deposits, genesisTime, eth1Data) + newState, err := transition.GenesisBeaconState(t.Context(), deposits, genesisTime, eth1Data) require.NoError(t, err, "Could not execute GenesisBeaconState") // Misc fields checks. 
@@ -93,9 +92,9 @@ func TestGenesisBeaconState_OK(t *testing.T) { func TestGenesisState_HashEquality(t *testing.T) { deposits, _, err := util.DeterministicDepositsAndKeys(100) require.NoError(t, err) - state1, err := transition.GenesisBeaconState(context.Background(), deposits, 0, ðpb.Eth1Data{BlockHash: make([]byte, 32)}) + state1, err := transition.GenesisBeaconState(t.Context(), deposits, 0, ðpb.Eth1Data{BlockHash: make([]byte, 32)}) require.NoError(t, err) - state, err := transition.GenesisBeaconState(context.Background(), deposits, 0, ðpb.Eth1Data{BlockHash: make([]byte, 32)}) + state, err := transition.GenesisBeaconState(t.Context(), deposits, 0, ðpb.Eth1Data{BlockHash: make([]byte, 32)}) require.NoError(t, err) pbState1, err := state_native.ProtobufBeaconStatePhase0(state1.ToProto()) @@ -113,7 +112,7 @@ func TestGenesisState_HashEquality(t *testing.T) { } func TestGenesisState_InitializesLatestBlockHashes(t *testing.T) { - s, err := transition.GenesisBeaconState(context.Background(), nil, 0, ðpb.Eth1Data{}) + s, err := transition.GenesisBeaconState(t.Context(), nil, 0, ðpb.Eth1Data{}) require.NoError(t, err) got, want := uint64(len(s.BlockRoots())), uint64(params.BeaconConfig().SlotsPerHistoricalRoot) assert.Equal(t, want, got, "Wrong number of recent block hashes") @@ -127,6 +126,6 @@ func TestGenesisState_InitializesLatestBlockHashes(t *testing.T) { } func TestGenesisState_FailsWithoutEth1data(t *testing.T) { - _, err := transition.GenesisBeaconState(context.Background(), nil, 0, nil) + _, err := transition.GenesisBeaconState(t.Context(), nil, 0, nil) assert.ErrorContains(t, "no eth1data provided for genesis state", err) } diff --git a/beacon-chain/core/transition/trailing_slot_state_cache_test.go b/beacon-chain/core/transition/trailing_slot_state_cache_test.go index d2cd7f2968..886fab18b2 100644 --- a/beacon-chain/core/transition/trailing_slot_state_cache_test.go +++ b/beacon-chain/core/transition/trailing_slot_state_cache_test.go @@ -1,7 +1,6 @@ package 
transition_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition" @@ -12,7 +11,7 @@ import ( ) func TestTrailingSlotState_RoundTrip(t *testing.T) { - ctx := context.Background() + ctx := t.Context() r := []byte{'a'} s := transition.NextSlotState(r, 0) require.Equal(t, nil, s) @@ -36,7 +35,7 @@ func TestTrailingSlotState_RoundTrip(t *testing.T) { } func TestTrailingSlotState_StateAdvancedBeyondRequest(t *testing.T) { - ctx := context.Background() + ctx := t.Context() r := []byte{'a'} s := transition.NextSlotState(r, 0) require.Equal(t, nil, s) diff --git a/beacon-chain/core/transition/transition_fuzz_test.go b/beacon-chain/core/transition/transition_fuzz_test.go index 397e469e47..4642ab942b 100644 --- a/beacon-chain/core/transition/transition_fuzz_test.go +++ b/beacon-chain/core/transition/transition_fuzz_test.go @@ -1,7 +1,6 @@ package transition import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time" @@ -16,7 +15,7 @@ import ( func TestFuzzExecuteStateTransition_1000(t *testing.T) { SkipSlotCache.Disable() defer SkipSlotCache.Enable() - ctx := context.Background() + ctx := t.Context() state, err := state_native.InitializeFromProtoUnsafePhase0(ðpb.BeaconState{}) require.NoError(t, err) sb := ðpb.SignedBeaconBlock{} @@ -40,7 +39,7 @@ func TestFuzzExecuteStateTransition_1000(t *testing.T) { func TestFuzzCalculateStateRoot_1000(t *testing.T) { SkipSlotCache.Disable() defer SkipSlotCache.Enable() - ctx := context.Background() + ctx := t.Context() state, err := state_native.InitializeFromProtoUnsafePhase0(ðpb.BeaconState{}) require.NoError(t, err) sb := ðpb.SignedBeaconBlock{} @@ -64,7 +63,7 @@ func TestFuzzCalculateStateRoot_1000(t *testing.T) { func TestFuzzProcessSlot_1000(t *testing.T) { SkipSlotCache.Disable() defer SkipSlotCache.Enable() - ctx := context.Background() + ctx := t.Context() state, err := state_native.InitializeFromProtoUnsafePhase0(ðpb.BeaconState{}) require.NoError(t, err) 
fuzzer := fuzz.NewWithSeed(0) @@ -81,7 +80,7 @@ func TestFuzzProcessSlot_1000(t *testing.T) { func TestFuzzProcessSlots_1000(t *testing.T) { SkipSlotCache.Disable() defer SkipSlotCache.Enable() - ctx := context.Background() + ctx := t.Context() state, err := state_native.InitializeFromProtoUnsafePhase0(ðpb.BeaconState{}) require.NoError(t, err) slot := primitives.Slot(0) @@ -100,7 +99,7 @@ func TestFuzzProcessSlots_1000(t *testing.T) { func TestFuzzprocessOperationsNoVerify_1000(t *testing.T) { SkipSlotCache.Disable() defer SkipSlotCache.Enable() - ctx := context.Background() + ctx := t.Context() state, err := state_native.InitializeFromProtoUnsafePhase0(ðpb.BeaconState{}) require.NoError(t, err) bb := ðpb.BeaconBlock{} @@ -137,7 +136,7 @@ func TestFuzzverifyOperationLengths_10000(t *testing.T) { } wb, err := blocks.NewBeaconBlock(bb) require.NoError(t, err) - _, err = VerifyOperationLengths(context.Background(), state, wb) + _, err = VerifyOperationLengths(t.Context(), state, wb) _ = err } } @@ -158,7 +157,7 @@ func TestFuzzCanProcessEpoch_10000(t *testing.T) { func TestFuzzProcessEpochPrecompute_1000(t *testing.T) { SkipSlotCache.Disable() defer SkipSlotCache.Enable() - ctx := context.Background() + ctx := t.Context() state, err := state_native.InitializeFromProtoUnsafePhase0(ðpb.BeaconState{}) require.NoError(t, err) fuzzer := fuzz.NewWithSeed(0) @@ -175,7 +174,7 @@ func TestFuzzProcessEpochPrecompute_1000(t *testing.T) { func TestFuzzProcessBlockForStateRoot_1000(t *testing.T) { SkipSlotCache.Disable() defer SkipSlotCache.Enable() - ctx := context.Background() + ctx := t.Context() state, err := state_native.InitializeFromProtoUnsafePhase0(ðpb.BeaconState{}) require.NoError(t, err) sb := ðpb.SignedBeaconBlock{} diff --git a/beacon-chain/core/transition/transition_no_verify_sig_test.go b/beacon-chain/core/transition/transition_no_verify_sig_test.go index 98bd9e85c2..6aea1c03de 100644 --- a/beacon-chain/core/transition/transition_no_verify_sig_test.go +++ 
b/beacon-chain/core/transition/transition_no_verify_sig_test.go @@ -1,7 +1,6 @@ package transition_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers" @@ -39,11 +38,11 @@ func TestExecuteStateTransitionNoVerify_FullProcess(t *testing.T) { require.NoError(t, err) require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1)) - nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1) + nextSlotState, err := transition.ProcessSlots(t.Context(), beaconState.Copy(), beaconState.Slot()+1) require.NoError(t, err) parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot() require.NoError(t, err) - proposerIdx, err := helpers.BeaconProposerIndex(context.Background(), nextSlotState) + proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), nextSlotState) require.NoError(t, err) block := util.NewBeaconBlock() block.Block.ProposerIndex = proposerIdx @@ -54,7 +53,7 @@ func TestExecuteStateTransitionNoVerify_FullProcess(t *testing.T) { wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb) + stateRoot, err := transition.CalculateStateRoot(t.Context(), beaconState, wsb) require.NoError(t, err) block.Block.StateRoot = stateRoot[:] @@ -65,7 +64,7 @@ func TestExecuteStateTransitionNoVerify_FullProcess(t *testing.T) { wsb, err = blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb) + set, _, err := transition.ExecuteStateTransitionNoVerifyAnySig(t.Context(), beaconState, wsb) assert.NoError(t, err) verified, err := set.Verify() assert.NoError(t, err) @@ -95,11 +94,11 @@ func TestExecuteStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t *test require.NoError(t, err) require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1)) - nextSlotState, err := 
transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1) + nextSlotState, err := transition.ProcessSlots(t.Context(), beaconState.Copy(), beaconState.Slot()+1) require.NoError(t, err) parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot() require.NoError(t, err) - proposerIdx, err := helpers.BeaconProposerIndex(context.Background(), nextSlotState) + proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), nextSlotState) require.NoError(t, err) block := util.NewBeaconBlock() block.Block.ProposerIndex = proposerIdx @@ -110,7 +109,7 @@ func TestExecuteStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t *test wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb) + stateRoot, err := transition.CalculateStateRoot(t.Context(), beaconState, wsb) require.NoError(t, err) block.Block.StateRoot = stateRoot[:] @@ -122,7 +121,7 @@ func TestExecuteStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t *test block.Block.StateRoot = bytesutil.PadTo([]byte{'a'}, 32) wsb, err = blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - _, _, err = transition.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb) + _, _, err = transition.ExecuteStateTransitionNoVerifyAnySig(t.Context(), beaconState, wsb) require.ErrorContains(t, "could not validate state root", err) } @@ -130,7 +129,7 @@ func TestProcessBlockNoVerify_PassesProcessingConditions(t *testing.T) { beaconState, block, _, _, _ := createFullBlockWithOperations(t) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - set, _, err := transition.ProcessBlockNoVerifyAnySig(context.Background(), beaconState, wsb) + set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb) require.NoError(t, err) // Test Signature set verifies. 
verified, err := set.Verify() @@ -142,9 +141,9 @@ func TestProcessBlockNoVerifyAnySigAltair_OK(t *testing.T) { beaconState, block := createFullAltairBlockWithOperations(t) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - beaconState, err = transition.ProcessSlots(context.Background(), beaconState, wsb.Block().Slot()) + beaconState, err = transition.ProcessSlots(t.Context(), beaconState, wsb.Block().Slot()) require.NoError(t, err) - set, _, err := transition.ProcessBlockNoVerifyAnySig(context.Background(), beaconState, wsb) + set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb) require.NoError(t, err) verified, err := set.Verify() require.NoError(t, err) @@ -155,7 +154,7 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) { beaconState, block, _, _, _ := createFullBlockWithOperations(t) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - set, _, err := transition.ProcessBlockNoVerifyAnySig(context.Background(), beaconState, wsb) + set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb) require.NoError(t, err) assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up") assert.Equal(t, "block signature", set.Descriptions[0]) @@ -167,9 +166,9 @@ func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) { beaconState, block := createFullAltairBlockWithOperations(t) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - beaconState, err = transition.ProcessSlots(context.Background(), beaconState, wsb.Block().Slot()) + beaconState, err = transition.ProcessSlots(t.Context(), beaconState, wsb.Block().Slot()) require.NoError(t, err) - _, err = transition.ProcessOperationsNoVerifyAttsSigs(context.Background(), beaconState, wsb.Block()) + _, err = transition.ProcessOperationsNoVerifyAttsSigs(t.Context(), beaconState, wsb.Block()) require.NoError(t, err) } @@ -177,9 +176,9 @@ func 
TestProcessOperationsNoVerifyAttsSigsBellatrix_OK(t *testing.T) { beaconState, block := createFullBellatrixBlockWithOperations(t) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - beaconState, err = transition.ProcessSlots(context.Background(), beaconState, wsb.Block().Slot()) + beaconState, err = transition.ProcessSlots(t.Context(), beaconState, wsb.Block().Slot()) require.NoError(t, err) - _, err = transition.ProcessOperationsNoVerifyAttsSigs(context.Background(), beaconState, wsb.Block()) + _, err = transition.ProcessOperationsNoVerifyAttsSigs(t.Context(), beaconState, wsb.Block()) require.NoError(t, err) } @@ -187,9 +186,9 @@ func TestProcessOperationsNoVerifyAttsSigsCapella_OK(t *testing.T) { beaconState, block := createFullCapellaBlockWithOperations(t) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - beaconState, err = transition.ProcessSlots(context.Background(), beaconState, wsb.Block().Slot()) + beaconState, err = transition.ProcessSlots(t.Context(), beaconState, wsb.Block().Slot()) require.NoError(t, err) - _, err = transition.ProcessOperationsNoVerifyAttsSigs(context.Background(), beaconState, wsb.Block()) + _, err = transition.ProcessOperationsNoVerifyAttsSigs(t.Context(), beaconState, wsb.Block()) require.NoError(t, err) } @@ -197,7 +196,7 @@ func TestCalculateStateRootAltair_OK(t *testing.T) { beaconState, block := createFullAltairBlockWithOperations(t) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - r, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb) + r, err := transition.CalculateStateRoot(t.Context(), beaconState, wsb) require.NoError(t, err) require.DeepNotEqual(t, params.BeaconConfig().ZeroHash, r) } @@ -207,6 +206,6 @@ func TestProcessBlockDifferentVersion(t *testing.T) { _, block := createFullAltairBlockWithOperations(t) wsb, err := blocks.NewSignedBeaconBlock(block) // Altair block require.NoError(t, err) - _, _, err = 
transition.ProcessBlockNoVerifyAnySig(context.Background(), beaconState, wsb) + _, _, err = transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb) require.ErrorContains(t, "state and block are different version. 0 != 1", err) } diff --git a/beacon-chain/core/transition/transition_test.go b/beacon-chain/core/transition/transition_test.go index f4124e4cd4..42a24d5e98 100644 --- a/beacon-chain/core/transition/transition_test.go +++ b/beacon-chain/core/transition/transition_test.go @@ -1,7 +1,6 @@ package transition_test import ( - "context" "fmt" "testing" @@ -46,7 +45,7 @@ func TestExecuteStateTransition_IncorrectSlot(t *testing.T) { want := "expected state.slot" wsb, err := consensusblocks.NewSignedBeaconBlock(block) require.NoError(t, err) - _, err = transition.ExecuteStateTransition(context.Background(), beaconState, wsb) + _, err = transition.ExecuteStateTransition(t.Context(), beaconState, wsb) assert.ErrorContains(t, want, err) } @@ -76,11 +75,11 @@ func TestExecuteStateTransition_FullProcess(t *testing.T) { require.NoError(t, err) require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1)) - nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1) + nextSlotState, err := transition.ProcessSlots(t.Context(), beaconState.Copy(), beaconState.Slot()+1) require.NoError(t, err) parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot() require.NoError(t, err) - proposerIdx, err := helpers.BeaconProposerIndex(context.Background(), nextSlotState) + proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), nextSlotState) require.NoError(t, err) block := util.NewBeaconBlock() block.Block.ProposerIndex = proposerIdx @@ -91,7 +90,7 @@ func TestExecuteStateTransition_FullProcess(t *testing.T) { wsb, err := consensusblocks.NewSignedBeaconBlock(block) require.NoError(t, err) - stateRoot, err := transition.CalculateStateRoot(context.Background(), beaconState, wsb) + stateRoot, err := 
transition.CalculateStateRoot(t.Context(), beaconState, wsb) require.NoError(t, err) block.Block.StateRoot = stateRoot[:] @@ -102,7 +101,7 @@ func TestExecuteStateTransition_FullProcess(t *testing.T) { wsb, err = consensusblocks.NewSignedBeaconBlock(block) require.NoError(t, err) - beaconState, err = transition.ExecuteStateTransition(context.Background(), beaconState, wsb) + beaconState, err = transition.ExecuteStateTransition(t.Context(), beaconState, wsb) require.NoError(t, err) assert.Equal(t, params.BeaconConfig().SlotsPerEpoch, beaconState.Slot(), "Unexpected Slot number") @@ -191,7 +190,7 @@ func TestProcessBlock_IncorrectProcessExits(t *testing.T) { require.NoError(t, beaconState.AppendCurrentEpochAttestations(ðpb.PendingAttestation{})) wsb, err := consensusblocks.NewSignedBeaconBlock(block) require.NoError(t, err) - _, err = transition.VerifyOperationLengths(context.Background(), beaconState, wsb.Block()) + _, err = transition.VerifyOperationLengths(t.Context(), beaconState, wsb.Block()) wanted := "number of voluntary exits (17) in block body exceeds allowed threshold of 16" assert.ErrorContains(t, wanted, err) } @@ -309,7 +308,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState, AggregationBits: aggBits, }) - committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, blockAtt.Data.Slot, blockAtt.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, blockAtt.Data.Slot, blockAtt.Data.CommitteeIndex) assert.NoError(t, err) attestingIndices, err := attestation.AttestingIndices(blockAtt, committee) require.NoError(t, err) @@ -333,7 +332,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState, require.NoError(t, err) header := beaconState.LatestBlockHeader() - prevStateRoot, err := beaconState.HashTreeRoot(context.Background()) + prevStateRoot, err := beaconState.HashTreeRoot(t.Context()) require.NoError(t, err) header.StateRoot = prevStateRoot[:] 
require.NoError(t, beaconState.SetLatestBlockHeader(header)) @@ -343,7 +342,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState, require.NoError(t, copied.SetSlot(beaconState.Slot()+1)) randaoReveal, err := util.RandaoReveal(copied, currentEpoch, privKeys) require.NoError(t, err) - proposerIndex, err := helpers.BeaconProposerIndex(context.Background(), copied) + proposerIndex, err := helpers.BeaconProposerIndex(t.Context(), copied) require.NoError(t, err) block := util.HydrateSignedBeaconBlock(ðpb.SignedBeaconBlock{ Block: ðpb.BeaconBlock{ @@ -394,7 +393,7 @@ func TestProcessEpochPrecompute_CanProcess(t *testing.T) { } s, err := state_native.InitializeFromProtoPhase0(base) require.NoError(t, err) - newState, err := transition.ProcessEpochPrecompute(context.Background(), s) + newState, err := transition.ProcessEpochPrecompute(t.Context(), s) require.NoError(t, err) assert.Equal(t, uint64(0), newState.Slashings()[2], "Unexpected slashed balance") } @@ -414,7 +413,7 @@ func TestProcessBlock_OverMaxProposerSlashings(t *testing.T) { require.NoError(t, err) wsb, err := consensusblocks.NewSignedBeaconBlock(b) require.NoError(t, err) - _, err = transition.VerifyOperationLengths(context.Background(), s, wsb.Block()) + _, err = transition.VerifyOperationLengths(t.Context(), s, wsb.Block()) assert.ErrorContains(t, want, err) } @@ -433,7 +432,7 @@ func TestProcessBlock_OverMaxAttesterSlashings(t *testing.T) { require.NoError(t, err) wsb, err := consensusblocks.NewSignedBeaconBlock(b) require.NoError(t, err) - _, err = transition.VerifyOperationLengths(context.Background(), s, wsb.Block()) + _, err = transition.VerifyOperationLengths(t.Context(), s, wsb.Block()) assert.ErrorContains(t, want, err) } @@ -452,7 +451,7 @@ func TestProcessBlock_OverMaxAttesterSlashingsElectra(t *testing.T) { require.NoError(t, err) wsb, err := consensusblocks.NewSignedBeaconBlock(b) require.NoError(t, err) - _, err = transition.VerifyOperationLengths(context.Background(), s, 
wsb.Block()) + _, err = transition.VerifyOperationLengths(t.Context(), s, wsb.Block()) assert.ErrorContains(t, want, err) } @@ -470,7 +469,7 @@ func TestProcessBlock_OverMaxAttestations(t *testing.T) { require.NoError(t, err) wsb, err := consensusblocks.NewSignedBeaconBlock(b) require.NoError(t, err) - _, err = transition.VerifyOperationLengths(context.Background(), s, wsb.Block()) + _, err = transition.VerifyOperationLengths(t.Context(), s, wsb.Block()) assert.ErrorContains(t, want, err) } @@ -488,7 +487,7 @@ func TestProcessBlock_OverMaxAttestationsElectra(t *testing.T) { require.NoError(t, err) wsb, err := consensusblocks.NewSignedBeaconBlock(b) require.NoError(t, err) - _, err = transition.VerifyOperationLengths(context.Background(), s, wsb.Block()) + _, err = transition.VerifyOperationLengths(t.Context(), s, wsb.Block()) assert.ErrorContains(t, want, err) } @@ -507,7 +506,7 @@ func TestProcessBlock_OverMaxVoluntaryExits(t *testing.T) { require.NoError(t, err) wsb, err := consensusblocks.NewSignedBeaconBlock(b) require.NoError(t, err) - _, err = transition.VerifyOperationLengths(context.Background(), s, wsb.Block()) + _, err = transition.VerifyOperationLengths(t.Context(), s, wsb.Block()) assert.ErrorContains(t, want, err) } @@ -529,7 +528,7 @@ func TestProcessBlock_IncorrectDeposits(t *testing.T) { s.Eth1Data().DepositCount-s.Eth1DepositIndex(), len(b.Block.Body.Deposits)) wsb, err := consensusblocks.NewSignedBeaconBlock(b) require.NoError(t, err) - _, err = transition.VerifyOperationLengths(context.Background(), s, wsb.Block()) + _, err = transition.VerifyOperationLengths(t.Context(), s, wsb.Block()) assert.ErrorContains(t, want, err) } @@ -538,7 +537,7 @@ func TestProcessSlots_SameSlotAsParentState(t *testing.T) { parentState, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: slot}) require.NoError(t, err) - _, err = transition.ProcessSlots(context.Background(), parentState, slot) + _, err = transition.ProcessSlots(t.Context(), parentState, 
slot) assert.ErrorContains(t, "expected state.slot 2 < slot 2", err) } @@ -547,7 +546,7 @@ func TestProcessSlots_LowerSlotAsParentState(t *testing.T) { parentState, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: slot}) require.NoError(t, err) - _, err = transition.ProcessSlots(context.Background(), parentState, slot-1) + _, err = transition.ProcessSlots(t.Context(), parentState, slot-1) assert.ErrorContains(t, "expected state.slot 2 < slot 1", err) } @@ -559,7 +558,7 @@ func TestProcessSlots_ThroughAltairEpoch(t *testing.T) { params.OverrideBeaconConfig(conf) st, _ := util.DeterministicGenesisState(t, params.BeaconConfig().MaxValidatorsPerCommittee) - st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerEpoch*10) + st, err := transition.ProcessSlots(t.Context(), st, params.BeaconConfig().SlotsPerEpoch*10) require.NoError(t, err) require.Equal(t, version.Altair, st.Version()) @@ -595,7 +594,7 @@ func TestProcessSlots_OnlyAltairEpoch(t *testing.T) { st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee) require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*6)) - st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerEpoch*10) + st, err := transition.ProcessSlots(t.Context(), st, params.BeaconConfig().SlotsPerEpoch*10) require.NoError(t, err) require.Equal(t, version.Altair, st.Version()) @@ -632,7 +631,7 @@ func TestProcessSlots_OnlyBellatrixEpoch(t *testing.T) { st, _ := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().MaxValidatorsPerCommittee) require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*6)) require.Equal(t, version.Bellatrix, st.Version()) - st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerEpoch*10) + st, err := transition.ProcessSlots(t.Context(), st, params.BeaconConfig().SlotsPerEpoch*10) require.NoError(t, err) require.Equal(t, 
version.Bellatrix, st.Version()) @@ -667,7 +666,7 @@ func TestProcessSlots_ThroughBellatrixEpoch(t *testing.T) { params.OverrideBeaconConfig(conf) st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee) - st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerEpoch*10) + st, err := transition.ProcessSlots(t.Context(), st, params.BeaconConfig().SlotsPerEpoch*10) require.NoError(t, err) require.Equal(t, version.Bellatrix, st.Version()) @@ -682,7 +681,7 @@ func TestProcessSlots_ThroughDenebEpoch(t *testing.T) { params.OverrideBeaconConfig(conf) st, _ := util.DeterministicGenesisStateCapella(t, params.BeaconConfig().MaxValidatorsPerCommittee) - st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerEpoch*10) + st, err := transition.ProcessSlots(t.Context(), st, params.BeaconConfig().SlotsPerEpoch*10) require.NoError(t, err) require.Equal(t, version.Deneb, st.Version()) require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot()) @@ -696,7 +695,7 @@ func TestProcessSlots_ThroughElectraEpoch(t *testing.T) { params.OverrideBeaconConfig(conf) st, _ := util.DeterministicGenesisStateDeneb(t, params.BeaconConfig().MaxValidatorsPerCommittee) - st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerEpoch*10) + st, err := transition.ProcessSlots(t.Context(), st, params.BeaconConfig().SlotsPerEpoch*10) require.NoError(t, err) require.Equal(t, version.Electra, st.Version()) require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot()) @@ -710,7 +709,7 @@ func TestProcessSlots_ThroughFuluEpoch(t *testing.T) { params.OverrideBeaconConfig(conf) st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee) - st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerEpoch*10) + st, err := transition.ProcessSlots(t.Context(), st, 
params.BeaconConfig().SlotsPerEpoch*10) require.NoError(t, err) require.Equal(t, version.Fulu, st.Version()) require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot()) @@ -719,13 +718,13 @@ func TestProcessSlots_ThroughFuluEpoch(t *testing.T) { func TestProcessSlotsUsingNextSlotCache(t *testing.T) { s, _ := util.DeterministicGenesisState(t, 1) r := []byte{'a'} - s, err := transition.ProcessSlotsUsingNextSlotCache(context.Background(), s, r, 5) + s, err := transition.ProcessSlotsUsingNextSlotCache(t.Context(), s, r, 5) require.NoError(t, err) require.Equal(t, primitives.Slot(5), s.Slot()) } func TestProcessSlotsConditionally(t *testing.T) { - ctx := context.Background() + ctx := t.Context() s, _ := util.DeterministicGenesisState(t, 1) t.Run("target slot below current slot", func(t *testing.T) { @@ -757,7 +756,7 @@ func BenchmarkProcessSlots_Capella(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1) + st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1) if err != nil { b.Fatalf("Failed to process slot %v", err) } @@ -771,7 +770,7 @@ func BenchmarkProcessSlots_Deneb(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1) + st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1) if err != nil { b.Fatalf("Failed to process slot %v", err) } @@ -785,7 +784,7 @@ func BenchmarkProcessSlots_Electra(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1) + st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1) if err != nil { b.Fatalf("Failed to process slot %v", err) } diff --git a/beacon-chain/core/validators/validator_test.go b/beacon-chain/core/validators/validator_test.go index 29be14154f..c1dfb366e8 100644 --- a/beacon-chain/core/validators/validator_test.go +++ 
b/beacon-chain/core/validators/validator_test.go @@ -1,7 +1,6 @@ package validators_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers" @@ -50,7 +49,7 @@ func TestInitiateValidatorExit_AlreadyExited(t *testing.T) { }} state, err := state_native.InitializeFromProtoPhase0(base) require.NoError(t, err) - newState, epoch, err := validators.InitiateValidatorExit(context.Background(), state, 0, 199, 1) + newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, 0, 199, 1) require.ErrorIs(t, err, validators.ErrValidatorAlreadyExited) require.Equal(t, exitEpoch, epoch) v, err := newState.ValidatorAtIndex(0) @@ -69,7 +68,7 @@ func TestInitiateValidatorExit_ProperExit(t *testing.T) { }} state, err := state_native.InitializeFromProtoPhase0(base) require.NoError(t, err) - newState, epoch, err := validators.InitiateValidatorExit(context.Background(), state, idx, exitedEpoch+2, 1) + newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitedEpoch+2, 1) require.NoError(t, err) require.Equal(t, exitedEpoch+2, epoch) v, err := newState.ValidatorAtIndex(idx) @@ -89,7 +88,7 @@ func TestInitiateValidatorExit_ChurnOverflow(t *testing.T) { }} state, err := state_native.InitializeFromProtoPhase0(base) require.NoError(t, err) - newState, epoch, err := validators.InitiateValidatorExit(context.Background(), state, idx, exitedEpoch+2, 4) + newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitedEpoch+2, 4) require.NoError(t, err) require.Equal(t, exitedEpoch+3, epoch) @@ -111,7 +110,7 @@ func TestInitiateValidatorExit_WithdrawalOverflows(t *testing.T) { }} state, err := state_native.InitializeFromProtoPhase0(base) require.NoError(t, err) - _, _, err = validators.InitiateValidatorExit(context.Background(), state, 1, params.BeaconConfig().FarFutureEpoch-1, 1) + _, _, err = validators.InitiateValidatorExit(t.Context(), state, 1, params.BeaconConfig().FarFutureEpoch-1, 1) 
require.ErrorContains(t, "addition overflows", err) } @@ -147,7 +146,7 @@ func TestInitiateValidatorExit_ProperExit_Electra(t *testing.T) { require.NoError(t, err) require.Equal(t, primitives.Gwei(0), ebtc) - newState, epoch, err := validators.InitiateValidatorExit(context.Background(), state, idx, 0, 0) // exitQueueEpoch and churn are not used in electra + newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, idx, 0, 0) // exitQueueEpoch and churn are not used in electra require.NoError(t, err) // Expect that the exit epoch is the next available epoch with max seed lookahead. @@ -187,11 +186,11 @@ func TestSlashValidator_OK(t *testing.T) { slashedIdx := primitives.ValidatorIndex(3) - proposer, err := helpers.BeaconProposerIndex(context.Background(), state) + proposer, err := helpers.BeaconProposerIndex(t.Context(), state) require.NoError(t, err, "Could not get proposer") proposerBal, err := state.BalanceAtIndex(proposer) require.NoError(t, err) - slashedState, err := validators.SlashValidator(context.Background(), state, slashedIdx) + slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx) require.NoError(t, err, "Could not slash validator") require.Equal(t, true, slashedState.Version() == version.Phase0) @@ -241,11 +240,11 @@ func TestSlashValidator_Electra(t *testing.T) { slashedIdx := primitives.ValidatorIndex(3) - proposer, err := helpers.BeaconProposerIndex(context.Background(), state) + proposer, err := helpers.BeaconProposerIndex(t.Context(), state) require.NoError(t, err, "Could not get proposer") proposerBal, err := state.BalanceAtIndex(proposer) require.NoError(t, err) - slashedState, err := validators.SlashValidator(context.Background(), state, slashedIdx) + slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx) require.NoError(t, err, "Could not slash validator") require.Equal(t, true, slashedState.Version() == version.Electra) diff --git a/beacon-chain/das/availability_blobs_test.go 
b/beacon-chain/das/availability_blobs_test.go index 3cf6444c46..982de65ec3 100644 --- a/beacon-chain/das/availability_blobs_test.go +++ b/beacon-chain/das/availability_blobs_test.go @@ -113,7 +113,7 @@ func Test_commitmentsToCheck(t *testing.T) { } func TestLazilyPersistent_Missing(t *testing.T) { - ctx := context.Background() + ctx := t.Context() store := filesystem.NewEphemeralBlobStorage(t) blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3) @@ -140,7 +140,7 @@ func TestLazilyPersistent_Missing(t *testing.T) { } func TestLazilyPersistent_Mismatch(t *testing.T) { - ctx := context.Background() + ctx := t.Context() store := filesystem.NewEphemeralBlobStorage(t) blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3) diff --git a/beacon-chain/das/availability_columns_test.go b/beacon-chain/das/availability_columns_test.go index 3e5eabb041..87f4d2eb9d 100644 --- a/beacon-chain/das/availability_columns_test.go +++ b/beacon-chain/das/availability_columns_test.go @@ -104,7 +104,7 @@ func TestIsDataAvailable(t *testing.T) { return &mockDataColumnsVerifier{t: t, dataColumnSidecars: dataColumnSidecars} } - ctx := context.Background() + ctx := t.Context() t.Run("without commitments", func(t *testing.T) { signedBeaconBlockFulu := util.NewBeaconBlockFulu() diff --git a/beacon-chain/db/filesystem/data_column_test.go b/beacon-chain/db/filesystem/data_column_test.go index 14e5d40064..947bcc9d4c 100644 --- a/beacon-chain/db/filesystem/data_column_test.go +++ b/beacon-chain/db/filesystem/data_column_test.go @@ -1,7 +1,6 @@ package filesystem import ( - "context" "encoding/binary" "os" "testing" @@ -15,7 +14,7 @@ import ( ) func TestNewDataColumnStorage(t *testing.T) { - ctx := context.Background() + ctx := t.Context() t.Run("No base path", func(t *testing.T) { _, err := NewDataColumnStorage(ctx) @@ -33,7 +32,7 @@ func TestNewDataColumnStorage(t *testing.T) { func TestWarmCache(t *testing.T) { storage, err := 
NewDataColumnStorage( - context.Background(), + t.Context(), WithDataColumnBasePath(t.TempDir()), WithDataColumnRetentionEpochs(10_000), ) @@ -618,7 +617,7 @@ func TestStorageIndicesSet(t *testing.T) { func TestPrune(t *testing.T) { t.Run(("nothing to prune"), func(t *testing.T) { dir := t.TempDir() - dataColumnStorage, err := NewDataColumnStorage(context.Background(), WithDataColumnBasePath(dir)) + dataColumnStorage, err := NewDataColumnStorage(t.Context(), WithDataColumnBasePath(dir)) require.NoError(t, err) dataColumnStorage.prune() @@ -673,7 +672,7 @@ func TestPrune(t *testing.T) { ) dir := t.TempDir() - dataColumnStorage, err := NewDataColumnStorage(context.Background(), WithDataColumnBasePath(dir), WithDataColumnRetentionEpochs(10_000)) + dataColumnStorage, err := NewDataColumnStorage(t.Context(), WithDataColumnBasePath(dir), WithDataColumnRetentionEpochs(10_000)) require.NoError(t, err) err = dataColumnStorage.Save(verifiedRoDataColumnSidecars) diff --git a/beacon-chain/db/kv/archived_point_test.go b/beacon-chain/db/kv/archived_point_test.go index a0e26a9f5e..afc160e115 100644 --- a/beacon-chain/db/kv/archived_point_test.go +++ b/beacon-chain/db/kv/archived_point_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/consensus-types/primitives" @@ -12,7 +11,7 @@ import ( func TestArchivedPointIndexRoot_CanSaveRetrieve(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() i1 := primitives.Slot(100) r1 := [32]byte{'A'} @@ -28,7 +27,7 @@ func TestArchivedPointIndexRoot_CanSaveRetrieve(t *testing.T) { func TestLastArchivedPoint_CanRetrieve(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() i, err := db.LastArchivedSlot(ctx) require.NoError(t, err) assert.Equal(t, primitives.Slot(0), i, "Did not get correct index") diff --git a/beacon-chain/db/kv/backfill_test.go b/beacon-chain/db/kv/backfill_test.go index 0eecde9a70..8878c9f303 100644 --- 
a/beacon-chain/db/kv/backfill_test.go +++ b/beacon-chain/db/kv/backfill_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/encoding/bytesutil" @@ -24,7 +23,7 @@ func TestBackfillRoundtrip(t *testing.T) { require.DeepEqual(t, b.LowRoot, ub.LowRoot) require.DeepEqual(t, b.LowParentRoot, ub.LowParentRoot) - ctx := context.Background() + ctx := t.Context() require.NoError(t, db.SaveBackfillStatus(ctx, b)) dbub, err := db.BackfillStatus(ctx) require.NoError(t, err) diff --git a/beacon-chain/db/kv/backup_test.go b/beacon-chain/db/kv/backup_test.go index ce28c96b7f..b8fdcf7aac 100644 --- a/beacon-chain/db/kv/backup_test.go +++ b/beacon-chain/db/kv/backup_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "os" "path/filepath" "testing" @@ -13,9 +12,9 @@ import ( ) func TestStore_Backup(t *testing.T) { - db, err := NewKVStore(context.Background(), t.TempDir()) + db, err := NewKVStore(t.Context(), t.TempDir()) require.NoError(t, err, "Failed to instantiate DB") - ctx := context.Background() + ctx := t.Context() head := util.NewBeaconBlock() head.Block.Slot = 5000 @@ -53,9 +52,9 @@ func TestStore_Backup(t *testing.T) { } func TestStore_BackupMultipleBuckets(t *testing.T) { - db, err := NewKVStore(context.Background(), t.TempDir()) + db, err := NewKVStore(t.Context(), t.TempDir()) require.NoError(t, err, "Failed to instantiate DB") - ctx := context.Background() + ctx := t.Context() startSlot := primitives.Slot(5000) diff --git a/beacon-chain/db/kv/blocks_test.go b/beacon-chain/db/kv/blocks_test.go index 97a670720f..c104d9ff48 100644 --- a/beacon-chain/db/kv/blocks_test.go +++ b/beacon-chain/db/kv/blocks_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "fmt" "testing" "time" @@ -156,7 +155,7 @@ var blockTests = []struct { func TestStore_SaveBlock_NoDuplicates(t *testing.T) { BlockCacheSize = 1 slot := primitives.Slot(20) - ctx := context.Background() + ctx := t.Context() for _, tt := range blockTests { t.Run(tt.name, 
func(t *testing.T) { @@ -187,7 +186,7 @@ func TestStore_SaveBlock_NoDuplicates(t *testing.T) { } func TestStore_BlocksCRUD(t *testing.T) { - ctx := context.Background() + ctx := t.Context() for _, tt := range blockTests { t.Run(tt.name, func(t *testing.T) { @@ -252,7 +251,7 @@ func TestStore_BlocksHandleZeroCase(t *testing.T) { for _, tt := range blockTests { t.Run(tt.name, func(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() numBlocks := 10 totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, numBlocks) for i := 0; i < len(totalBlocks); i++ { @@ -275,7 +274,7 @@ func TestStore_BlocksHandleInvalidEndSlot(t *testing.T) { for _, tt := range blockTests { t.Run(tt.name, func(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() numBlocks := 10 totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, numBlocks) // Save blocks from slot 1 onwards. @@ -302,7 +301,7 @@ func TestStore_BlocksHandleInvalidEndSlot(t *testing.T) { func TestStore_DeleteBlock(t *testing.T) { slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch) db := setupDB(t) - ctx := context.Background() + ctx := t.Context() require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot)) blks := makeBlocks(t, 0, slotsPerEpoch*4, genesisBlockRoot) @@ -349,7 +348,7 @@ func TestStore_DeleteBlock(t *testing.T) { func TestStore_DeleteJustifiedBlock(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() b := util.NewBeaconBlock() b.Block.Slot = 1 root, err := b.Block.HashTreeRoot() @@ -369,7 +368,7 @@ func TestStore_DeleteJustifiedBlock(t *testing.T) { func TestStore_DeleteFinalizedBlock(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() b := util.NewBeaconBlock() root, err := b.Block.HashTreeRoot() require.NoError(t, err) @@ -389,7 +388,7 @@ func TestStore_DeleteFinalizedBlock(t *testing.T) { func TestStore_HistoricalDataBeforeSlot(t *testing.T) { slotsPerEpoch := 
uint64(params.BeaconConfig().SlotsPerEpoch) - ctx := context.Background() + ctx := t.Context() tests := []struct { name string @@ -608,7 +607,7 @@ func TestStore_HistoricalDataBeforeSlot(t *testing.T) { func TestStore_GenesisBlock(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() genesisBlock := util.NewBeaconBlock() genesisBlock.Block.ParentRoot = bytesutil.PadTo([]byte{1, 2, 3}, 32) blockRoot, err := genesisBlock.Block.HashTreeRoot() @@ -628,7 +627,7 @@ func TestStore_BlocksCRUD_NoCache(t *testing.T) { for _, tt := range blockTests { t.Run(tt.name, func(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() blk, err := tt.newBlock(primitives.Slot(20), bytesutil.PadTo([]byte{1, 2, 3}, 32)) require.NoError(t, err) blockRoot, err := blk.Block().HashTreeRoot() @@ -677,7 +676,7 @@ func TestStore_Blocks_FiltersCorrectly(t *testing.T) { b7, b8, } - ctx := context.Background() + ctx := t.Context() require.NoError(t, db.SaveBlocks(ctx, blocks)) tests := []struct { @@ -775,7 +774,7 @@ func testSlotSlice(start, end primitives.Slot) []primitives.Slot { func TestCleanupMissingBlockIndices(t *testing.T) { for _, tt := range blockTests { t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := setupDB(t) chain := testBlockChain(t, tt.newBlock, testSlotSlice(1, 10), nil) require.NoError(t, db.SaveBlocks(ctx, chain)) @@ -805,7 +804,7 @@ func TestCleanupMissingBlockIndices(t *testing.T) { func TestCleanupMissingForkedBlockIndices(t *testing.T) { for _, tt := range blockTests[0:1] { t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := setupDB(t) chain := testBlockChain(t, tt.newBlock, testSlotSlice(1, 10), nil) @@ -861,7 +860,7 @@ func TestCleanupMissingForkedBlockIndices(t *testing.T) { func TestStore_Blocks_VerifyBlockRoots(t *testing.T) { for _, tt := range blockTests { t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() + ctx 
:= t.Context() db := setupDB(t) b1, err := tt.newBlock(primitives.Slot(1), nil) require.NoError(t, err) @@ -894,7 +893,7 @@ func TestStore_Blocks_Retrieve_SlotRange(t *testing.T) { require.NoError(t, err) totalBlocks[i] = b } - ctx := context.Background() + ctx := t.Context() require.NoError(t, db.SaveBlocks(ctx, totalBlocks)) retrieved, _, err := db.Blocks(ctx, filters.NewFilter().SetStartSlot(100).SetEndSlot(399)) require.NoError(t, err) @@ -914,7 +913,7 @@ func TestStore_Blocks_Retrieve_Epoch(t *testing.T) { require.NoError(t, err) totalBlocks[i] = b } - ctx := context.Background() + ctx := t.Context() require.NoError(t, db.SaveBlocks(ctx, totalBlocks)) retrieved, _, err := db.Blocks(ctx, filters.NewFilter().SetStartEpoch(5).SetEndEpoch(6)) require.NoError(t, err) @@ -939,7 +938,7 @@ func TestStore_Blocks_Retrieve_SlotRangeWithStep(t *testing.T) { totalBlocks[i] = b } const step = 2 - ctx := context.Background() + ctx := t.Context() require.NoError(t, db.SaveBlocks(ctx, totalBlocks)) retrieved, _, err := db.Blocks(ctx, filters.NewFilter().SetStartSlot(100).SetEndSlot(399).SetSlotStep(step)) require.NoError(t, err) @@ -955,7 +954,7 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) { for _, tt := range blockTests { t.Run(tt.name, func(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() block1, err := tt.newBlock(primitives.Slot(1), nil) require.NoError(t, err) @@ -1029,7 +1028,7 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) { for _, tt := range blockTests { t.Run(tt.name, func(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() genesisBlock, err := tt.newBlock(primitives.Slot(0), nil) require.NoError(t, err) @@ -1099,7 +1098,7 @@ func TestStore_SaveBlocks_HasCachedBlocks(t *testing.T) { for _, tt := range blockTests { t.Run(tt.name, func(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() b := make([]interfaces.ReadOnlySignedBeaconBlock, 500) for 
i := 0; i < 500; i++ { @@ -1123,7 +1122,7 @@ func TestStore_SaveBlocks_HasRootsMatched(t *testing.T) { for _, tt := range blockTests { t.Run(tt.name, func(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() b := make([]interfaces.ReadOnlySignedBeaconBlock, 500) for i := 0; i < 500; i++ { @@ -1152,7 +1151,7 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) { for _, tt := range blockTests { t.Run(tt.name, func(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() b1, err := tt.newBlock(primitives.Slot(20), nil) require.NoError(t, err) @@ -1233,7 +1232,7 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) { func TestStore_FeeRecipientByValidatorID(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() ids := []primitives.ValidatorIndex{0, 0, 0} feeRecipients := []common.Address{{}, {}, {}, {}} require.ErrorContains(t, "validatorIDs and feeRecipients must be the same length", db.SaveFeeRecipientsByValidatorIDs(ctx, ids, feeRecipients)) @@ -1273,7 +1272,7 @@ func TestStore_FeeRecipientByValidatorID(t *testing.T) { func TestStore_RegistrationsByValidatorID(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() ids := []primitives.ValidatorIndex{0, 0, 0} regs := []*ethpb.ValidatorRegistrationV1{{}, {}, {}, {}} require.ErrorContains(t, "ids and registrations must be the same length", db.SaveRegistrationsByValidatorIDs(ctx, ids, regs)) diff --git a/beacon-chain/db/kv/checkpoint_test.go b/beacon-chain/db/kv/checkpoint_test.go index 0a54799c9f..48c87142b5 100644 --- a/beacon-chain/db/kv/checkpoint_test.go +++ b/beacon-chain/db/kv/checkpoint_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/config/params" @@ -16,7 +15,7 @@ import ( func TestStore_JustifiedCheckpoint_CanSaveRetrieve(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() root := 
bytesutil.ToBytes32([]byte{'A'}) cp := &ethpb.Checkpoint{ Epoch: 10, @@ -35,7 +34,7 @@ func TestStore_JustifiedCheckpoint_CanSaveRetrieve(t *testing.T) { func TestStore_JustifiedCheckpoint_Recover(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() blk := util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{}) r, err := blk.Block.HashTreeRoot() require.NoError(t, err) @@ -54,7 +53,7 @@ func TestStore_JustifiedCheckpoint_Recover(t *testing.T) { func TestStore_FinalizedCheckpoint_CanSaveRetrieve(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() genesis := bytesutil.ToBytes32([]byte{'G', 'E', 'N', 'E', 'S', 'I', 'S'}) require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesis)) @@ -90,7 +89,7 @@ func TestStore_FinalizedCheckpoint_CanSaveRetrieve(t *testing.T) { func TestStore_FinalizedCheckpoint_Recover(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() blk := util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{}) r, err := blk.Block.HashTreeRoot() require.NoError(t, err) @@ -110,7 +109,7 @@ func TestStore_FinalizedCheckpoint_Recover(t *testing.T) { func TestStore_JustifiedCheckpoint_DefaultIsZeroHash(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() cp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} retrieved, err := db.JustifiedCheckpoint(ctx) @@ -120,7 +119,7 @@ func TestStore_JustifiedCheckpoint_DefaultIsZeroHash(t *testing.T) { func TestStore_FinalizedCheckpoint_DefaultIsZeroHash(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() cp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} retrieved, err := db.FinalizedCheckpoint(ctx) @@ -130,7 +129,7 @@ func TestStore_FinalizedCheckpoint_DefaultIsZeroHash(t *testing.T) { func TestStore_FinalizedCheckpoint_StateMustExist(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() cp := &ethpb.Checkpoint{ Epoch: 5, Root:
[]byte{'B'}, diff --git a/beacon-chain/db/kv/deposit_contract_test.go b/beacon-chain/db/kv/deposit_contract_test.go index 1904bbe47f..e5b2f6b60c 100644 --- a/beacon-chain/db/kv/deposit_contract_test.go +++ b/beacon-chain/db/kv/deposit_contract_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/testing/assert" @@ -11,7 +10,7 @@ import ( func TestStore_DepositContract(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() contractAddress := common.Address{1, 2, 3} retrieved, err := db.DepositContractAddress(ctx) require.NoError(t, err) diff --git a/beacon-chain/db/kv/encoding_test.go b/beacon-chain/db/kv/encoding_test.go index bb7701f832..cfcf04e964 100644 --- a/beacon-chain/db/kv/encoding_test.go +++ b/beacon-chain/db/kv/encoding_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "testing" testpb "github.com/OffchainLabs/prysm/v6/proto/testing" @@ -12,6 +11,6 @@ func Test_encode_handlesNilFromFunction(t *testing.T) { foo := func() *testpb.Puzzle { return nil } - _, err := encode(context.Background(), foo()) + _, err := encode(t.Context(), foo()) require.ErrorContains(t, "cannot encode nil message", err) } diff --git a/beacon-chain/db/kv/execution_chain_test.go b/beacon-chain/db/kv/execution_chain_test.go index 7be6ec7dbf..d7e8bdf9e0 100644 --- a/beacon-chain/db/kv/execution_chain_test.go +++ b/beacon-chain/db/kv/execution_chain_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "testing" v2 "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1" @@ -27,7 +26,7 @@ func TestStore_SavePowchainData(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { store := setupDB(t) - if err := store.SaveExecutionChainData(context.Background(), tt.args.data); (err != nil) != tt.wantErr { + if err := store.SaveExecutionChainData(t.Context(), tt.args.data); (err != nil) != tt.wantErr { t.Errorf("SaveExecutionChainData() error = %v, wantErr %v", err, tt.wantErr) } }) diff 
--git a/beacon-chain/db/kv/finalized_block_roots_test.go b/beacon-chain/db/kv/finalized_block_roots_test.go index 9722014066..dc4aabc612 100644 --- a/beacon-chain/db/kv/finalized_block_roots_test.go +++ b/beacon-chain/db/kv/finalized_block_roots_test.go @@ -23,7 +23,7 @@ var genesisBlockRoot = bytesutil.ToBytes32([]byte{'G', 'E', 'N', 'E', 'S', 'I', func TestStore_IsFinalizedBlock(t *testing.T) { slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch) db := setupDB(t) - ctx := context.Background() + ctx := t.Context() require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot)) @@ -59,7 +59,7 @@ func TestStore_IsFinalizedBlock(t *testing.T) { func TestStore_IsFinalizedBlockGenesis(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() blk := util.NewBeaconBlock() blk.Block.Slot = 0 @@ -94,7 +94,7 @@ func TestStore_IsFinalized_ForkEdgeCase(t *testing.T) { blocks2 := makeBlocks(t, slotsPerEpoch*2, slotsPerEpoch, bytesutil.ToBytes32(sszRootOrDie(t, blocks1[len(blocks1)-1]))) db := setupDB(t) - ctx := context.Background() + ctx := t.Context() require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot)) require.NoError(t, db.SaveBlocks(ctx, blocks0)) @@ -142,7 +142,7 @@ func TestStore_IsFinalized_ForkEdgeCase(t *testing.T) { func TestStore_IsFinalizedChildBlock(t *testing.T) { slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch) - ctx := context.Background() + ctx := t.Context() eval := func(t testing.TB, ctx context.Context, db *Store, blks []interfaces.ReadOnlySignedBeaconBlock) { require.NoError(t, db.SaveBlocks(ctx, blks)) @@ -239,7 +239,7 @@ func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte) func TestStore_BackfillFinalizedIndexSingle(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() // we're making 4 blocks so we can test an element without a valid child at the end blks, err := consensusblocks.NewROBlockSlice(makeBlocks(t, 0, 4, [32]byte{})) 
require.NoError(t, err) @@ -283,7 +283,7 @@ func TestStore_BackfillFinalizedIndexSingle(t *testing.T) { func TestStore_BackfillFinalizedIndex(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() require.ErrorIs(t, db.BackfillFinalizedIndex(ctx, []consensusblocks.ROBlock{}, [32]byte{}), errEmptyBlockSlice) blks, err := consensusblocks.NewROBlockSlice(makeBlocks(t, 0, 66, [32]byte{})) require.NoError(t, err) diff --git a/beacon-chain/db/kv/genesis_test.go b/beacon-chain/db/kv/genesis_test.go index 38825b2903..3ed25f3b1f 100644 --- a/beacon-chain/db/kv/genesis_test.go +++ b/beacon-chain/db/kv/genesis_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "encoding/hex" "os" "testing" @@ -16,7 +15,7 @@ import ( ) func TestStore_SaveGenesisData(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := setupDB(t) gs, err := util.NewBeaconState() @@ -28,7 +27,7 @@ func TestStore_SaveGenesisData(t *testing.T) { } func testGenesisDataSaved(t *testing.T, db iface.Database) { - ctx := context.Background() + ctx := t.Context() gb, err := db.GenesisBlock(ctx) require.NoError(t, err) @@ -77,7 +76,7 @@ func TestLoadCapellaFromFile(t *testing.T) { require.NoError(t, err) db := setupDB(t) - require.NoError(t, db.LoadGenesis(context.Background(), sb)) + require.NoError(t, db.LoadGenesis(t.Context(), sb)) testGenesisDataSaved(t, db) } @@ -119,12 +118,12 @@ func TestLoadGenesisFromFile(t *testing.T) { require.NoError(t, err) db := setupDB(t) - require.NoError(t, db.LoadGenesis(context.Background(), sb)) + require.NoError(t, db.LoadGenesis(t.Context(), sb)) testGenesisDataSaved(t, db) // Loading the same genesis again should not throw an error require.NoError(t, err) - require.NoError(t, db.LoadGenesis(context.Background(), sb)) + require.NoError(t, db.LoadGenesis(t.Context(), sb)) testGenesisDataSaved(t, db) } @@ -139,7 +138,7 @@ func TestLoadGenesisFromFile_mismatchedForkVersion(t *testing.T) { // Loading a genesis with the wrong fork 
version as beacon config should throw an error. db := setupDB(t) - assert.ErrorContains(t, "not found in any known fork choice schedule", db.LoadGenesis(context.Background(), sb)) + assert.ErrorContains(t, "not found in any known fork choice schedule", db.LoadGenesis(t.Context(), sb)) } func TestEnsureEmbeddedGenesis(t *testing.T) { @@ -153,7 +152,7 @@ func TestEnsureEmbeddedGenesis(t *testing.T) { require.NoError(t, undo()) }() - ctx := context.Background() + ctx := t.Context() db := setupDB(t) gb, err := db.GenesisBlock(ctx) diff --git a/beacon-chain/db/kv/kv_test.go b/beacon-chain/db/kv/kv_test.go index db485fc9c6..c5eb010740 100644 --- a/beacon-chain/db/kv/kv_test.go +++ b/beacon-chain/db/kv/kv_test.go @@ -16,16 +16,19 @@ import ( // setupDB instantiates and returns a Store instance. func setupDB(t testing.TB) *Store { - db, err := NewKVStore(context.Background(), t.TempDir()) + db, err := NewKVStore(t.Context(), t.TempDir()) require.NoError(t, err, "Failed to instantiate DB") t.Cleanup(func() { - require.NoError(t, db.Close(), "Failed to close database") + err := db.Close() + if err != context.Canceled { + require.NoError(t, err, "Failed to close database") + } }) return db } func Test_setupBlockStorageType(t *testing.T) { - ctx := context.Background() + ctx := t.Context() t.Run("fresh database with feature enabled to store full blocks should store full blocks", func(t *testing.T) { resetFn := features.InitWithReset(&features.Flags{ SaveFullExecutionPayloads: true, diff --git a/beacon-chain/db/kv/lightclient_test.go b/beacon-chain/db/kv/lightclient_test.go index e045ed0d40..4c194d1b3c 100644 --- a/beacon-chain/db/kv/lightclient_test.go +++ b/beacon-chain/db/kv/lightclient_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "fmt" "math/rand" "testing" @@ -200,7 +199,7 @@ func TestStore_LightClientUpdate_CanSaveRetrieve(t *testing.T) { params.OverrideBeaconConfig(cfg) db := setupDB(t) - ctx := context.Background() + ctx := t.Context() t.Run("Altair", func(t 
*testing.T) { update, err := createUpdate(t, version.Altair) @@ -262,7 +261,7 @@ func TestStore_LightClientUpdate_CanSaveRetrieve(t *testing.T) { func TestStore_LightClientUpdates_canRetrieveRange(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() updates := make([]interfaces.LightClientUpdate, 0, 3) for i := 1; i <= 3; i++ { update, err := createUpdate(t, version.Altair) @@ -287,7 +286,7 @@ func TestStore_LightClientUpdates_canRetrieveRange(t *testing.T) { func TestStore_LightClientUpdate_EndPeriodSmallerThanStartPeriod(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() updates := make([]interfaces.LightClientUpdate, 0, 3) for i := 1; i <= 3; i++ { update, err := createUpdate(t, version.Altair) @@ -310,7 +309,7 @@ func TestStore_LightClientUpdate_EndPeriodSmallerThanStartPeriod(t *testing.T) { func TestStore_LightClientUpdate_EndPeriodEqualToStartPeriod(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() updates := make([]interfaces.LightClientUpdate, 0, 3) for i := 1; i <= 3; i++ { update, err := createUpdate(t, version.Altair) @@ -332,7 +331,7 @@ func TestStore_LightClientUpdate_EndPeriodEqualToStartPeriod(t *testing.T) { func TestStore_LightClientUpdate_StartPeriodBeforeFirstUpdate(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() updates := make([]interfaces.LightClientUpdate, 0, 3) for i := 1; i <= 3; i++ { update, err := createUpdate(t, version.Altair) @@ -356,7 +355,7 @@ func TestStore_LightClientUpdate_StartPeriodBeforeFirstUpdate(t *testing.T) { func TestStore_LightClientUpdate_EndPeriodAfterLastUpdate(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() updates := make([]interfaces.LightClientUpdate, 0, 3) for i := 1; i <= 3; i++ { update, err := createUpdate(t, version.Altair) @@ -380,7 +379,7 @@ func TestStore_LightClientUpdate_EndPeriodAfterLastUpdate(t *testing.T) { func 
TestStore_LightClientUpdate_PartialUpdates(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() updates := make([]interfaces.LightClientUpdate, 0, 3) for i := 1; i <= 3; i++ { update, err := createUpdate(t, version.Altair) @@ -404,7 +403,7 @@ func TestStore_LightClientUpdate_PartialUpdates(t *testing.T) { func TestStore_LightClientUpdate_MissingPeriods_SimpleData(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() updates := make([]interfaces.LightClientUpdate, 0, 4) for i := 1; i <= 4; i++ { update, err := createUpdate(t, version.Altair) @@ -449,7 +448,7 @@ func TestStore_LightClientUpdate_MissingPeriods_SimpleData(t *testing.T) { func TestStore_LightClientUpdate_EmptyDB(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() // Retrieve the updates retrievedUpdates, err := db.LightClientUpdates(ctx, 1, 3) @@ -459,7 +458,7 @@ func TestStore_LightClientUpdate_EmptyDB(t *testing.T) { func TestStore_LightClientUpdate_RetrieveMissingPeriodDistributed(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() updates := make([]interfaces.LightClientUpdate, 0, 5) for i := 1; i <= 5; i++ { update, err := createUpdate(t, version.Altair) @@ -592,7 +591,7 @@ func TestStore_LightClientBootstrap_CanSaveRetrieve(t *testing.T) { params.OverrideBeaconConfig(cfg) db := setupDB(t) - ctx := context.Background() + ctx := t.Context() t.Run("Nil", func(t *testing.T) { retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("NilBlockRoot")) @@ -696,7 +695,7 @@ func TestStore_LightClientBootstrap_MultipleBootstrapsWithSameSyncCommittee(t *t params.OverrideBeaconConfig(cfg) db := setupDB(t) - ctx := context.Background() + ctx := t.Context() bootstrap1, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().AltairForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch))) require.NoError(t, err) @@ -758,7 +757,7 @@ func 
TestStore_LightClientBootstrap_MultipleBootstrapsWithDifferentSyncCommittee params.OverrideBeaconConfig(cfg) db := setupDB(t) - ctx := context.Background() + ctx := t.Context() bootstrap1, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().AltairForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch))) require.NoError(t, err) diff --git a/beacon-chain/db/kv/migration_archived_index_test.go b/beacon-chain/db/kv/migration_archived_index_test.go index 888a1276c2..87c2f359b4 100644 --- a/beacon-chain/db/kv/migration_archived_index_test.go +++ b/beacon-chain/db/kv/migration_archived_index_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/encoding/bytesutil" @@ -51,7 +50,7 @@ func Test_migrateArchivedIndex(t *testing.T) { } sb := util.NewBeaconBlock() sb.Block.Slot = 2048 - b, err := encode(context.Background(), sb) + b, err := encode(t.Context(), sb) if err != nil { return err } @@ -95,7 +94,7 @@ func Test_migrateArchivedIndex(t *testing.T) { t.Run(tt.name, func(t *testing.T) { db := setupDB(t).db tt.setup(t, db) - assert.NoError(t, migrateArchivedIndex(context.Background(), db), "migrateArchivedIndex(tx) error") + assert.NoError(t, migrateArchivedIndex(t.Context(), db), "migrateArchivedIndex(tx) error") tt.eval(t, db) }) } diff --git a/beacon-chain/db/kv/migration_block_slot_index_test.go b/beacon-chain/db/kv/migration_block_slot_index_test.go index ff87902005..f38e3d50a4 100644 --- a/beacon-chain/db/kv/migration_block_slot_index_test.go +++ b/beacon-chain/db/kv/migration_block_slot_index_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/encoding/bytesutil" @@ -58,7 +57,7 @@ func Test_migrateBlockSlotIndex(t *testing.T) { t.Run(tt.name, func(t *testing.T) { db := setupDB(t).db tt.setup(t, db) - assert.NoError(t, migrateBlockSlotIndex(context.Background(), db), "migrateBlockSlotIndex(tx) error") + assert.NoError(t, 
migrateBlockSlotIndex(t.Context(), db), "migrateBlockSlotIndex(tx) error") tt.eval(t, db) }) } diff --git a/beacon-chain/db/kv/migration_state_validators_test.go b/beacon-chain/db/kv/migration_state_validators_test.go index fb4f176c7c..5fb902355a 100644 --- a/beacon-chain/db/kv/migration_state_validators_test.go +++ b/beacon-chain/db/kv/migration_state_validators_test.go @@ -2,7 +2,6 @@ package kv import ( "bytes" - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/state" @@ -81,7 +80,7 @@ func Test_migrateStateValidators(t *testing.T) { assert.NoError(t, err) assert.NoError(t, st.SetSlot(101)) assert.NoError(t, st.SetValidators(newValidators)) - assert.NoError(t, dbStore.SaveState(context.Background(), st, blockRoot)) + assert.NoError(t, dbStore.SaveState(t.Context(), st, blockRoot)) assert.NoError(t, err) // now check if this newly saved state followed the migrated code path @@ -136,7 +135,7 @@ func Test_migrateStateValidators(t *testing.T) { // check if the migration worked blockRoot := [32]byte{'A'} - rcvdState, err := dbStore.State(context.Background(), blockRoot) + rcvdState, err := dbStore.State(t.Context(), blockRoot) assert.NoError(t, err) require.DeepSSZEqual(t, rcvdState.ToProtoUnsafe(), state.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching") @@ -191,7 +190,7 @@ func Test_migrateStateValidators(t *testing.T) { assert.NoError(t, err) assert.NoError(t, st.SetSlot(100)) assert.NoError(t, st.SetValidators(vals)) - assert.NoError(t, dbStore.SaveState(context.Background(), st, blockRoot)) + assert.NoError(t, dbStore.SaveState(t.Context(), st, blockRoot)) assert.NoError(t, err) // enable historical state representation flag to test this @@ -201,7 +200,7 @@ func Test_migrateStateValidators(t *testing.T) { defer resetCfg() tt.setup(t, dbStore, st, vals) - assert.NoError(t, migrateStateValidators(context.Background(), dbStore.db), "migrateArchivedIndex(tx) error") + assert.NoError(t, 
migrateStateValidators(t.Context(), dbStore.db), "migrateArchivedIndex(tx) error") tt.eval(t, dbStore, st, vals) }) } @@ -239,7 +238,7 @@ func Test_migrateAltairStateValidators(t *testing.T) { // check if the migration worked blockRoot := [32]byte{'A'} - rcvdState, err := dbStore.State(context.Background(), blockRoot) + rcvdState, err := dbStore.State(t.Context(), blockRoot) assert.NoError(t, err) require.DeepSSZEqual(t, rcvdState.ToProtoUnsafe(), state.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching") @@ -299,7 +298,7 @@ func Test_migrateAltairStateValidators(t *testing.T) { require.NoError(t, err) assert.NoError(t, st.SetSlot(100)) assert.NoError(t, st.SetValidators(vals)) - assert.NoError(t, dbStore.SaveState(context.Background(), st, blockRoot)) + assert.NoError(t, dbStore.SaveState(t.Context(), st, blockRoot)) // enable historical state representation flag to test this resetCfg := features.InitWithReset(&features.Flags{ @@ -308,7 +307,7 @@ func Test_migrateAltairStateValidators(t *testing.T) { defer resetCfg() tt.setup(t, dbStore, st, vals) - assert.NoError(t, migrateStateValidators(context.Background(), dbStore.db), "migrateArchivedIndex(tx) error") + assert.NoError(t, migrateStateValidators(t.Context(), dbStore.db), "migrateArchivedIndex(tx) error") tt.eval(t, dbStore, st, vals) }) } @@ -346,7 +345,7 @@ func Test_migrateBellatrixStateValidators(t *testing.T) { // check if the migration worked blockRoot := [32]byte{'A'} - rcvdState, err := dbStore.State(context.Background(), blockRoot) + rcvdState, err := dbStore.State(t.Context(), blockRoot) assert.NoError(t, err) require.DeepSSZEqual(t, rcvdState.ToProtoUnsafe(), state.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching") @@ -406,7 +405,7 @@ func Test_migrateBellatrixStateValidators(t *testing.T) { require.NoError(t, err) assert.NoError(t, st.SetSlot(100)) assert.NoError(t, st.SetValidators(vals)) - assert.NoError(t, 
dbStore.SaveState(context.Background(), st, blockRoot)) + assert.NoError(t, dbStore.SaveState(t.Context(), st, blockRoot)) // enable historical state representation flag to test this resetCfg := features.InitWithReset(&features.Flags{ @@ -415,7 +414,7 @@ func Test_migrateBellatrixStateValidators(t *testing.T) { defer resetCfg() tt.setup(t, dbStore, st, vals) - assert.NoError(t, migrateStateValidators(context.Background(), dbStore.db), "migrateArchivedIndex(tx) error") + assert.NoError(t, migrateStateValidators(t.Context(), dbStore.db), "migrateArchivedIndex(tx) error") tt.eval(t, dbStore, st, vals) }) } @@ -453,7 +452,7 @@ func Test_migrateCapellaStateValidators(t *testing.T) { // check if the migration worked blockRoot := [32]byte{'A'} - rcvdState, err := dbStore.State(context.Background(), blockRoot) + rcvdState, err := dbStore.State(t.Context(), blockRoot) assert.NoError(t, err) require.DeepSSZEqual(t, rcvdState.ToProtoUnsafe(), state.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching") @@ -513,7 +512,7 @@ func Test_migrateCapellaStateValidators(t *testing.T) { require.NoError(t, err) assert.NoError(t, st.SetSlot(100)) assert.NoError(t, st.SetValidators(vals)) - assert.NoError(t, dbStore.SaveState(context.Background(), st, blockRoot)) + assert.NoError(t, dbStore.SaveState(t.Context(), st, blockRoot)) // enable historical state representation flag to test this resetCfg := features.InitWithReset(&features.Flags{ @@ -522,7 +521,7 @@ func Test_migrateCapellaStateValidators(t *testing.T) { defer resetCfg() tt.setup(t, dbStore, st, vals) - assert.NoError(t, migrateStateValidators(context.Background(), dbStore.db), "migrateArchivedIndex(tx) error") + assert.NoError(t, migrateStateValidators(t.Context(), dbStore.db), "migrateArchivedIndex(tx) error") tt.eval(t, dbStore, st, vals) }) } diff --git a/beacon-chain/db/kv/state_summary_test.go b/beacon-chain/db/kv/state_summary_test.go index bf96ab3d43..b3a207a3a2 100644 --- 
a/beacon-chain/db/kv/state_summary_test.go +++ b/beacon-chain/db/kv/state_summary_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/consensus-types/primitives" @@ -13,7 +12,7 @@ import ( func TestStateSummary_CanSaveRetrieve(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() r1 := bytesutil.ToBytes32([]byte{'A'}) r2 := bytesutil.ToBytes32([]byte{'B'}) s1 := ðpb.StateSummary{Slot: 1, Root: r1[:]} @@ -48,24 +47,24 @@ func TestStateSummary_CacheToDB(t *testing.T) { summaries[i] = ðpb.StateSummary{Slot: primitives.Slot(i), Root: bytesutil.PadTo(bytesutil.Uint64ToBytesLittleEndian(uint64(i)), 32)} } - require.NoError(t, db.SaveStateSummaries(context.Background(), summaries)) + require.NoError(t, db.SaveStateSummaries(t.Context(), summaries)) require.Equal(t, db.stateSummaryCache.len(), stateSummaryCachePruneCount-1) - require.NoError(t, db.SaveStateSummary(context.Background(), ðpb.StateSummary{Slot: 1000, Root: []byte{'a', 'b'}})) + require.NoError(t, db.SaveStateSummary(t.Context(), ðpb.StateSummary{Slot: 1000, Root: []byte{'a', 'b'}})) require.Equal(t, db.stateSummaryCache.len(), stateSummaryCachePruneCount) - require.NoError(t, db.SaveStateSummary(context.Background(), ðpb.StateSummary{Slot: 1001, Root: []byte{'c', 'd'}})) + require.NoError(t, db.SaveStateSummary(t.Context(), ðpb.StateSummary{Slot: 1001, Root: []byte{'c', 'd'}})) require.Equal(t, db.stateSummaryCache.len(), 1) for i := range summaries { r := bytesutil.Uint64ToBytesLittleEndian(uint64(i)) - require.Equal(t, true, db.HasStateSummary(context.Background(), bytesutil.ToBytes32(r))) + require.Equal(t, true, db.HasStateSummary(t.Context(), bytesutil.ToBytes32(r))) } } func TestStateSummary_CanDelete(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() r1 := bytesutil.ToBytes32([]byte{'A'}) s1 := ðpb.StateSummary{Slot: 1, Root: r1[:]} diff --git a/beacon-chain/db/kv/state_test.go 
b/beacon-chain/db/kv/state_test.go index 77313c21ac..4e425e4c8d 100644 --- a/beacon-chain/db/kv/state_test.go +++ b/beacon-chain/db/kv/state_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "crypto/rand" "encoding/binary" mathRand "math/rand" @@ -26,7 +25,7 @@ import ( func TestStateNil(t *testing.T) { db := setupDB(t) - _, err := db.StateOrError(context.Background(), [32]byte{}) + _, err := db.StateOrError(t.Context(), [32]byte{}) require.ErrorIs(t, err, ErrNotFoundState) } @@ -171,13 +170,13 @@ func TestState_CanSaveRetrieve(t *testing.T) { rootNonce = '1' } root := bytesutil.ToBytes32([]byte{tc.rootSeed, rootNonce}) - require.Equal(t, false, db.HasState(context.Background(), root)) + require.Equal(t, false, db.HasState(t.Context(), root)) st := tc.s() - require.NoError(t, db.SaveState(context.Background(), st, root)) - assert.Equal(t, true, db.HasState(context.Background(), root)) + require.NoError(t, db.SaveState(t.Context(), st, root)) + assert.Equal(t, true, db.HasState(t.Context(), root)) - savedSt, err := db.State(context.Background(), root) + savedSt, err := db.State(t.Context(), root) require.NoError(t, err) assert.DeepSSZEqual(t, st.ToProtoUnsafe(), savedSt.ToProtoUnsafe()) @@ -199,7 +198,7 @@ func TestState_CanSaveRetrieveValidatorEntries(t *testing.T) { r := [32]byte{'A'} - require.Equal(t, false, db.HasState(context.Background(), r)) + require.Equal(t, false, db.HasState(t.Context(), r)) stateValidators := validators(10) st, err := util.NewBeaconState() @@ -207,11 +206,11 @@ func TestState_CanSaveRetrieveValidatorEntries(t *testing.T) { require.NoError(t, st.SetSlot(100)) require.NoError(t, st.SetValidators(stateValidators)) - ctx := context.Background() + ctx := t.Context() require.NoError(t, db.SaveState(ctx, st, r)) - assert.Equal(t, true, db.HasState(context.Background(), r)) + assert.Equal(t, true, db.HasState(t.Context(), r)) - savedS, err := db.State(context.Background(), r) + savedS, err := db.State(t.Context(), r) require.NoError(t, 
err) require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching") @@ -252,18 +251,18 @@ func TestStateAltair_CanSaveRetrieveValidatorEntries(t *testing.T) { r := [32]byte{'A'} - require.Equal(t, false, db.HasState(context.Background(), r)) + require.Equal(t, false, db.HasState(t.Context(), r)) stateValidators := validators(10) st, _ := util.DeterministicGenesisStateAltair(t, 20) require.NoError(t, st.SetSlot(100)) require.NoError(t, st.SetValidators(stateValidators)) - ctx := context.Background() + ctx := t.Context() require.NoError(t, db.SaveState(ctx, st, r)) - assert.Equal(t, true, db.HasState(context.Background(), r)) + assert.Equal(t, true, db.HasState(t.Context(), r)) - savedS, err := db.State(context.Background(), r) + savedS, err := db.State(t.Context(), r) require.NoError(t, err) require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching") @@ -304,7 +303,7 @@ func TestState_CanSaveRetrieveValidatorEntriesFromCache(t *testing.T) { r := [32]byte{'A'} - require.Equal(t, false, db.HasState(context.Background(), r)) + require.Equal(t, false, db.HasState(t.Context(), r)) stateValidators := validators(10) st, err := util.NewBeaconState() @@ -312,9 +311,9 @@ func TestState_CanSaveRetrieveValidatorEntriesFromCache(t *testing.T) { require.NoError(t, st.SetSlot(100)) require.NoError(t, st.SetValidators(stateValidators)) - ctx := context.Background() + ctx := t.Context() require.NoError(t, db.SaveState(ctx, st, r)) - assert.Equal(t, true, db.HasState(context.Background(), r)) + assert.Equal(t, true, db.HasState(t.Context(), r)) // check if the state is in cache for i := 0; i < len(stateValidators); i++ { @@ -356,7 +355,7 @@ func TestState_CanSaveRetrieveValidatorEntriesWithoutCache(t *testing.T) { r := [32]byte{'A'} - require.Equal(t, false, db.HasState(context.Background(), r)) + require.Equal(t, false, 
db.HasState(t.Context(), r)) stateValidators := validators(10) st, err := util.NewBeaconState() @@ -364,12 +363,12 @@ func TestState_CanSaveRetrieveValidatorEntriesWithoutCache(t *testing.T) { require.NoError(t, st.SetSlot(100)) require.NoError(t, st.SetValidators(stateValidators)) - ctx := context.Background() + ctx := t.Context() require.NoError(t, db.SaveState(ctx, st, r)) - assert.Equal(t, true, db.HasState(context.Background(), r)) + assert.Equal(t, true, db.HasState(t.Context(), r)) db.validatorEntryCache.Clear() - savedS, err := db.State(context.Background(), r) + savedS, err := db.State(t.Context(), r) require.NoError(t, err) require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching") @@ -412,8 +411,8 @@ func TestState_DeleteState(t *testing.T) { r1 := [32]byte{'A'} r2 := [32]byte{'B'} - require.Equal(t, false, db.HasState(context.Background(), r1)) - require.Equal(t, false, db.HasState(context.Background(), r2)) + require.Equal(t, false, db.HasState(t.Context(), r1)) + require.Equal(t, false, db.HasState(t.Context(), r2)) // create two states with the same set of validators. stateValidators := validators(10) @@ -428,7 +427,7 @@ func TestState_DeleteState(t *testing.T) { require.NoError(t, st2.SetValidators(stateValidators)) // save both the states. 
- ctx := context.Background() + ctx := t.Context() require.NoError(t, db.SaveState(ctx, st1, r1)) require.NoError(t, db.SaveState(ctx, st2, r2)) @@ -488,18 +487,18 @@ func TestGenesisState_CanSaveRetrieve(t *testing.T) { st, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, st.SetSlot(1)) - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), headRoot)) - require.NoError(t, db.SaveState(context.Background(), st, headRoot)) + require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), headRoot)) + require.NoError(t, db.SaveState(t.Context(), st, headRoot)) - savedGenesisS, err := db.GenesisState(context.Background()) + savedGenesisS, err := db.GenesisState(t.Context()) require.NoError(t, err) assert.DeepSSZEqual(t, st.ToProtoUnsafe(), savedGenesisS.ToProtoUnsafe(), "Did not retrieve saved state") - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), [32]byte{'C'})) + require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), [32]byte{'C'})) } func TestStore_StatesBatchDelete(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() numBlocks := 100 totalBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, numBlocks) blockRoots := make([][32]byte, 0) @@ -515,7 +514,7 @@ func TestStore_StatesBatchDelete(t *testing.T) { st, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, st.SetSlot(primitives.Slot(i))) - require.NoError(t, db.SaveState(context.Background(), st, r)) + require.NoError(t, db.SaveState(t.Context(), st, r)) blockRoots = append(blockRoots, r) if i%2 == 0 { evenBlockRoots = append(evenBlockRoots, r) @@ -526,7 +525,7 @@ func TestStore_StatesBatchDelete(t *testing.T) { require.NoError(t, db.DeleteStates(ctx, evenBlockRoots)) // When we retrieve the data, only the odd indexed state should remain. 
for _, r := range blockRoots { - s, err := db.State(context.Background(), r) + s, err := db.State(t.Context(), r) require.NoError(t, err) if s == nil { continue @@ -537,7 +536,7 @@ func TestStore_StatesBatchDelete(t *testing.T) { func TestStore_DeleteGenesisState(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() genesisBlockRoot := [32]byte{'A'} require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot)) @@ -551,7 +550,7 @@ func TestStore_DeleteGenesisState(t *testing.T) { func TestStore_DeleteFinalizedState(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() genesis := bytesutil.ToBytes32([]byte{'G', 'E', 'N', 'E', 'S', 'I', 'S'}) require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesis)) @@ -579,7 +578,7 @@ func TestStore_DeleteFinalizedState(t *testing.T) { func TestStore_DeleteHeadState(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() genesis := bytesutil.ToBytes32([]byte{'G', 'E', 'N', 'E', 'S', 'I', 'S'}) require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesis)) @@ -610,47 +609,47 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - require.NoError(t, db.SaveBlock(context.Background(), wsb)) + require.NoError(t, db.SaveBlock(t.Context(), wsb)) st, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, st.SetSlot(1)) s0 := st.ToProtoUnsafe() - require.NoError(t, db.SaveState(context.Background(), st, r)) + require.NoError(t, db.SaveState(t.Context(), st, r)) b.Block.Slot = 100 r1, err := b.Block.HashTreeRoot() require.NoError(t, err) wsb, err = blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - require.NoError(t, db.SaveBlock(context.Background(), wsb)) + require.NoError(t, db.SaveBlock(t.Context(), wsb)) st, err = util.NewBeaconState() require.NoError(t, err) require.NoError(t, st.SetSlot(100)) s1 := st.ToProtoUnsafe() - 
require.NoError(t, db.SaveState(context.Background(), st, r1)) + require.NoError(t, db.SaveState(t.Context(), st, r1)) b.Block.Slot = 1000 r2, err := b.Block.HashTreeRoot() require.NoError(t, err) wsb, err = blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - require.NoError(t, db.SaveBlock(context.Background(), wsb)) + require.NoError(t, db.SaveBlock(t.Context(), wsb)) st, err = util.NewBeaconState() require.NoError(t, err) require.NoError(t, st.SetSlot(1000)) s2 := st.ToProtoUnsafe() - require.NoError(t, db.SaveState(context.Background(), st, r2)) + require.NoError(t, db.SaveState(t.Context(), st, r2)) - highest, err := db.HighestSlotStatesBelow(context.Background(), 2) + highest, err := db.HighestSlotStatesBelow(t.Context(), 2) require.NoError(t, err) assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), s0) - highest, err = db.HighestSlotStatesBelow(context.Background(), 101) + highest, err = db.HighestSlotStatesBelow(t.Context(), 101) require.NoError(t, err) assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), s1) - highest, err = db.HighestSlotStatesBelow(context.Background(), 1001) + highest, err = db.HighestSlotStatesBelow(t.Context(), 1001) require.NoError(t, err) assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), s2) } @@ -661,8 +660,8 @@ func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) { genesisState, err := util.NewBeaconState() require.NoError(t, err) genesisRoot := [32]byte{'a'} - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot)) - require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot)) + require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot)) + require.NoError(t, db.SaveState(t.Context(), genesisState, genesisRoot)) b := util.NewBeaconBlock() b.Block.Slot = 1 @@ -670,21 +669,21 @@ func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - require.NoError(t, 
db.SaveBlock(context.Background(), wsb)) + require.NoError(t, db.SaveBlock(t.Context(), wsb)) st, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, st.SetSlot(1)) - require.NoError(t, db.SaveState(context.Background(), st, r)) + require.NoError(t, db.SaveState(t.Context(), st, r)) - highest, err := db.HighestSlotStatesBelow(context.Background(), 2) + highest, err := db.HighestSlotStatesBelow(t.Context(), 2) require.NoError(t, err) assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), st.ToProtoUnsafe()) - highest, err = db.HighestSlotStatesBelow(context.Background(), 1) + highest, err = db.HighestSlotStatesBelow(t.Context(), 1) require.NoError(t, err) assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), genesisState.ToProtoUnsafe()) - highest, err = db.HighestSlotStatesBelow(context.Background(), 0) + highest, err = db.HighestSlotStatesBelow(t.Context(), 0) require.NoError(t, err) assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), genesisState.ToProtoUnsafe()) } @@ -695,9 +694,9 @@ func TestStore_CleanUpDirtyStates_AboveThreshold(t *testing.T) { genesisState, err := util.NewBeaconState() require.NoError(t, err) genesisRoot := [32]byte{'a'} - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot)) - require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot)) - require.NoError(t, db.SaveOriginCheckpointBlockRoot(context.Background(), [32]byte{'a'})) + require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot)) + require.NoError(t, db.SaveState(t.Context(), genesisState, genesisRoot)) + require.NoError(t, db.SaveOriginCheckpointBlockRoot(t.Context(), [32]byte{'a'})) bRoots := make([][32]byte, 0) slotsPerArchivedPoint := primitives.Slot(128) @@ -710,27 +709,27 @@ func TestStore_CleanUpDirtyStates_AboveThreshold(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - require.NoError(t, db.SaveBlock(context.Background(), wsb)) + require.NoError(t, 
db.SaveBlock(t.Context(), wsb)) bRoots = append(bRoots, r) prevRoot = r st, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, st.SetSlot(i)) - require.NoError(t, db.SaveState(context.Background(), st, r)) + require.NoError(t, db.SaveState(t.Context(), st, r)) } - require.NoError(t, db.SaveFinalizedCheckpoint(context.Background(), ðpb.Checkpoint{ + require.NoError(t, db.SaveFinalizedCheckpoint(t.Context(), ðpb.Checkpoint{ Root: bRoots[len(bRoots)-1][:], Epoch: primitives.Epoch(slotsPerArchivedPoint / params.BeaconConfig().SlotsPerEpoch), })) - require.NoError(t, db.CleanUpDirtyStates(context.Background(), slotsPerArchivedPoint)) + require.NoError(t, db.CleanUpDirtyStates(t.Context(), slotsPerArchivedPoint)) for i, root := range bRoots { if primitives.Slot(i) >= slotsPerArchivedPoint.SubSlot(slotsPerArchivedPoint.Div(3)) { - require.Equal(t, true, db.HasState(context.Background(), root)) + require.Equal(t, true, db.HasState(t.Context(), root)) } else { - require.Equal(t, false, db.HasState(context.Background(), root)) + require.Equal(t, false, db.HasState(t.Context(), root)) } } } @@ -741,9 +740,9 @@ func TestStore_CleanUpDirtyStates_Finalized(t *testing.T) { genesisState, err := util.NewBeaconState() require.NoError(t, err) genesisRoot := [32]byte{'a'} - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot)) - require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot)) - require.NoError(t, db.SaveOriginCheckpointBlockRoot(context.Background(), [32]byte{'a'})) + require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot)) + require.NoError(t, db.SaveState(t.Context(), genesisState, genesisRoot)) + require.NoError(t, db.SaveOriginCheckpointBlockRoot(t.Context(), [32]byte{'a'})) for i := primitives.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ { b := util.NewBeaconBlock() @@ -752,17 +751,17 @@ func TestStore_CleanUpDirtyStates_Finalized(t *testing.T) { require.NoError(t, err) wsb, err 
:= blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - require.NoError(t, db.SaveBlock(context.Background(), wsb)) + require.NoError(t, db.SaveBlock(t.Context(), wsb)) st, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, st.SetSlot(i)) - require.NoError(t, db.SaveState(context.Background(), st, r)) + require.NoError(t, db.SaveState(t.Context(), st, r)) } - require.NoError(t, db.SaveFinalizedCheckpoint(context.Background(), ðpb.Checkpoint{Root: genesisRoot[:]})) - require.NoError(t, db.CleanUpDirtyStates(context.Background(), params.BeaconConfig().SlotsPerEpoch)) - require.Equal(t, true, db.HasState(context.Background(), genesisRoot)) + require.NoError(t, db.SaveFinalizedCheckpoint(t.Context(), ðpb.Checkpoint{Root: genesisRoot[:]})) + require.NoError(t, db.CleanUpDirtyStates(t.Context(), params.BeaconConfig().SlotsPerEpoch)) + require.Equal(t, true, db.HasState(t.Context(), genesisRoot)) } func TestStore_CleanUpDirtyStates_OriginRoot(t *testing.T) { @@ -771,8 +770,8 @@ func TestStore_CleanUpDirtyStates_OriginRoot(t *testing.T) { genesisState, err := util.NewBeaconState() require.NoError(t, err) r := [32]byte{'a'} - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), r)) - require.NoError(t, db.SaveState(context.Background(), genesisState, r)) + require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), r)) + require.NoError(t, db.SaveState(t.Context(), genesisState, r)) for i := primitives.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ { b := util.NewBeaconBlock() @@ -781,17 +780,17 @@ func TestStore_CleanUpDirtyStates_OriginRoot(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - require.NoError(t, db.SaveBlock(context.Background(), wsb)) + require.NoError(t, db.SaveBlock(t.Context(), wsb)) st, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, st.SetSlot(i)) - require.NoError(t, db.SaveState(context.Background(), st, r)) + require.NoError(t, 
db.SaveState(t.Context(), st, r)) } - require.NoError(t, db.SaveOriginCheckpointBlockRoot(context.Background(), r)) - require.NoError(t, db.CleanUpDirtyStates(context.Background(), params.BeaconConfig().SlotsPerEpoch)) - require.Equal(t, true, db.HasState(context.Background(), r)) + require.NoError(t, db.SaveOriginCheckpointBlockRoot(t.Context(), r)) + require.NoError(t, db.CleanUpDirtyStates(t.Context(), params.BeaconConfig().SlotsPerEpoch)) + require.Equal(t, true, db.HasState(t.Context(), r)) } func TestStore_CleanUpDirtyStates_DontDeleteNonFinalized(t *testing.T) { @@ -800,9 +799,9 @@ func TestStore_CleanUpDirtyStates_DontDeleteNonFinalized(t *testing.T) { genesisState, err := util.NewBeaconState() require.NoError(t, err) genesisRoot := [32]byte{'a'} - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot)) - require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot)) - require.NoError(t, db.SaveOriginCheckpointBlockRoot(context.Background(), [32]byte{'a'})) + require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot)) + require.NoError(t, db.SaveState(t.Context(), genesisState, genesisRoot)) + require.NoError(t, db.SaveOriginCheckpointBlockRoot(t.Context(), [32]byte{'a'})) var unfinalizedRoots [][32]byte for i := primitives.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ { @@ -812,20 +811,20 @@ func TestStore_CleanUpDirtyStates_DontDeleteNonFinalized(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - require.NoError(t, db.SaveBlock(context.Background(), wsb)) + require.NoError(t, db.SaveBlock(t.Context(), wsb)) unfinalizedRoots = append(unfinalizedRoots, r) st, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, st.SetSlot(i)) - require.NoError(t, db.SaveState(context.Background(), st, r)) + require.NoError(t, db.SaveState(t.Context(), st, r)) } - require.NoError(t, db.SaveFinalizedCheckpoint(context.Background(), 
ðpb.Checkpoint{Root: genesisRoot[:]})) - require.NoError(t, db.CleanUpDirtyStates(context.Background(), params.BeaconConfig().SlotsPerEpoch)) + require.NoError(t, db.SaveFinalizedCheckpoint(t.Context(), ðpb.Checkpoint{Root: genesisRoot[:]})) + require.NoError(t, db.CleanUpDirtyStates(t.Context(), params.BeaconConfig().SlotsPerEpoch)) for _, rt := range unfinalizedRoots { - require.Equal(t, true, db.HasState(context.Background(), rt)) + require.Equal(t, true, db.HasState(t.Context(), rt)) } } @@ -834,20 +833,20 @@ func TestAltairState_CanSaveRetrieve(t *testing.T) { r := [32]byte{'A'} - require.Equal(t, false, db.HasState(context.Background(), r)) + require.Equal(t, false, db.HasState(t.Context(), r)) st, _ := util.DeterministicGenesisStateAltair(t, 1) require.NoError(t, st.SetSlot(100)) - require.NoError(t, db.SaveState(context.Background(), st, r)) - require.Equal(t, true, db.HasState(context.Background(), r)) + require.NoError(t, db.SaveState(t.Context(), st, r)) + require.Equal(t, true, db.HasState(t.Context(), r)) - savedS, err := db.State(context.Background(), r) + savedS, err := db.State(t.Context(), r) require.NoError(t, err) require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe()) - savedS, err = db.State(context.Background(), [32]byte{'B'}) + savedS, err = db.State(t.Context(), [32]byte{'B'}) require.NoError(t, err) require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil") } @@ -857,16 +856,16 @@ func TestAltairState_CanDelete(t *testing.T) { r := [32]byte{'A'} - require.Equal(t, false, db.HasState(context.Background(), r)) + require.Equal(t, false, db.HasState(t.Context(), r)) st, _ := util.DeterministicGenesisStateAltair(t, 1) require.NoError(t, st.SetSlot(100)) - require.NoError(t, db.SaveState(context.Background(), st, r)) - require.Equal(t, true, db.HasState(context.Background(), r)) + require.NoError(t, db.SaveState(t.Context(), st, r)) + require.Equal(t, true, db.HasState(t.Context(), r)) - 
require.NoError(t, db.DeleteState(context.Background(), r)) - savedS, err := db.State(context.Background(), r) + require.NoError(t, db.DeleteState(t.Context(), r)) + savedS, err := db.State(t.Context(), r) require.NoError(t, err) require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil") } @@ -914,7 +913,7 @@ func checkStateSaveTime(b *testing.B, saveCount int) { mathRand.Shuffle(len(allValidators), func(i, j int) { allValidators[i], allValidators[j] = allValidators[j], allValidators[i] }) require.NoError(b, st.SetValidators(allValidators)) - require.NoError(b, db.SaveState(context.Background(), st, bytesutil.ToBytes32(key))) + require.NoError(b, db.SaveState(b.Context(), st, bytesutil.ToBytes32(key))) } // create a state to save in benchmark @@ -926,7 +925,7 @@ func checkStateSaveTime(b *testing.B, saveCount int) { b.ReportAllocs() b.StartTimer() for i := 0; i < b.N; i++ { - require.NoError(b, db.SaveState(context.Background(), st, r)) + require.NoError(b, db.SaveState(b.Context(), st, r)) } } @@ -941,7 +940,7 @@ func checkStateReadTime(b *testing.B, saveCount int) { st, err := util.NewBeaconState() require.NoError(b, err) require.NoError(b, st.SetValidators(initialSetOfValidators)) - require.NoError(b, db.SaveState(context.Background(), st, r)) + require.NoError(b, db.SaveState(b.Context(), st, r)) // construct some states and save to randomize benchmark. 
for i := 0; i < saveCount; i++ { @@ -960,13 +959,13 @@ func checkStateReadTime(b *testing.B, saveCount int) { mathRand.Shuffle(len(allValidators), func(i, j int) { allValidators[i], allValidators[j] = allValidators[j], allValidators[i] }) require.NoError(b, st.SetValidators(allValidators)) - require.NoError(b, db.SaveState(context.Background(), st, bytesutil.ToBytes32(key))) + require.NoError(b, db.SaveState(b.Context(), st, bytesutil.ToBytes32(key))) } b.ReportAllocs() b.StartTimer() for i := 0; i < b.N; i++ { - _, err := db.State(context.Background(), r) + _, err := db.State(b.Context(), r) require.NoError(b, err) } } @@ -982,18 +981,18 @@ func TestStateBellatrix_CanSaveRetrieveValidatorEntries(t *testing.T) { r := [32]byte{'A'} - require.Equal(t, false, db.HasState(context.Background(), r)) + require.Equal(t, false, db.HasState(t.Context(), r)) stateValidators := validators(10) st, _ := util.DeterministicGenesisStateBellatrix(t, 20) require.NoError(t, st.SetSlot(100)) require.NoError(t, st.SetValidators(stateValidators)) - ctx := context.Background() + ctx := t.Context() require.NoError(t, db.SaveState(ctx, st, r)) - assert.Equal(t, true, db.HasState(context.Background(), r)) + assert.Equal(t, true, db.HasState(t.Context(), r)) - savedS, err := db.State(context.Background(), r) + savedS, err := db.State(t.Context(), r) require.NoError(t, err) require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching") @@ -1028,20 +1027,20 @@ func TestBellatrixState_CanSaveRetrieve(t *testing.T) { r := [32]byte{'A'} - require.Equal(t, false, db.HasState(context.Background(), r)) + require.Equal(t, false, db.HasState(t.Context(), r)) st, _ := util.DeterministicGenesisStateBellatrix(t, 1) require.NoError(t, st.SetSlot(100)) - require.NoError(t, db.SaveState(context.Background(), st, r)) - require.Equal(t, true, db.HasState(context.Background(), r)) + require.NoError(t, db.SaveState(t.Context(), st, r)) + 
require.Equal(t, true, db.HasState(t.Context(), r)) - savedS, err := db.State(context.Background(), r) + savedS, err := db.State(t.Context(), r) require.NoError(t, err) require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe()) - savedS, err = db.State(context.Background(), [32]byte{'B'}) + savedS, err = db.State(t.Context(), [32]byte{'B'}) require.NoError(t, err) require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil") } @@ -1051,16 +1050,16 @@ func TestBellatrixState_CanDelete(t *testing.T) { r := [32]byte{'A'} - require.Equal(t, false, db.HasState(context.Background(), r)) + require.Equal(t, false, db.HasState(t.Context(), r)) st, _ := util.DeterministicGenesisStateBellatrix(t, 1) require.NoError(t, st.SetSlot(100)) - require.NoError(t, db.SaveState(context.Background(), st, r)) - require.Equal(t, true, db.HasState(context.Background(), r)) + require.NoError(t, db.SaveState(t.Context(), st, r)) + require.Equal(t, true, db.HasState(t.Context(), r)) - require.NoError(t, db.DeleteState(context.Background(), r)) - savedS, err := db.State(context.Background(), r) + require.NoError(t, db.DeleteState(t.Context(), r)) + savedS, err := db.State(t.Context(), r) require.NoError(t, err) require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil") } @@ -1074,18 +1073,18 @@ func TestBellatrixState_CanDeleteWithBlock(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - require.NoError(t, db.SaveBlock(context.Background(), wsb)) + require.NoError(t, db.SaveBlock(t.Context(), wsb)) - require.Equal(t, false, db.HasState(context.Background(), r)) + require.Equal(t, false, db.HasState(t.Context(), r)) st, _ := util.DeterministicGenesisStateBellatrix(t, 1) require.NoError(t, st.SetSlot(100)) - require.NoError(t, db.SaveState(context.Background(), st, r)) - require.Equal(t, true, db.HasState(context.Background(), r)) + require.NoError(t, 
db.SaveState(t.Context(), st, r)) + require.Equal(t, true, db.HasState(t.Context(), r)) - require.NoError(t, db.DeleteState(context.Background(), r)) - savedS, err := db.State(context.Background(), r) + require.NoError(t, db.DeleteState(t.Context(), r)) + savedS, err := db.State(t.Context(), r) require.NoError(t, err) require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil") } @@ -1095,20 +1094,20 @@ func TestDenebState_CanSaveRetrieve(t *testing.T) { r := [32]byte{'A'} - require.Equal(t, false, db.HasState(context.Background(), r)) + require.Equal(t, false, db.HasState(t.Context(), r)) st, _ := util.DeterministicGenesisStateDeneb(t, 1) require.NoError(t, st.SetSlot(100)) - require.NoError(t, db.SaveState(context.Background(), st, r)) - require.Equal(t, true, db.HasState(context.Background(), r)) + require.NoError(t, db.SaveState(t.Context(), st, r)) + require.Equal(t, true, db.HasState(t.Context(), r)) - savedS, err := db.State(context.Background(), r) + savedS, err := db.State(t.Context(), r) require.NoError(t, err) require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe()) - savedS, err = db.State(context.Background(), [32]byte{'B'}) + savedS, err = db.State(t.Context(), [32]byte{'B'}) require.NoError(t, err) require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil") } @@ -1118,16 +1117,16 @@ func TestDenebState_CanDelete(t *testing.T) { r := [32]byte{'A'} - require.Equal(t, false, db.HasState(context.Background(), r)) + require.Equal(t, false, db.HasState(t.Context(), r)) st, _ := util.DeterministicGenesisStateDeneb(t, 1) require.NoError(t, st.SetSlot(100)) - require.NoError(t, db.SaveState(context.Background(), st, r)) - require.Equal(t, true, db.HasState(context.Background(), r)) + require.NoError(t, db.SaveState(t.Context(), st, r)) + require.Equal(t, true, db.HasState(t.Context(), r)) - require.NoError(t, db.DeleteState(context.Background(), r)) - savedS, err := 
db.State(context.Background(), r) + require.NoError(t, db.DeleteState(t.Context(), r)) + savedS, err := db.State(t.Context(), r) require.NoError(t, err) require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil") } @@ -1143,18 +1142,18 @@ func TestStateDeneb_CanSaveRetrieveValidatorEntries(t *testing.T) { r := [32]byte{'A'} - require.Equal(t, false, db.HasState(context.Background(), r)) + require.Equal(t, false, db.HasState(t.Context(), r)) stateValidators := validators(10) st, _ := util.DeterministicGenesisStateDeneb(t, 20) require.NoError(t, st.SetSlot(100)) require.NoError(t, st.SetValidators(stateValidators)) - ctx := context.Background() + ctx := t.Context() require.NoError(t, db.SaveState(ctx, st, r)) - assert.Equal(t, true, db.HasState(context.Background(), r)) + assert.Equal(t, true, db.HasState(t.Context(), r)) - savedS, err := db.State(context.Background(), r) + savedS, err := db.State(t.Context(), r) require.NoError(t, err) require.DeepSSZEqual(t, st.Validators(), savedS.Validators(), "saved state with validators and retrieved state are not matching") @@ -1189,20 +1188,20 @@ func TestElectraState_CanSaveRetrieve(t *testing.T) { r := [32]byte{'A'} - require.Equal(t, false, db.HasState(context.Background(), r)) + require.Equal(t, false, db.HasState(t.Context(), r)) st, _ := util.DeterministicGenesisStateElectra(t, 1) require.NoError(t, st.SetSlot(100)) - require.NoError(t, db.SaveState(context.Background(), st, r)) - require.Equal(t, true, db.HasState(context.Background(), r)) + require.NoError(t, db.SaveState(t.Context(), st, r)) + require.Equal(t, true, db.HasState(t.Context(), r)) - savedS, err := db.State(context.Background(), r) + savedS, err := db.State(t.Context(), r) require.NoError(t, err) assert.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe()) - savedS, err = db.State(context.Background(), [32]byte{'B'}) + savedS, err = db.State(t.Context(), [32]byte{'B'}) require.NoError(t, err) require.Equal(t, 
state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil") } @@ -1212,16 +1211,16 @@ func TestElectraState_CanDelete(t *testing.T) { r := [32]byte{'A'} - require.Equal(t, false, db.HasState(context.Background(), r)) + require.Equal(t, false, db.HasState(t.Context(), r)) st, _ := util.DeterministicGenesisStateElectra(t, 1) require.NoError(t, st.SetSlot(100)) - require.NoError(t, db.SaveState(context.Background(), st, r)) - require.Equal(t, true, db.HasState(context.Background(), r)) + require.NoError(t, db.SaveState(t.Context(), st, r)) + require.Equal(t, true, db.HasState(t.Context(), r)) - require.NoError(t, db.DeleteState(context.Background(), r)) - savedS, err := db.State(context.Background(), r) + require.NoError(t, db.DeleteState(t.Context(), r)) + savedS, err := db.State(t.Context(), r) require.NoError(t, err) require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil") } @@ -1237,18 +1236,18 @@ func TestStateElectra_CanSaveRetrieveValidatorEntries(t *testing.T) { r := [32]byte{'A'} - require.Equal(t, false, db.HasState(context.Background(), r)) + require.Equal(t, false, db.HasState(t.Context(), r)) stateValidators := validators(10) st, _ := util.DeterministicGenesisStateElectra(t, 20) require.NoError(t, st.SetSlot(100)) require.NoError(t, st.SetValidators(stateValidators)) - ctx := context.Background() + ctx := t.Context() require.NoError(t, db.SaveState(ctx, st, r)) - assert.Equal(t, true, db.HasState(context.Background(), r)) + assert.Equal(t, true, db.HasState(t.Context(), r)) - savedS, err := db.State(context.Background(), r) + savedS, err := db.State(t.Context(), r) require.NoError(t, err) require.DeepSSZEqual(t, st.Validators(), savedS.Validators(), "saved state with validators and retrieved state are not matching") diff --git a/beacon-chain/db/kv/utils_test.go b/beacon-chain/db/kv/utils_test.go index 2dc04305b5..e8d761cbad 100644 --- a/beacon-chain/db/kv/utils_test.go +++ 
b/beacon-chain/db/kv/utils_test.go @@ -2,7 +2,6 @@ package kv import ( "bytes" - "context" "crypto/rand" "testing" @@ -121,7 +120,7 @@ func Test_deleteValueForIndices(t *testing.T) { bkt := tx.Bucket([]byte(k)) require.NoError(t, bkt.Put(idx, tt.inputIndices[k])) } - err := deleteValueForIndices(context.Background(), tt.inputIndices, tt.root, tx) + err := deleteValueForIndices(t.Context(), tt.inputIndices, tt.root, tx) if tt.wantedErr != "" { assert.ErrorContains(t, tt.wantedErr, err) return nil diff --git a/beacon-chain/db/kv/validated_checkpoint_test.go b/beacon-chain/db/kv/validated_checkpoint_test.go index b081f696fd..85aa5a2792 100644 --- a/beacon-chain/db/kv/validated_checkpoint_test.go +++ b/beacon-chain/db/kv/validated_checkpoint_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/consensus-types/blocks" @@ -15,7 +14,7 @@ import ( func TestStore_LastValidatedCheckpoint_CanSaveRetrieve(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() root := bytesutil.ToBytes32([]byte{'A'}) cp := ðpb.Checkpoint{ Epoch: 10, @@ -34,7 +33,7 @@ func TestStore_LastValidatedCheckpoint_CanSaveRetrieve(t *testing.T) { func TestStore_LastValidatedCheckpoint_Recover(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() blk := util.HydrateSignedBeaconBlock(ðpb.SignedBeaconBlock{}) r, err := blk.Block.HashTreeRoot() require.NoError(t, err) @@ -53,7 +52,7 @@ func TestStore_LastValidatedCheckpoint_Recover(t *testing.T) { func BenchmarkStore_SaveLastValidatedCheckpoint(b *testing.B) { db := setupDB(b) - ctx := context.Background() + ctx := b.Context() root := bytesutil.ToBytes32([]byte{'A'}) cp := ðpb.Checkpoint{ Epoch: 10, @@ -73,7 +72,7 @@ func BenchmarkStore_SaveLastValidatedCheckpoint(b *testing.B) { func TestStore_LastValidatedCheckpoint_DefaultIsFinalized(t *testing.T) { db := setupDB(t) - ctx := context.Background() + ctx := t.Context() genesis := 
bytesutil.ToBytes32([]byte{'G', 'E', 'N', 'E', 'S', 'I', 'S'}) require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesis)) diff --git a/beacon-chain/db/kv/wss_test.go b/beacon-chain/db/kv/wss_test.go index ebe484646a..cf31228fbc 100644 --- a/beacon-chain/db/kv/wss_test.go +++ b/beacon-chain/db/kv/wss_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/state/genesis" @@ -16,7 +15,7 @@ func TestSaveOrigin(t *testing.T) { // Embedded Genesis works with Mainnet config params.OverrideBeaconConfig(params.MainnetConfig()) - ctx := context.Background() + ctx := t.Context() db := setupDB(t) st, err := genesis.State(params.MainnetName) diff --git a/beacon-chain/db/pruner/pruner_test.go b/beacon-chain/db/pruner/pruner_test.go index 2e4f4d268e..58e2e1ae00 100644 --- a/beacon-chain/db/pruner/pruner_test.go +++ b/beacon-chain/db/pruner/pruner_test.go @@ -44,7 +44,7 @@ func TestPruner_PruningConditions(t *testing.T) { t.Run(tt.name, func(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) hook := logTest.NewGlobal() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) beaconDB := dbtest.SetupDB(t) slotTicker := &slottest.MockTicker{Channel: make(chan primitives.Slot)} @@ -79,7 +79,7 @@ func TestPruner_PruningConditions(t *testing.T) { } func TestPruner_PruneSuccess(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := dbtest.SetupDB(t) // Create and save some blocks at different slots diff --git a/beacon-chain/db/restore_test.go b/beacon-chain/db/restore_test.go index 8e501684c8..f246d0fee0 100644 --- a/beacon-chain/db/restore_test.go +++ b/beacon-chain/db/restore_test.go @@ -1,7 +1,6 @@ package db import ( - "context" "flag" "os" "path" @@ -20,9 +19,9 @@ import ( func TestRestore(t *testing.T) { logHook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() - backupDb, err := kv.NewKVStore(context.Background(), t.TempDir()) + 
backupDb, err := kv.NewKVStore(t.Context(), t.TempDir()) require.NoError(t, err) head := util.NewBeaconBlock() head.Block.Slot = 5000 @@ -58,7 +57,7 @@ func TestRestore(t *testing.T) { require.NoError(t, err) assert.Equal(t, 1, len(files)) assert.Equal(t, kv.DatabaseFileName, files[0].Name()) - restoredDb, err := kv.NewKVStore(context.Background(), path.Join(restoreDir, kv.BeaconNodeDbDirName)) + restoredDb, err := kv.NewKVStore(t.Context(), path.Join(restoreDir, kv.BeaconNodeDbDirName)) defer func() { require.NoError(t, restoredDb.Close()) }() diff --git a/beacon-chain/db/slasherkv/kv_test.go b/beacon-chain/db/slasherkv/kv_test.go index cf91edb5d7..0f39ba0df7 100644 --- a/beacon-chain/db/slasherkv/kv_test.go +++ b/beacon-chain/db/slasherkv/kv_test.go @@ -1,7 +1,6 @@ package slasherkv import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/testing/require" @@ -9,7 +8,7 @@ import ( // setupDB instantiates and returns a Store instance. func setupDB(t testing.TB) *Store { - db, err := NewKVStore(context.Background(), t.TempDir()) + db, err := NewKVStore(t.Context(), t.TempDir()) require.NoError(t, err, "Failed to instantiate DB") t.Cleanup(func() { require.NoError(t, db.Close(), "Failed to close database") diff --git a/beacon-chain/db/slasherkv/migrate_test.go b/beacon-chain/db/slasherkv/migrate_test.go index 1c914c0f27..5ef5ac62f8 100644 --- a/beacon-chain/db/slasherkv/migrate_test.go +++ b/beacon-chain/db/slasherkv/migrate_test.go @@ -1,7 +1,6 @@ package slasherkv import ( - "context" "encoding/binary" "testing" @@ -120,7 +119,7 @@ func TestMigrate(t *testing.T) { } // Create a new context. - ctx := context.Background() + ctx := t.Context() // Setup a test database. 
beaconDB := setupDB(t) diff --git a/beacon-chain/db/slasherkv/pruning_test.go b/beacon-chain/db/slasherkv/pruning_test.go index a3b2ed16a3..93bc95f121 100644 --- a/beacon-chain/db/slasherkv/pruning_test.go +++ b/beacon-chain/db/slasherkv/pruning_test.go @@ -1,7 +1,6 @@ package slasherkv import ( - "context" "fmt" "testing" @@ -16,7 +15,7 @@ import ( ) func TestStore_PruneProposalsAtEpoch(t *testing.T) { - ctx := context.Background() + ctx := t.Context() // If the lowest stored epoch in the database is >= the end epoch of the pruning process, // there is nothing to prune, so we also expect exiting early. @@ -112,7 +111,7 @@ func TestStore_PruneProposalsAtEpoch(t *testing.T) { } func TestStore_PruneAttestations_OK(t *testing.T) { - ctx := context.Background() + ctx := t.Context() // If the lowest stored epoch in the database is >= the end epoch of the pruning process, // there is nothing to prune, so we also expect exiting early. diff --git a/beacon-chain/db/slasherkv/slasher_test.go b/beacon-chain/db/slasherkv/slasher_test.go index 6fab904bd5..2044520c3b 100644 --- a/beacon-chain/db/slasherkv/slasher_test.go +++ b/beacon-chain/db/slasherkv/slasher_test.go @@ -1,7 +1,6 @@ package slasherkv import ( - "context" "encoding/binary" "math/rand" "reflect" @@ -20,7 +19,7 @@ import ( func TestStore_AttestationRecordForValidator_SaveRetrieve(t *testing.T) { const attestationsCount = 11_000 - ctx := context.Background() + ctx := t.Context() beaconDB := setupDB(t) phase0ValidatorIndex := primitives.ValidatorIndex(1) electraValidatorIndex := primitives.ValidatorIndex(2) @@ -101,7 +100,7 @@ func TestStore_AttestationRecordForValidator_SaveRetrieve(t *testing.T) { } func TestStore_LastEpochWrittenForValidators(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := setupDB(t) validatorsCount := 11000 @@ -141,7 +140,7 @@ func TestStore_LastEpochWrittenForValidators(t *testing.T) { } func TestStore_CheckAttesterDoubleVotes(t *testing.T) { - ctx := 
context.Background() + ctx := t.Context() for _, ver := range []int{version.Phase0, version.Electra} { t.Run(version.String(ver), func(t *testing.T) { @@ -207,7 +206,7 @@ func TestStore_SlasherChunk_SaveRetrieve(t *testing.T) { ) // Create context. - ctx := context.Background() + ctx := t.Context() // Create database. beaconDB := setupDB(t) @@ -311,7 +310,7 @@ func TestStore_SlasherChunk_SaveRetrieve(t *testing.T) { } func TestStore_SlasherChunk_PreventsSavingWrongLength(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := setupDB(t) totalChunks := 64 chunkKeys := make([][]byte, totalChunks) @@ -326,7 +325,7 @@ func TestStore_SlasherChunk_PreventsSavingWrongLength(t *testing.T) { } func TestStore_ExistingBlockProposals(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := setupDB(t) proposals := []*slashertypes.SignedBlockHeaderWrapper{ createProposalWrapper(t, 1, 1, []byte{1}), @@ -469,7 +468,7 @@ func Test_encodeDecodeAttestationRecord(t *testing.T) { } func TestStore_HighestAttestations(t *testing.T) { - ctx := context.Background() + ctx := t.Context() tests := []struct { name string attestationsInDB []*slashertypes.IndexedAttestationWrapper @@ -583,7 +582,7 @@ func BenchmarkHighestAttestations(b *testing.B) { atts[i] = createAttestationWrapper(version.Phase0, primitives.Epoch(i), primitives.Epoch(i+2), indicesPerAtt[i], []byte{}) } - ctx := context.Background() + ctx := b.Context() beaconDB := setupDB(b) require.NoError(b, beaconDB.SaveAttestationRecordsForValidators(ctx, atts)) @@ -620,7 +619,7 @@ func BenchmarkStore_CheckDoubleBlockProposals(b *testing.B) { atts[i] = createAttestationWrapper(version.Phase0, primitives.Epoch(i), primitives.Epoch(i+2), indicesPerAtt[i], []byte{}) } - ctx := context.Background() + ctx := b.Context() beaconDB := setupDB(b) require.NoError(b, beaconDB.SaveAttestationRecordsForValidators(ctx, atts)) diff --git a/beacon-chain/execution/block_reader_test.go 
b/beacon-chain/execution/block_reader_test.go index ef4d712315..9260f3ab98 100644 --- a/beacon-chain/execution/block_reader_test.go +++ b/beacon-chain/execution/block_reader_test.go @@ -35,7 +35,7 @@ func TestLatestMainchainInfo_OK(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(beaconDB), @@ -77,7 +77,7 @@ func TestBlockHashByHeight_ReturnsHash(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -85,7 +85,7 @@ func TestBlockHashByHeight_ReturnsHash(t *testing.T) { web3Service = setDefaultMocks(web3Service) web3Service.rpcClient = &mockExecution.RPCClient{} - ctx := context.Background() + ctx := t.Context() header := &gethTypes.Header{ Number: big.NewInt(15), @@ -110,7 +110,7 @@ func TestBlockHashByHeight_ReturnsError_WhenNoEth1Client(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -118,7 +118,7 @@ func TestBlockHashByHeight_ReturnsError_WhenNoEth1Client(t *testing.T) { web3Service = setDefaultMocks(web3Service) web3Service.rpcClient = nil - ctx := context.Background() + ctx := t.Context() _, err = web3Service.BlockHashByHeight(ctx, big.NewInt(0)) require.ErrorContains(t, "nil rpc client", err) @@ -133,7 +133,7 @@ func TestBlockExists_ValidHash(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -142,10 +142,10 @@ func TestBlockExists_ValidHash(t *testing.T) { web3Service = setDefaultMocks(web3Service) 
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend} testAcc.Backend.Commit() - block, err := testAcc.Backend.Client().BlockByNumber(context.Background(), big.NewInt(0)) + block, err := testAcc.Backend.Client().BlockByNumber(t.Context(), big.NewInt(0)) assert.NoError(t, err) - exists, height, err := web3Service.BlockExists(context.Background(), block.Hash()) + exists, height, err := web3Service.BlockExists(t.Context(), block.Hash()) require.NoError(t, err, "Could not get block hash with given height") require.Equal(t, true, exists) require.Equal(t, 0, height.Cmp(block.Number())) @@ -163,7 +163,7 @@ func TestBlockExists_InvalidHash(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -171,7 +171,7 @@ func TestBlockExists_InvalidHash(t *testing.T) { web3Service = setDefaultMocks(web3Service) - _, _, err = web3Service.BlockExists(context.Background(), common.BytesToHash([]byte{0})) + _, _, err = web3Service.BlockExists(t.Context(), common.BytesToHash([]byte{0})) require.NotNil(t, err, "Expected BlockExists to error with invalid hash") } @@ -182,7 +182,7 @@ func TestBlockExists_UsesCachedBlockInfo(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -195,14 +195,14 @@ func TestBlockExists_UsesCachedBlockInfo(t *testing.T) { err = web3Service.headerCache.AddHeader(header) require.NoError(t, err) - exists, height, err := web3Service.BlockExists(context.Background(), header.Hash) + exists, height, err := web3Service.BlockExists(t.Context(), header.Hash) require.NoError(t, err, "Could not get block hash with given height") require.Equal(t, true, exists) require.Equal(t, 0, height.Cmp(header.Number)) } func TestService_BlockNumberByTimestamp(t 
*testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := dbutil.SetupDB(t) testAcc, err := mock.Setup() @@ -212,7 +212,7 @@ func TestService_BlockNumberByTimestamp(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -250,7 +250,7 @@ func TestService_BlockNumberByTimestampLessTargetTime(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -261,7 +261,7 @@ func TestService_BlockNumberByTimestampLessTargetTime(t *testing.T) { for i := 0; i < 200; i++ { testAcc.Backend.Commit() } - ctx := context.Background() + ctx := t.Context() hd, err := testAcc.Backend.Client().HeaderByNumber(ctx, nil) require.NoError(t, err) web3Service.latestEth1Data.BlockTime = hd.Time @@ -274,7 +274,7 @@ func TestService_BlockNumberByTimestampLessTargetTime(t *testing.T) { require.ErrorContains(t, context.DeadlineExceeded.Error(), err) // Provide an attainable target time - blk, err := web3Service.findMaxTargetEth1Block(context.Background(), hd.Number, hd.Time-5) + blk, err := web3Service.findMaxTargetEth1Block(t.Context(), hd.Number, hd.Time-5) require.NoError(t, err) require.NotEqual(t, hd.Number.Uint64(), blk.Number.Uint64(), "retrieved block is not less than the head") } @@ -288,7 +288,7 @@ func TestService_BlockNumberByTimestampMoreTargetTime(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -299,7 +299,7 @@ func TestService_BlockNumberByTimestampMoreTargetTime(t *testing.T) { for i := 0; i < 200; i++ { testAcc.Backend.Commit() } - ctx := context.Background() + ctx := t.Context() hd, err := 
testAcc.Backend.Client().HeaderByNumber(ctx, nil) require.NoError(t, err) web3Service.latestEth1Data.BlockTime = hd.Time @@ -312,7 +312,7 @@ func TestService_BlockNumberByTimestampMoreTargetTime(t *testing.T) { require.ErrorContains(t, context.DeadlineExceeded.Error(), err) // Provide an attainable target time with respect to head - blk, err := web3Service.findMinTargetEth1Block(context.Background(), big.NewInt(0).Sub(hd.Number, big.NewInt(5)), hd.Time) + blk, err := web3Service.findMinTargetEth1Block(t.Context(), big.NewInt(0).Sub(hd.Number, big.NewInt(5)), hd.Time) require.NoError(t, err) require.Equal(t, hd.Number.Uint64(), blk.Number.Uint64(), "retrieved block is not equal to the head") } @@ -324,7 +324,7 @@ func TestService_BlockTimeByHeight_ReturnsError_WhenNoEth1Client(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -332,7 +332,7 @@ func TestService_BlockTimeByHeight_ReturnsError_WhenNoEth1Client(t *testing.T) { web3Service = setDefaultMocks(web3Service) web3Service.rpcClient = nil - ctx := context.Background() + ctx := t.Context() _, err = web3Service.BlockTimeByHeight(ctx, big.NewInt(0)) require.ErrorContains(t, "nil rpc client", err) diff --git a/beacon-chain/execution/deposit_test.go b/beacon-chain/execution/deposit_test.go index 9953026e42..f659cdf5e9 100644 --- a/beacon-chain/execution/deposit_test.go +++ b/beacon-chain/execution/deposit_test.go @@ -1,7 +1,6 @@ package execution import ( - "context" "fmt" "testing" @@ -58,7 +57,7 @@ func TestProcessDeposit_OK(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -72,10 +71,10 @@ func TestProcessDeposit_OK(t *testing.T) { eth1Data, err := util.DeterministicEth1Data(len(deposits)) 
require.NoError(t, err) - err = web3Service.processDeposit(context.Background(), eth1Data, deposits[0]) + err = web3Service.processDeposit(t.Context(), eth1Data, deposits[0]) require.NoError(t, err, "could not process deposit") - valcount, err := helpers.ActiveValidatorCount(context.Background(), web3Service.preGenesisState, 0) + valcount, err := helpers.ActiveValidatorCount(t.Context(), web3Service.preGenesisState, 0) require.NoError(t, err) require.Equal(t, 1, int(valcount), "Did not get correct active validator count") } @@ -87,7 +86,7 @@ func TestProcessDeposit_InvalidMerkleBranch(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -102,7 +101,7 @@ func TestProcessDeposit_InvalidMerkleBranch(t *testing.T) { deposits[0].Proof = [][]byte{{'f', 'a', 'k', 'e'}} - err = web3Service.processDeposit(context.Background(), eth1Data, deposits[0]) + err = web3Service.processDeposit(t.Context(), eth1Data, deposits[0]) require.NotNil(t, err, "No errors, when an error was expected") want := "deposit merkle branch of deposit root did not verify for root" @@ -118,7 +117,7 @@ func TestProcessDeposit_InvalidPublicKey(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -146,7 +145,7 @@ func TestProcessDeposit_InvalidPublicKey(t *testing.T) { DepositRoot: root[:], } - err = web3Service.processDeposit(context.Background(), eth1Data, deposits[0]) + err = web3Service.processDeposit(t.Context(), eth1Data, deposits[0]) require.NoError(t, err) require.LogsContain(t, hook, pubKeyErr) @@ -160,7 +159,7 @@ func TestProcessDeposit_InvalidSignature(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := 
NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -187,7 +186,7 @@ func TestProcessDeposit_InvalidSignature(t *testing.T) { DepositRoot: root[:], } - err = web3Service.processDeposit(context.Background(), eth1Data, deposits[0]) + err = web3Service.processDeposit(t.Context(), eth1Data, deposits[0]) require.NoError(t, err) require.LogsContain(t, hook, "could not verify deposit data signature") @@ -202,7 +201,7 @@ func TestProcessDeposit_UnableToVerify(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -225,7 +224,7 @@ func TestProcessDeposit_UnableToVerify(t *testing.T) { proof, err := generatedTrie.MerkleProof(0) require.NoError(t, err) deposits[0].Proof = proof - err = web3Service.processDeposit(context.Background(), eth1Data, deposits[0]) + err = web3Service.processDeposit(t.Context(), eth1Data, deposits[0]) require.NoError(t, err) want := "signature did not verify" @@ -240,7 +239,7 @@ func TestProcessDeposit_IncompleteDeposit(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -293,10 +292,10 @@ func TestProcessDeposit_IncompleteDeposit(t *testing.T) { deposit.Proof, err = generatedTrie.MerkleProof(i) require.NoError(t, err) - err = web3Service.processDeposit(context.Background(), eth1Data, deposit) + err = web3Service.processDeposit(t.Context(), eth1Data, deposit) require.NoError(t, err, fmt.Sprintf("Could not process deposit at %d", i)) - valcount, err := helpers.ActiveValidatorCount(context.Background(), web3Service.preGenesisState, 0) + valcount, err := helpers.ActiveValidatorCount(t.Context(), web3Service.preGenesisState, 0) require.NoError(t, err) require.Equal(t, 0, int(valcount), "Did not get correct active 
validator count") } @@ -309,7 +308,7 @@ func TestProcessDeposit_AllDepositedSuccessfully(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -323,10 +322,10 @@ func TestProcessDeposit_AllDepositedSuccessfully(t *testing.T) { for i := range keys { eth1Data.DepositCount = uint64(i + 1) - err = web3Service.processDeposit(context.Background(), eth1Data, deposits[i]) + err = web3Service.processDeposit(t.Context(), eth1Data, deposits[i]) require.NoError(t, err, fmt.Sprintf("Could not process deposit at %d", i)) - valCount, err := helpers.ActiveValidatorCount(context.Background(), web3Service.preGenesisState, 0) + valCount, err := helpers.ActiveValidatorCount(t.Context(), web3Service.preGenesisState, 0) require.NoError(t, err) require.Equal(t, uint64(i+1), valCount, "Did not get correct active validator count") diff --git a/beacon-chain/execution/engine_client_test.go b/beacon-chain/execution/engine_client_test.go index 97562d844f..ee2e6a5b0c 100644 --- a/beacon-chain/execution/engine_client_test.go +++ b/beacon-chain/execution/engine_client_test.go @@ -66,7 +66,7 @@ func TestClient_IPC(t *testing.T) { defer rpcClient.Close() srv := &Service{} srv.rpcClient = rpcClient - ctx := context.Background() + ctx := t.Context() fix := fixtures() params.SetupTestConfigCleanup(t) @@ -159,7 +159,7 @@ func TestClient_IPC(t *testing.T) { } func TestClient_HTTP(t *testing.T) { - ctx := context.Background() + ctx := t.Context() fix := fixtures() params.SetupTestConfigCleanup(t) @@ -954,7 +954,7 @@ func TestClient_HTTP(t *testing.T) { } func TestReconstructFullBellatrixBlock(t *testing.T) { - ctx := context.Background() + ctx := t.Context() t.Run("nil block", func(t *testing.T) { service := &Service{} @@ -1046,7 +1046,7 @@ func TestReconstructFullBellatrixBlock(t *testing.T) { } func TestReconstructFullBellatrixBlockBatch(t 
*testing.T) { - ctx := context.Background() + ctx := t.Context() t.Run("nil block", func(t *testing.T) { service := &Service{} @@ -1374,7 +1374,7 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) { ExecutionBlock: tt.currentPowBlock, BlockByHashMap: m, } - b, e, err := client.GetTerminalBlockHash(context.Background(), 1) + b, e, err := client.GetTerminalBlockHash(t.Context(), 1) if tt.errString != "" { require.ErrorContains(t, tt.errString, err) } else { @@ -1908,7 +1908,7 @@ func TestHeaderByHash_NotFound(t *testing.T) { srv := &Service{} srv.rpcClient = RPCClientBad{} - _, err := srv.HeaderByHash(context.Background(), [32]byte{}) + _, err := srv.HeaderByHash(t.Context(), [32]byte{}) assert.Equal(t, ethereum.NotFound, err) } @@ -1916,7 +1916,7 @@ func TestHeaderByNumber_NotFound(t *testing.T) { srv := &Service{} srv.rpcClient = RPCClientBad{} - _, err := srv.HeaderByNumber(context.Background(), big.NewInt(100)) + _, err := srv.HeaderByNumber(t.Context(), big.NewInt(100)) assert.Equal(t, ethereum.NotFound, err) } @@ -2300,7 +2300,7 @@ func newPayloadV4Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.Execu func TestReconstructBlindedBlockBatch(t *testing.T) { t.Run("empty response works", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() cli, srv := newMockEngine(t) srv.registerDefault(func(*jsonrpcMessage, http.ResponseWriter, *http.Request) { @@ -2311,7 +2311,7 @@ func TestReconstructBlindedBlockBatch(t *testing.T) { require.Equal(t, 0, len(results)) }) t.Run("expected error for nil response", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() slot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch) require.NoError(t, err) blk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 0) @@ -2345,7 +2345,7 @@ func Test_ExchangeCapabilities(t *testing.T) { err := json.NewEncoder(w).Encode(resp) require.NoError(t, err) })) - ctx := context.Background() + ctx := t.Context() 
logHook := logTest.NewGlobal() rpcClient, err := rpc.DialHTTP(srv.URL) @@ -2378,7 +2378,7 @@ func Test_ExchangeCapabilities(t *testing.T) { err := json.NewEncoder(w).Encode(resp) require.NoError(t, err) })) - ctx := context.Background() + ctx := t.Context() rpcClient, err := rpc.DialHTTP(srv.URL) require.NoError(t, err) @@ -2413,7 +2413,7 @@ func TestReconstructBlobSidecars(t *testing.T) { sb, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() t.Run("all seen", func(t *testing.T) { hi := mockSummary(t, []bool{true, true, true, true, true, true}) verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi) diff --git a/beacon-chain/execution/log_processing_test.go b/beacon-chain/execution/log_processing_test.go index 4cd6e020b5..3f50a761de 100644 --- a/beacon-chain/execution/log_processing_test.go +++ b/beacon-chain/execution/log_processing_test.go @@ -1,7 +1,6 @@ package execution import ( - "context" "encoding/binary" "math/big" "testing" @@ -39,7 +38,7 @@ func TestProcessDepositLog_OK(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(beaconDB), @@ -79,7 +78,7 @@ func TestProcessDepositLog_OK(t *testing.T) { t.Fatal("no logs") } - err = web3Service.ProcessLog(context.Background(), &logs[0]) + err = web3Service.ProcessLog(t.Context(), &logs[0]) require.NoError(t, err) require.LogsDoNotContain(t, hook, "Could not unpack log") @@ -108,7 +107,7 @@ func TestProcessDepositLog_InsertsPendingDeposit(t *testing.T) { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(beaconDB), @@ -149,12 +148,12 @@ func TestProcessDepositLog_InsertsPendingDeposit(t 
*testing.T) { web3Service.chainStartData.Chainstarted = true - err = web3Service.ProcessDepositLog(context.Background(), &logs[0]) + err = web3Service.ProcessDepositLog(t.Context(), &logs[0]) require.NoError(t, err) - err = web3Service.ProcessDepositLog(context.Background(), &logs[1]) + err = web3Service.ProcessDepositLog(t.Context(), &logs[1]) require.NoError(t, err) - pendingDeposits := web3Service.cfg.depositCache.PendingDeposits(context.Background(), nil /*blockNum*/) + pendingDeposits := web3Service.cfg.depositCache.PendingDeposits(t.Context(), nil /*blockNum*/) require.Equal(t, 2, len(pendingDeposits), "Unexpected number of deposits") hook.Reset() @@ -169,7 +168,7 @@ func TestUnpackDepositLogData_OK(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(beaconDB), @@ -224,7 +223,7 @@ func TestProcessETH2GenesisLog_8DuplicatePubkeys(t *testing.T) { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(beaconDB), @@ -273,7 +272,7 @@ func TestProcessETH2GenesisLog_8DuplicatePubkeys(t *testing.T) { require.NoError(t, err, "Unable to retrieve logs") for i := range logs { - err = web3Service.ProcessLog(context.Background(), &logs[i]) + err = web3Service.ProcessLog(t.Context(), &logs[i]) require.NoError(t, err) } assert.Equal(t, false, web3Service.chainStartData.Chainstarted, "Genesis has been triggered despite being 8 duplicate keys") @@ -299,7 +298,7 @@ func TestProcessETH2GenesisLog(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), 
WithDatabase(beaconDB), @@ -352,11 +351,11 @@ func TestProcessETH2GenesisLog(t *testing.T) { defer stateSub.Unsubscribe() for i := range logs { - err = web3Service.ProcessLog(context.Background(), &logs[i]) + err = web3Service.ProcessLog(t.Context(), &logs[i]) require.NoError(t, err) } - err = web3Service.ProcessETH1Block(context.Background(), big.NewInt(int64(logs[len(logs)-1].BlockNumber))) + err = web3Service.ProcessETH1Block(t.Context(), big.NewInt(int64(logs[len(logs)-1].BlockNumber))) require.NoError(t, err) cachedDeposits := web3Service.chainStartData.ChainstartDeposits @@ -392,7 +391,7 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(kvStore), @@ -405,7 +404,7 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) { web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend} web3Service.httpLogger = testAcc.Backend.Client() web3Service.latestEth1Data.LastRequestedBlock = 0 - block, err := testAcc.Backend.Client().BlockByNumber(context.Background(), nil) + block, err := testAcc.Backend.Client().BlockByNumber(t.Context(), nil) require.NoError(t, err) web3Service.latestEth1Data.BlockHeight = block.NumberU64() web3Service.latestEth1Data.BlockTime = block.Time() @@ -446,7 +445,7 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) { for i := uint64(0); i < params.BeaconConfig().Eth1FollowDistance; i++ { testAcc.Backend.Commit() } - b, err := testAcc.Backend.Client().BlockByNumber(context.Background(), nil) + b, err := testAcc.Backend.Client().BlockByNumber(t.Context(), nil) require.NoError(t, err) web3Service.latestEth1Data.BlockHeight = b.NumberU64() web3Service.latestEth1Data.BlockTime = b.Time() @@ -456,7 +455,7 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) { 
stateSub := web3Service.cfg.stateNotifier.StateFeed().Subscribe(stateChannel) defer stateSub.Unsubscribe() - err = web3Service.processPastLogs(context.Background()) + err = web3Service.processPastLogs(t.Context()) require.NoError(t, err) cachedDeposits := web3Service.chainStartData.ChainstartDeposits @@ -493,7 +492,7 @@ func TestProcessLogs_DepositRequestsStarted(t *testing.T) { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(kvStore), @@ -506,7 +505,7 @@ func TestProcessLogs_DepositRequestsStarted(t *testing.T) { web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend} web3Service.httpLogger = testAcc.Backend.Client() web3Service.latestEth1Data.LastRequestedBlock = 0 - block, err := testAcc.Backend.Client().BlockByNumber(context.Background(), nil) + block, err := testAcc.Backend.Client().BlockByNumber(t.Context(), nil) require.NoError(t, err) web3Service.latestEth1Data.BlockHeight = block.NumberU64() web3Service.latestEth1Data.BlockTime = block.Time() @@ -547,7 +546,7 @@ func TestProcessLogs_DepositRequestsStarted(t *testing.T) { for i := uint64(0); i < params.BeaconConfig().Eth1FollowDistance; i++ { testAcc.Backend.Commit() } - b, err := testAcc.Backend.Client().BlockByNumber(context.Background(), nil) + b, err := testAcc.Backend.Client().BlockByNumber(t.Context(), nil) require.NoError(t, err) web3Service.latestEth1Data.BlockHeight = b.NumberU64() web3Service.latestEth1Data.BlockTime = b.Time() @@ -580,7 +579,7 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(kvStore), @@ -593,7 +592,7 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) { 
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend} web3Service.httpLogger = testAcc.Backend.Client() web3Service.latestEth1Data.LastRequestedBlock = 0 - b, err := testAcc.Backend.Client().BlockByNumber(context.Background(), nil) + b, err := testAcc.Backend.Client().BlockByNumber(t.Context(), nil) require.NoError(t, err) web3Service.latestEth1Data.BlockHeight = b.NumberU64() web3Service.latestEth1Data.BlockTime = b.Time() @@ -633,7 +632,7 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) { for i := uint64(0); i < 1500; i++ { testAcc.Backend.Commit() } - genesisBlock, err := testAcc.Backend.Client().BlockByNumber(context.Background(), nil) + genesisBlock, err := testAcc.Backend.Client().BlockByNumber(t.Context(), nil) require.NoError(t, err) wantedGenesisTime := genesisBlock.Time() @@ -642,7 +641,7 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) { for i := uint64(0); i < params.BeaconConfig().Eth1FollowDistance; i++ { testAcc.Backend.Commit() } - currBlock, err := testAcc.Backend.Client().BlockByNumber(context.Background(), nil) + currBlock, err := testAcc.Backend.Client().BlockByNumber(t.Context(), nil) require.NoError(t, err) web3Service.latestEth1Data.BlockHeight = currBlock.NumberU64() web3Service.latestEth1Data.BlockTime = currBlock.Time() @@ -658,7 +657,7 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) { stateSub := web3Service.cfg.stateNotifier.StateFeed().Subscribe(stateChannel) defer stateSub.Unsubscribe() - err = web3Service.processPastLogs(context.Background()) + err = web3Service.processPastLogs(t.Context()) require.NoError(t, err) cachedDeposits := web3Service.chainStartData.ChainstartDeposits @@ -686,7 +685,7 @@ func TestCheckForChainstart_NoValidator(t *testing.T) { require.NoError(t, err, "Unable to set up simulated backend") beaconDB := testDB.SetupDB(t) s := newPowchainService(t, testAcc, beaconDB) - s.processChainStartIfReady(context.Background(), [32]byte{}, nil, 0) 
+ s.processChainStartIfReady(t.Context(), [32]byte{}, nil, 0) require.LogsDoNotContain(t, hook, "Could not determine active validator count from pre genesis state") } @@ -698,7 +697,7 @@ func newPowchainService(t *testing.T, eth1Backend *mock.TestAccount, beaconDB db t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(eth1Backend.ContractAddr), WithDatabase(beaconDB), diff --git a/beacon-chain/execution/mock_test.go b/beacon-chain/execution/mock_test.go index 6e22a1f08a..6db5d1d7d1 100644 --- a/beacon-chain/execution/mock_test.go +++ b/beacon-chain/execution/mock_test.go @@ -1,7 +1,6 @@ package execution import ( - "context" "encoding/json" "net/http" "net/http/httptest" @@ -117,7 +116,7 @@ func mockWriteResult(t *testing.T, w http.ResponseWriter, req *jsonrpcMessage, r } func TestParseRequest(t *testing.T) { - ctx := context.Background() + ctx := t.Context() cases := []struct { method string hexArgs []string // uint64 as hex @@ -197,7 +196,7 @@ func TestCallCount(t *testing.T) { mockWriteResult(t, w, msg, nil) }) for i := 0; i < c.count; i++ { - require.NoError(t, cli.CallContext(context.Background(), nil, c.method)) + require.NoError(t, cli.CallContext(t.Context(), nil, c.method)) } for _, m := range methods { if m == c.method { diff --git a/beacon-chain/execution/payload_body_test.go b/beacon-chain/execution/payload_body_test.go index 8fba6bb269..9ad162a80c 100644 --- a/beacon-chain/execution/payload_body_test.go +++ b/beacon-chain/execution/payload_body_test.go @@ -1,7 +1,6 @@ package execution import ( - "context" "net/http" "testing" @@ -125,7 +124,7 @@ func TestPayloadBodiesViaUnblinder(t *testing.T) { } mockWriteResult(t, w, msg, executionPayloadBodies) }) - ctx := context.Background() + ctx := t.Context() toUnblind := []interfaces.ReadOnlySignedBeaconBlock{ fx.denebBlock.blinded.block, @@ -255,7 +254,7 @@ func 
TestComputeRanges(t *testing.T) { func TestReconstructBlindedBlockBatchFallbackToRange(t *testing.T) { defer util.HackForksMaxuint(t, []int{version.Electra, version.Fulu})() - ctx := context.Background() + ctx := t.Context() t.Run("fallback fails", func(t *testing.T) { cli, srv := newMockEngine(t) fx := testBlindedBlockFixtures(t) @@ -353,7 +352,7 @@ func TestReconstructBlindedBlockBatchDenebAndBeyond(t *testing.T) { fx.electra.blinded.block, fx.fulu.blinded.block, } - unblinded, err := reconstructBlindedBlockBatch(context.Background(), cli, blinded) + unblinded, err := reconstructBlindedBlockBatch(t.Context(), cli, blinded) require.NoError(t, err) require.Equal(t, len(blinded), len(unblinded)) for i := range unblinded { diff --git a/beacon-chain/execution/prometheus_test.go b/beacon-chain/execution/prometheus_test.go index 86dac0db1f..069414a442 100644 --- a/beacon-chain/execution/prometheus_test.go +++ b/beacon-chain/execution/prometheus_test.go @@ -13,7 +13,7 @@ import ( // also tests the interchangeability of the explicit prometheus Register/Unregister // and the implicit methods within the collector implementation func TestCleanup(t *testing.T) { - ctx := context.Background() + ctx := t.Context() pc, err := NewPowchainCollector(ctx) assert.NoError(t, err, "Unexpected error calling NewPowchainCollector") unregistered := pc.unregister() @@ -37,7 +37,7 @@ func TestCleanup(t *testing.T) { // does come at the cost of an extra channel cluttering up // PowchainCollector, just for this test. 
func TestCancellation(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) pc, err := NewPowchainCollector(ctx) assert.NoError(t, err, "Unexpected error calling NewPowchainCollector") ticker := time.NewTicker(10 * time.Second) diff --git a/beacon-chain/execution/service_test.go b/beacon-chain/execution/service_test.go index 6e9da22dc7..288467da17 100644 --- a/beacon-chain/execution/service_test.go +++ b/beacon-chain/execution/service_test.go @@ -100,7 +100,7 @@ func TestStart_OK(t *testing.T) { waiter := verification.NewInitializerWaiter( c, forkchoice.NewROForkChoice(nil), nil) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(beaconDB), @@ -130,7 +130,7 @@ func TestStart_NoHttpEndpointDefinedFails_WithoutChainStarted(t *testing.T) { beaconDB := dbutil.SetupDB(t) testAcc, err := mock.Setup() require.NoError(t, err, "Unable to set up simulated backend") - _, err = NewService(context.Background(), + _, err = NewService(t.Context(), WithHttpEndpoint(""), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(beaconDB), @@ -149,7 +149,7 @@ func TestStop_OK(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(beaconDB), @@ -179,7 +179,7 @@ func TestService_Eth1Synced(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(beaconDB), @@ -189,7 +189,7 @@ func TestService_Eth1Synced(t *testing.T) { web3Service.depositContractCaller, err = 
contracts.NewDepositContractCaller(testAcc.ContractAddr, testAcc.Backend.Client()) require.NoError(t, err) - header, err := testAcc.Backend.Client().HeaderByNumber(context.Background(), nil) + header, err := testAcc.Backend.Client().HeaderByNumber(t.Context(), nil) require.NoError(t, err) currTime := header.Time now := time.Now() @@ -206,7 +206,7 @@ func TestFollowBlock_OK(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(beaconDB), @@ -221,7 +221,7 @@ func TestFollowBlock_OK(t *testing.T) { web3Service = setDefaultMocks(web3Service) web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend} - block, err := testAcc.Backend.Client().BlockByNumber(context.Background(), nil) + block, err := testAcc.Backend.Client().BlockByNumber(t.Context(), nil) require.NoError(t, err) baseHeight := block.NumberU64() // process follow_distance blocks @@ -229,16 +229,16 @@ func TestFollowBlock_OK(t *testing.T) { for i := 0; i < int(params.BeaconConfig().Eth1FollowDistance); i++ { lastHash = testAcc.Backend.Commit() } - lb, err := testAcc.Backend.Client().BlockByHash(context.Background(), lastHash) + lb, err := testAcc.Backend.Client().BlockByHash(t.Context(), lastHash) require.NoError(t, err) log.Println(lb.NumberU64()) // set current height - block, err = testAcc.Backend.Client().BlockByNumber(context.Background(), nil) + block, err = testAcc.Backend.Client().BlockByNumber(t.Context(), nil) require.NoError(t, err) web3Service.latestEth1Data.BlockHeight = block.NumberU64() web3Service.latestEth1Data.BlockTime = block.Time() - h, err := web3Service.followedBlockHeight(context.Background()) + h, err := web3Service.followedBlockHeight(t.Context()) require.NoError(t, err) assert.Equal(t, baseHeight, h, "Unexpected block height") numToForward := uint64(2) @@ -248,13 +248,13 @@ func 
TestFollowBlock_OK(t *testing.T) { testAcc.Backend.Commit() } - newBlock, err := testAcc.Backend.Client().BlockByNumber(context.Background(), nil) + newBlock, err := testAcc.Backend.Client().BlockByNumber(t.Context(), nil) require.NoError(t, err) // set current height web3Service.latestEth1Data.BlockHeight = newBlock.NumberU64() web3Service.latestEth1Data.BlockTime = newBlock.Time() - h, err = web3Service.followedBlockHeight(context.Background()) + h, err = web3Service.followedBlockHeight(t.Context()) require.NoError(t, err) assert.Equal(t, expectedHeight, h, "Unexpected block height") } @@ -294,7 +294,7 @@ func TestHandlePanic_OK(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), ) @@ -331,7 +331,7 @@ func TestLogTillGenesis_OK(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(beaconDB), @@ -371,19 +371,19 @@ func TestInitDepositCache_OK(t *testing.T) { var err error s.cfg.depositCache, err = depositsnapshot.New() require.NoError(t, err) - require.NoError(t, s.initDepositCaches(context.Background(), ctrs)) + require.NoError(t, s.initDepositCaches(t.Context(), ctrs)) - require.Equal(t, 0, len(s.cfg.depositCache.PendingContainers(context.Background(), nil))) + require.Equal(t, 0, len(s.cfg.depositCache.PendingContainers(t.Context(), nil))) blockRootA := [32]byte{'a'} emptyState, err := util.NewBeaconState() require.NoError(t, err) - require.NoError(t, s.cfg.beaconDB.SaveGenesisBlockRoot(context.Background(), blockRootA)) - require.NoError(t, s.cfg.beaconDB.SaveState(context.Background(), emptyState, blockRootA)) + require.NoError(t, s.cfg.beaconDB.SaveGenesisBlockRoot(t.Context(), blockRootA)) + require.NoError(t, 
s.cfg.beaconDB.SaveState(t.Context(), emptyState, blockRootA)) s.chainStartData.Chainstarted = true - require.NoError(t, s.initDepositCaches(context.Background(), ctrs)) - require.Equal(t, 3, len(s.cfg.depositCache.PendingContainers(context.Background(), nil))) + require.NoError(t, s.initDepositCaches(t.Context(), ctrs)) + require.Equal(t, 3, len(s.cfg.depositCache.PendingContainers(t.Context(), nil))) } func TestInitDepositCacheWithFinalization_OK(t *testing.T) { @@ -432,9 +432,9 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) { var err error s.cfg.depositCache, err = depositsnapshot.New() require.NoError(t, err) - require.NoError(t, s.initDepositCaches(context.Background(), ctrs)) + require.NoError(t, s.initDepositCaches(t.Context(), ctrs)) - require.Equal(t, 0, len(s.cfg.depositCache.PendingContainers(context.Background(), nil))) + require.Equal(t, 0, len(s.cfg.depositCache.PendingContainers(t.Context(), nil))) headBlock := util.NewBeaconBlock() headRoot, err := headBlock.Block.HashTreeRoot() @@ -443,21 +443,21 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) { emptyState, err := util.NewBeaconState() require.NoError(t, err) - require.NoError(t, s.cfg.beaconDB.SaveGenesisBlockRoot(context.Background(), headRoot)) - require.NoError(t, s.cfg.beaconDB.SaveState(context.Background(), emptyState, headRoot)) - require.NoError(t, stateGen.SaveState(context.Background(), headRoot, emptyState)) + require.NoError(t, s.cfg.beaconDB.SaveGenesisBlockRoot(t.Context(), headRoot)) + require.NoError(t, s.cfg.beaconDB.SaveState(t.Context(), emptyState, headRoot)) + require.NoError(t, stateGen.SaveState(t.Context(), headRoot, emptyState)) s.cfg.stateGen = stateGen require.NoError(t, emptyState.SetEth1DepositIndex(3)) - ctx := context.Background() + ctx := t.Context() require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(0), Root: headRoot[:]})) s.cfg.finalizedStateAtStartup = emptyState s.chainStartData.Chainstarted 
= true - require.NoError(t, s.initDepositCaches(context.Background(), ctrs)) + require.NoError(t, s.initDepositCaches(t.Context(), ctrs)) fDeposits, err := s.cfg.depositCache.FinalizedDeposits(ctx) require.NoError(t, err) - deps := s.cfg.depositCache.NonFinalizedDeposits(context.Background(), fDeposits.MerkleTrieIndex(), nil) + deps := s.cfg.depositCache.NonFinalizedDeposits(t.Context(), fDeposits.MerkleTrieIndex(), nil) assert.Equal(t, 0, len(deps)) } @@ -470,7 +470,7 @@ func TestNewService_EarliestVotingBlock(t *testing.T) { t.Cleanup(func() { server.Stop() }) - web3Service, err := NewService(context.Background(), + web3Service, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(beaconDB), @@ -486,7 +486,7 @@ func TestNewService_EarliestVotingBlock(t *testing.T) { // Genesis not set followBlock := uint64(2000) - blk, err := web3Service.determineEarliestVotingBlock(context.Background(), followBlock) + blk, err := web3Service.determineEarliestVotingBlock(t.Context(), followBlock) require.NoError(t, err) assert.Equal(t, followBlock-conf.Eth1FollowDistance, blk, "unexpected earliest voting block") @@ -497,14 +497,14 @@ func TestNewService_EarliestVotingBlock(t *testing.T) { for i := 0; i < numToForward; i++ { testAcc.Backend.Commit() } - currHeader, err := testAcc.Backend.Client().HeaderByNumber(context.Background(), nil) + currHeader, err := testAcc.Backend.Client().HeaderByNumber(t.Context(), nil) require.NoError(t, err) currTime := currHeader.Time now := time.Now() err = testAcc.Backend.AdjustTime(now.Sub(time.Unix(int64(currTime), 0))) require.NoError(t, err) testAcc.Backend.Commit() - currHeader, err = testAcc.Backend.Client().HeaderByNumber(context.Background(), nil) + currHeader, err = testAcc.Backend.Client().HeaderByNumber(t.Context(), nil) require.NoError(t, err) currTime = currHeader.Time web3Service.latestEth1Data.BlockHeight = currHeader.Number.Uint64() @@ -512,7 +512,7 @@ func 
TestNewService_EarliestVotingBlock(t *testing.T) { web3Service.chainStartData.GenesisTime = currTime // With a current slot of zero, only request follow_blocks behind. - blk, err = web3Service.determineEarliestVotingBlock(context.Background(), followBlock) + blk, err = web3Service.determineEarliestVotingBlock(t.Context(), followBlock) require.NoError(t, err) assert.Equal(t, followBlock-conf.Eth1FollowDistance, blk, "unexpected earliest voting block") @@ -528,14 +528,14 @@ func TestNewService_Eth1HeaderRequLimit(t *testing.T) { t.Cleanup(func() { server.Stop() }) - s1, err := NewService(context.Background(), + s1, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(beaconDB), ) require.NoError(t, err, "unable to setup web3 ETH1.0 chain service") assert.Equal(t, defaultEth1HeaderReqLimit, s1.cfg.eth1HeaderReqLimit, "default eth1 header request limit not set") - s2, err := NewService(context.Background(), + s2, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(beaconDB), @@ -584,7 +584,7 @@ func TestService_EnsureConsistentPowchainData(t *testing.T) { t.Cleanup(func() { srv.Stop() }) - s1, err := NewService(context.Background(), + s1, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), WithDepositCache(cache), @@ -594,11 +594,11 @@ func TestService_EnsureConsistentPowchainData(t *testing.T) { require.NoError(t, err) assert.NoError(t, genState.SetSlot(1000)) - require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(context.Background(), genState)) - _, err = s1.validPowchainData(context.Background()) + require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(t.Context(), genState)) + _, err = s1.validPowchainData(t.Context()) require.NoError(t, err) - eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(context.Background()) + eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(t.Context()) assert.NoError(t, 
err) assert.NotNil(t, eth1Data) @@ -615,7 +615,7 @@ func TestService_InitializeCorrectly(t *testing.T) { t.Cleanup(func() { srv.Stop() }) - s1, err := NewService(context.Background(), + s1, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), WithDepositCache(cache), @@ -625,14 +625,14 @@ func TestService_InitializeCorrectly(t *testing.T) { require.NoError(t, err) assert.NoError(t, genState.SetSlot(1000)) - require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(context.Background(), genState)) - _, err = s1.validPowchainData(context.Background()) + require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(t.Context(), genState)) + _, err = s1.validPowchainData(t.Context()) require.NoError(t, err) - eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(context.Background()) + eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(t.Context()) assert.NoError(t, err) - assert.NoError(t, s1.initializeEth1Data(context.Background(), eth1Data)) + assert.NoError(t, s1.initializeEth1Data(t.Context(), eth1Data)) assert.Equal(t, int64(-1), s1.lastReceivedMerkleIndex, "received incorrect last received merkle index") } @@ -645,7 +645,7 @@ func TestService_EnsureValidPowchainData(t *testing.T) { t.Cleanup(func() { srv.Stop() }) - s1, err := NewService(context.Background(), + s1, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), WithDepositCache(cache), @@ -655,17 +655,17 @@ func TestService_EnsureValidPowchainData(t *testing.T) { require.NoError(t, err) assert.NoError(t, genState.SetSlot(1000)) - require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(context.Background(), genState)) + require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(t.Context(), genState)) - err = s1.cfg.beaconDB.SaveExecutionChainData(context.Background(), &ethpb.ETH1ChainData{ + err = s1.cfg.beaconDB.SaveExecutionChainData(t.Context(), &ethpb.ETH1ChainData{ ChainstartData: &ethpb.ChainStartData{Chainstarted: true}, DepositContainers: []*ethpb.DepositContainer{{Index: 1}}, }) 
require.NoError(t, err) - _, err = s1.validPowchainData(context.Background()) + _, err = s1.validPowchainData(t.Context()) require.NoError(t, err) - eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(context.Background()) + eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(t.Context()) assert.NoError(t, err) assert.NotNil(t, eth1Data) @@ -741,7 +741,7 @@ func TestETH1Endpoints(t *testing.T) { beaconDB := dbutil.SetupDB(t) mbs := &mockBSUpdater{} - s1, err := NewService(context.Background(), + s1, err := NewService(t.Context(), WithHttpEndpoint(endpoints[0]), WithDepositContractAddress(testAcc.ContractAddr), WithDatabase(beaconDB), @@ -796,7 +796,7 @@ func TestService_FollowBlock(t *testing.T) { headerCache: newHeaderCache(), latestEth1Data: &ethpb.LatestETH1Data{BlockTime: (3000 * 40) + followTime, BlockHeight: 3000}, } - h, err := s.followedBlockHeight(context.Background()) + h, err := s.followedBlockHeight(t.Context()) assert.NoError(t, err) // With a much higher blocktime, the follow height is respectively shortened. assert.Equal(t, uint64(2283), h) @@ -841,7 +841,7 @@ func TestService_migrateOldDepositTree(t *testing.T) { t.Cleanup(func() { srv.Stop() }) - s, err := NewService(context.Background(), + s, err := NewService(t.Context(), WithHttpEndpoint(endpoint), WithDatabase(beaconDB), WithDepositCache(cache), diff --git a/beacon-chain/forkchoice/doubly-linked-tree/ffg_update_test.go b/beacon-chain/forkchoice/doubly-linked-tree/ffg_update_test.go index 46cb78c981..466c022a6a 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/ffg_update_test.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/ffg_update_test.go @@ -14,10 +14,10 @@ import ( func TestFFGUpdates_OneBranch(t *testing.T) { f := setup(0, 0) f.justifiedBalances = []uint64{1, 1} - ctx := context.Background() + ctx := t.Context() // The head should always start at the finalized block. 
- r, err := f.Head(context.Background()) + r, err := f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, params.BeaconConfig().ZeroHash, r, "Incorrect head with genesis") @@ -29,13 +29,13 @@ func TestFFGUpdates_OneBranch(t *testing.T) { // 2 <- justified: 1, finalized: 0 // | // 3 <- justified: 2, finalized: 1 - state, blkRoot, err := prepareForkchoiceState(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) + state, blkRoot, err := prepareForkchoiceState(t.Context(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - state, blkRoot, err = prepareForkchoiceState(context.Background(), 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 0) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - state, blkRoot, err = prepareForkchoiceState(context.Background(), 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 2, 1) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 2, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) @@ -47,7 +47,7 @@ func TestFFGUpdates_OneBranch(t *testing.T) { // 2 // | // 3 <- head - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(3), r, "Incorrect head for with justified epoch at 0") @@ -61,7 +61,7 @@ func TestFFGUpdates_OneBranch(t *testing.T) { // 3 f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(1), Epoch: 1} f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(0), Epoch: 0} - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) 
require.NoError(t, err) assert.Equal(t, indexToHash(2), r, "Incorrect head with justified epoch at 1") @@ -74,7 +74,7 @@ func TestFFGUpdates_OneBranch(t *testing.T) { // | // 3 <- head f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(3), Epoch: 2} - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(3), r, "Incorrect head with justified epoch at 2") } @@ -82,9 +82,9 @@ func TestFFGUpdates_OneBranch(t *testing.T) { func TestFFGUpdates_TwoBranches(t *testing.T) { f := setup(0, 0) f.justifiedBalances = []uint64{1, 1} - ctx := context.Background() + ctx := t.Context() - r, err := f.Head(context.Background()) + r, err := f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, params.BeaconConfig().ZeroHash, r, "Incorrect head with genesis") @@ -101,35 +101,35 @@ func TestFFGUpdates_TwoBranches(t *testing.T) { // | | // justified: 2, finalized: 0 -> 9 10 <- justified: 2, finalized: 0 // Left branch. 
- state, blkRoot, err := prepareForkchoiceState(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) + state, blkRoot, err := prepareForkchoiceState(t.Context(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - state, blkRoot, err = prepareForkchoiceState(context.Background(), 2, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 0) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 2, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - state, blkRoot, err = prepareForkchoiceState(context.Background(), 3, indexToHash(5), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 0) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 3, indexToHash(5), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - state, blkRoot, err = prepareForkchoiceState(context.Background(), 4, indexToHash(7), indexToHash(5), params.BeaconConfig().ZeroHash, 1, 0) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 4, indexToHash(7), indexToHash(5), params.BeaconConfig().ZeroHash, 1, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - state, blkRoot, err = prepareForkchoiceState(context.Background(), 4, indexToHash(9), indexToHash(7), params.BeaconConfig().ZeroHash, 2, 0) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 4, indexToHash(9), indexToHash(7), params.BeaconConfig().ZeroHash, 2, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) // Right branch. 
- state, blkRoot, err = prepareForkchoiceState(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - state, blkRoot, err = prepareForkchoiceState(context.Background(), 2, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 2, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - state, blkRoot, err = prepareForkchoiceState(context.Background(), 3, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 0, 0) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 3, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - state, blkRoot, err = prepareForkchoiceState(context.Background(), 4, indexToHash(8), indexToHash(6), params.BeaconConfig().ZeroHash, 1, 0) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 4, indexToHash(8), indexToHash(6), params.BeaconConfig().ZeroHash, 1, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - state, blkRoot, err = prepareForkchoiceState(context.Background(), 4, indexToHash(10), indexToHash(8), params.BeaconConfig().ZeroHash, 2, 0) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 4, indexToHash(10), indexToHash(8), params.BeaconConfig().ZeroHash, 2, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) @@ -145,7 +145,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) { // 7 8 // | | // 9 10 <-- head - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) 
assert.Equal(t, indexToHash(10), r, "Incorrect head with justified epoch at 0") @@ -161,7 +161,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) { // 7 8 // | | // 9 10 - f.ProcessAttestation(context.Background(), []uint64{0}, indexToHash(1), 0) + f.ProcessAttestation(t.Context(), []uint64{0}, indexToHash(1), 0) // With the additional vote to the left branch, the head should be 9: // 0 <-- start @@ -175,7 +175,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) { // 7 8 // | | // head -> 9 10 - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(9), r, "Incorrect head with justified epoch at 0") @@ -191,7 +191,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) { // 7 8 // | | // 9 10 - f.ProcessAttestation(context.Background(), []uint64{1}, indexToHash(2), 0) + f.ProcessAttestation(t.Context(), []uint64{1}, indexToHash(2), 0) // With the additional vote to the right branch, the head should be 10: // 0 <-- start @@ -205,18 +205,18 @@ func TestFFGUpdates_TwoBranches(t *testing.T) { // 7 8 // | | // 9 10 <-- head - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(10), r, "Incorrect head with justified epoch at 0") f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1, Root: indexToHash(1)} - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(7), r, "Incorrect head with justified epoch at 0") } func setup(justifiedEpoch, finalizedEpoch primitives.Epoch) *ForkChoice { - ctx := context.Background() + ctx := context.TODO() f := New() f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: justifiedEpoch, Root: params.BeaconConfig().ZeroHash} f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: finalizedEpoch, Root: params.BeaconConfig().ZeroHash} diff --git a/beacon-chain/forkchoice/doubly-linked-tree/forkchoice_test.go 
b/beacon-chain/forkchoice/doubly-linked-tree/forkchoice_test.go index f1c65700c4..91229cd81d 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/forkchoice_test.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/forkchoice_test.go @@ -83,7 +83,7 @@ func prepareForkchoiceState( func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) { f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, st, roblock)) @@ -112,7 +112,7 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) { func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) { f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, st, roblock)) @@ -143,7 +143,7 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) { func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) { f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, st, roblock)) @@ -174,7 +174,7 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) { func TestForkChoice_IsCanonical(t *testing.T) { f := setup(1, 1) - ctx := context.Background() + ctx := t.Context() st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, st, roblock)) @@ -205,7 +205,7 @@ func TestForkChoice_IsCanonical(t *testing.T) { func TestForkChoice_IsCanonicalReorg(t 
*testing.T) { f := setup(1, 1) - ctx := context.Background() + ctx := t.Context() st, roblock, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, st, roblock)) @@ -251,7 +251,7 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) { func TestForkChoice_AncestorRoot(t *testing.T) { f := setup(1, 1) - ctx := context.Background() + ctx := t.Context() st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, st, roblock)) @@ -283,7 +283,7 @@ func TestForkChoice_AncestorRoot(t *testing.T) { func TestForkChoice_AncestorEqualSlot(t *testing.T) { f := setup(1, 1) - ctx := context.Background() + ctx := t.Context() st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, st, roblock)) @@ -298,7 +298,7 @@ func TestForkChoice_AncestorEqualSlot(t *testing.T) { func TestForkChoice_AncestorLowerSlot(t *testing.T) { f := setup(1, 1) - ctx := context.Background() + ctx := t.Context() st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, st, roblock)) @@ -312,7 +312,7 @@ func TestForkChoice_AncestorLowerSlot(t *testing.T) { } func TestForkChoice_RemoveEquivocating(t *testing.T) { - ctx := context.Background() + ctx := t.Context() f := setup(1, 1) // Insert a block it will be head st, roblock, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1) @@ -375,7 +375,7 @@ func indexToHash(i uint64) [32]byte { func TestForkChoice_UpdateJustifiedAndFinalizedCheckpoints(t *testing.T) { f := 
setup(1, 1) - ctx := context.Background() + ctx := t.Context() jr := [32]byte{'j'} fr := [32]byte{'f'} jc := &forkchoicetypes.Checkpoint{Root: jr, Epoch: 3} @@ -389,7 +389,7 @@ func TestForkChoice_UpdateJustifiedAndFinalizedCheckpoints(t *testing.T) { } func TestStore_CommonAncestor(t *testing.T) { - ctx := context.Background() + ctx := t.Context() f := setup(0, 0) // /-- b -- d -- e @@ -635,14 +635,14 @@ func TestStore_InsertChain(t *testing.T) { for i := 0; i < len(blks); i++ { args[i] = blks[10-i-1] } - require.NoError(t, f.InsertChain(context.Background(), args)) + require.NoError(t, f.InsertChain(t.Context(), args)) f = setup(1, 1) - require.NoError(t, f.InsertChain(context.Background(), args[2:])) + require.NoError(t, f.InsertChain(t.Context(), args[2:])) } func TestForkChoice_UpdateCheckpoints(t *testing.T) { - ctx := context.Background() + ctx := t.Context() tests := []struct { name string justified *forkchoicetypes.Checkpoint @@ -734,7 +734,7 @@ func TestForkChoice_UpdateCheckpoints(t *testing.T) { } func TestWeight(t *testing.T) { - ctx := context.Background() + ctx := t.Context() f := setup(0, 0) root := [32]byte{'a'} @@ -760,14 +760,14 @@ func TestForkchoice_UpdateJustifiedBalances(t *testing.T) { f.balancesByRoot = func(context.Context, [32]byte) ([]uint64, error) { return balances, nil } - require.NoError(t, f.updateJustifiedBalances(context.Background(), [32]byte{})) + require.NoError(t, f.updateJustifiedBalances(t.Context(), [32]byte{})) require.Equal(t, uint64(7), f.numActiveValidators) require.Equal(t, uint64(430)/32, f.store.committeeWeight) require.DeepEqual(t, balances, f.justifiedBalances) } func TestForkChoice_UnrealizedJustifiedPayloadBlockHash(t *testing.T) { - ctx := context.Background() + ctx := t.Context() f := setup(0, 0) st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1) @@ -781,7 +781,7 @@ func TestForkChoice_UnrealizedJustifiedPayloadBlockHash(t *testing.T) { func 
TestForkChoiceIsViableForCheckpoint(t *testing.T) { f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() st, blk, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0) require.NoError(t, err) @@ -873,7 +873,7 @@ func TestForkChoiceIsViableForCheckpoint(t *testing.T) { func TestForkChoiceSlot(t *testing.T) { f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() st, blk, err := prepareForkchoiceState(ctx, 3, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0) require.NoError(t, err) // No Node @@ -888,7 +888,7 @@ func TestForkChoiceSlot(t *testing.T) { func TestForkchoiceParentRoot(t *testing.T) { f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() root1 := [32]byte{'a'} st, blk, err := prepareForkchoiceState(ctx, 3, root1, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0) require.NoError(t, err) @@ -914,7 +914,7 @@ func TestForkchoiceParentRoot(t *testing.T) { func TestForkChoice_CleanupInserting(t *testing.T) { f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 2) f.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return f.justifiedBalances, errors.New("mock err") diff --git a/beacon-chain/forkchoice/doubly-linked-tree/last_root_test.go b/beacon-chain/forkchoice/doubly-linked-tree/last_root_test.go index 311ee0f3c9..006b813e1f 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/last_root_test.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/last_root_test.go @@ -1,7 +1,6 @@ package doublylinkedtree import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/config/params" @@ -10,7 +9,7 @@ import ( func TestLastRoot(t *testing.T) { f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, 
params.BeaconConfig().ZeroHash, [32]byte{'1'}, 0, 0) require.NoError(t, err) diff --git a/beacon-chain/forkchoice/doubly-linked-tree/no_vote_test.go b/beacon-chain/forkchoice/doubly-linked-tree/no_vote_test.go index 414ddaa819..bc1897e3f6 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/no_vote_test.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/no_vote_test.go @@ -1,7 +1,6 @@ package doublylinkedtree import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/config/params" @@ -12,10 +11,10 @@ import ( func TestNoVote_CanFindHead(t *testing.T) { f := setup(1, 1) f.justifiedBalances = make([]uint64, 16) - ctx := context.Background() + ctx := t.Context() // The head should always start at the finalized block. - r, err := f.Head(context.Background()) + r, err := f.Head(t.Context()) require.NoError(t, err) if r != params.BeaconConfig().ZeroHash { t.Errorf("Incorrect head with genesis") @@ -25,10 +24,10 @@ func TestNoVote_CanFindHead(t *testing.T) { // 0 // / // 2 <- head - state, blkRoot, err := prepareForkchoiceState(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) + state, blkRoot, err := prepareForkchoiceState(t.Context(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1") @@ -36,10 +35,10 @@ func TestNoVote_CanFindHead(t *testing.T) { // 0 // / \ // head -> 2 1 - state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) 
require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1") @@ -49,10 +48,10 @@ func TestNoVote_CanFindHead(t *testing.T) { // head -> 2 1 // | // 3 - state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 0, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1") @@ -62,10 +61,10 @@ func TestNoVote_CanFindHead(t *testing.T) { // 2 1 // | | // head -> 4 3 - state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 0, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1") @@ -77,10 +76,10 @@ func TestNoVote_CanFindHead(t *testing.T) { // head -> 4 3 // | // 5 <- justified epoch = 2 - state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 2, 1) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 0, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 2, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - r, err = f.Head(context.Background()) + r, err = 
f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 2") @@ -94,10 +93,10 @@ func TestNoVote_CanFindHead(t *testing.T) { // 5 // | // 6 <- head - state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(5), params.BeaconConfig().ZeroHash, 2, 1) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 0, indexToHash(6), indexToHash(5), params.BeaconConfig().ZeroHash, 2, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 2") } diff --git a/beacon-chain/forkchoice/doubly-linked-tree/node_test.go b/beacon-chain/forkchoice/doubly-linked-tree/node_test.go index b83a8c600e..5f1d4fce35 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/node_test.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/node_test.go @@ -1,7 +1,6 @@ package doublylinkedtree import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/config/params" @@ -13,7 +12,7 @@ import ( func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) { f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blk)) @@ -40,7 +39,7 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) { func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) { f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blk)) @@ -70,7 +69,7 @@ func 
TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) { func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) { f := setup(1, 1) - ctx := context.Background() + ctx := t.Context() // Input child is not viable. state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 3) require.NoError(t, err) @@ -85,7 +84,7 @@ func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) { func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) { f := setup(1, 1) - ctx := context.Background() + ctx := t.Context() // Input child is the best descendant state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) @@ -98,7 +97,7 @@ func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) { func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) { f := setup(1, 1) - ctx := context.Background() + ctx := t.Context() // Input child is the best descendant state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) @@ -118,7 +117,7 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) { func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) { f := setup(1, 1) - ctx := context.Background() + ctx := t.Context() // Input child is the best descendant state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) @@ -158,7 +157,7 @@ func TestNode_ViableForHead(t *testing.T) { func TestNode_LeadsToViableHead(t *testing.T) { f := setup(4, 3) - ctx := context.Background() + ctx := t.Context() state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) require.NoError(t, 
f.InsertNode(ctx, state, blk)) @@ -183,7 +182,7 @@ func TestNode_LeadsToViableHead(t *testing.T) { func TestNode_SetFullyValidated(t *testing.T) { f := setup(1, 1) - ctx := context.Background() + ctx := t.Context() storeNodes := make([]*Node, 6) storeNodes[0] = f.store.treeRootNode // insert blocks in the fork pattern (optimistic status in parenthesis) @@ -260,7 +259,7 @@ func TestNode_SetFullyValidated(t *testing.T) { func TestNode_TimeStampsChecks(t *testing.T) { f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() // early block driftGenesisTime(f, 1, 1) diff --git a/beacon-chain/forkchoice/doubly-linked-tree/on_tick_test.go b/beacon-chain/forkchoice/doubly-linked-tree/on_tick_test.go index e83b8b181e..ce2930dd07 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/on_tick_test.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/on_tick_test.go @@ -1,7 +1,6 @@ package doublylinkedtree import ( - "context" "testing" forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types" @@ -11,7 +10,7 @@ import ( ) func TestStore_NewSlot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() bj := [32]byte{'z'} type args struct { diff --git a/beacon-chain/forkchoice/doubly-linked-tree/optimistic_sync_test.go b/beacon-chain/forkchoice/doubly-linked-tree/optimistic_sync_test.go index 82cb63a865..8bcc44a539 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/optimistic_sync_test.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/optimistic_sync_test.go @@ -1,7 +1,6 @@ package doublylinkedtree import ( - "context" "sort" "testing" @@ -172,7 +171,7 @@ func TestPruneInvalid(t *testing.T) { }, } for _, tc := range tests { - ctx := context.Background() + ctx := t.Context() f := setup(1, 1) state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1) @@ -212,7 +211,7 @@ func TestPruneInvalid(t *testing.T) { require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, 
state, blkRoot)) - roots, err := f.store.setOptimisticToInvalid(context.Background(), tc.root, tc.parentRoot, tc.payload) + roots, err := f.store.setOptimisticToInvalid(t.Context(), tc.root, tc.parentRoot, tc.payload) if tc.wantedErr == nil { require.NoError(t, err) require.DeepEqual(t, tc.wantedRoots, roots) @@ -225,7 +224,7 @@ func TestPruneInvalid(t *testing.T) { // This is a regression test (10445) func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) { - ctx := context.Background() + ctx := t.Context() f := setup(1, 1) state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1) @@ -257,7 +256,7 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) { // D is invalid func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) { - ctx := context.Background() + ctx := t.Context() f := setup(1, 1) state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1) @@ -288,7 +287,7 @@ func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) { // // B is INVALID func TestSetOptimisticToInvalid_ForkAtMerge(t *testing.T) { - ctx := context.Background() + ctx := t.Context() f := setup(1, 1) st, root, err := prepareForkchoiceState(ctx, 100, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1) @@ -341,7 +340,7 @@ func TestSetOptimisticToInvalid_ForkAtMerge(t *testing.T) { // // B is INVALID func TestSetOptimisticToInvalid_ForkAtMerge_bis(t *testing.T) { - ctx := context.Background() + ctx := t.Context() f := setup(1, 1) st, root, err := prepareForkchoiceState(ctx, 100, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1) @@ -390,7 +389,7 @@ func TestSetOptimisticToValid(t *testing.T) { op, err := f.IsOptimistic([32]byte{}) require.NoError(t, err) require.Equal(t, true, op) - require.NoError(t, f.SetOptimisticToValid(context.Background(), [32]byte{})) + require.NoError(t, f.SetOptimisticToValid(t.Context(), [32]byte{})) op, err = 
f.IsOptimistic([32]byte{}) require.NoError(t, err) require.Equal(t, false, op) diff --git a/beacon-chain/forkchoice/doubly-linked-tree/proposer_boost_test.go b/beacon-chain/forkchoice/doubly-linked-tree/proposer_boost_test.go index 0412637a7d..e81bab1a5b 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/proposer_boost_test.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/proposer_boost_test.go @@ -1,7 +1,6 @@ package doublylinkedtree import ( - "context" "testing" "time" @@ -24,7 +23,7 @@ func driftGenesisTime(f *ForkChoice, slot primitives.Slot, delay uint64) { // and release their withheld block of slot n+2 in an attempt to win fork choice. // If the honest proposal is boosted at slot n+2, it will win against this attacker. func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) { - ctx := context.Background() + ctx := t.Context() jEpoch, fEpoch := primitives.Epoch(0), primitives.Epoch(0) zeroHash := params.BeaconConfig().ZeroHash balances := make([]uint64, 64) // 64 active validators. @@ -428,7 +427,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) { } func TestForkChoice_BoostProposerRoot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() root := [32]byte{'A'} var zeroHash [32]byte @@ -476,7 +475,7 @@ func TestForkChoice_BoostProposerRoot(t *testing.T) { // Regression test (11053) func TestForkChoice_missingProposerBoostRoots(t *testing.T) { - ctx := context.Background() + ctx := t.Context() f := setup(1, 1) balances := make([]uint64, 64) // 64 active validators. 
for i := 0; i < len(balances); i++ { diff --git a/beacon-chain/forkchoice/doubly-linked-tree/reorg_late_blocks_test.go b/beacon-chain/forkchoice/doubly-linked-tree/reorg_late_blocks_test.go index 463c0c0f98..b6faa6077f 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/reorg_late_blocks_test.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/reorg_late_blocks_test.go @@ -1,7 +1,6 @@ package doublylinkedtree import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/config/params" @@ -17,7 +16,7 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) { f.store.committeeWeight += uint64(10) } f.store.committeeWeight /= uint64(params.BeaconConfig().SlotsPerEpoch) - ctx := context.Background() + ctx := t.Context() driftGenesisTime(f, 1, 0) st, blk, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, 0, 0) require.NoError(t, err) @@ -114,7 +113,7 @@ func TestForkChoice_GetProposerHead(t *testing.T) { f.store.committeeWeight += uint64(10) } f.store.committeeWeight /= uint64(params.BeaconConfig().SlotsPerEpoch) - ctx := context.Background() + ctx := t.Context() driftGenesisTime(f, 1, 0) parentRoot := [32]byte{'a'} st, blk, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{'A'}, 0, 0) diff --git a/beacon-chain/forkchoice/doubly-linked-tree/store_test.go b/beacon-chain/forkchoice/doubly-linked-tree/store_test.go index 3e1714d479..6b3e0017e4 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/store_test.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/store_test.go @@ -26,19 +26,19 @@ func TestStore_FinalizedEpoch(t *testing.T) { func TestStore_NodeCount(t *testing.T) { f := setup(0, 0) - state, blk, err := prepareForkchoiceState(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) + state, blk, err := prepareForkchoiceState(t.Context(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) - 
require.NoError(t, f.InsertNode(context.Background(), state, blk)) + require.NoError(t, f.InsertNode(t.Context(), state, blk)) require.Equal(t, 2, f.NodeCount()) } func TestStore_NodeByRoot(t *testing.T) { f := setup(0, 0) - ctx := context.Background() - state, blkRoot, err := prepareForkchoiceState(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) + ctx := t.Context() + state, blkRoot, err := prepareForkchoiceState(t.Context(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - state, blkRoot, err = prepareForkchoiceState(context.Background(), 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) node0 := f.store.treeRootNode @@ -61,8 +61,8 @@ func TestStore_NodeByRoot(t *testing.T) { func TestForkChoice_HasNode(t *testing.T) { f := setup(0, 0) - ctx := context.Background() - state, blkRoot, err := prepareForkchoiceState(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) + ctx := t.Context() + state, blkRoot, err := prepareForkchoiceState(t.Context(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) require.Equal(t, true, f.HasNode(indexToHash(1))) @@ -72,47 +72,47 @@ func TestStore_Head_UnknownJustifiedRoot(t *testing.T) { f := setup(0, 0) f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'a'}} - _, err := f.store.head(context.Background()) + _, err := f.store.head(t.Context()) assert.ErrorContains(t, errUnknownJustifiedRoot.Error(), err) } func 
TestStore_Head_Itself(t *testing.T) { f := setup(0, 0) - state, blkRoot, err := prepareForkchoiceState(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) + state, blkRoot, err := prepareForkchoiceState(t.Context(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) - require.NoError(t, f.InsertNode(context.Background(), state, blkRoot)) + require.NoError(t, f.InsertNode(t.Context(), state, blkRoot)) // Since the justified node does not have a best descendant, the best node // is itself. f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 0, Root: indexToHash(1)} - h, err := f.store.head(context.Background()) + h, err := f.store.head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(1), h) } func TestStore_Head_BestDescendant(t *testing.T) { f := setup(0, 0) - ctx := context.Background() - state, blkRoot, err := prepareForkchoiceState(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) + ctx := t.Context() + state, blkRoot, err := prepareForkchoiceState(t.Context(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - state, blkRoot, err = prepareForkchoiceState(context.Background(), 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - state, blkRoot, err = prepareForkchoiceState(context.Background(), 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0) 
require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - state, blkRoot, err = prepareForkchoiceState(context.Background(), 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 0, Root: indexToHash(1)} - h, err := f.store.head(context.Background()) + h, err := f.store.head(t.Context()) require.NoError(t, err) require.Equal(t, h, indexToHash(4)) } func TestStore_UpdateBestDescendant_ContextCancelled(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) f := setup(0, 0) state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) @@ -133,7 +133,7 @@ func TestStore_Insert(t *testing.T) { fc := &forkchoicetypes.Checkpoint{Epoch: 0} s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}} payloadHash := [32]byte{'a'} - ctx := context.Background() + ctx := t.Context() _, blk, err := prepareForkchoiceState(ctx, 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1) require.NoError(t, err) _, err = s.insert(ctx, blk, 1, 1) @@ -152,7 +152,7 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) { // Define 100 nodes in store. 
numOfNodes := uint64(100) f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) @@ -166,7 +166,7 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) { // Finalized root is at index 99 so everything before 99 should be pruned. s.finalizedCheckpoint.Root = indexToHash(99) - require.NoError(t, s.prune(context.Background())) + require.NoError(t, s.prune(t.Context())) assert.Equal(t, 1, len(s.nodeByRoot), "Incorrect nodes count") } @@ -174,7 +174,7 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) { // Define 100 nodes in store. numOfNodes := uint64(100) f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) @@ -188,12 +188,12 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) { // Finalized root is at index 11 so everything before 11 should be pruned. s.finalizedCheckpoint.Root = indexToHash(10) - require.NoError(t, s.prune(context.Background())) + require.NoError(t, s.prune(t.Context())) assert.Equal(t, 90, len(s.nodeByRoot), "Incorrect nodes count") // One more time. s.finalizedCheckpoint.Root = indexToHash(20) - require.NoError(t, s.prune(context.Background())) + require.NoError(t, s.prune(t.Context())) assert.Equal(t, 80, len(s.nodeByRoot), "Incorrect nodes count") } @@ -201,7 +201,7 @@ func TestStore_Prune_ReturnEarly(t *testing.T) { // Define 100 nodes in store. 
numOfNodes := uint64(100) f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) @@ -226,7 +226,7 @@ func TestStore_Prune_ReturnEarly(t *testing.T) { // And we finalize 1. As a result only 1 should survive func TestStore_Prune_NoDanglingBranch(t *testing.T) { f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, [32]byte{'1'}, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) @@ -236,7 +236,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) { s := f.store s.finalizedCheckpoint.Root = indexToHash(1) - require.NoError(t, s.prune(context.Background())) + require.NoError(t, s.prune(t.Context())) require.Equal(t, len(s.nodeByRoot), 1) require.Equal(t, len(s.nodeByPayload), 1) } @@ -252,7 +252,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) { // \ \ // J -- K -- L func TestStore_tips(t *testing.T) { - ctx := context.Background() + ctx := t.Context() f := setup(1, 1) state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) @@ -307,7 +307,7 @@ func TestStore_tips(t *testing.T) { func TestStore_PruneMapsNodes(t *testing.T) { f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, [32]byte{'1'}, 0, 0) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) @@ -317,7 +317,7 @@ func TestStore_PruneMapsNodes(t *testing.T) { s := f.store s.finalizedCheckpoint.Root = indexToHash(1) - require.NoError(t, s.prune(context.Background())) + require.NoError(t, s.prune(t.Context())) 
require.Equal(t, len(s.nodeByRoot), 1) require.Equal(t, len(s.nodeByPayload), 1) @@ -330,7 +330,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) { // Make sure it doesn't underflow s.genesisTime = uint64(time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix()) - ctx := context.Background() + ctx := t.Context() _, blk, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, b, b, 1, 1) require.NoError(t, err) _, err = s.insert(ctx, blk, 1, 1) @@ -462,7 +462,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) { } func TestStore_TargetRootForEpoch(t *testing.T) { - ctx := context.Background() + ctx := t.Context() f := setup(1, 1) state, blk, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) @@ -610,7 +610,7 @@ func TestStore_TargetRootForEpoch(t *testing.T) { func TestStore_CleanupInserting(t *testing.T) { f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() st, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0) require.NoError(t, err) require.NotNil(t, f.InsertNode(ctx, st, blk)) diff --git a/beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification_test.go b/beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification_test.go index 27c921a705..2bbef62806 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification_test.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification_test.go @@ -1,7 +1,6 @@ package doublylinkedtree import ( - "context" "testing" forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types" @@ -13,7 +12,7 @@ import ( func TestStore_SetUnrealizedEpochs(t *testing.T) { f := setup(1, 1) - ctx := context.Background() + ctx := t.Context() state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, 
params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) @@ -36,7 +35,7 @@ func TestStore_SetUnrealizedEpochs(t *testing.T) { func TestStore_UpdateUnrealizedCheckpoints(t *testing.T) { f := setup(1, 1) - ctx := context.Background() + ctx := t.Context() state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) @@ -63,7 +62,7 @@ func TestStore_UpdateUnrealizedCheckpoints(t *testing.T) { // B is the first block that justifies A. func TestStore_LongFork(t *testing.T) { f := setup(1, 1) - ctx := context.Background() + ctx := t.Context() state, blkRoot, err := prepareForkchoiceState(ctx, 75, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) @@ -116,7 +115,7 @@ func TestStore_LongFork(t *testing.T) { // E justifies A. G justifies E. func TestStore_NoDeadLock(t *testing.T) { f := setup(0, 0) - ctx := context.Background() + ctx := t.Context() // Epoch 1 blocks state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0) @@ -198,7 +197,7 @@ func TestStore_NoDeadLock(t *testing.T) { // D justifies and comes late. 
func TestStore_ForkNextEpoch(t *testing.T) { f := setup(1, 0) - ctx := context.Background() + ctx := t.Context() // Epoch 1 blocks (D does not arrive) state, blkRoot, err := prepareForkchoiceState(ctx, 92, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 0) @@ -258,7 +257,7 @@ func TestStore_ForkNextEpoch(t *testing.T) { } func TestStore_PullTips_Heuristics(t *testing.T) { - ctx := context.Background() + ctx := t.Context() t.Run("Current epoch is justified", func(tt *testing.T) { f := setup(1, 1) st, root, err := prepareForkchoiceState(ctx, 65, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1) diff --git a/beacon-chain/forkchoice/doubly-linked-tree/vote_test.go b/beacon-chain/forkchoice/doubly-linked-tree/vote_test.go index 60f5a172da..7ad3e4c3a4 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/vote_test.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/vote_test.go @@ -1,7 +1,6 @@ package doublylinkedtree import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/config/params" @@ -12,10 +11,10 @@ import ( func TestVotes_CanFindHead(t *testing.T) { f := setup(1, 1) f.justifiedBalances = []uint64{1, 1} - ctx := context.Background() + ctx := t.Context() // The head should always start at the finalized block. 
- r, err := f.Head(context.Background()) + r, err := f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, params.BeaconConfig().ZeroHash, r, "Incorrect head with genesis") @@ -23,11 +22,11 @@ func TestVotes_CanFindHead(t *testing.T) { // 0 // / // 2 <- head - state, blkRoot, err := prepareForkchoiceState(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) + state, blkRoot, err := prepareForkchoiceState(t.Context(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1") @@ -35,11 +34,11 @@ func TestVotes_CanFindHead(t *testing.T) { // 0 // / \ // head -> 2 1 - state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1") @@ -47,8 +46,8 @@ func TestVotes_CanFindHead(t *testing.T) { // 0 // / \ // 2 1 <- +vote, new head - f.ProcessAttestation(context.Background(), []uint64{0}, indexToHash(1), 2) - r, err = f.Head(context.Background()) + f.ProcessAttestation(t.Context(), []uint64{0}, indexToHash(1), 2) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(1), r, "Incorrect head for with justified epoch at 1") @@ -56,8 +55,8 @@ func TestVotes_CanFindHead(t *testing.T) { // 0 // / \ // vote, new head -> 
2 1 - f.ProcessAttestation(context.Background(), []uint64{1}, indexToHash(2), 2) - r, err = f.Head(context.Background()) + f.ProcessAttestation(t.Context(), []uint64{1}, indexToHash(2), 2) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1") @@ -67,11 +66,11 @@ func TestVotes_CanFindHead(t *testing.T) { // head -> 2 1 // | // 3 - state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 0, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1") @@ -81,8 +80,8 @@ func TestVotes_CanFindHead(t *testing.T) { // head -> 2 1 <- old vote // | // 3 <- new vote - f.ProcessAttestation(context.Background(), []uint64{0}, indexToHash(3), 3) - r, err = f.Head(context.Background()) + f.ProcessAttestation(t.Context(), []uint64{0}, indexToHash(3), 3) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1") @@ -92,8 +91,8 @@ func TestVotes_CanFindHead(t *testing.T) { // old vote -> 2 1 <- new vote // | // 3 <- head - f.ProcessAttestation(context.Background(), []uint64{1}, indexToHash(1), 3) - r, err = f.Head(context.Background()) + f.ProcessAttestation(t.Context(), []uint64{1}, indexToHash(1), 3) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(3), r, "Incorrect head for with justified epoch at 1") @@ -105,11 +104,11 @@ func TestVotes_CanFindHead(t *testing.T) { // 3 // | // 4 <- head - state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(4), 
indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 0, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1") @@ -125,11 +124,11 @@ func TestVotes_CanFindHead(t *testing.T) { // 5 <- head, justified epoch = 2 // // We set this node's slot to be 64 so that when pruning below we do not prune its child - state, blkRoot, err = prepareForkchoiceState(context.Background(), 2*params.BeaconConfig().SlotsPerEpoch, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 2, 2) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 2*params.BeaconConfig().SlotsPerEpoch, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 2, 2) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 2") @@ -143,15 +142,15 @@ func TestVotes_CanFindHead(t *testing.T) { // 4 // / \ // 5 6 <- head, justified epoch = 3 - state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 3, 2) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 3, 2) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 3") // Moved 2 votes to block 5: - f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4) + 
f.ProcessAttestation(t.Context(), []uint64{0, 1}, indexToHash(5), 4) // Inset blocks 7 and 8 // 6 should still be the head, even though 5 has all the votes. @@ -168,13 +167,13 @@ func TestVotes_CanFindHead(t *testing.T) { // 7 // | // 8 - state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(7), indexToHash(5), params.BeaconConfig().ZeroHash, 2, 2) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 0, indexToHash(7), indexToHash(5), params.BeaconConfig().ZeroHash, 2, 2) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(8), indexToHash(7), params.BeaconConfig().ZeroHash, 2, 2) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 0, indexToHash(8), indexToHash(7), params.BeaconConfig().ZeroHash, 2, 2) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 3") @@ -195,10 +194,10 @@ func TestVotes_CanFindHead(t *testing.T) { // 8 // | // 10 <- head - state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(10), indexToHash(8), params.BeaconConfig().ZeroHash, 3, 2) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 0, indexToHash(10), indexToHash(8), params.BeaconConfig().ZeroHash, 3, 2) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3") @@ -218,52 +217,52 @@ func TestVotes_CanFindHead(t *testing.T) { // 8 // / \ // 9 10 - state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(9), indexToHash(8), params.BeaconConfig().ZeroHash, 3, 2) + state, blkRoot, err = 
prepareForkchoiceState(t.Context(), 0, indexToHash(9), indexToHash(8), params.BeaconConfig().ZeroHash, 3, 2) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 3") // Move two votes for 10, verify it's head - f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(10), 5) - r, err = f.Head(context.Background()) + f.ProcessAttestation(t.Context(), []uint64{0, 1}, indexToHash(10), 5) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3") // Add 3 more validators to the system. f.justifiedBalances = []uint64{1, 1, 1, 1, 1} // The new validators voted for 9 - f.ProcessAttestation(context.Background(), []uint64{2, 3, 4}, indexToHash(9), 5) + f.ProcessAttestation(t.Context(), []uint64{2, 3, 4}, indexToHash(9), 5) // The new head should be 9. - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 3") // Set the f.justifiedBalances of the last 2 validators to 0. f.justifiedBalances = []uint64{1, 1, 1, 0, 0} // The head should be back to 10. - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3") // Set the f.justifiedBalances back to normal. f.justifiedBalances = []uint64{1, 1, 1, 1, 1} // The head should be back to 9. - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 3") // Remove the last 2 validators. f.justifiedBalances = []uint64{1, 1, 1} // The head should be back to 10. 
- r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3") - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3") @@ -284,12 +283,12 @@ func TestVotes_CanFindHead(t *testing.T) { // / \ // 9 10 f.store.finalizedCheckpoint.Root = indexToHash(5) - require.NoError(t, f.store.prune(context.Background())) + require.NoError(t, f.store.prune(t.Context())) assert.Equal(t, 5, len(f.store.nodeByRoot), "Incorrect nodes length after prune") // we pruned artificially the justified root. f.store.justifiedCheckpoint.Root = indexToHash(5) - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3") @@ -303,11 +302,11 @@ func TestVotes_CanFindHead(t *testing.T) { // 10 9 // | // head-> 11 - state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(11), indexToHash(10), params.BeaconConfig().ZeroHash, 3, 2) + state, blkRoot, err = prepareForkchoiceState(t.Context(), 0, indexToHash(11), indexToHash(10), params.BeaconConfig().ZeroHash, 3, 2) require.NoError(t, err) require.NoError(t, f.InsertNode(ctx, state, blkRoot)) - r, err = f.Head(context.Background()) + r, err = f.Head(t.Context()) require.NoError(t, err) assert.Equal(t, indexToHash(11), r, "Incorrect head for with justified epoch at 3") } diff --git a/beacon-chain/monitor/process_attestation_test.go b/beacon-chain/monitor/process_attestation_test.go index 19dda2640b..820d0871b5 100644 --- a/beacon-chain/monitor/process_attestation_test.go +++ b/beacon-chain/monitor/process_attestation_test.go @@ -2,7 +2,6 @@ package monitor import ( "bytes" - "context" "testing" "github.com/OffchainLabs/prysm/v6/consensus-types/blocks" @@ -16,7 +15,7 @@ import ( ) 
func TestGetAttestingIndices(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconState, _ := util.DeterministicGenesisState(t, 256) att := ðpb.Attestation{ Data: ðpb.AttestationData{ @@ -54,7 +53,7 @@ func TestProcessIncludedAttestationTwoTracked(t *testing.T) { }, AggregationBits: bitfield.Bitlist{0b11, 0b1}, } - s.processIncludedAttestation(context.Background(), state, att) + s.processIncludedAttestation(t.Context(), state, att) wanted1 := "\"Attestation included\" balanceChange=0 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2" wanted2 := "\"Attestation included\" balanceChange=100000000 correctHead=true correctSource=true correctTarget=true head=0x68656c6c6f2d inclusionSlot=2 newBalance=32000000000 prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12" require.LogsContain(t, hook, wanted1) @@ -64,7 +63,7 @@ func TestProcessIncludedAttestationTwoTracked(t *testing.T) { func TestProcessUnaggregatedAttestationStateNotCached(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() s := setupService(t) state, _ := util.DeterministicGenesisStateAltair(t, 256) @@ -95,7 +94,7 @@ func TestProcessUnaggregatedAttestationStateNotCached(t *testing.T) { } func TestProcessUnaggregatedAttestationStateCached(t *testing.T) { - ctx := context.Background() + ctx := t.Context() hook := logTest.NewGlobal() s := setupService(t) @@ -123,7 +122,7 @@ func TestProcessUnaggregatedAttestationStateCached(t *testing.T) { AggregationBits: bitfield.Bitlist{0b11, 0b1}, } require.NoError(t, s.config.StateGen.SaveState(ctx, root, state)) - s.processUnaggregatedAttestation(context.Background(), att) + s.processUnaggregatedAttestation(t.Context(), att) wanted1 := "\"Processed unaggregated attestation\" head=0x68656c6c6f2d 
prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=2" wanted2 := "\"Processed unaggregated attestation\" head=0x68656c6c6f2d prefix=monitor slot=1 source=0x68656c6c6f2d target=0x68656c6c6f2d validatorIndex=12" require.LogsContain(t, hook, wanted1) @@ -133,7 +132,7 @@ func TestProcessUnaggregatedAttestationStateCached(t *testing.T) { func TestProcessAggregatedAttestationStateNotCached(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() s := setupService(t) state, _ := util.DeterministicGenesisStateAltair(t, 256) @@ -169,7 +168,7 @@ func TestProcessAggregatedAttestationStateNotCached(t *testing.T) { func TestProcessAggregatedAttestationStateCached(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() s := setupService(t) state, _ := util.DeterministicGenesisStateAltair(t, 256) participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} @@ -208,7 +207,7 @@ func TestProcessAggregatedAttestationStateCached(t *testing.T) { func TestProcessAttestations(t *testing.T) { hook := logTest.NewGlobal() s := setupService(t) - ctx := context.Background() + ctx := t.Context() state, _ := util.DeterministicGenesisStateAltair(t, 256) require.NoError(t, state.SetSlot(2)) require.NoError(t, state.SetCurrentParticipationBits(bytes.Repeat([]byte{0xff}, 13))) diff --git a/beacon-chain/monitor/process_block_test.go b/beacon-chain/monitor/process_block_test.go index f539828df8..e1d30a8013 100644 --- a/beacon-chain/monitor/process_block_test.go +++ b/beacon-chain/monitor/process_block_test.go @@ -1,7 +1,6 @@ package monitor import ( - "context" "fmt" "testing" @@ -186,7 +185,7 @@ func TestProcessProposedBlock(t *testing.T) { func TestProcessBlock_AllEventsTrackedVals(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() genesis, keys := util.DeterministicGenesisStateAltair(t, 64) c, err := 
altair.NextSyncCommittee(ctx, genesis) diff --git a/beacon-chain/monitor/service_test.go b/beacon-chain/monitor/service_test.go index df32d22432..abc75f4bfc 100644 --- a/beacon-chain/monitor/service_test.go +++ b/beacon-chain/monitor/service_test.go @@ -96,7 +96,7 @@ func setupService(t *testing.T) *Service { InitialSyncComplete: make(chan struct{}), }, - ctx: context.Background(), + ctx: t.Context(), TrackedValidators: trackedVals, latestPerformance: latestPerformance, aggregatedPerformance: aggregatedPerformance, @@ -133,7 +133,7 @@ func TestUpdateSyncCommitteeTrackedVals(t *testing.T) { func TestNewService(t *testing.T) { config := &ValidatorMonitorConfig{} var tracked []primitives.ValidatorIndex - ctx := context.Background() + ctx := t.Context() _, err := NewService(ctx, config, tracked) require.NoError(t, err) } @@ -156,7 +156,7 @@ func TestStart(t *testing.T) { func TestInitializePerformanceStructures(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() s := setupService(t) state, err := s.config.HeadFetcher.HeadState(ctx) require.NoError(t, err) @@ -197,7 +197,7 @@ func TestInitializePerformanceStructures(t *testing.T) { } func TestMonitorRoutine(t *testing.T) { - ctx := context.Background() + ctx := t.Context() hook := logTest.NewGlobal() s := setupService(t) stateChannel := make(chan *feed.Event, 1) @@ -243,7 +243,7 @@ func TestMonitorRoutine(t *testing.T) { } func TestWaitForSync(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) s := &Service{ctx: ctx} syncChan := make(chan struct{}) @@ -260,7 +260,7 @@ func TestWaitForSync(t *testing.T) { } func TestWaitForSyncCanceled(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) s := &Service{ctx: ctx} syncChan := make(chan struct{}) diff --git a/beacon-chain/operations/attestations/kv/aggregated_test.go 
b/beacon-chain/operations/attestations/kv/aggregated_test.go index cb23fb595b..e91b589d30 100644 --- a/beacon-chain/operations/attestations/kv/aggregated_test.go +++ b/beacon-chain/operations/attestations/kv/aggregated_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "sort" "testing" @@ -33,10 +32,10 @@ func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) { att8 := util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2}, AggregationBits: bitfield.Bitlist{0b1001}, Signature: sig2.Marshal()}) atts := []ethpb.Att{att1, att2, att3, att4, att5, att6, att7, att8} require.NoError(t, cache.SaveUnaggregatedAttestations(atts)) - require.NoError(t, cache.AggregateUnaggregatedAttestations(context.Background())) + require.NoError(t, cache.AggregateUnaggregatedAttestations(t.Context())) - require.Equal(t, 1, len(cache.AggregatedAttestationsBySlotIndex(context.Background(), 1, 0)), "Did not aggregate correctly") - require.Equal(t, 1, len(cache.AggregatedAttestationsBySlotIndex(context.Background(), 2, 0)), "Did not aggregate correctly") + require.Equal(t, 1, len(cache.AggregatedAttestationsBySlotIndex(t.Context(), 1, 0)), "Did not aggregate correctly") + require.Equal(t, 1, len(cache.AggregatedAttestationsBySlotIndex(t.Context(), 2, 0)), "Did not aggregate correctly") } func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) { @@ -511,7 +510,7 @@ func TestKV_Aggregated_AggregatedAttestationsBySlotIndex(t *testing.T) { for _, att := range atts { require.NoError(t, cache.SaveAggregatedAttestation(att)) } - ctx := context.Background() + ctx := t.Context() returned := cache.AggregatedAttestationsBySlotIndex(ctx, 1, 1) assert.DeepEqual(t, []*ethpb.Attestation{att1}, returned) returned = cache.AggregatedAttestationsBySlotIndex(ctx, 1, 2) @@ -537,7 +536,7 @@ func TestKV_Aggregated_AggregatedAttestationsBySlotIndexElectra(t *testing.T) { for _, att := range atts { require.NoError(t, cache.SaveAggregatedAttestation(att)) } - ctx := 
context.Background() + ctx := t.Context() returned := cache.AggregatedAttestationsBySlotIndexElectra(ctx, 1, 1) assert.DeepEqual(t, []*ethpb.AttestationElectra{att1}, returned) returned = cache.AggregatedAttestationsBySlotIndexElectra(ctx, 1, 2) diff --git a/beacon-chain/operations/attestations/kv/unaggregated_test.go b/beacon-chain/operations/attestations/kv/unaggregated_test.go index f231f1863c..56750dc44c 100644 --- a/beacon-chain/operations/attestations/kv/unaggregated_test.go +++ b/beacon-chain/operations/attestations/kv/unaggregated_test.go @@ -2,7 +2,6 @@ package kv import ( "bytes" - "context" "sort" "testing" @@ -291,7 +290,7 @@ func TestKV_Unaggregated_UnaggregatedAttestationsBySlotIndex(t *testing.T) { for _, att := range atts { require.NoError(t, cache.SaveUnaggregatedAttestation(att)) } - ctx := context.Background() + ctx := t.Context() returned := cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 1) assert.DeepEqual(t, []*ethpb.Attestation{att1}, returned) returned = cache.UnaggregatedAttestationsBySlotIndex(ctx, 1, 2) @@ -317,7 +316,7 @@ func TestKV_Unaggregated_UnaggregatedAttestationsBySlotIndexElectra(t *testing.T for _, att := range atts { require.NoError(t, cache.SaveUnaggregatedAttestation(att)) } - ctx := context.Background() + ctx := t.Context() returned := cache.UnaggregatedAttestationsBySlotIndexElectra(ctx, 1, 1) assert.DeepEqual(t, []*ethpb.AttestationElectra{att1}, returned) returned = cache.UnaggregatedAttestationsBySlotIndexElectra(ctx, 1, 2) diff --git a/beacon-chain/operations/attestations/prepare_forkchoice_test.go b/beacon-chain/operations/attestations/prepare_forkchoice_test.go index c456ab2af3..108ebb4db5 100644 --- a/beacon-chain/operations/attestations/prepare_forkchoice_test.go +++ b/beacon-chain/operations/attestations/prepare_forkchoice_test.go @@ -1,7 +1,6 @@ package attestations import ( - "context" "fmt" "sort" "testing" @@ -17,7 +16,7 @@ import ( ) func TestBatchAttestations_Multiple(t *testing.T) { - s, err := 
NewService(context.Background(), &Config{Pool: NewPool()}) + s, err := NewService(t.Context(), &Config{Pool: NewPool()}) require.NoError(t, err) priv, err := bls.RandKey() @@ -91,7 +90,7 @@ func TestBatchAttestations_Multiple(t *testing.T) { for _, att := range blockAtts { require.NoError(t, s.cfg.Pool.SaveBlockAttestation(att)) } - require.NoError(t, s.batchForkChoiceAtts(context.Background())) + require.NoError(t, s.batchForkChoiceAtts(t.Context())) wanted, err := attaggregation.Aggregate([]ethpb.Att{aggregatedAtts[0], blockAtts[0]}) require.NoError(t, err) @@ -102,7 +101,7 @@ func TestBatchAttestations_Multiple(t *testing.T) { require.NoError(t, err) wanted = append(wanted, aggregated...) - require.NoError(t, s.cfg.Pool.AggregateUnaggregatedAttestations(context.Background())) + require.NoError(t, s.cfg.Pool.AggregateUnaggregatedAttestations(t.Context())) received := s.cfg.Pool.ForkchoiceAttestations() sort.Slice(received, func(i, j int) bool { @@ -116,7 +115,7 @@ func TestBatchAttestations_Multiple(t *testing.T) { } func TestBatchAttestations_Single(t *testing.T) { - s, err := NewService(context.Background(), &Config{Pool: NewPool()}) + s, err := NewService(t.Context(), &Config{Pool: NewPool()}) require.NoError(t, err) priv, err := bls.RandKey() @@ -148,7 +147,7 @@ func TestBatchAttestations_Single(t *testing.T) { for _, att := range blockAtts { require.NoError(t, s.cfg.Pool.SaveBlockAttestation(att)) } - require.NoError(t, s.batchForkChoiceAtts(context.Background())) + require.NoError(t, s.batchForkChoiceAtts(t.Context())) wanted, err := attaggregation.Aggregate(append(aggregatedAtts, unaggregatedAtts...)) require.NoError(t, err) @@ -161,7 +160,7 @@ func TestBatchAttestations_Single(t *testing.T) { } func TestAggregateAndSaveForkChoiceAtts_Single(t *testing.T) { - s, err := NewService(context.Background(), &Config{Pool: NewPool()}) + s, err := NewService(t.Context(), &Config{Pool: NewPool()}) require.NoError(t, err) priv, err := bls.RandKey() @@ -185,7 +184,7 
@@ func TestAggregateAndSaveForkChoiceAtts_Single(t *testing.T) { } func TestAggregateAndSaveForkChoiceAtts_Multiple(t *testing.T) { - s, err := NewService(context.Background(), &Config{Pool: NewPool()}) + s, err := NewService(t.Context(), &Config{Pool: NewPool()}) require.NoError(t, err) priv, err := bls.RandKey() @@ -238,7 +237,7 @@ func TestAggregateAndSaveForkChoiceAtts_Multiple(t *testing.T) { } func TestSeenAttestations_PresentInCache(t *testing.T) { - s, err := NewService(context.Background(), &Config{Pool: NewPool()}) + s, err := NewService(t.Context(), &Config{Pool: NewPool()}) require.NoError(t, err) ad1 := util.HydrateAttestationData(ðpb.AttestationData{}) @@ -312,7 +311,7 @@ func TestService_seen(t *testing.T) { }, } - s, err := NewService(context.Background(), &Config{Pool: NewPool()}) + s, err := NewService(t.Context(), &Config{Pool: NewPool()}) require.NoError(t, err) for i, tt := range tests { diff --git a/beacon-chain/operations/attestations/prune_expired_test.go b/beacon-chain/operations/attestations/prune_expired_test.go index 7ab7046a8e..789d6f86c1 100644 --- a/beacon-chain/operations/attestations/prune_expired_test.go +++ b/beacon-chain/operations/attestations/prune_expired_test.go @@ -18,7 +18,7 @@ import ( ) func TestPruneExpired_Ticker(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second) defer cancel() s, err := NewService(ctx, &Config{ @@ -83,7 +83,7 @@ func TestPruneExpired_Ticker(t *testing.T) { } func TestPruneExpired_PruneExpiredAtts(t *testing.T) { - s, err := NewService(context.Background(), &Config{Pool: NewPool()}) + s, err := NewService(t.Context(), &Config{Pool: NewPool()}) require.NoError(t, err) ad1 := util.HydrateAttestationData(ðpb.AttestationData{}) @@ -118,7 +118,7 @@ func TestPruneExpired_PruneExpiredAtts(t *testing.T) { } func TestPruneExpired_Expired(t *testing.T) { - s, err := NewService(context.Background(), 
&Config{Pool: NewPool()}) + s, err := NewService(t.Context(), &Config{Pool: NewPool()}) require.NoError(t, err) // Rewind back one epoch worth of time. @@ -133,7 +133,7 @@ func TestPruneExpired_ExpiredDeneb(t *testing.T) { cfg.DenebForkEpoch = 3 params.OverrideBeaconConfig(cfg) - s, err := NewService(context.Background(), &Config{Pool: NewPool()}) + s, err := NewService(t.Context(), &Config{Pool: NewPool()}) require.NoError(t, err) // Rewind back 4 epochs + 10 slots worth of time. diff --git a/beacon-chain/operations/attestations/service_test.go b/beacon-chain/operations/attestations/service_test.go index 048115da74..70a45f08a2 100644 --- a/beacon-chain/operations/attestations/service_test.go +++ b/beacon-chain/operations/attestations/service_test.go @@ -10,7 +10,7 @@ import ( ) func TestStop_OK(t *testing.T) { - s, err := NewService(context.Background(), &Config{}) + s, err := NewService(t.Context(), &Config{}) require.NoError(t, err) require.NoError(t, s.Stop(), "Unable to stop attestation pool service") assert.ErrorContains(t, context.Canceled.Error(), s.ctx.Err(), "Context was not canceled") diff --git a/beacon-chain/operations/slashings/service_attester_test.go b/beacon-chain/operations/slashings/service_attester_test.go index 4197d64e07..1227fc0d22 100644 --- a/beacon-chain/operations/slashings/service_attester_test.go +++ b/beacon-chain/operations/slashings/service_attester_test.go @@ -1,7 +1,6 @@ package slashings import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/state" @@ -305,7 +304,7 @@ func TestPool_InsertAttesterSlashing(t *testing.T) { } var err error for i := 0; i < len(tt.args.slashings); i++ { - err = p.InsertAttesterSlashing(context.Background(), beaconState, tt.args.slashings[i]) + err = p.InsertAttesterSlashing(t.Context(), beaconState, tt.args.slashings[i]) if tt.fields.wantErr[i] { assert.NotNil(t, err) } else { @@ -363,8 +362,8 @@ func TestPool_InsertAttesterSlashing_SigFailsVerify_ClearPool(t *testing.T) { p := 
&Pool{ pendingAttesterSlashing: make([]*PendingAttesterSlashing, 0), } - require.NoError(t, p.InsertAttesterSlashing(context.Background(), beaconState, slashings[0])) - err := p.InsertAttesterSlashing(context.Background(), beaconState, slashings[1]) + require.NoError(t, p.InsertAttesterSlashing(t.Context(), beaconState, slashings[0])) + err := p.InsertAttesterSlashing(t.Context(), beaconState, slashings[1]) require.ErrorContains(t, "could not verify attester slashing", err, "Expected error when inserting slashing with bad sig") assert.Equal(t, 1, len(p.pendingAttesterSlashing)) } @@ -572,7 +571,7 @@ func TestPool_PendingAttesterSlashings(t *testing.T) { p := &Pool{ pendingAttesterSlashing: tt.fields.pending, } - assert.DeepEqual(t, tt.want, p.PendingAttesterSlashings(context.Background(), beaconState, tt.fields.all)) + assert.DeepEqual(t, tt.want, p.PendingAttesterSlashings(t.Context(), beaconState, tt.fields.all)) }) } } @@ -636,7 +635,7 @@ func TestPool_PendingAttesterSlashings_AfterElectra(t *testing.T) { p := &Pool{ pendingAttesterSlashing: tt.fields.pending, } - assert.DeepEqual(t, tt.want, p.PendingAttesterSlashings(context.Background(), beaconState, tt.fields.all)) + assert.DeepEqual(t, tt.want, p.PendingAttesterSlashings(t.Context(), beaconState, tt.fields.all)) }) } } @@ -707,7 +706,7 @@ func TestPool_PendingAttesterSlashings_Slashed(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { p := &Pool{pendingAttesterSlashing: tt.fields.pending} - assert.DeepEqual(t, tt.want, p.PendingAttesterSlashings(context.Background(), beaconState, tt.fields.all /*noLimit*/)) + assert.DeepEqual(t, tt.want, p.PendingAttesterSlashings(t.Context(), beaconState, tt.fields.all /*noLimit*/)) }) } } @@ -735,5 +734,5 @@ func TestPool_PendingAttesterSlashings_NoDuplicates(t *testing.T) { p := &Pool{ pendingAttesterSlashing: pendingSlashings, } - assert.DeepEqual(t, slashings[0:2], p.PendingAttesterSlashings(context.Background(), beaconState, false 
/*noLimit*/)) + assert.DeepEqual(t, slashings[0:2], p.PendingAttesterSlashings(t.Context(), beaconState, false /*noLimit*/)) } diff --git a/beacon-chain/operations/slashings/service_new_test.go b/beacon-chain/operations/slashings/service_new_test.go index fbab976f34..529875010b 100644 --- a/beacon-chain/operations/slashings/service_new_test.go +++ b/beacon-chain/operations/slashings/service_new_test.go @@ -1,7 +1,6 @@ package slashings import ( - "context" "testing" "time" @@ -15,7 +14,7 @@ import ( ) func TestConvertToElectraWithTimer(t *testing.T) { - ctx := context.Background() + ctx := t.Context() cfg := params.BeaconConfig().Copy() cfg.ElectraForkEpoch = 1 diff --git a/beacon-chain/operations/slashings/service_proposer_test.go b/beacon-chain/operations/slashings/service_proposer_test.go index fb22c2a36b..008a128278 100644 --- a/beacon-chain/operations/slashings/service_proposer_test.go +++ b/beacon-chain/operations/slashings/service_proposer_test.go @@ -1,7 +1,6 @@ package slashings import ( - "context" "testing" fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams" @@ -163,7 +162,7 @@ func TestPool_InsertProposerSlashing(t *testing.T) { } var err error for i := 0; i < len(tt.args.slashings); i++ { - err = p.InsertProposerSlashing(context.Background(), beaconState, tt.args.slashings[i]) + err = p.InsertProposerSlashing(t.Context(), beaconState, tt.args.slashings[i]) } if tt.fields.wantedErr != "" { require.ErrorContains(t, tt.fields.wantedErr, err) @@ -199,8 +198,8 @@ func TestPool_InsertProposerSlashing_SigFailsVerify_ClearPool(t *testing.T) { pendingProposerSlashing: make([]*ethpb.ProposerSlashing, 0), } // We only want a single slashing to remain. 
- require.NoError(t, p.InsertProposerSlashing(context.Background(), beaconState, slashings[0])) - err := p.InsertProposerSlashing(context.Background(), beaconState, slashings[1]) + require.NoError(t, p.InsertProposerSlashing(t.Context(), beaconState, slashings[0])) + err := p.InsertProposerSlashing(t.Context(), beaconState, slashings[1]) require.ErrorContains(t, "could not verify proposer slashing", err, "Expected slashing with bad signature to fail") assert.Equal(t, 1, len(p.pendingProposerSlashing)) } @@ -374,7 +373,7 @@ func TestPool_PendingProposerSlashings(t *testing.T) { p := &Pool{ pendingProposerSlashing: tt.fields.pending, } - assert.DeepEqual(t, tt.want, p.PendingProposerSlashings(context.Background(), beaconState, tt.fields.noLimit)) + assert.DeepEqual(t, tt.want, p.PendingProposerSlashings(t.Context(), beaconState, tt.fields.noLimit)) }) } } @@ -430,7 +429,7 @@ func TestPool_PendingProposerSlashings_Slashed(t *testing.T) { p := &Pool{ pendingProposerSlashing: tt.fields.pending, } - result := p.PendingProposerSlashings(context.Background(), beaconState, tt.fields.all /*noLimit*/) + result := p.PendingProposerSlashings(t.Context(), beaconState, tt.fields.all /*noLimit*/) t.Log(tt.want[0].Header_1.Header.ProposerIndex) t.Log(result[0].Header_1.Header.ProposerIndex) assert.DeepEqual(t, tt.want, result) diff --git a/beacon-chain/p2p/broadcaster_test.go b/beacon-chain/p2p/broadcaster_test.go index f0415b1f6d..c255d3529e 100644 --- a/beacon-chain/p2p/broadcaster_test.go +++ b/beacon-chain/p2p/broadcaster_test.go @@ -77,7 +77,7 @@ func TestService_Broadcast(t *testing.T) { wg.Add(1) go func(tt *testing.T) { defer wg.Done() - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second) defer cancel() incomingMessage, err := sub.Next(ctx) @@ -91,7 +91,7 @@ func TestService_Broadcast(t *testing.T) { }(t) // Broadcast to peers and wait. 
- require.NoError(t, p.Broadcast(context.Background(), msg)) + require.NoError(t, p.Broadcast(t.Context(), msg)) if util.WaitTimeout(&wg, 1*time.Second) { t.Error("Failed to receive pubsub within 1s") } @@ -102,7 +102,7 @@ func TestService_Broadcast_ReturnsErr_TopicNotMapped(t *testing.T) { genesisTime: time.Now(), genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), } - assert.ErrorContains(t, ErrMessageNotMapped.Error(), p.Broadcast(context.Background(), &testpb.AddressBook{})) + assert.ErrorContains(t, ErrMessageNotMapped.Error(), p.Broadcast(t.Context(), &testpb.AddressBook{})) } func TestService_Attestation_Subnet(t *testing.T) { @@ -165,7 +165,7 @@ func TestService_BroadcastAttestation(t *testing.T) { genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), subnetsLock: make(map[uint64]*sync.RWMutex), subnetsLockLock: sync.Mutex{}, - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }), } @@ -191,7 +191,7 @@ func TestService_BroadcastAttestation(t *testing.T) { wg.Add(1) go func(tt *testing.T) { defer wg.Done() - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second) defer cancel() incomingMessage, err := sub.Next(ctx) @@ -205,7 +205,7 @@ func TestService_BroadcastAttestation(t *testing.T) { }(t) // Attempt to broadcast nil object should fail. - ctx := context.Background() + ctx := t.Context() require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastAttestation(ctx, subnet, nil)) // Broadcast to peers and wait. 
@@ -286,20 +286,20 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) { } }() - ps1, err := pubsub.NewGossipSub(context.Background(), hosts[0], + ps1, err := pubsub.NewGossipSub(t.Context(), hosts[0], pubsub.WithMessageSigning(false), pubsub.WithStrictSignatureVerification(false), ) require.NoError(t, err) - ps2, err := pubsub.NewGossipSub(context.Background(), hosts[1], + ps2, err := pubsub.NewGossipSub(t.Context(), hosts[1], pubsub.WithMessageSigning(false), pubsub.WithStrictSignatureVerification(false), ) require.NoError(t, err) p := &Service{ host: hosts[0], - ctx: context.Background(), + ctx: t.Context(), pubsub: ps1, dv5Listener: listeners[0], joinedTopics: map[string]*pubsub.Topic{}, @@ -308,14 +308,14 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) { genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), subnetsLock: make(map[uint64]*sync.RWMutex), subnetsLockLock: sync.Mutex{}, - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }), } p2 := &Service{ host: hosts[1], - ctx: context.Background(), + ctx: t.Context(), pubsub: ps2, dv5Listener: listeners[1], joinedTopics: map[string]*pubsub.Topic{}, @@ -324,7 +324,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) { genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), subnetsLock: make(map[uint64]*sync.RWMutex), subnetsLockLock: sync.Mutex{}, - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }), } @@ -365,7 +365,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) { wg.Add(1) go func(tt *testing.T) { defer wg.Done() - ctx, cancel := context.WithTimeout(context.Background(), 4*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 4*time.Second) defer cancel() incomingMessage, err 
:= sub.Next(ctx) @@ -379,7 +379,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) { }(t) // Broadcast to peers and wait. - require.NoError(t, p.BroadcastAttestation(context.Background(), subnet, msg)) + require.NoError(t, p.BroadcastAttestation(t.Context(), subnet, msg)) if util.WaitTimeout(&wg, 4*time.Second) { t.Error("Failed to receive pubsub within 4s") } @@ -402,7 +402,7 @@ func TestService_BroadcastSyncCommittee(t *testing.T) { genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), subnetsLock: make(map[uint64]*sync.RWMutex), subnetsLockLock: sync.Mutex{}, - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }), } @@ -428,7 +428,7 @@ func TestService_BroadcastSyncCommittee(t *testing.T) { wg.Add(1) go func(tt *testing.T) { defer wg.Done() - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second) defer cancel() incomingMessage, err := sub.Next(ctx) @@ -442,7 +442,7 @@ func TestService_BroadcastSyncCommittee(t *testing.T) { }(t) // Broadcasting nil should fail. - ctx := context.Background() + ctx := t.Context() require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastSyncCommitteeMessage(ctx, subnet, nil)) // Broadcast to peers and wait. 
@@ -467,7 +467,7 @@ func TestService_BroadcastBlob(t *testing.T) { genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), subnetsLock: make(map[uint64]*sync.RWMutex), subnetsLockLock: sync.Mutex{}, - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }), } @@ -505,7 +505,7 @@ func TestService_BroadcastBlob(t *testing.T) { wg.Add(1) go func(tt *testing.T) { defer wg.Done() - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second) defer cancel() incomingMessage, err := sub.Next(ctx) @@ -517,7 +517,7 @@ func TestService_BroadcastBlob(t *testing.T) { }(t) // Attempt to broadcast nil object should fail. - ctx := context.Background() + ctx := t.Context() require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastBlob(ctx, subnet, nil)) // Broadcast to peers and wait. @@ -540,7 +540,7 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) { genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), subnetsLock: make(map[uint64]*sync.RWMutex), subnetsLockLock: sync.Mutex{}, - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }), } @@ -566,7 +566,7 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) { wg.Add(1) go func(tt *testing.T) { defer wg.Done() - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second) defer cancel() incomingMessage, err := sub.Next(ctx) @@ -580,7 +580,7 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) { }(t) // Broadcasting nil should fail. 
- ctx := context.Background() + ctx := t.Context() require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastLightClientOptimisticUpdate(ctx, nil)) var nilUpdate interfaces.LightClientOptimisticUpdate require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastLightClientOptimisticUpdate(ctx, nilUpdate)) @@ -607,7 +607,7 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) { genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), subnetsLock: make(map[uint64]*sync.RWMutex), subnetsLockLock: sync.Mutex{}, - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }), } @@ -633,7 +633,7 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) { wg.Add(1) go func(tt *testing.T) { defer wg.Done() - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second) defer cancel() incomingMessage, err := sub.Next(ctx) @@ -647,7 +647,7 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) { }(t) // Broadcasting nil should fail. 
- ctx := context.Background() + ctx := t.Context() require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastLightClientFinalityUpdate(ctx, nil)) var nilUpdate interfaces.LightClientFinalityUpdate require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastLightClientFinalityUpdate(ctx, nilUpdate)) @@ -688,7 +688,7 @@ func TestService_BroadcastDataColumn(t *testing.T) { _, pkey, ipAddr := createHost(t, port) p := &Service{ - ctx: context.Background(), + ctx: t.Context(), host: p1.BHost, pubsub: p1.PubSub(), joinedTopics: map[string]*pubsub.Topic{}, @@ -697,7 +697,7 @@ func TestService_BroadcastDataColumn(t *testing.T) { genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32), subnetsLock: make(map[uint64]*sync.RWMutex), subnetsLockLock: sync.Mutex{}, - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ScorerParams: &scorers.Config{}}), + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ScorerParams: &scorers.Config{}}), } // Create a listener. @@ -724,7 +724,7 @@ func TestService_BroadcastDataColumn(t *testing.T) { go func(tt *testing.T) { defer wg.Done() - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) defer cancel() // Wait for the peers to be checked. 
diff --git a/beacon-chain/p2p/connection_gater_test.go b/beacon-chain/p2p/connection_gater_test.go index f27a10d9b9..41d802c8ae 100644 --- a/beacon-chain/p2p/connection_gater_test.go +++ b/beacon-chain/p2p/connection_gater_test.go @@ -1,7 +1,6 @@ package p2p import ( - "context" "fmt" "testing" "time" @@ -29,7 +28,7 @@ func TestPeer_AtMaxLimit(t *testing.T) { s := &Service{ ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false), } - s.peers = peers.NewStatus(context.Background(), &peers.StatusConfig{ + s.peers = peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 0, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -66,14 +65,14 @@ func TestPeer_AtMaxLimit(t *testing.T) { require.NoError(t, err) addrInfo, err := peer.AddrInfoFromP2pAddr(multiAddress) require.NoError(t, err) - err = h2.Connect(context.Background(), *addrInfo) + err = h2.Connect(t.Context(), *addrInfo) require.NotNil(t, err, "Wanted connection to fail with max peer") } func TestService_InterceptBannedIP(t *testing.T) { s := &Service{ ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false), - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 20, ScorerParams: &scorers.Config{}, }), @@ -102,7 +101,7 @@ func TestService_RejectInboundConnectionBeforeStarted(t *testing.T) { limit := 1 s := &Service{ ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false), - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: limit, ScorerParams: &scorers.Config{}, }), @@ -133,7 +132,7 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) { limit := 20 s := &Service{ ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false), - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: 
peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: limit, ScorerParams: &scorers.Config{}, }), @@ -177,7 +176,7 @@ func TestPeer_BelowMaxLimit(t *testing.T) { s := &Service{ ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false), } - s.peers = peers.NewStatus(context.Background(), &peers.StatusConfig{ + s.peers = peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 1, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -210,7 +209,7 @@ func TestPeer_BelowMaxLimit(t *testing.T) { require.NoError(t, err) addrInfo, err := peer.AddrInfoFromP2pAddr(multiAddress) require.NoError(t, err) - err = h2.Connect(context.Background(), *addrInfo) + err = h2.Connect(t.Context(), *addrInfo) assert.NoError(t, err, "Wanted connection to succeed") } @@ -228,7 +227,7 @@ func TestPeerAllowList(t *testing.T) { require.NoError(t, err, "Failed to p2p listen") s := &Service{ ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false), - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }), } @@ -256,7 +255,7 @@ func TestPeerAllowList(t *testing.T) { require.NoError(t, err) addrInfo, err := peer.AddrInfoFromP2pAddr(multiAddress) require.NoError(t, err) - err = h1.Connect(context.Background(), *addrInfo) + err = h1.Connect(t.Context(), *addrInfo) assert.NotNil(t, err, "Wanted connection to fail with allow list") assert.ErrorContains(t, "no good addresses", err) } @@ -275,7 +274,7 @@ func TestPeerDenyList(t *testing.T) { require.NoError(t, err, "Failed to p2p listen") s := &Service{ ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false), - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }), } @@ -303,7 +302,7 @@ func TestPeerDenyList(t *testing.T) { 
require.NoError(t, err) addrInfo, err := peer.AddrInfoFromP2pAddr(multiAddress) require.NoError(t, err) - err = h1.Connect(context.Background(), *addrInfo) + err = h1.Connect(t.Context(), *addrInfo) assert.NotNil(t, err, "Wanted connection to fail with deny list") assert.ErrorContains(t, "no good addresses", err) } @@ -311,7 +310,7 @@ func TestPeerDenyList(t *testing.T) { func TestService_InterceptAddrDial_Allow(t *testing.T) { s := &Service{ ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false), - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }), } @@ -331,7 +330,7 @@ func TestService_InterceptAddrDial_Allow(t *testing.T) { func TestService_InterceptAddrDial_Public(t *testing.T) { s := &Service{ ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false), - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }), } @@ -379,7 +378,7 @@ func TestService_InterceptAddrDial_Public(t *testing.T) { func TestService_InterceptAddrDial_Private(t *testing.T) { s := &Service{ ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false), - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }), } @@ -408,7 +407,7 @@ func TestService_InterceptAddrDial_Private(t *testing.T) { func TestService_InterceptAddrDial_AllowPrivate(t *testing.T) { s := &Service{ ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false), - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }), } @@ -437,7 +436,7 @@ func TestService_InterceptAddrDial_AllowPrivate(t *testing.T) { func 
TestService_InterceptAddrDial_DenyPublic(t *testing.T) { s := &Service{ ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false), - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }), } @@ -466,7 +465,7 @@ func TestService_InterceptAddrDial_DenyPublic(t *testing.T) { func TestService_InterceptAddrDial_AllowConflict(t *testing.T) { s := &Service{ ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false), - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }), } diff --git a/beacon-chain/p2p/custody_test.go b/beacon-chain/p2p/custody_test.go index 2579ee3e46..fcc549441a 100644 --- a/beacon-chain/p2p/custody_test.go +++ b/beacon-chain/p2p/custody_test.go @@ -1,7 +1,6 @@ package p2p import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas" @@ -83,7 +82,7 @@ func TestCustodyGroupCountFromPeer(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { // Create peers status. 
- peers := peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers := peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }) diff --git a/beacon-chain/p2p/dial_relay_node_test.go b/beacon-chain/p2p/dial_relay_node_test.go index 676f76bd4f..40db7d5c11 100644 --- a/beacon-chain/p2p/dial_relay_node_test.go +++ b/beacon-chain/p2p/dial_relay_node_test.go @@ -1,7 +1,6 @@ package p2p import ( - "context" "fmt" "testing" @@ -23,12 +22,12 @@ func TestMakePeer_OK(t *testing.T) { } func TestDialRelayNode_InvalidPeerString(t *testing.T) { - err := dialRelayNode(context.Background(), nil, "/ip4") + err := dialRelayNode(t.Context(), nil, "/ip4") assert.ErrorContains(t, "failed to parse multiaddr \"/ip4\"", err, "Expected to fail with invalid peer string") } func TestDialRelayNode_OK(t *testing.T) { - ctx := context.Background() + ctx := t.Context() relay, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{})) require.NoError(t, err) host, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{})) diff --git a/beacon-chain/p2p/discovery_test.go b/beacon-chain/p2p/discovery_test.go index 7c03aadef4..3e29e69c0d 100644 --- a/beacon-chain/p2p/discovery_test.go +++ b/beacon-chain/p2p/discovery_test.go @@ -351,7 +351,7 @@ func TestStaticPeering_PeersAreAdded(t *testing.T) { cfg.StaticPeers = staticPeers cfg.StateNotifier = &mock.MockStateNotifier{} cfg.NoDiscovery = true - s, err := NewService(context.Background(), cfg) + s, err := NewService(t.Context(), cfg) require.NoError(t, err) exitRoutine := make(chan bool) @@ -395,7 +395,7 @@ func TestInboundPeerLimit(t *testing.T) { s := &Service{ cfg: &Config{MaxPeers: 30}, ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false), - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{}, }), @@ -421,7 +421,7 @@ func 
TestOutboundPeerThreshold(t *testing.T) { s := &Service{ cfg: &Config{MaxPeers: 30}, ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false), - peers: peers.NewStatus(context.Background(), &peers.StatusConfig{ + peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{}, }), diff --git a/beacon-chain/p2p/fork_test.go b/beacon-chain/p2p/fork_test.go index 288438171d..ebaf03a057 100644 --- a/beacon-chain/p2p/fork_test.go +++ b/beacon-chain/p2p/fork_test.go @@ -1,7 +1,6 @@ package p2p import ( - "context" "math/rand" "os" "path" @@ -101,7 +100,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) { cfg.UDPPort = 14000 cfg.TCPPort = 14001 cfg.MaxPeers = 30 - s, err = NewService(context.Background(), cfg) + s, err = NewService(t.Context(), cfg) require.NoError(t, err) s.genesisTime = genesisTime s.genesisValidatorsRoot = make([]byte, 32) @@ -197,7 +196,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) { cfg.TCPPort = 14001 cfg.MaxPeers = 30 cfg.StateNotifier = &mock.MockStateNotifier{} - s, err = NewService(context.Background(), cfg) + s, err = NewService(t.Context(), cfg) require.NoError(t, err) s.genesisTime = genesisTime diff --git a/beacon-chain/p2p/gossip_scoring_params_test.go b/beacon-chain/p2p/gossip_scoring_params_test.go index 51323ae340..206f7733d0 100644 --- a/beacon-chain/p2p/gossip_scoring_params_test.go +++ b/beacon-chain/p2p/gossip_scoring_params_test.go @@ -1,7 +1,6 @@ package p2p import ( - "context" "testing" dbutil "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing" @@ -22,7 +21,7 @@ func TestCorrect_ActiveValidatorsCount(t *testing.T) { db := dbutil.SetupDB(t) s := &Service{ - ctx: context.Background(), + ctx: t.Context(), cfg: &Config{DB: db}, } bState, err := util.NewBeaconState(func(state *ethpb.BeaconState) error { diff --git a/beacon-chain/p2p/peers/peerdata/store_test.go b/beacon-chain/p2p/peers/peerdata/store_test.go index 
57a992acff..0e8bd32014 100644 --- a/beacon-chain/p2p/peers/peerdata/store_test.go +++ b/beacon-chain/p2p/peers/peerdata/store_test.go @@ -1,7 +1,6 @@ package peerdata_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata" @@ -80,7 +79,7 @@ func TestStore_PeerDataGetOrCreate(t *testing.T) { } func TestStore_TrustedPeers(t *testing.T) { - store := peerdata.NewStore(context.Background(), &peerdata.StoreConfig{ + store := peerdata.NewStore(t.Context(), &peerdata.StoreConfig{ MaxPeers: 12, }) diff --git a/beacon-chain/p2p/peers/scorers/peer_status_test.go b/beacon-chain/p2p/peers/scorers/peer_status_test.go index a94630dd45..d28bac2a05 100644 --- a/beacon-chain/p2p/peers/scorers/peer_status_test.go +++ b/beacon-chain/p2p/peers/scorers/peer_status_test.go @@ -1,7 +1,6 @@ package scorers_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers" @@ -135,7 +134,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) { } func TestScorers_PeerStatus_IsBadPeer(t *testing.T) { - peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{ + peerStatuses := peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }) pid := peer.ID("peer1") @@ -148,7 +147,7 @@ func TestScorers_PeerStatus_IsBadPeer(t *testing.T) { } func TestScorers_PeerStatus_BadPeers(t *testing.T) { - peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{ + peerStatuses := peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }) pid1 := peer.ID("peer1") @@ -175,7 +174,7 @@ func TestScorers_PeerStatus_BadPeers(t *testing.T) { } func TestScorers_PeerStatus_PeerStatus(t *testing.T) { - peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{ + peerStatuses := peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }) status, err := peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1") diff --git 
a/beacon-chain/p2p/peers/scorers/service_test.go b/beacon-chain/p2p/peers/scorers/service_test.go index 43e4d7931f..873137a47b 100644 --- a/beacon-chain/p2p/peers/scorers/service_test.go +++ b/beacon-chain/p2p/peers/scorers/service_test.go @@ -78,7 +78,7 @@ func TestScorers_Service_Init(t *testing.T) { } func TestScorers_Service_Score(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second) defer cancel() batchSize := uint64(flags.Get().BlockBatchLimit) @@ -212,7 +212,7 @@ func TestScorers_Service_Score(t *testing.T) { } func TestScorers_Service_loop(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second) defer cancel() peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{ @@ -267,7 +267,7 @@ func TestScorers_Service_loop(t *testing.T) { } func TestScorers_Service_IsBadPeer(t *testing.T) { - peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{ + peerStatuses := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -284,7 +284,7 @@ func TestScorers_Service_IsBadPeer(t *testing.T) { } func TestScorers_Service_BadPeers(t *testing.T) { - peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{ + peerStatuses := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ diff --git a/beacon-chain/p2p/peers/status_test.go b/beacon-chain/p2p/peers/status_test.go index e20b15dfbd..925a2f2e3a 100644 --- a/beacon-chain/p2p/peers/status_test.go +++ b/beacon-chain/p2p/peers/status_test.go @@ -1,7 +1,6 @@ package peers_test import ( - "context" "crypto/rand" "strconv" "testing" @@ -27,7 +26,7 @@ import ( func TestStatus(t *testing.T) { 
maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -41,7 +40,7 @@ func TestStatus(t *testing.T) { func TestPeerExplicitAdd(t *testing.T) { maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -82,7 +81,7 @@ func TestPeerExplicitAdd(t *testing.T) { func TestPeerNoENR(t *testing.T) { maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -106,7 +105,7 @@ func TestPeerNoENR(t *testing.T) { func TestPeerNoOverwriteENR(t *testing.T) { maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -133,7 +132,7 @@ func TestPeerNoOverwriteENR(t *testing.T) { func TestErrUnknownPeer(t *testing.T) { maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -166,7 +165,7 @@ func TestErrUnknownPeer(t *testing.T) { func TestPeerCommitteeIndices(t *testing.T) { maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ 
@@ -203,7 +202,7 @@ func TestPeerCommitteeIndices(t *testing.T) { func TestPeerSubscribedToSubnet(t *testing.T) { maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -247,7 +246,7 @@ func TestPeerSubscribedToSubnet(t *testing.T) { func TestPeerImplicitAdd(t *testing.T) { maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -270,7 +269,7 @@ func TestPeerImplicitAdd(t *testing.T) { func TestPeerChainState(t *testing.T) { maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -305,7 +304,7 @@ func TestPeerChainState(t *testing.T) { func TestPeerWithNilChainState(t *testing.T) { maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -331,7 +330,7 @@ func TestPeerWithNilChainState(t *testing.T) { func TestPeerBadResponses(t *testing.T) { maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -381,7 +380,7 @@ func TestPeerBadResponses(t *testing.T) { func TestAddMetaData(t *testing.T) { maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := 
peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -410,7 +409,7 @@ func TestAddMetaData(t *testing.T) { func TestPeerConnectionStatuses(t *testing.T) { maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -452,7 +451,7 @@ func TestPeerConnectionStatuses(t *testing.T) { func TestPeerValidTime(t *testing.T) { maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -498,7 +497,7 @@ func TestPeerValidTime(t *testing.T) { func TestPrune(t *testing.T) { maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -554,7 +553,7 @@ func TestPeerIPTracker(t *testing.T) { }) defer resetCfg() maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -592,7 +591,7 @@ func TestPeerIPTracker(t *testing.T) { } func TestTrimmedOrderedPeers(t *testing.T) { - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -666,7 +665,7 @@ func TestTrimmedOrderedPeers(t *testing.T) { } func TestConcurrentPeerLimitHolds(t *testing.T) { - p 
:= peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -678,7 +677,7 @@ func TestConcurrentPeerLimitHolds(t *testing.T) { } func TestAtInboundPeerLimit(t *testing.T) { - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -703,7 +702,7 @@ func TestPrunePeers(t *testing.T) { EnablePeerScorer: false, }) defer resetCfg() - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -764,7 +763,7 @@ func TestPrunePeers(t *testing.T) { } func TestPrunePeers_TrustedPeers(t *testing.T) { - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -1006,7 +1005,7 @@ func TestStatus_BestPeer(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{Threshold: 2}, @@ -1028,7 +1027,7 @@ func TestStatus_BestPeer(t *testing.T) { func TestBestFinalized_returnsMaxValue(t *testing.T) { maxBadResponses := 2 maxPeers := 10 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ 
-1050,7 +1049,7 @@ func TestBestFinalized_returnsMaxValue(t *testing.T) { } func TestStatus_BestNonFinalized(t *testing.T) { - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -1076,7 +1075,7 @@ func TestStatus_BestNonFinalized(t *testing.T) { func TestStatus_CurrentEpoch(t *testing.T) { maxBadResponses := 2 - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -1104,7 +1103,7 @@ func TestStatus_CurrentEpoch(t *testing.T) { } func TestInbound(t *testing.T) { - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -1123,7 +1122,7 @@ func TestInbound(t *testing.T) { } func TestInboundConnected(t *testing.T) { - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -1143,7 +1142,7 @@ func TestInboundConnected(t *testing.T) { } func TestInboundConnectedWithProtocol(t *testing.T) { - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -1204,7 +1203,7 @@ func TestInboundConnectedWithProtocol(t *testing.T) { } func TestOutbound(t *testing.T) { - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: 
&scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -1223,7 +1222,7 @@ func TestOutbound(t *testing.T) { } func TestOutboundConnected(t *testing.T) { - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ @@ -1243,7 +1242,7 @@ func TestOutboundConnected(t *testing.T) { } func TestOutboundConnectedWithProtocol(t *testing.T) { - p := peers.NewStatus(context.Background(), &peers.StatusConfig{ + p := peers.NewStatus(t.Context(), &peers.StatusConfig{ PeerLimit: 30, ScorerParams: &scorers.Config{ BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{ diff --git a/beacon-chain/p2p/pubsub_filter_test.go b/beacon-chain/p2p/pubsub_filter_test.go index d52c342d31..a46ddec0c7 100644 --- a/beacon-chain/p2p/pubsub_filter_test.go +++ b/beacon-chain/p2p/pubsub_filter_test.go @@ -340,7 +340,7 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) { func TestService_MonitorsStateForkUpdates(t *testing.T) { params.SetupTestConfigCleanup(t) - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second) defer cancel() cs := startup.NewClockSynchronizer() s, err := NewService(ctx, &Config{ClockWaiter: cs}) diff --git a/beacon-chain/p2p/pubsub_test.go b/beacon-chain/p2p/pubsub_test.go index 38c07220ea..c895efe1f8 100644 --- a/beacon-chain/p2p/pubsub_test.go +++ b/beacon-chain/p2p/pubsub_test.go @@ -18,12 +18,12 @@ import ( func TestService_PublishToTopicConcurrentMapWrite(t *testing.T) { cs := startup.NewClockSynchronizer() - s, err := NewService(context.Background(), &Config{ + s, err := NewService(t.Context(), &Config{ StateNotifier: &mock.MockStateNotifier{}, ClockWaiter: cs, }) require.NoError(t, err) - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + ctx, cancel := 
context.WithTimeout(t.Context(), 3*time.Second) defer cancel() go s.awaitStateInitialized() diff --git a/beacon-chain/p2p/sender_test.go b/beacon-chain/p2p/sender_test.go index 6cf3090725..dfd86f4c98 100644 --- a/beacon-chain/p2p/sender_test.go +++ b/beacon-chain/p2p/sender_test.go @@ -1,7 +1,6 @@ package p2p import ( - "context" "sync" "testing" "time" @@ -50,7 +49,7 @@ func TestService_Send(t *testing.T) { wg.Done() }) - stream, err := svc.Send(context.Background(), msg, "/testing/1", p2.BHost.ID()) + stream, err := svc.Send(t.Context(), msg, "/testing/1", p2.BHost.ID()) require.NoError(t, err) util.WaitTimeout(&wg, 1*time.Second) diff --git a/beacon-chain/p2p/service_test.go b/beacon-chain/p2p/service_test.go index 00d290cf39..f2e65c6053 100644 --- a/beacon-chain/p2p/service_test.go +++ b/beacon-chain/p2p/service_test.go @@ -84,7 +84,7 @@ func createHost(t *testing.T, port int) (host.Host, *ecdsa.PrivateKey, net.IP) { func TestService_Stop_SetsStartedToFalse(t *testing.T) { params.SetupTestConfigCleanup(t) - s, err := NewService(context.Background(), &Config{StateNotifier: &mock.MockStateNotifier{}}) + s, err := NewService(t.Context(), &Config{StateNotifier: &mock.MockStateNotifier{}}) require.NoError(t, err) s.started = true s.dv5Listener = &mockListener{} @@ -94,7 +94,7 @@ func TestService_Stop_SetsStartedToFalse(t *testing.T) { func TestService_Stop_DontPanicIfDv5ListenerIsNotInited(t *testing.T) { params.SetupTestConfigCleanup(t) - s, err := NewService(context.Background(), &Config{StateNotifier: &mock.MockStateNotifier{}}) + s, err := NewService(t.Context(), &Config{StateNotifier: &mock.MockStateNotifier{}}) require.NoError(t, err) assert.NoError(t, s.Stop()) } @@ -110,7 +110,7 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) { QUICPort: 3000, ClockWaiter: cs, } - s, err := NewService(context.Background(), cfg) + s, err := NewService(t.Context(), cfg) require.NoError(t, err) s.dv5Listener = &mockListener{} exitRoutine := make(chan bool) @@ -158,7 
+158,7 @@ func TestService_Start_NoDiscoverFlag(t *testing.T) { NoDiscovery: true, // <-- no s.dv5Listener is created ClockWaiter: cs, } - s, err := NewService(context.Background(), cfg) + s, err := NewService(t.Context(), cfg) require.NoError(t, err) // required params to addForkEntry in s.forkWatcher @@ -261,7 +261,7 @@ func TestListenForNewNodes(t *testing.T) { cfg.UDPPort = 14000 cfg.TCPPort = 14001 - s, err = NewService(context.Background(), cfg) + s, err = NewService(t.Context(), cfg) require.NoError(t, err) exitRoutine := make(chan bool) go func() { @@ -302,7 +302,7 @@ func TestPeer_Disconnect(t *testing.T) { require.NoError(t, err) addrInfo, err := peer.AddrInfoFromP2pAddr(h2Addr) require.NoError(t, err) - require.NoError(t, s.host.Connect(context.Background(), *addrInfo)) + require.NoError(t, s.host.Connect(t.Context(), *addrInfo)) assert.Equal(t, 1, len(s.host.Network().Peers()), "Invalid number of peers") assert.Equal(t, 1, len(s.host.Network().Conns()), "Invalid number of connections") require.NoError(t, s.Disconnect(h2.ID())) @@ -311,7 +311,7 @@ func TestPeer_Disconnect(t *testing.T) { func TestService_JoinLeaveTopic(t *testing.T) { params.SetupTestConfigCleanup(t) - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second) defer cancel() gs := startup.NewClockSynchronizer() s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}, ClockWaiter: gs}) @@ -369,7 +369,7 @@ func TestService_connectWithPeer(t *testing.T) { { name: "bad peer", peers: func() *peers.Status { - ps := peers.NewStatus(context.Background(), &peers.StatusConfig{ + ps := peers.NewStatus(t.Context(), &peers.StatusConfig{ ScorerParams: &scorers.Config{}, }) for i := 0; i < 10; i++ { @@ -389,7 +389,7 @@ func TestService_connectWithPeer(t *testing.T) { t.Fatal(err) } }() - ctx := context.Background() + ctx := t.Context() s := &Service{ host: h, peers: tt.peers, diff --git 
a/beacon-chain/p2p/subnets_test.go b/beacon-chain/p2p/subnets_test.go index 231815a3b8..faa6b17ed9 100644 --- a/beacon-chain/p2p/subnets_test.go +++ b/beacon-chain/p2p/subnets_test.go @@ -42,7 +42,7 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) { require.NoError(t, err) // Create a context. - ctx := context.Background() + ctx := t.Context() // Use shorter period for testing. currentPeriod := pollingPeriod diff --git a/beacon-chain/rpc/core/validator_test.go b/beacon-chain/rpc/core/validator_test.go index 9001d92acc..f60de4214e 100644 --- a/beacon-chain/rpc/core/validator_test.go +++ b/beacon-chain/rpc/core/validator_test.go @@ -1,7 +1,6 @@ package core import ( - "context" "encoding/binary" "testing" "time" @@ -107,7 +106,7 @@ func TestService_SubmitSignedAggregateSelectionProof(t *testing.T) { }, Signature: fakeSig, } - rpcError := s.SubmitSignedAggregateSelectionProof(context.Background(), agg) + rpcError := s.SubmitSignedAggregateSelectionProof(t.Context(), agg) assert.Equal(t, true, rpcError == nil) }) @@ -122,7 +121,7 @@ func TestService_SubmitSignedAggregateSelectionProof(t *testing.T) { }, Signature: make([]byte, 96), } - rpcError := s.SubmitSignedAggregateSelectionProof(context.Background(), agg) + rpcError := s.SubmitSignedAggregateSelectionProof(t.Context(), agg) assert.ErrorContains(t, "old aggregate and proof", rpcError.Err) }) @@ -136,7 +135,7 @@ func TestService_SubmitSignedAggregateSelectionProof(t *testing.T) { }, Signature: make([]byte, 96), } - rpcError := s.SubmitSignedAggregateSelectionProof(context.Background(), agg) + rpcError := s.SubmitSignedAggregateSelectionProof(t.Context(), agg) assert.ErrorContains(t, "electra aggregate and proof not supported yet", rpcError.Err) }) } diff --git a/beacon-chain/rpc/eth/beacon/handlers_pool_test.go b/beacon-chain/rpc/eth/beacon/handlers_pool_test.go index 9c400649b3..fcbfed6790 100644 --- a/beacon-chain/rpc/eth/beacon/handlers_pool_test.go +++ 
b/beacon-chain/rpc/eth/beacon/handlers_pool_test.go @@ -2,7 +2,6 @@ package beacon import ( "bytes" - "context" "encoding/json" "fmt" "net/http" @@ -1931,7 +1930,7 @@ func TestGetProposerSlashings(t *testing.T) { } func TestSubmitAttesterSlashings(t *testing.T) { - ctx := context.Background() + ctx := t.Context() transition.SkipSlotCache.Disable() defer transition.SkipSlotCache.Enable() diff --git a/beacon-chain/rpc/eth/beacon/handlers_state_test.go b/beacon-chain/rpc/eth/beacon/handlers_state_test.go index ff94f313ee..690f796acf 100644 --- a/beacon-chain/rpc/eth/beacon/handlers_state_test.go +++ b/beacon-chain/rpc/eth/beacon/handlers_state_test.go @@ -28,7 +28,7 @@ import ( ) func TestGetStateRoot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() fakeState, err := util.NewBeaconState() require.NoError(t, err) stateRoot, err := fakeState.HashTreeRoot(ctx) @@ -128,7 +128,7 @@ func TestGetRandao(t *testing.T) { epochCurrent := primitives.Epoch(100000) epochOld := 100000 - params.BeaconConfig().EpochsPerHistoricalVector + 1 - ctx := context.Background() + ctx := t.Context() st, err := util.NewBeaconState() require.NoError(t, err) // Set slot to epoch 100000 @@ -420,7 +420,7 @@ func Test_extractSyncSubcommittees(t *testing.T) { } func TestGetSyncCommittees(t *testing.T) { - ctx := context.Background() + ctx := t.Context() st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize) syncCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize) vals := st.Validators() diff --git a/beacon-chain/rpc/eth/beacon/handlers_test.go b/beacon-chain/rpc/eth/beacon/handlers_test.go index 32d69a69d5..28e1f5a2f4 100644 --- a/beacon-chain/rpc/eth/beacon/handlers_test.go +++ b/beacon-chain/rpc/eth/beacon/handlers_test.go @@ -3490,7 +3490,7 @@ func TestPublishBlindedBlockV2SSZ(t *testing.T) { } func TestValidateConsensus(t *testing.T) { - ctx := context.Background() + ctx := t.Context() parentState, privs := 
util.DeterministicGenesisState(t, params.MinimalSpecConfig().MinGenesisActiveValidatorCount) parentBlock, err := util.GenerateFullBlock(parentState, privs, util.DefaultBlockGenConfig(), parentState.Slot()) @@ -3525,7 +3525,7 @@ func TestValidateEquivocation(t *testing.T) { roblock, err := blocks.NewROBlockWithRoot(blk, bytesutil.ToBytes32([]byte("root"))) require.NoError(t, err) fc := doublylinkedtree.New() - require.NoError(t, fc.InsertNode(context.Background(), st, roblock)) + require.NoError(t, fc.InsertNode(t.Context(), st, roblock)) server := &Server{ ForkchoiceFetcher: &chainMock.ChainService{ForkChoiceStore: fc}, } @@ -3544,7 +3544,7 @@ func TestValidateEquivocation(t *testing.T) { require.NoError(t, err) fc := doublylinkedtree.New() - require.NoError(t, fc.InsertNode(context.Background(), st, roblock)) + require.NoError(t, fc.InsertNode(t.Context(), st, roblock)) server := &Server{ ForkchoiceFetcher: &chainMock.ChainService{ForkChoiceStore: fc}, } @@ -3556,7 +3556,7 @@ func TestValidateEquivocation(t *testing.T) { func TestServer_GetBlockRoot(t *testing.T) { beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() url := "http://example.com/eth/v1/beacon/blocks/{block_id}/root" genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB) @@ -3767,7 +3767,7 @@ func TestServer_GetBlockRoot(t *testing.T) { } func TestGetStateFork(t *testing.T) { - ctx := context.Background() + ctx := t.Context() request := httptest.NewRequest(http.MethodGet, "http://foo.example/eth/v1/beacon/states/{state_id}/fork", nil) request.SetPathValue("state_id", "head") request.Header.Set("Accept", "application/octet-stream") @@ -3877,7 +3877,7 @@ func TestGetStateFork(t *testing.T) { func TestGetCommittees(t *testing.T) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() url := "http://example.com/eth/v1/beacon/states/{state_id}/committees" var st state.BeaconState @@ -4123,7 +4123,7 @@ func TestGetCommittees(t *testing.T) { func 
TestGetBlockHeaders(t *testing.T) { beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() _, blkContainers := fillDBTestBlocks(ctx, t, beaconDB) headBlock := blkContainers[len(blkContainers)-1] @@ -4715,7 +4715,7 @@ func TestGetDepositSnapshot(t *testing.T) { chainData := ð.ETH1ChainData{ DepositSnapshot: snapshot.ToProto(), } - err = beaconDB.SaveExecutionChainData(context.Background(), chainData) + err = beaconDB.SaveExecutionChainData(t.Context(), chainData) require.NoError(t, err) s := Server{ BeaconDB: beaconDB, @@ -4767,11 +4767,11 @@ func TestServer_broadcastBlobSidecars(t *testing.T) { blk, err := blocks.NewSignedBeaconBlock(b.Block) require.NoError(t, err) - require.NoError(t, server.broadcastSeenBlockSidecars(context.Background(), blk, b.GetDeneb().Blobs, b.GetDeneb().KzgProofs)) + require.NoError(t, server.broadcastSeenBlockSidecars(t.Context(), blk, b.GetDeneb().Blobs, b.GetDeneb().KzgProofs)) require.LogsDoNotContain(t, hook, "Broadcasted blob sidecar for already seen block") server.FinalizationFetcher = &chainMock.ChainService{NotFinalized: false} - require.NoError(t, server.broadcastSeenBlockSidecars(context.Background(), blk, b.GetDeneb().Blobs, b.GetDeneb().KzgProofs)) + require.NoError(t, server.broadcastSeenBlockSidecars(t.Context(), blk, b.GetDeneb().Blobs, b.GetDeneb().KzgProofs)) require.LogsContain(t, hook, "Broadcasted blob sidecar for already seen block") } diff --git a/beacon-chain/rpc/eth/blob/handlers_test.go b/beacon-chain/rpc/eth/blob/handlers_test.go index 78074e85a1..6da05a1f35 100644 --- a/beacon-chain/rpc/eth/blob/handlers_test.go +++ b/beacon-chain/rpc/eth/blob/handlers_test.go @@ -2,7 +2,6 @@ package blob import ( "bytes" - "context" "encoding/json" "fmt" "net/http" @@ -40,7 +39,7 @@ func TestBlobs(t *testing.T) { db := testDB.SetupDB(t) denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4) - require.NoError(t, db.SaveBlock(context.Background(), denebBlock)) + 
require.NoError(t, db.SaveBlock(t.Context(), denebBlock)) bs := filesystem.NewEphemeralBlobStorage(t) testSidecars := verification.FakeVerifySliceForTest(t, blobs) for i := range testSidecars { @@ -308,7 +307,7 @@ func TestBlobs(t *testing.T) { commitments, err := denebBlock.Block().Body().BlobKzgCommitments() require.NoError(t, err) require.Equal(t, len(commitments), 0) - require.NoError(t, db.SaveBlock(context.Background(), denebBlock)) + require.NoError(t, db.SaveBlock(t.Context(), denebBlock)) u := "http://foo.example/333" request := httptest.NewRequest("GET", u, nil) @@ -420,7 +419,7 @@ func TestBlobs_Electra(t *testing.T) { db := testDB.SetupDB(t) electraBlock, blobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 123, maxBlobsPerBlockByVersion(version.Electra)) - require.NoError(t, db.SaveBlock(context.Background(), electraBlock)) + require.NoError(t, db.SaveBlock(t.Context(), electraBlock)) bs := filesystem.NewEphemeralBlobStorage(t) testSidecars := verification.FakeVerifySliceForTest(t, blobs) for i := range testSidecars { diff --git a/beacon-chain/rpc/eth/debug/handlers_test.go b/beacon-chain/rpc/eth/debug/handlers_test.go index d300e0d793..873b3666a7 100644 --- a/beacon-chain/rpc/eth/debug/handlers_test.go +++ b/beacon-chain/rpc/eth/debug/handlers_test.go @@ -2,7 +2,6 @@ package debug import ( "bytes" - "context" "encoding/json" "net/http" "net/http/httptest" @@ -25,7 +24,7 @@ import ( ) func TestGetBeaconStateV2(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := dbtest.SetupDB(t) t.Run("phase0", func(t *testing.T) { diff --git a/beacon-chain/rpc/eth/events/events_test.go b/beacon-chain/rpc/eth/events/events_test.go index 7879d30275..e8a6fcb628 100644 --- a/beacon-chain/rpc/eth/events/events_test.go +++ b/beacon-chain/rpc/eth/events/events_test.go @@ -40,7 +40,7 @@ var testEventWriteTimeout = 100 * time.Millisecond func requireAllEventsReceived(t *testing.T, stn, opn *mockChain.EventFeedWrapper, events []*feed.Event, req 
*topicRequest, s *Server, w *StreamingResponseWriterRecorder, logs chan *logrus.Entry) { // maxBufferSize param copied from sse lib client code sseR := sse.NewEventStreamReader(w.Body(), 1<<24) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) + ctx, cancel := context.WithTimeout(t.Context(), time.Second) defer cancel() expected := make(map[string]bool) @@ -48,7 +48,7 @@ func requireAllEventsReceived(t *testing.T, stn, opn *mockChain.EventFeedWrapper ev := events[i] // serialize the event the same way the server will so that we can compare expectation to results. top := topicForEvent(ev) - eb, err := s.lazyReaderForEvent(context.Background(), ev, req) + eb, err := s.lazyReaderForEvent(t.Context(), ev, req) require.NoError(t, err) exb, err := io.ReadAll(eb()) require.NoError(t, err) @@ -341,7 +341,7 @@ func newStreamTestSync(t *testing.T) *streamTestSync { logChan := make(chan *logrus.Entry, 100) cew := util.NewChannelEntryWriter(logChan) undo := util.RegisterHookWithUndo(logger, cew) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) return &streamTestSync{ t: t, ctx: ctx, @@ -590,7 +590,7 @@ func TestStreamEvents_OperationsEvents(t *testing.T) { } func TestFillEventData(t *testing.T) { - ctx := context.Background() + ctx := t.Context() t.Run("AlreadyFilledData_ShouldShortCircuitWithoutError", func(t *testing.T) { b, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(ð.SignedBeaconBlockBellatrix{})) require.NoError(t, err) @@ -731,7 +731,7 @@ func wedgedWriterTestCase(t *testing.T, queueDepth func([]*feed.Event) int) { EventFeedDepth: queueDepth(events), } - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second) defer cancel() eventsWritten := make(chan struct{}) go func() { diff --git a/beacon-chain/rpc/eth/helpers/sync_test.go b/beacon-chain/rpc/eth/helpers/sync_test.go index 
7e5889e193..ca007c0c53 100644 --- a/beacon-chain/rpc/eth/helpers/sync_test.go +++ b/beacon-chain/rpc/eth/helpers/sync_test.go @@ -1,7 +1,6 @@ package helpers import ( - "context" "strconv" "testing" @@ -25,7 +24,7 @@ import ( ) func TestIsOptimistic(t *testing.T) { - ctx := context.Background() + ctx := t.Context() t.Run("head optimistic", func(t *testing.T) { cs := &chainmock.ChainService{Optimistic: true} diff --git a/beacon-chain/rpc/eth/light-client/handlers_test.go b/beacon-chain/rpc/eth/light-client/handlers_test.go index e390ff0194..0bd382cb66 100644 --- a/beacon-chain/rpc/eth/light-client/handlers_test.go +++ b/beacon-chain/rpc/eth/light-client/handlers_test.go @@ -2,7 +2,6 @@ package lightclient import ( "bytes" - "context" "encoding/json" "fmt" "math/rand" @@ -449,7 +448,7 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) { func TestLightClientHandler_GetLightClientByRange(t *testing.T) { helpers.ClearCache() - ctx := context.Background() + ctx := t.Context() params.SetupTestConfigCleanup(t) config := params.BeaconConfig() @@ -1503,7 +1502,7 @@ func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) { for testVersion := 1; testVersion < 6; testVersion++ { t.Run(version.String(testVersion), func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() l := util.NewTestLightClient(t, testVersion) update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock) @@ -1532,7 +1531,7 @@ func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) { }) t.Run(version.String(testVersion)+" SSZ", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() l := util.NewTestLightClient(t, testVersion) update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock) @@ -1588,7 +1587,7 @@ func 
TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) { for testVersion := 1; testVersion < 6; testVersion++ { t.Run(version.String(testVersion), func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() l := util.NewTestLightClient(t, testVersion) update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock) require.NoError(t, err) @@ -1618,7 +1617,7 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) { }) t.Run(version.String(testVersion)+" SSZ", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() l := util.NewTestLightClient(t, testVersion) update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock) require.NoError(t, err) diff --git a/beacon-chain/rpc/eth/rewards/handlers_test.go b/beacon-chain/rpc/eth/rewards/handlers_test.go index eb0ce82c07..b3d5f2c9ae 100644 --- a/beacon-chain/rpc/eth/rewards/handlers_test.go +++ b/beacon-chain/rpc/eth/rewards/handlers_test.go @@ -2,7 +2,6 @@ package rewards import ( "bytes" - "context" "encoding/json" "fmt" "net/http" @@ -100,7 +99,7 @@ func BlockRewardTestSetup(t *testing.T, ver int) (state.BeaconState, interfaces. 
require.NoError(t, st.SetValidators(validators)) require.NoError(t, st.SetBalances(balances)) require.NoError(t, st.SetCurrentParticipationBits(make([]byte, valCount))) - syncCommittee, err := altair.NextSyncCommittee(context.Background(), st) + syncCommittee, err := altair.NextSyncCommittee(t.Context(), st) require.NoError(t, err) require.NoError(t, st.SetCurrentSyncCommittee(syncCommittee)) slot0bRoot := bytesutil.PadTo([]byte("slot0root"), 32) diff --git a/beacon-chain/rpc/eth/rewards/service_test.go b/beacon-chain/rpc/eth/rewards/service_test.go index 0fd06b188c..026246d44a 100644 --- a/beacon-chain/rpc/eth/rewards/service_test.go +++ b/beacon-chain/rpc/eth/rewards/service_test.go @@ -1,7 +1,6 @@ package rewards import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition" @@ -13,7 +12,7 @@ import ( ) func TestGetStateForRewards_NextSlotCacheHit(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := dbutil.SetupDB(t) st, err := util.NewBeaconStateDeneb() diff --git a/beacon-chain/rpc/eth/validator/handlers_test.go b/beacon-chain/rpc/eth/validator/handlers_test.go index 1d3f9eea04..ca0e9c3fcc 100644 --- a/beacon-chain/rpc/eth/validator/handlers_test.go +++ b/beacon-chain/rpc/eth/validator/handlers_test.go @@ -2,7 +2,6 @@ package validator import ( "bytes" - "context" "encoding/json" "fmt" "net/http" @@ -1015,7 +1014,7 @@ func TestSubmitSyncCommitteeSubscription(t *testing.T) { require.NoError(t, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - bs, err := util.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := util.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err, "Could not set up genesis state") genesisRoot, err := genesis.Block.HashTreeRoot() require.NoError(t, err, "Could not get signing root") @@ -1186,7 +1185,7 @@ func TestSubmitBeaconCommitteeSubscription(t *testing.T) { require.NoError(t, err) eth1Data, err := 
util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - bs, err := transition.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err, "Could not set up genesis state") // Set state to non-epoch start slot. require.NoError(t, bs.SetSlot(5)) @@ -1587,7 +1586,7 @@ func TestGetAttestationData(t *testing.T) { }) t.Run("request slot is not current slot", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := dbutil.SetupDB(t) slot := 3*params.BeaconConfig().SlotsPerEpoch + 1 @@ -2181,7 +2180,7 @@ func TestGetAttesterDuties(t *testing.T) { require.NoError(t, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - bs, err := transition.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err, "Could not set up genesis state") // Set state to non-epoch start slot. 
require.NoError(t, bs.SetSlot(5)) @@ -2211,7 +2210,7 @@ func TestGetAttesterDuties(t *testing.T) { State: bs, Root: genesisRoot[:], Slot: &chainSlot, } db := dbutil.SetupDB(t) - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot)) + require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot)) s := &Server{ Stater: &testutil.MockStater{ @@ -2382,7 +2381,7 @@ func TestGetAttesterDuties(t *testing.T) { require.Equal(t, 0, len(resp.Data)) }) t.Run("execution optimistic", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() parentRoot := [32]byte{'a'} blk := util.NewBeaconBlock() @@ -2465,10 +2464,10 @@ func TestGetProposerDuties(t *testing.T) { } db := dbutil.SetupDB(t) - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot)) + require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot)) t.Run("ok", func(t *testing.T) { - bs, err := transition.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err, "Could not set up genesis state") require.NoError(t, bs.SetSlot(params.BeaconConfig().SlotsPerEpoch)) require.NoError(t, bs.SetBlockRoots(roots)) @@ -2510,7 +2509,7 @@ func TestGetProposerDuties(t *testing.T) { assert.Equal(t, hexutil.Encode(pubKeys[12289]), expectedDuty.Pubkey) }) t.Run("next epoch", func(t *testing.T) { - bs, err := transition.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err, "Could not set up genesis state") require.NoError(t, bs.SetSlot(params.BeaconConfig().SlotsPerEpoch)) require.NoError(t, bs.SetBlockRoots(roots)) @@ -2552,7 +2551,7 @@ func TestGetProposerDuties(t *testing.T) { assert.Equal(t, hexutil.Encode(pubKeys[1360]), expectedDuty.Pubkey) }) t.Run("epoch out of bounds", func(t *testing.T) { - bs, err := 
transition.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err, "Could not set up genesis state") // Set state to non-epoch start slot. require.NoError(t, bs.SetSlot(5)) @@ -2586,7 +2585,7 @@ func TestGetProposerDuties(t *testing.T) { assert.StringContains(t, fmt.Sprintf("Request epoch %d can not be greater than next epoch %d", currentEpoch+2, currentEpoch+1), e.Message) }) t.Run("execution optimistic", func(t *testing.T) { - bs, err := transition.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err, "Could not set up genesis state") // Set state to non-epoch start slot. require.NoError(t, bs.SetSlot(5)) @@ -2930,7 +2929,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) { assert.StringContains(t, "Epoch is too far in the future", e.Message) }) t.Run("execution optimistic", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := dbutil.SetupDB(t) require.NoError(t, db.SaveStateSummary(ctx, &ethpbalpha.StateSummary{Slot: 0, Root: []byte("root")})) require.NoError(t, db.SaveLastValidatedCheckpoint(ctx, &ethpbalpha.Checkpoint{Epoch: 0, Root: []byte("root")})) diff --git a/beacon-chain/rpc/lookup/blocker_test.go b/beacon-chain/rpc/lookup/blocker_test.go index bccd2f967a..8251b19ae1 100644 --- a/beacon-chain/rpc/lookup/blocker_test.go +++ b/beacon-chain/rpc/lookup/blocker_test.go @@ -1,7 +1,6 @@ package lookup import ( - "context" "fmt" "math" "net/http" @@ -27,7 +26,7 @@ import ( func TestGetBlock(t *testing.T) { beaconDB := testDB.SetupDB(t) - ctx := context.Background() + ctx := t.Context() genBlk, blkContainers := testutil.FillDBWithBlocks(ctx, t, beaconDB) canonicalRoots := make(map[[32]byte]bool) @@ -163,10 +162,10 @@ func TestGetBlob(t *testing.T) { cfg := params.BeaconConfig().Copy() cfg.DenebForkEpoch = 1
params.OverrideBeaconConfig(cfg) - ctx := context.Background() + ctx := t.Context() db := testDB.SetupDB(t) denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4) - require.NoError(t, db.SaveBlock(context.Background(), denebBlock)) + require.NoError(t, db.SaveBlock(t.Context(), denebBlock)) _, bs := filesystem.NewEphemeralBlobStorageAndFs(t) testSidecars := verification.FakeVerifySliceForTest(t, blobs) for i := range testSidecars { diff --git a/beacon-chain/rpc/lookup/stater_test.go b/beacon-chain/rpc/lookup/stater_test.go index c7e6d0e38c..10efbd51ed 100644 --- a/beacon-chain/rpc/lookup/stater_test.go +++ b/beacon-chain/rpc/lookup/stater_test.go @@ -1,7 +1,6 @@ package lookup import ( - "context" "strconv" "strings" "testing" @@ -24,7 +23,7 @@ import ( ) func TestGetState(t *testing.T) { - ctx := context.Background() + ctx := t.Context() headSlot := primitives.Slot(123) fillSlot := func(state *ethpb.BeaconState) error { @@ -213,7 +212,7 @@ func TestGetState(t *testing.T) { } func TestGetStateRoot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() headSlot := primitives.Slot(123) fillSlot := func(state *ethpb.BeaconState) error { @@ -426,7 +425,7 @@ func TestNewStateNotFoundError(t *testing.T) { func TestStateBySlot_FutureSlot(t *testing.T) { slot := primitives.Slot(100) p := BeaconDbStater{GenesisTimeFetcher: &chainMock.ChainService{Slot: &slot}} - _, err := p.StateBySlot(context.Background(), 101) + _, err := p.StateBySlot(t.Context(), 101) assert.ErrorContains(t, "requested slot is in the future", err) } @@ -440,7 +439,7 @@ func TestStateBySlot_AfterHeadSlot(t *testing.T) { mockReplayer := mockstategen.NewReplayerBuilder() mockReplayer.SetMockStateForSlot(slotSt, 101) p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock, ReplayerBuilder: mockReplayer} - st, err := p.StateBySlot(context.Background(), 101) + st, err := p.StateBySlot(t.Context(), 101) require.NoError(t, err) assert.Equal(t, 
primitives.Slot(101), st.Slot()) } diff --git a/beacon-chain/rpc/prysm/beacon/handlers_test.go b/beacon-chain/rpc/prysm/beacon/handlers_test.go index 9fb3d17f85..6290c55dc8 100644 --- a/beacon-chain/rpc/prysm/beacon/handlers_test.go +++ b/beacon-chain/rpc/prysm/beacon/handlers_test.go @@ -2,7 +2,6 @@ package beacon import ( "bytes" - "context" "encoding/json" "fmt" "io" @@ -95,7 +94,7 @@ func addDefaultReplayerBuilder(s *Server, h stategen.HistoryAccessor) { func TestServer_GetIndividualVotes_ValidatorsDontExist(t *testing.T) { beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() var slot primitives.Slot = 0 validators := uint64(64) @@ -212,7 +211,7 @@ func TestServer_GetIndividualVotes_ValidatorsDontExist(t *testing.T) { func TestServer_GetIndividualVotes_Working(t *testing.T) { helpers.ClearCache() beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() validators := uint64(32) stateWithValidators, _ := util.DeterministicGenesisState(t, validators) @@ -298,7 +297,7 @@ func TestServer_GetIndividualVotes_Working(t *testing.T) { func TestServer_GetIndividualVotes_WorkingAltair(t *testing.T) { helpers.ClearCache() beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() var slot primitives.Slot = 0 validators := uint64(32) @@ -380,7 +379,7 @@ func TestServer_GetIndividualVotes_AltairEndOfEpoch(t *testing.T) { params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.BeaconConfig()) beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() validators := uint64(32) beaconState, _ := util.DeterministicGenesisStateAltair(t, validators) @@ -477,7 +476,7 @@ func TestServer_GetIndividualVotes_BellatrixEndOfEpoch(t *testing.T) { params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.BeaconConfig()) beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() validators := uint64(32) beaconState, _ := 
util.DeterministicGenesisStateBellatrix(t, validators) @@ -574,7 +573,7 @@ func TestServer_GetIndividualVotes_CapellaEndOfEpoch(t *testing.T) { params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.BeaconConfig()) beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() validators := uint64(32) beaconState, _ := util.DeterministicGenesisStateCapella(t, validators) @@ -676,7 +675,7 @@ func TestServer_GetChainHead_NoGenesis(t *testing.T) { genBlock := util.NewBeaconBlock() genBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, fieldparams.RootLength) - util.SaveBlock(t, context.Background(), db, genBlock) + util.SaveBlock(t, t.Context(), db, genBlock) gRoot, err := genBlock.Block.HashTreeRoot() require.NoError(t, err) cases := []struct { @@ -744,10 +743,10 @@ func TestServer_GetChainHead_NoFinalizedBlock(t *testing.T) { genBlock := util.NewBeaconBlock() genBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, fieldparams.RootLength) - util.SaveBlock(t, context.Background(), db, genBlock) + util.SaveBlock(t, t.Context(), db, genBlock) gRoot, err := genBlock.Block.HashTreeRoot() require.NoError(t, err) - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), gRoot)) + require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), gRoot)) wsb, err := blocks.NewSignedBeaconBlock(genBlock) require.NoError(t, err) @@ -798,29 +797,29 @@ func TestServer_GetChainHead(t *testing.T) { db := dbTest.SetupDB(t) genBlock := util.NewBeaconBlock() genBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, fieldparams.RootLength) - util.SaveBlock(t, context.Background(), db, genBlock) + util.SaveBlock(t, t.Context(), db, genBlock) gRoot, err := genBlock.Block.HashTreeRoot() require.NoError(t, err) - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), gRoot)) + require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), gRoot)) finalizedBlock := util.NewBeaconBlock() finalizedBlock.Block.Slot = 1 finalizedBlock.Block.ParentRoot = 
bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength) - util.SaveBlock(t, context.Background(), db, finalizedBlock) + util.SaveBlock(t, t.Context(), db, finalizedBlock) fRoot, err := finalizedBlock.Block.HashTreeRoot() require.NoError(t, err) justifiedBlock := util.NewBeaconBlock() justifiedBlock.Block.Slot = 2 justifiedBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength) - util.SaveBlock(t, context.Background(), db, justifiedBlock) + util.SaveBlock(t, t.Context(), db, justifiedBlock) jRoot, err := justifiedBlock.Block.HashTreeRoot() require.NoError(t, err) prevJustifiedBlock := util.NewBeaconBlock() prevJustifiedBlock.Block.Slot = 3 prevJustifiedBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength) - util.SaveBlock(t, context.Background(), db, prevJustifiedBlock) + util.SaveBlock(t, t.Context(), db, prevJustifiedBlock) pjRoot, err := prevJustifiedBlock.Block.HashTreeRoot() require.NoError(t, err) diff --git a/beacon-chain/rpc/prysm/v1alpha1/beacon/assignments_test.go b/beacon-chain/rpc/prysm/v1alpha1/beacon/assignments_test.go index 1a8b970aec..7330c502dd 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/beacon/assignments_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/beacon/assignments_test.go @@ -1,7 +1,6 @@ package beacon import ( - "context" "encoding/binary" "fmt" "strconv" @@ -25,7 +24,7 @@ import ( func TestServer_ListAssignments_CannotRequestFutureEpoch(t *testing.T) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() bs := &Server{ BeaconDB: db, GenesisTimeFetcher: &mock.ChainService{}, @@ -47,7 +46,7 @@ func TestServer_ListAssignments_CannotRequestFutureEpoch(t *testing.T) { func TestServer_ListAssignments_Pagination_InputOutOfRange(t *testing.T) { helpers.ClearCache() db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() count := 100 validators := make([]*ethpb.Validator, 0, count) for i := 0; i < count; i++ { @@ -89,7 +88,7 @@ func 
TestServer_ListAssignments_Pagination_InputOutOfRange(t *testing.T) { } wanted := fmt.Sprintf("page start %d >= list %d", 500, count) - _, err = bs.ListValidatorAssignments(context.Background(), &ethpb.ListValidatorAssignmentsRequest{ + _, err = bs.ListValidatorAssignments(t.Context(), &ethpb.ListValidatorAssignmentsRequest{ PageToken: strconv.Itoa(2), QueryFilter: &ethpb.ListValidatorAssignmentsRequest_Genesis{Genesis: true}, }) @@ -105,14 +104,14 @@ func TestServer_ListAssignments_Pagination_ExceedsMaxPageSize(t *testing.T) { PageToken: strconv.Itoa(0), PageSize: exceedsMax, } - _, err := bs.ListValidatorAssignments(context.Background(), req) + _, err := bs.ListValidatorAssignments(t.Context(), req) assert.ErrorContains(t, wanted, err) } func TestServer_ListAssignments_Pagination_DefaultPageSize_NoArchive(t *testing.T) { helpers.ClearCache() db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() count := 500 validators := make([]*ethpb.Validator, 0, count) for i := 0; i < count; i++ { @@ -164,7 +163,7 @@ func TestServer_ListAssignments_Pagination_DefaultPageSize_NoArchive(t *testing. ReplayerBuilder: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(s)), } - res, err := bs.ListValidatorAssignments(context.Background(), &ethpb.ListValidatorAssignmentsRequest{ + res, err := bs.ListValidatorAssignments(t.Context(), &ethpb.ListValidatorAssignmentsRequest{ QueryFilter: &ethpb.ListValidatorAssignmentsRequest_Genesis{Genesis: true}, }) require.NoError(t, err) @@ -174,7 +173,7 @@ func TestServer_ListAssignments_Pagination_DefaultPageSize_NoArchive(t *testing.
activeIndices, err := helpers.ActiveValidatorIndices(ctx, s, 0) require.NoError(t, err) - assignments, err := helpers.CommitteeAssignments(context.Background(), s, 0, activeIndices[0:params.BeaconConfig().DefaultPageSize]) + assignments, err := helpers.CommitteeAssignments(t.Context(), s, 0, activeIndices[0:params.BeaconConfig().DefaultPageSize]) require.NoError(t, err) proposerSlots, err := helpers.ProposerAssignments(ctx, s, 0) require.NoError(t, err) @@ -197,7 +196,7 @@ func TestServer_ListAssignments_FilterPubkeysIndices_NoPagination(t *testing.T) helpers.ClearCache() db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() count := 100 validators := make([]*ethpb.Validator, 0, count) withdrawCreds := make([]byte, 32) @@ -238,7 +237,7 @@ func TestServer_ListAssignments_FilterPubkeysIndices_NoPagination(t *testing.T) pubKey2 := make([]byte, params.BeaconConfig().BLSPubkeyLength) binary.LittleEndian.PutUint64(pubKey2, 2) req := &ethpb.ListValidatorAssignmentsRequest{PublicKeys: [][]byte{pubKey1, pubKey2}, Indices: []primitives.ValidatorIndex{2, 3}} - res, err := bs.ListValidatorAssignments(context.Background(), req) + res, err := bs.ListValidatorAssignments(t.Context(), req) require.NoError(t, err) // Construct the wanted assignments.
@@ -246,7 +245,7 @@ func TestServer_ListAssignments_FilterPubkeysIndices_NoPagination(t *testing.T) activeIndices, err := helpers.ActiveValidatorIndices(ctx, s, 0) require.NoError(t, err) - assignments, err := helpers.CommitteeAssignments(context.Background(), s, 0, activeIndices[1:4]) + assignments, err := helpers.CommitteeAssignments(t.Context(), s, 0, activeIndices[1:4]) require.NoError(t, err) proposerSlots, err := helpers.ProposerAssignments(ctx, s, 0) require.NoError(t, err) @@ -269,7 +268,7 @@ func TestServer_ListAssignments_FilterPubkeysIndices_NoPagination(t *testing.T) func TestServer_ListAssignments_CanFilterPubkeysIndices_WithPagination(t *testing.T) { helpers.ClearCache() db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() count := 100 validators := make([]*ethpb.Validator, 0, count) withdrawCred := make([]byte, 32) @@ -308,7 +307,7 @@ func TestServer_ListAssignments_CanFilterPubkeysIndices_WithPagination(t *testin addDefaultReplayerBuilder(bs, db) req := &ethpb.ListValidatorAssignmentsRequest{Indices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6}, PageSize: 2, PageToken: "1"} - res, err := bs.ListValidatorAssignments(context.Background(), req) + res, err := bs.ListValidatorAssignments(t.Context(), req) require.NoError(t, err) // Construct the wanted assignments. @@ -316,7 +315,7 @@ func TestServer_ListAssignments_CanFilterPubkeysIndices_WithPagination(t *testin activeIndices, err := helpers.ActiveValidatorIndices(ctx, s, 0) require.NoError(t, err) - as, err := helpers.CommitteeAssignments(context.Background(), s, 0, activeIndices[3:5]) + as, err := helpers.CommitteeAssignments(t.Context(), s, 0, activeIndices[3:5]) require.NoError(t, err) proposalSlots, err := helpers.ProposerAssignments(ctx, s, 0) require.NoError(t, err) @@ -344,9 +343,9 @@ func TestServer_ListAssignments_CanFilterPubkeysIndices_WithPagination(t *testin // Test the wrap around scenario.
assignments = nil req = &ethpb.ListValidatorAssignmentsRequest{Indices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6}, PageSize: 5, PageToken: "1"} - res, err = bs.ListValidatorAssignments(context.Background(), req) + res, err = bs.ListValidatorAssignments(t.Context(), req) require.NoError(t, err) - as, err = helpers.CommitteeAssignments(context.Background(), s, 0, activeIndices[6:7]) + as, err = helpers.CommitteeAssignments(t.Context(), s, 0, activeIndices[6:7]) require.NoError(t, err) proposalSlots, err = helpers.ProposerAssignments(ctx, s, 0) require.NoError(t, err) diff --git a/beacon-chain/rpc/prysm/v1alpha1/beacon/attestations_test.go b/beacon-chain/rpc/prysm/v1alpha1/beacon/attestations_test.go index 83468569ce..bb8df746d8 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/beacon/attestations_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/beacon/attestations_test.go @@ -1,7 +1,6 @@ package beacon import ( - "context" "fmt" "sort" "strconv" "testing" @@ -34,7 +33,7 @@ import ( func TestServer_ListAttestations_NoResults(t *testing.T) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{ Slot: 0, @@ -62,7 +61,7 @@ func TestServer_ListAttestations_NoResults(t *testing.T) { func TestServer_ListAttestations_Genesis(t *testing.T) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{ Slot: 0, @@ -108,7 +107,7 @@ func TestServer_ListAttestations_Genesis(t *testing.T) { func TestServer_ListAttestations_NoPagination(t *testing.T) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() count := primitives.Slot(8) atts := make([]*ethpb.Attestation, 0, count) @@ -145,7 +144,7 @@ func TestServer_ListAttestations_NoPagination(t *testing.T) { func TestServer_ListAttestations_FiltersCorrectly(t *testing.T) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() someRoot :=
[32]byte{1, 2, 3} sourceRoot := [32]byte{4, 5, 6} @@ -260,7 +259,7 @@ func TestServer_ListAttestations_FiltersCorrectly(t *testing.T) { func TestServer_ListAttestations_Pagination_CustomPageParameters(t *testing.T) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() count := params.BeaconConfig().SlotsPerEpoch * 4 atts := make([]ethpb.Att, 0, count) @@ -367,7 +366,7 @@ func TestServer_ListAttestations_Pagination_CustomPageParameters(t *testing.T) { func TestServer_ListAttestations_Pagination_OutOfRange(t *testing.T) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() util.NewBeaconBlock() count := primitives.Slot(1) atts := make([]*ethpb.Attestation, 0, count) @@ -411,7 +410,7 @@ func TestServer_ListAttestations_Pagination_OutOfRange(t *testing.T) { } func TestServer_ListAttestations_Pagination_ExceedsMaxPageSize(t *testing.T) { - ctx := context.Background() + ctx := t.Context() bs := &Server{} exceedsMax := int32(cmd.Get().MaxRPCPageSize + 1) @@ -423,7 +422,7 @@ func TestServer_ListAttestations_Pagination_ExceedsMaxPageSize(t *testing.T) { func TestServer_ListAttestations_Pagination_DefaultPageSize(t *testing.T) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() count := primitives.Slot(params.BeaconConfig().DefaultPageSize) atts := make([]*ethpb.Attestation, 0, count) @@ -469,7 +468,7 @@ func TestServer_ListAttestationsElectra(t *testing.T) { params.OverrideBeaconConfig(cfg) db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() st, err := state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{ Slot: 0, @@ -549,7 +548,7 @@ func TestServer_mapAttestationToTargetRoot(t *testing.T) { func TestServer_ListIndexedAttestations_GenesisEpoch(t *testing.T) { db := dbTest.SetupDB(t) helpers.ClearCache() - ctx := context.Background() + ctx := t.Context() targetRoot1 := bytesutil.ToBytes32([]byte("root")) targetRoot2 := bytesutil.ToBytes32([]byte("root2")) @@ -599,7 +598,7
@@ func TestServer_ListIndexedAttestations_GenesisEpoch(t *testing.T) { indexedAtts := make([]*ethpb.IndexedAttestation, len(atts)+len(atts2)) for i := 0; i < len(atts); i++ { att := atts[i] - committee, err := helpers.BeaconCommitteeFromState(context.Background(), state, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), state, att.Data.Slot, att.Data.CommitteeIndex) require.NoError(t, err) idxAtt, err := attestation.ConvertToIndexed(ctx, atts[i], committee) require.NoError(t, err, "Could not convert attestation to indexed") @@ -609,7 +608,7 @@ func TestServer_ListIndexedAttestations_GenesisEpoch(t *testing.T) { } for i := 0; i < len(atts2); i++ { att := atts2[i] - committee, err := helpers.BeaconCommitteeFromState(context.Background(), state, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), state, att.Data.Slot, att.Data.CommitteeIndex) require.NoError(t, err) idxAtt, err := attestation.ConvertToIndexed(ctx, atts2[i], committee) require.NoError(t, err, "Could not convert attestation to indexed") @@ -659,7 +658,7 @@ func TestServer_ListIndexedAttestations_GenesisEpoch(t *testing.T) { func TestServer_ListIndexedAttestations_OldEpoch(t *testing.T) { db := dbTest.SetupDB(t) helpers.ClearCache() - ctx := context.Background() + ctx := t.Context() blockRoot := bytesutil.ToBytes32([]byte("root")) count := params.BeaconConfig().SlotsPerEpoch @@ -708,7 +707,7 @@ func TestServer_ListIndexedAttestations_OldEpoch(t *testing.T) { indexedAtts := make([]*ethpb.IndexedAttestation, len(atts)) for i := 0; i < len(atts); i++ { att := atts[i] - committee, err := helpers.BeaconCommitteeFromState(context.Background(), state, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), state, att.Data.Slot, att.Data.CommitteeIndex) require.NoError(t, err) idxAtt, err := attestation.ConvertToIndexed(ctx, atts[i], committee) 
require.NoError(t, err, "Could not convert attestation to indexed") @@ -747,7 +746,7 @@ func TestServer_ListIndexedAttestationsElectra(t *testing.T) { db := dbTest.SetupDB(t) helpers.ClearCache() - ctx := context.Background() + ctx := t.Context() targetRoot1 := bytesutil.ToBytes32([]byte("root")) targetRoot2 := bytesutil.ToBytes32([]byte("root2")) @@ -801,7 +800,7 @@ func TestServer_ListIndexedAttestationsElectra(t *testing.T) { indexedAtts := make([]*ethpb.IndexedAttestationElectra, len(atts)+len(atts2)) for i := 0; i < len(atts); i++ { att := atts[i] - committee, err := helpers.BeaconCommitteeFromState(context.Background(), state, att.Data.Slot, 0) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), state, att.Data.Slot, 0) require.NoError(t, err) idxAtt, err := attestation.ConvertToIndexed(ctx, atts[i], committee) require.NoError(t, err, "Could not convert attestation to indexed") @@ -811,7 +810,7 @@ func TestServer_ListIndexedAttestationsElectra(t *testing.T) { } for i := 0; i < len(atts2); i++ { att := atts2[i] - committee, err := helpers.BeaconCommitteeFromState(context.Background(), state, att.Data.Slot, 0) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), state, att.Data.Slot, 0) require.NoError(t, err) idxAtt, err := attestation.ConvertToIndexed(ctx, atts2[i], committee) require.NoError(t, err, "Could not convert attestation to indexed") @@ -859,7 +858,7 @@ func TestServer_ListIndexedAttestationsElectra(t *testing.T) { } func TestServer_AttestationPool_Pagination_ExceedsMaxPageSize(t *testing.T) { - ctx := context.Background() + ctx := t.Context() bs := &Server{} exceedsMax := int32(cmd.Get().MaxRPCPageSize + 1) @@ -870,7 +869,7 @@ func TestServer_AttestationPool_Pagination_ExceedsMaxPageSize(t *testing.T) { } func TestServer_AttestationPool_Pagination_OutOfRange(t *testing.T) { - ctx := context.Background() + ctx := t.Context() bs := &Server{ AttestationsPool: attestations.NewPool(), } @@ -919,7 +918,7 @@ func 
TestServer_AttestationPool_Pagination_OutOfRange(t *testing.T) { } func TestServer_AttestationPool_Pagination_DefaultPageSize(t *testing.T) { - ctx := context.Background() + ctx := t.Context() bs := &Server{ AttestationsPool: attestations.NewPool(), } @@ -940,7 +939,7 @@ func TestServer_AttestationPool_Pagination_DefaultPageSize(t *testing.T) { } func TestServer_AttestationPool_Pagination_CustomPageSize(t *testing.T) { - ctx := context.Background() + ctx := t.Context() bs := &Server{ AttestationsPool: attestations.NewPool(), } @@ -997,7 +996,7 @@ func TestServer_AttestationPool_Pagination_CustomPageSize(t *testing.T) { } func TestServer_AttestationPoolElectra(t *testing.T) { - ctx := context.Background() + ctx := t.Context() bs := &Server{ AttestationsPool: attestations.NewPool(), } diff --git a/beacon-chain/rpc/prysm/v1alpha1/beacon/blocks_test.go b/beacon-chain/rpc/prysm/v1alpha1/beacon/blocks_test.go index 6738929065..74849b6b81 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/beacon/blocks_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/beacon/blocks_test.go @@ -1,7 +1,6 @@ package beacon import ( - "context" "fmt" "strconv" "testing" @@ -35,7 +34,7 @@ func TestServer_GetChainHead_NoGenesis(t *testing.T) { genBlock := util.NewBeaconBlock() genBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, fieldparams.RootLength) - util.SaveBlock(t, context.Background(), db, genBlock) + util.SaveBlock(t, t.Context(), db, genBlock) gRoot, err := genBlock.Block.HashTreeRoot() require.NoError(t, err) cases := []struct { @@ -80,7 +79,7 @@ func TestServer_GetChainHead_NoGenesis(t *testing.T) { OptimisticModeFetcher: &chainMock.ChainService{}, }, } - _, err = bs.GetChainHead(context.Background(), nil) + _, err = bs.GetChainHead(t.Context(), nil) require.ErrorContains(t, "could not get genesis block", err) } } @@ -97,10 +96,10 @@ func TestServer_GetChainHead_NoFinalizedBlock(t *testing.T) { genBlock := util.NewBeaconBlock() genBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 
fieldparams.RootLength) - util.SaveBlock(t, context.Background(), db, genBlock) + util.SaveBlock(t, t.Context(), db, genBlock) gRoot, err := genBlock.Block.HashTreeRoot() require.NoError(t, err) - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), gRoot)) + require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), gRoot)) wsb, err := blocks.NewSignedBeaconBlock(genBlock) require.NoError(t, err) @@ -117,7 +116,7 @@ func TestServer_GetChainHead_NoFinalizedBlock(t *testing.T) { }, } - _, err = bs.GetChainHead(context.Background(), nil) + _, err = bs.GetChainHead(t.Context(), nil) require.ErrorContains(t, "could not get finalized block", err) } @@ -128,7 +127,7 @@ func TestServer_GetChainHead_NoHeadBlock(t *testing.T) { OptimisticModeFetcher: &chainMock.ChainService{}, }, } - _, err := bs.GetChainHead(context.Background(), nil) + _, err := bs.GetChainHead(t.Context(), nil) assert.ErrorContains(t, "head block of chain was nil", err) } @@ -139,29 +138,29 @@ func TestServer_GetChainHead(t *testing.T) { db := dbTest.SetupDB(t) genBlock := util.NewBeaconBlock() genBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, fieldparams.RootLength) - util.SaveBlock(t, context.Background(), db, genBlock) + util.SaveBlock(t, t.Context(), db, genBlock) gRoot, err := genBlock.Block.HashTreeRoot() require.NoError(t, err) - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), gRoot)) + require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), gRoot)) finalizedBlock := util.NewBeaconBlock() finalizedBlock.Block.Slot = 1 finalizedBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength) - util.SaveBlock(t, context.Background(), db, finalizedBlock) + util.SaveBlock(t, t.Context(), db, finalizedBlock) fRoot, err := finalizedBlock.Block.HashTreeRoot() require.NoError(t, err) justifiedBlock := util.NewBeaconBlock() justifiedBlock.Block.Slot = 2 justifiedBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength) - util.SaveBlock(t, 
context.Background(), db, justifiedBlock) + util.SaveBlock(t, t.Context(), db, justifiedBlock) jRoot, err := justifiedBlock.Block.HashTreeRoot() require.NoError(t, err) prevJustifiedBlock := util.NewBeaconBlock() prevJustifiedBlock.Block.Slot = 3 prevJustifiedBlock.Block.ParentRoot = bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength) - util.SaveBlock(t, context.Background(), db, prevJustifiedBlock) + util.SaveBlock(t, t.Context(), db, prevJustifiedBlock) pjRoot, err := prevJustifiedBlock.Block.HashTreeRoot() require.NoError(t, err) @@ -191,7 +190,7 @@ func TestServer_GetChainHead(t *testing.T) { }, } - head, err := bs.GetChainHead(context.Background(), nil) + head, err := bs.GetChainHead(t.Context(), nil) require.NoError(t, err) assert.Equal(t, primitives.Epoch(3), head.PreviousJustifiedEpoch, "Unexpected PreviousJustifiedEpoch") assert.Equal(t, primitives.Epoch(2), head.JustifiedEpoch, "Unexpected JustifiedEpoch") @@ -207,7 +206,7 @@ func TestServer_GetChainHead(t *testing.T) { func TestServer_ListBeaconBlocks_NoResults(t *testing.T) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() bs := &Server{ BeaconDB: db, @@ -304,7 +303,7 @@ func TestServer_ListBeaconBlocks_Genesis(t *testing.T) { func runListBlocksGenesis(t *testing.T, blk interfaces.ReadOnlySignedBeaconBlock, blkContainer *ethpb.BeaconBlockContainer) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() bs := &Server{ BeaconDB: db, @@ -407,7 +406,7 @@ func TestServer_ListBeaconBlocks_Genesis_MultiBlocks(t *testing.T) { func runListBeaconBlocksGenesisMultiBlocks(t *testing.T, genBlock interfaces.ReadOnlySignedBeaconBlock, blockCreator func(i primitives.Slot) interfaces.ReadOnlySignedBeaconBlock) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() bs := &Server{ BeaconDB: db, @@ -550,7 +549,7 @@ func runListBeaconBlocksPagination(t *testing.T, orphanedBlk interfaces.ReadOnly chain := &chainMock.ChainService{ CanonicalRoots: 
map[[32]byte]bool{}, } - ctx := context.Background() + ctx := t.Context() count := primitives.Slot(100) blks := make([]interfaces.ReadOnlySignedBeaconBlock, count) diff --git a/beacon-chain/rpc/prysm/v1alpha1/beacon/committees_test.go b/beacon-chain/rpc/prysm/v1alpha1/beacon/committees_test.go index 0289f0d3af..a54f1890fd 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/beacon/committees_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/beacon/committees_test.go @@ -1,7 +1,6 @@ package beacon import ( - "context" "encoding/binary" "math" "testing" @@ -33,7 +32,7 @@ func TestServer_ListBeaconCommittees_CurrentEpoch(t *testing.T) { helpers.ClearCache() numValidators := 128 - ctx := context.Background() + ctx := t.Context() headState := setupActiveValidators(t, numValidators) offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot)) @@ -58,7 +57,7 @@ func TestServer_ListBeaconCommittees_CurrentEpoch(t *testing.T) { require.NoError(t, err) attesterSeed, err := helpers.Seed(headState, 0, params.BeaconConfig().DomainBeaconAttester) require.NoError(t, err) - committees, err := computeCommittees(context.Background(), 0, activeIndices, attesterSeed) + committees, err := computeCommittees(t.Context(), 0, activeIndices, attesterSeed) require.NoError(t, err) wanted := ðpb.BeaconCommittees{ @@ -66,7 +65,7 @@ func TestServer_ListBeaconCommittees_CurrentEpoch(t *testing.T) { Committees: committees.SlotToUint64(), ActiveValidatorCount: uint64(numValidators), } - res, err := bs.ListBeaconCommittees(context.Background(), ðpb.ListCommitteesRequest{ + res, err := bs.ListBeaconCommittees(t.Context(), ðpb.ListCommitteesRequest{ QueryFilter: ðpb.ListCommitteesRequest_Genesis{Genesis: true}, }) require.NoError(t, err) @@ -87,7 +86,7 @@ func addDefaultReplayerBuilder(s *Server, h stategen.HistoryAccessor) { func TestServer_ListBeaconCommittees_PreviousEpoch(t *testing.T) { params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.BeaconConfig()) - ctx := 
context.Background() + ctx := t.Context() db := dbTest.SetupDB(t) helpers.ClearCache() @@ -129,7 +128,7 @@ func TestServer_ListBeaconCommittees_PreviousEpoch(t *testing.T) { require.NoError(t, err) startSlot, err := slots.EpochStart(1) require.NoError(t, err) - wanted, err := computeCommittees(context.Background(), startSlot, activeIndices, attesterSeed) + wanted, err := computeCommittees(t.Context(), startSlot, activeIndices, attesterSeed) require.NoError(t, err) tests := []struct { @@ -149,7 +148,7 @@ func TestServer_ListBeaconCommittees_PreviousEpoch(t *testing.T) { } helpers.ClearCache() for i, test := range tests { - res, err := bs.ListBeaconCommittees(context.Background(), test.req) + res, err := bs.ListBeaconCommittees(t.Context(), test.req) require.NoError(t, err) if !proto.Equal(res, test.res) { diff, _ := messagediff.PrettyDiff(res, test.res) @@ -162,7 +161,7 @@ func TestRetrieveCommitteesForRoot(t *testing.T) { db := dbTest.SetupDB(t) helpers.ClearCache() - ctx := context.Background() + ctx := t.Context() numValidators := 128 headState := setupActiveValidators(t, numValidators) @@ -196,9 +195,9 @@ func TestRetrieveCommitteesForRoot(t *testing.T) { activeIndices, err := helpers.ActiveValidatorIndices(ctx, headState, 0) require.NoError(t, err) - wanted, err := computeCommittees(context.Background(), 0, activeIndices, seed) + wanted, err := computeCommittees(t.Context(), 0, activeIndices, seed) require.NoError(t, err) - committees, activeIndices, err := bs.retrieveCommitteesForRoot(context.Background(), gRoot[:]) + committees, activeIndices, err := bs.retrieveCommitteesForRoot(t.Context(), gRoot[:]) require.NoError(t, err) wantedRes := ðpb.BeaconCommittees{ diff --git a/beacon-chain/rpc/prysm/v1alpha1/beacon/config_test.go b/beacon-chain/rpc/prysm/v1alpha1/beacon/config_test.go index a0a3c3dcb9..ea1cebc5c2 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/beacon/config_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/beacon/config_test.go @@ -1,7 +1,6 @@ package 
beacon import ( - "context" "fmt" "reflect" "testing" @@ -13,7 +12,7 @@ import ( ) func TestServer_GetBeaconConfig(t *testing.T) { - ctx := context.Background() + ctx := t.Context() bs := &Server{} res, err := bs.GetBeaconConfig(ctx, &emptypb.Empty{}) require.NoError(t, err) diff --git a/beacon-chain/rpc/prysm/v1alpha1/beacon/slashings_test.go b/beacon-chain/rpc/prysm/v1alpha1/beacon/slashings_test.go index 3d8b41c992..ba4186ea5f 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/beacon/slashings_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/beacon/slashings_test.go @@ -1,7 +1,6 @@ package beacon import ( - "context" "testing" mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing" @@ -17,7 +16,7 @@ import ( ) func TestServer_SubmitProposerSlashing(t *testing.T) { - ctx := context.Background() + ctx := t.Context() st, privs := util.DeterministicGenesisState(t, 64) slashedVal, err := st.ValidatorAtIndex(5) @@ -46,7 +45,7 @@ func TestServer_SubmitProposerSlashing(t *testing.T) { } func TestServer_SubmitAttesterSlashing(t *testing.T) { - ctx := context.Background() + ctx := t.Context() // We mark the validators at index 5, 6 as already slashed. 
st, privs := util.DeterministicGenesisState(t, 64) slashedVal, err := st.ValidatorAtIndex(5) @@ -79,7 +78,7 @@ func TestServer_SubmitAttesterSlashing(t *testing.T) { func TestServer_SubmitProposerSlashing_DontBroadcast(t *testing.T) { resetCfg := features.InitWithReset(&features.Flags{DisableBroadcastSlashings: true}) defer resetCfg() - ctx := context.Background() + ctx := t.Context() st, privs := util.DeterministicGenesisState(t, 64) slashedVal, err := st.ValidatorAtIndex(5) require.NoError(t, err) @@ -124,7 +123,7 @@ func TestServer_SubmitProposerSlashing_DontBroadcast(t *testing.T) { func TestServer_SubmitAttesterSlashing_DontBroadcast(t *testing.T) { resetCfg := features.InitWithReset(&features.Flags{DisableBroadcastSlashings: true}) defer resetCfg() - ctx := context.Background() + ctx := t.Context() // We mark the validators at index 5, 6 as already slashed. st, privs := util.DeterministicGenesisState(t, 64) slashedVal, err := st.ValidatorAtIndex(5) @@ -168,7 +167,7 @@ func TestServer_SubmitAttesterSlashing_DontBroadcast(t *testing.T) { } func TestServer_SubmitAttesterSlashingElectra(t *testing.T) { - ctx := context.Background() + ctx := t.Context() st, privs := util.DeterministicGenesisStateElectra(t, 64) slashedVal, err := st.ValidatorAtIndex(5) require.NoError(t, err) diff --git a/beacon-chain/rpc/prysm/v1alpha1/beacon/validators_test.go b/beacon-chain/rpc/prysm/v1alpha1/beacon/validators_test.go index 602b1fdf04..eaf217b013 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/beacon/validators_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/beacon/validators_test.go @@ -1,7 +1,6 @@ package beacon import ( - "context" "encoding/binary" "fmt" "sort" @@ -49,7 +48,7 @@ const ( func TestServer_GetValidatorActiveSetChanges_CannotRequestFutureEpoch(t *testing.T) { beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() st, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, st.SetSlot(0)) @@ -77,7 +76,7 @@ func 
TestServer_GetValidatorActiveSetChanges_CannotRequestFutureEpoch(t *testing func TestServer_ListValidatorBalances_CannotRequestFutureEpoch(t *testing.T) { beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() st, err := util.NewBeaconState() require.NoError(t, err) @@ -105,7 +104,7 @@ func TestServer_ListValidatorBalances_CannotRequestFutureEpoch(t *testing.T) { func TestServer_ListValidatorBalances_NoResults(t *testing.T) { beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() st, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, st.SetSlot(0)) @@ -146,7 +145,7 @@ func TestServer_ListValidatorBalances_NoResults(t *testing.T) { func TestServer_ListValidatorBalances_DefaultResponse_NoArchive(t *testing.T) { beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() numItems := 100 validators := make([]*ethpb.Validator, numItems) @@ -196,7 +195,7 @@ func TestServer_ListValidatorBalances_DefaultResponse_NoArchive(t *testing.T) { func TestServer_ListValidatorBalances_PaginationOutOfRange(t *testing.T) { beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() _, _, headState := setupValidators(t, beaconDB, 100) b := util.NewBeaconBlock() @@ -215,7 +214,7 @@ func TestServer_ListValidatorBalances_PaginationOutOfRange(t *testing.T) { } wanted := fmt.Sprintf("page start %d >= list %d", 200, len(headState.Balances())) - _, err = bs.ListValidatorBalances(context.Background(), ðpb.ListValidatorBalancesRequest{ + _, err = bs.ListValidatorBalances(t.Context(), ðpb.ListValidatorBalancesRequest{ PageToken: strconv.Itoa(2), PageSize: 100, QueryFilter: ðpb.ListValidatorBalancesRequest_Epoch{Epoch: 0}, @@ -233,7 +232,7 @@ func TestServer_ListValidatorBalances_ExceedsMaxPageSize(t *testing.T) { cmd.Get().MaxRPCPageSize, ) req := ðpb.ListValidatorBalancesRequest{PageSize: exceedsMax} - _, err := bs.ListValidatorBalances(context.Background(), req) + _, err := 
bs.ListValidatorBalances(t.Context(), req) assert.ErrorContains(t, wanted, err) } @@ -245,7 +244,7 @@ func pubKey(i uint64) []byte { func TestServer_ListValidatorBalances_Pagination_Default(t *testing.T) { beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() _, _, headState := setupValidators(t, beaconDB, 100) b := util.NewBeaconBlock() @@ -318,7 +317,7 @@ func TestServer_ListValidatorBalances_Pagination_Default(t *testing.T) { }}, } for _, test := range tests { - res, err := bs.ListValidatorBalances(context.Background(), test.req) + res, err := bs.ListValidatorBalances(t.Context(), test.req) require.NoError(t, err) if !proto.Equal(res, test.res) { t.Errorf("Expected %v, received %v", test.res, res) @@ -328,7 +327,7 @@ func TestServer_ListValidatorBalances_Pagination_Default(t *testing.T) { func TestServer_ListValidatorBalances_Pagination_CustomPageSizes(t *testing.T) { beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() count := 1000 _, _, headState := setupValidators(t, beaconDB, count) @@ -387,7 +386,7 @@ func TestServer_ListValidatorBalances_Pagination_CustomPageSizes(t *testing.T) { TotalSize: int32(count)}}, } for _, test := range tests { - res, err := bs.ListValidatorBalances(context.Background(), test.req) + res, err := bs.ListValidatorBalances(t.Context(), test.req) require.NoError(t, err) if !proto.Equal(res, test.res) { t.Errorf("Expected %v, received %v", test.res, res) @@ -398,7 +397,7 @@ func TestServer_ListValidatorBalances_Pagination_CustomPageSizes(t *testing.T) { func TestServer_ListValidatorBalances_OutOfRange(t *testing.T) { beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() _, _, headState := setupValidators(t, beaconDB, 1) b := util.NewBeaconBlock() gRoot, err := b.Block.HashTreeRoot() @@ -417,13 +416,13 @@ func TestServer_ListValidatorBalances_OutOfRange(t *testing.T) { req := ðpb.ListValidatorBalancesRequest{Indices: 
[]primitives.ValidatorIndex{primitives.ValidatorIndex(1)}, QueryFilter: ðpb.ListValidatorBalancesRequest_Epoch{Epoch: 0}} wanted := "Validator index 1 >= balance list 1" - _, err = bs.ListValidatorBalances(context.Background(), req) + _, err = bs.ListValidatorBalances(t.Context(), req) assert.ErrorContains(t, wanted, err) } func TestServer_ListValidators_CannotRequestFutureEpoch(t *testing.T) { beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() st, err := util.NewBeaconState() require.NoError(t, err) @@ -472,7 +471,7 @@ func TestServer_ListValidators_reqStateIsNil(t *testing.T) { // request uses HeadFetcher to get reqState. req1 := ðpb.ListValidatorsRequest{PageToken: strconv.Itoa(1), PageSize: 100} wanted := "Requested state is nil" - _, err := bs.ListValidators(context.Background(), req1) + _, err := bs.ListValidators(t.Context(), req1) assert.ErrorContains(t, wanted, err) // request uses StateGen to get reqState. @@ -481,14 +480,14 @@ func TestServer_ListValidators_reqStateIsNil(t *testing.T) { PageToken: strconv.Itoa(1), PageSize: 100, } - _, err = bs.ListValidators(context.Background(), req2) + _, err = bs.ListValidators(t.Context(), req2) assert.ErrorContains(t, wanted, err) } func TestServer_ListValidators_NoResults(t *testing.T) { beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() st, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, st.SetSlot(0)) @@ -526,7 +525,7 @@ func TestServer_ListValidators_NoResults(t *testing.T) { } func TestServer_ListValidators_OnlyActiveValidators(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := dbTest.SetupDB(t) count := 100 balances := make([]uint64, count) @@ -589,7 +588,7 @@ func TestServer_ListValidators_OnlyActiveValidators(t *testing.T) { } func TestServer_ListValidators_InactiveInTheMiddle(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := dbTest.SetupDB(t) count := 100 balances := 
make([]uint64, count) @@ -660,7 +659,7 @@ func TestServer_ListValidators_InactiveInTheMiddle(t *testing.T) { func TestServer_ListValidatorBalances_UnknownValidatorInResponse(t *testing.T) { beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() _, _, headState := setupValidators(t, beaconDB, 4) b := util.NewBeaconBlock() @@ -697,7 +696,7 @@ func TestServer_ListValidatorBalances_UnknownValidatorInResponse(t *testing.T) { NextPageToken: "", TotalSize: 3, } - res, err := bs.ListValidatorBalances(context.Background(), req) + res, err := bs.ListValidatorBalances(t.Context(), req) require.NoError(t, err) if !proto.Equal(res, wanted) { t.Errorf("Expected %v, received %v", wanted, res) @@ -732,7 +731,7 @@ func TestServer_ListValidators_NoPagination(t *testing.T) { StateGen: stategen.New(beaconDB, doublylinkedtree.New()), } - received, err := bs.ListValidators(context.Background(), ðpb.ListValidatorsRequest{}) + received, err := bs.ListValidators(t.Context(), ðpb.ListValidatorsRequest{}) require.NoError(t, err) assert.DeepSSZEqual(t, want, received.ValidatorList, "Incorrect respond of validators") } @@ -759,7 +758,7 @@ func TestServer_ListValidators_StategenNotUsed(t *testing.T) { }, } - received, err := bs.ListValidators(context.Background(), ðpb.ListValidatorsRequest{}) + received, err := bs.ListValidators(t.Context(), ðpb.ListValidatorsRequest{}) require.NoError(t, err) assert.DeepEqual(t, want, received.ValidatorList, "Incorrect respond of validators") } @@ -806,7 +805,7 @@ func TestServer_ListValidators_IndicesPubKeys(t *testing.T) { Indices: indicesWanted, PublicKeys: pubKeysWanted, } - received, err := bs.ListValidators(context.Background(), req) + received, err := bs.ListValidators(t.Context(), req) require.NoError(t, err) assert.DeepEqual(t, want, received.ValidatorList, "Incorrect respond of validators") } @@ -941,7 +940,7 @@ func TestServer_ListValidators_Pagination(t *testing.T) { TotalSize: int32(count)}}, } for _, test := range tests 
{ - res, err := bs.ListValidators(context.Background(), test.req) + res, err := bs.ListValidators(t.Context(), test.req) require.NoError(t, err) if !proto.Equal(res, test.res) { t.Errorf("Incorrect validator response, wanted %v, received %v", test.res, res) @@ -973,7 +972,7 @@ func TestServer_ListValidators_PaginationOutOfRange(t *testing.T) { req := ðpb.ListValidatorsRequest{PageToken: strconv.Itoa(1), PageSize: 100} wanted := fmt.Sprintf("page start %d >= list %d", req.PageSize, len(validators)) - _, err := bs.ListValidators(context.Background(), req) + _, err := bs.ListValidators(t.Context(), req) assert.ErrorContains(t, wanted, err) } @@ -983,7 +982,7 @@ func TestServer_ListValidators_ExceedsMaxPageSize(t *testing.T) { wanted := fmt.Sprintf("Requested page size %d can not be greater than max size %d", exceedsMax, cmd.Get().MaxRPCPageSize) req := ðpb.ListValidatorsRequest{PageToken: strconv.Itoa(0), PageSize: exceedsMax} - _, err := bs.ListValidators(context.Background(), req) + _, err := bs.ListValidators(t.Context(), req) assert.ErrorContains(t, wanted, err) } @@ -1016,7 +1015,7 @@ func TestServer_ListValidators_DefaultPageSize(t *testing.T) { } req := ðpb.ListValidatorsRequest{} - res, err := bs.ListValidators(context.Background(), req) + res, err := bs.ListValidators(t.Context(), req) require.NoError(t, err) i := 0 @@ -1029,7 +1028,7 @@ func TestServer_ListValidators_FromOldEpoch(t *testing.T) { params.OverrideBeaconConfig(params.BeaconConfig()) transition.SkipSlotCache.Disable() - ctx := context.Background() + ctx := t.Context() slot := primitives.Slot(0) epochs := primitives.Epoch(10) numVals := uint64(10) @@ -1065,7 +1064,7 @@ func TestServer_ListValidators_FromOldEpoch(t *testing.T) { Genesis: true, }, } - res, err := bs.ListValidators(context.Background(), req) + res, err := bs.ListValidators(t.Context(), req) require.NoError(t, err) assert.Equal(t, int(numVals), len(res.ValidatorList)) @@ -1082,7 +1081,7 @@ func TestServer_ListValidators_FromOldEpoch(t 
*testing.T) { Epoch: epochs, }, } - res, err = bs.ListValidators(context.Background(), req) + res, err = bs.ListValidators(t.Context(), req) require.NoError(t, err) require.Equal(t, len(want), len(res.ValidatorList), "incorrect number of validators") @@ -1094,7 +1093,7 @@ func TestServer_ListValidators_ProcessHeadStateSlots(t *testing.T) { params.OverrideBeaconConfig(params.MinimalSpecConfig()) beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() headSlot := primitives.Slot(32) numValidators := params.BeaconConfig().MinGenesisActiveValidatorCount @@ -1144,7 +1143,7 @@ func TestServer_ListValidators_ProcessHeadStateSlots(t *testing.T) { Epoch: 1, }, } - res, err := bs.ListValidators(context.Background(), req) + res, err := bs.ListValidators(t.Context(), req) require.NoError(t, err) assert.Equal(t, len(want), len(res.ValidatorList), "Incorrect number of validators") for i := 0; i < len(res.ValidatorList); i++ { @@ -1223,7 +1222,7 @@ func TestServer_GetValidator(t *testing.T) { } for _, test := range tests { - res, err := bs.GetValidator(context.Background(), test.req) + res, err := bs.GetValidator(t.Context(), test.req) if test.wantedErr != "" { require.ErrorContains(t, test.wantedErr, err) } else { @@ -1236,7 +1235,7 @@ func TestServer_GetValidator(t *testing.T) { func TestServer_GetValidatorActiveSetChanges(t *testing.T) { beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() validators := make([]*ethpb.Validator, 8) headState, err := util.NewBeaconState() require.NoError(t, err) @@ -1364,7 +1363,7 @@ func TestServer_GetValidatorQueue_PendingActivation(t *testing.T) { State: headState, }, } - res, err := bs.GetValidatorQueue(context.Background(), &emptypb.Empty{}) + res, err := bs.GetValidatorQueue(t.Context(), &emptypb.Empty{}) require.NoError(t, err) // We verify the keys are properly sorted by the validators' activation eligibility epoch. 
wanted := [][]byte{ @@ -1372,7 +1371,7 @@ func TestServer_GetValidatorQueue_PendingActivation(t *testing.T) { pubKey(2), pubKey(3), } - activeValidatorCount, err := helpers.ActiveValidatorCount(context.Background(), headState, coreTime.CurrentEpoch(headState)) + activeValidatorCount, err := helpers.ActiveValidatorCount(t.Context(), headState, coreTime.CurrentEpoch(headState)) require.NoError(t, err) wantChurn := helpers.ValidatorActivationChurnLimit(activeValidatorCount) assert.Equal(t, wantChurn, res.ChurnLimit) @@ -1408,12 +1407,12 @@ func TestServer_GetValidatorQueue_ExitedValidatorLeavesQueue(t *testing.T) { } // First we check if validator with index 1 is in the exit queue. - res, err := bs.GetValidatorQueue(context.Background(), &emptypb.Empty{}) + res, err := bs.GetValidatorQueue(t.Context(), &emptypb.Empty{}) require.NoError(t, err) wanted := [][]byte{ bytesutil.PadTo([]byte("2"), 48), } - activeValidatorCount, err := helpers.ActiveValidatorCount(context.Background(), headState, coreTime.CurrentEpoch(headState)) + activeValidatorCount, err := helpers.ActiveValidatorCount(t.Context(), headState, coreTime.CurrentEpoch(headState)) require.NoError(t, err) wantChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount) assert.Equal(t, wantChurn, res.ChurnLimit) @@ -1424,7 +1423,7 @@ func TestServer_GetValidatorQueue_ExitedValidatorLeavesQueue(t *testing.T) { // Now, we move the state.slot past the exit epoch of the validator, and now // the validator should no longer exist in the queue. 
require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(validators[1].ExitEpoch+1)))) - res, err = bs.GetValidatorQueue(context.Background(), &emptypb.Empty{}) + res, err = bs.GetValidatorQueue(t.Context(), &emptypb.Empty{}) require.NoError(t, err) assert.Equal(t, 0, len(res.ExitPublicKeys)) } @@ -1464,7 +1463,7 @@ func TestServer_GetValidatorQueue_PendingExit(t *testing.T) { State: headState, }, } - res, err := bs.GetValidatorQueue(context.Background(), &emptypb.Empty{}) + res, err := bs.GetValidatorQueue(t.Context(), &emptypb.Empty{}) require.NoError(t, err) // We verify the keys are properly sorted by the validators' withdrawable epoch. wanted := [][]byte{ @@ -1472,7 +1471,7 @@ func TestServer_GetValidatorQueue_PendingExit(t *testing.T) { pubKey(2), pubKey(3), } - activeValidatorCount, err := helpers.ActiveValidatorCount(context.Background(), headState, coreTime.CurrentEpoch(headState)) + activeValidatorCount, err := helpers.ActiveValidatorCount(t.Context(), headState, coreTime.CurrentEpoch(headState)) require.NoError(t, err) wantChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount) assert.Equal(t, wantChurn, res.ChurnLimit) @@ -1480,7 +1479,7 @@ func TestServer_GetValidatorQueue_PendingExit(t *testing.T) { } func TestServer_GetValidatorParticipation_CannotRequestFutureEpoch(t *testing.T) { - ctx := context.Background() + ctx := t.Context() headState, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, headState.SetSlot(0)) @@ -1509,7 +1508,7 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpoch(t *testing.T) { helpers.ClearCache() beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() validatorCount := uint64(32) validators := make([]*ethpb.Validator, validatorCount) @@ -1593,7 +1592,7 @@ func TestServer_GetValidatorParticipation_OrphanedUntilGenesis(t *testing.T) { params.OverrideBeaconConfig(params.BeaconConfig()) beaconDB := dbTest.SetupDB(t) - ctx := 
context.Background() + ctx := t.Context() validatorCount := uint64(100) validators := make([]*ethpb.Validator, validatorCount) @@ -1676,7 +1675,7 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpochWithBits(t *testing t.Run("altair", func(t *testing.T) { validatorCount := uint64(32) genState, _ := util.DeterministicGenesisStateAltair(t, validatorCount) - c, err := altair.NextSyncCommittee(context.Background(), genState) + c, err := altair.NextSyncCommittee(t.Context(), genState) require.NoError(t, err) require.NoError(t, genState.SetCurrentSyncCommittee(c)) @@ -1694,7 +1693,7 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpochWithBits(t *testing t.Run("bellatrix", func(t *testing.T) { validatorCount := uint64(32) genState, _ := util.DeterministicGenesisStateBellatrix(t, validatorCount) - c, err := altair.NextSyncCommittee(context.Background(), genState) + c, err := altair.NextSyncCommittee(t.Context(), genState) require.NoError(t, err) require.NoError(t, genState.SetCurrentSyncCommittee(c)) @@ -1712,7 +1711,7 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpochWithBits(t *testing t.Run("capella", func(t *testing.T) { validatorCount := uint64(32) genState, _ := util.DeterministicGenesisStateCapella(t, validatorCount) - c, err := altair.NextSyncCommittee(context.Background(), genState) + c, err := altair.NextSyncCommittee(t.Context(), genState) require.NoError(t, err) require.NoError(t, genState.SetCurrentSyncCommittee(c)) @@ -1732,7 +1731,7 @@ func runGetValidatorParticipationCurrentAndPrevEpoch(t *testing.T, genState stat helpers.ClearCache() beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() validatorCount := uint64(32) gsr, err := genState.HashTreeRoot(ctx) @@ -1800,7 +1799,7 @@ func runGetValidatorParticipationCurrentAndPrevEpoch(t *testing.T, genState stat } func TestGetValidatorPerformance_Syncing(t *testing.T) { - ctx := context.Background() + ctx := t.Context() bs := &Server{ CoreService: 
&core.Service{ @@ -1818,7 +1817,7 @@ func TestGetValidatorPerformance_OK(t *testing.T) { params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.MinimalSpecConfig()) - ctx := context.Background() + ctx := t.Context() epoch := primitives.Epoch(1) headState, err := util.NewBeaconState() require.NoError(t, err) @@ -1894,7 +1893,7 @@ func TestGetValidatorPerformance_OK(t *testing.T) { } func TestGetValidatorPerformance_Indices(t *testing.T) { - ctx := context.Background() + ctx := t.Context() epoch := primitives.Epoch(1) defaultBal := params.BeaconConfig().MaxEffectiveBalance extraBal := params.BeaconConfig().MaxEffectiveBalance + params.BeaconConfig().GweiPerEth @@ -1965,7 +1964,7 @@ func TestGetValidatorPerformance_Indices(t *testing.T) { } func TestGetValidatorPerformance_IndicesPubkeys(t *testing.T) { - ctx := context.Background() + ctx := t.Context() epoch := primitives.Epoch(1) defaultBal := params.BeaconConfig().MaxEffectiveBalance extraBal := params.BeaconConfig().MaxEffectiveBalance + params.BeaconConfig().GweiPerEth @@ -2042,7 +2041,7 @@ func TestGetValidatorPerformanceAltair_OK(t *testing.T) { params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.MinimalSpecConfig()) - ctx := context.Background() + ctx := t.Context() epoch := primitives.Epoch(1) headState, _ := util.DeterministicGenesisStateAltair(t, 32) require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1)))) @@ -2112,7 +2111,7 @@ func TestGetValidatorPerformanceBellatrix_OK(t *testing.T) { params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.MinimalSpecConfig()) - ctx := context.Background() + ctx := t.Context() epoch := primitives.Epoch(1) headState, _ := util.DeterministicGenesisStateBellatrix(t, 32) require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1)))) @@ -2182,7 +2181,7 @@ func TestGetValidatorPerformanceCapella_OK(t *testing.T) { params.SetupTestConfigCleanup(t) 
params.OverrideBeaconConfig(params.MinimalSpecConfig()) - ctx := context.Background() + ctx := t.Context() epoch := primitives.Epoch(1) headState, _ := util.DeterministicGenesisStateCapella(t, 32) require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1)))) @@ -2276,7 +2275,7 @@ func TestServer_GetIndividualVotes_RequestFutureSlot(t *testing.T) { Epoch: slots.ToEpoch(bs.CoreService.GenesisTimeFetcher.CurrentSlot()) + 1, } wanted := errNoEpochInfoError - _, err := bs.GetIndividualVotes(context.Background(), req) + _, err := bs.GetIndividualVotes(t.Context(), req) assert.ErrorContains(t, wanted, err) } @@ -2285,7 +2284,7 @@ func TestServer_GetIndividualVotes_ValidatorsDontExist(t *testing.T) { params.OverrideBeaconConfig(params.MinimalSpecConfig()) beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() var slot primitives.Slot = 0 validators := uint64(64) @@ -2362,7 +2361,7 @@ func TestServer_GetIndividualVotes_Working(t *testing.T) { params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.MinimalSpecConfig()) beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() validators := uint64(32) stateWithValidators, _ := util.DeterministicGenesisState(t, validators) @@ -2443,7 +2442,7 @@ func TestServer_GetIndividualVotes_Working(t *testing.T) { func TestServer_GetIndividualVotes_WorkingAltair(t *testing.T) { helpers.ClearCache() beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() var slot primitives.Slot = 0 validators := uint64(32) @@ -2516,7 +2515,7 @@ func TestServer_GetIndividualVotes_AltairEndOfEpoch(t *testing.T) { params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.BeaconConfig()) beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() validators := uint64(32) beaconState, _ := util.DeterministicGenesisStateAltair(t, validators) @@ -2606,7 +2605,7 @@ func 
TestServer_GetIndividualVotes_BellatrixEndOfEpoch(t *testing.T) { params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.BeaconConfig()) beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() validators := uint64(32) beaconState, _ := util.DeterministicGenesisStateBellatrix(t, validators) @@ -2696,7 +2695,7 @@ func TestServer_GetIndividualVotes_CapellaEndOfEpoch(t *testing.T) { params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.BeaconConfig()) beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() validators := uint64(32) beaconState, _ := util.DeterministicGenesisStateCapella(t, validators) diff --git a/beacon-chain/rpc/prysm/v1alpha1/debug/block_test.go b/beacon-chain/rpc/prysm/v1alpha1/debug/block_test.go index 346a538dbf..a4e4af1a2b 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/debug/block_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/debug/block_test.go @@ -1,7 +1,6 @@ package debug import ( - "context" "testing" "time" @@ -21,7 +20,7 @@ import ( func TestServer_GetBlock(t *testing.T) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() b := util.NewBeaconBlock() b.Block.Slot = 100 @@ -50,7 +49,7 @@ func TestServer_GetBlock(t *testing.T) { func TestServer_GetAttestationInclusionSlot(t *testing.T) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() offset := int64(2 * params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)) bs := &Server{ BeaconDB: db, @@ -61,7 +60,7 @@ func TestServer_GetAttestationInclusionSlot(t *testing.T) { s, _ := util.DeterministicGenesisState(t, 2048) tr := [32]byte{'a'} require.NoError(t, bs.StateGen.SaveState(ctx, tr, s)) - c, err := helpers.BeaconCommitteeFromState(context.Background(), s, 1, 0) + c, err := helpers.BeaconCommitteeFromState(t.Context(), s, 1, 0) require.NoError(t, err) a := ðpb.Attestation{ diff --git a/beacon-chain/rpc/prysm/v1alpha1/debug/p2p_test.go 
b/beacon-chain/rpc/prysm/v1alpha1/debug/p2p_test.go index 603e98bd8f..5086c92017 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/debug/p2p_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/debug/p2p_test.go @@ -1,7 +1,6 @@ package debug import ( - "context" "testing" mockP2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing" @@ -20,7 +19,7 @@ func TestDebugServer_GetPeer(t *testing.T) { } firstPeer := peersProvider.Peers().All()[0] - res, err := ds.GetPeer(context.Background(), ðpb.PeerRequest{PeerId: firstPeer.String()}) + res, err := ds.GetPeer(t.Context(), ðpb.PeerRequest{PeerId: firstPeer.String()}) require.NoError(t, err) require.Equal(t, firstPeer.String(), res.PeerId, "Unexpected peer ID") @@ -36,7 +35,7 @@ func TestDebugServer_ListPeers(t *testing.T) { PeerManager: &mockP2p.MockPeerManager{BHost: mP2P.BHost}, } - res, err := ds.ListPeers(context.Background(), &empty.Empty{}) + res, err := ds.ListPeers(t.Context(), &empty.Empty{}) require.NoError(t, err) assert.Equal(t, 2, len(res.Responses)) diff --git a/beacon-chain/rpc/prysm/v1alpha1/debug/state_test.go b/beacon-chain/rpc/prysm/v1alpha1/debug/state_test.go index c2cd192d38..eca0580016 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/debug/state_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/debug/state_test.go @@ -1,7 +1,6 @@ package debug import ( - "context" "math" "testing" @@ -25,7 +24,7 @@ func addDefaultReplayerBuilder(s *Server, h stategen.HistoryAccessor) { func TestServer_GetBeaconState(t *testing.T) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() st, err := util.NewBeaconState() require.NoError(t, err) slot := primitives.Slot(100) @@ -103,6 +102,6 @@ func TestServer_GetBeaconState_RequestFutureSlot(t *testing.T) { }, } wanted := "Cannot retrieve information about a slot in the future" - _, err := ds.GetBeaconState(context.Background(), req) + _, err := ds.GetBeaconState(t.Context(), req) assert.ErrorContains(t, wanted, err) } diff --git 
a/beacon-chain/rpc/prysm/v1alpha1/node/server_test.go b/beacon-chain/rpc/prysm/v1alpha1/node/server_test.go index 9e9bc6466b..6c615e1cdb 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/node/server_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/node/server_test.go @@ -1,7 +1,6 @@ package node import ( - "context" "errors" "testing" "time" @@ -32,18 +31,18 @@ func TestNodeServer_GetSyncStatus(t *testing.T) { ns := &Server{ SyncChecker: mSync, } - res, err := ns.GetSyncStatus(context.Background(), &emptypb.Empty{}) + res, err := ns.GetSyncStatus(t.Context(), &emptypb.Empty{}) require.NoError(t, err) assert.Equal(t, false, res.Syncing) ns.SyncChecker = &mockSync.Sync{IsSyncing: true} - res, err = ns.GetSyncStatus(context.Background(), &emptypb.Empty{}) + res, err = ns.GetSyncStatus(t.Context(), &emptypb.Empty{}) require.NoError(t, err) assert.Equal(t, true, res.Syncing) } func TestNodeServer_GetGenesis(t *testing.T) { db := dbutil.SetupDB(t) - ctx := context.Background() + ctx := t.Context() addr := common.Address{1, 2, 3} require.NoError(t, db.SaveDepositContractAddress(ctx, addr)) st, err := util.NewBeaconState() @@ -57,7 +56,7 @@ func TestNodeServer_GetGenesis(t *testing.T) { ValidatorsRoot: genValRoot, }, } - res, err := ns.GetGenesis(context.Background(), &emptypb.Empty{}) + res, err := ns.GetGenesis(t.Context(), &emptypb.Empty{}) require.NoError(t, err) assert.DeepEqual(t, addr.Bytes(), res.DepositContractAddress) pUnix := timestamppb.New(time.Unix(0, 0)) @@ -65,7 +64,7 @@ func TestNodeServer_GetGenesis(t *testing.T) { assert.DeepEqual(t, genValRoot[:], res.GenesisValidatorsRoot) ns.GenesisTimeFetcher = &mock.ChainService{Genesis: time.Unix(10, 0)} - res, err = ns.GetGenesis(context.Background(), &emptypb.Empty{}) + res, err = ns.GetGenesis(t.Context(), &emptypb.Empty{}) require.NoError(t, err) pUnix = timestamppb.New(time.Unix(10, 0)) assert.Equal(t, res.GenesisTime.Seconds, pUnix.Seconds) @@ -74,7 +73,7 @@ func TestNodeServer_GetGenesis(t *testing.T) { func 
TestNodeServer_GetVersion(t *testing.T) { v := version.Version() ns := &Server{} - res, err := ns.GetVersion(context.Background(), &emptypb.Empty{}) + res, err := ns.GetVersion(t.Context(), &emptypb.Empty{}) require.NoError(t, err) assert.Equal(t, v, res.Version) } @@ -87,7 +86,7 @@ func TestNodeServer_GetImplementedServices(t *testing.T) { ethpb.RegisterNodeServer(server, ns) reflection.Register(server) - res, err := ns.ListImplementedServices(context.Background(), &emptypb.Empty{}) + res, err := ns.ListImplementedServices(t.Context(), &emptypb.Empty{}) require.NoError(t, err) // Expecting node service and Server reflect. As of grpc, v1.65.0, there are two version of server reflection // Services: [ethereum.eth.v1alpha1.Node grpc.reflection.v1.ServerReflection grpc.reflection.v1alpha.ServerReflection] @@ -112,7 +111,7 @@ func TestNodeServer_GetHost(t *testing.T) { } ethpb.RegisterNodeServer(server, ns) reflection.Register(server) - h, err := ns.GetHost(context.Background(), &emptypb.Empty{}) + h, err := ns.GetHost(t.Context(), &emptypb.Empty{}) require.NoError(t, err) assert.Equal(t, mP2P.PeerID().String(), h.PeerId) assert.Equal(t, stringENR, h.Enr) @@ -127,7 +126,7 @@ func TestNodeServer_GetPeer(t *testing.T) { ethpb.RegisterNodeServer(server, ns) reflection.Register(server) - res, err := ns.GetPeer(context.Background(), ðpb.PeerRequest{PeerId: mockP2p.MockRawPeerId0}) + res, err := ns.GetPeer(t.Context(), ðpb.PeerRequest{PeerId: mockP2p.MockRawPeerId0}) require.NoError(t, err) assert.Equal(t, "16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR" /* first peer's raw id */, res.PeerId, "Unexpected peer ID") assert.Equal(t, int(ethpb.PeerDirection_INBOUND), int(res.Direction), "Expected 1st peer to be an inbound connection") @@ -143,7 +142,7 @@ func TestNodeServer_ListPeers(t *testing.T) { ethpb.RegisterNodeServer(server, ns) reflection.Register(server) - res, err := ns.ListPeers(context.Background(), &emptypb.Empty{}) + res, err := ns.ListPeers(t.Context(), 
&emptypb.Empty{}) require.NoError(t, err) assert.Equal(t, 2, len(res.Peers)) @@ -182,7 +181,7 @@ func TestNodeServer_GetETH1ConnectionStatus(t *testing.T) { ethpb.RegisterNodeServer(server, ns) reflection.Register(server) - res, err := ns.GetETH1ConnectionStatus(context.Background(), &emptypb.Empty{}) + res, err := ns.GetETH1ConnectionStatus(t.Context(), &emptypb.Empty{}) require.NoError(t, err) assert.Equal(t, ep, res.CurrentAddress) assert.Equal(t, errStr, res.CurrentConnectionError) @@ -213,7 +212,7 @@ func TestNodeServer_GetHealth(t *testing.T) { } ethpb.RegisterNodeServer(server, ns) reflection.Register(server) - _, err := ns.GetHealth(context.Background(), ðpb.HealthRequest{SyncingStatus: tt.customStatus}) + _, err := ns.GetHealth(t.Context(), ðpb.HealthRequest{SyncingStatus: tt.customStatus}) if tt.wantedErr == "" { require.NoError(t, err) return diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/aggregator_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/aggregator_test.go index 437315413e..6bb62c397e 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/aggregator_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/aggregator_test.go @@ -29,7 +29,7 @@ import ( ) func TestSubmitAggregateAndProof_Syncing(t *testing.T) { - ctx := context.Background() + ctx := t.Context() s, err := state_native.InitializeFromProtoUnsafePhase0(ðpb.BeaconState{}) require.NoError(t, err) @@ -46,7 +46,7 @@ func TestSubmitAggregateAndProof_Syncing(t *testing.T) { } func TestSubmitAggregateAndProof_CantFindValidatorIndex(t *testing.T) { - ctx := context.Background() + ctx := t.Context() s, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{ RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), @@ -69,7 +69,7 @@ func TestSubmitAggregateAndProof_CantFindValidatorIndex(t *testing.T) { } func TestSubmitAggregateAndProof_IsAggregatorAndNoAtts(t *testing.T) { - ctx := context.Background() + ctx := t.Context() s, err := 
state_native.InitializeFromProtoPhase0(ðpb.BeaconState{ RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector), @@ -105,7 +105,7 @@ func TestSubmitAggregateAndProof_UnaggregateOk(t *testing.T) { c.TargetAggregatorsPerCommittee = 16 params.OverrideBeaconConfig(c) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisState(t, 32) att0, err := generateUnaggregatedAtt(beaconState, 0, privKeys) @@ -140,7 +140,7 @@ func TestSubmitAggregateAndProof_AggregateOk(t *testing.T) { c.TargetAggregatorsPerCommittee = 16 params.OverrideBeaconConfig(c) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisState(t, 32) att0, err := generateAtt(beaconState, 0, privKeys) @@ -186,7 +186,7 @@ func TestSubmitAggregateAndProof_AggregateNotOk(t *testing.T) { c.TargetAggregatorsPerCommittee = 16 params.OverrideBeaconConfig(c) - ctx := context.Background() + ctx := t.Context() beaconState, _ := util.DeterministicGenesisState(t, 32) require.NoError(t, beaconState.SetSlot(beaconState.Slot()+params.BeaconConfig().MinAttestationInclusionDelay)) @@ -222,7 +222,7 @@ func generateAtt(state state.ReadOnlyBeaconState, index uint64, privKeys []bls.S Data: ðpb.AttestationData{CommitteeIndex: 1}, AggregationBits: aggBits, }) - committee, err := helpers.BeaconCommitteeFromState(context.Background(), state, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(context.TODO(), state, att.Data.Slot, att.Data.CommitteeIndex) if err != nil { return nil, err } @@ -261,7 +261,7 @@ func generateUnaggregatedAtt(state state.ReadOnlyBeaconState, index uint64, priv }, AggregationBits: aggBits, }) - committee, err := helpers.BeaconCommitteeFromState(context.Background(), state, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(context.TODO(), state, att.Data.Slot, att.Data.CommitteeIndex) if err != nil { return nil, err } 
@@ -298,7 +298,7 @@ func TestSubmitAggregateAndProof_PreferOwnAttestation(t *testing.T) { c.TargetAggregatorsPerCommittee = 16 params.OverrideBeaconConfig(c) - ctx := context.Background() + ctx := t.Context() // This test creates 3 attestations. 0 and 2 have the same attestation data and can be // aggregated. 1 has the validator's signature making this request and that is the expected @@ -354,7 +354,7 @@ func TestSubmitAggregateAndProof_SelectsMostBitsWhenOwnAttestationNotPresent(t * c.TargetAggregatorsPerCommittee = 16 params.OverrideBeaconConfig(c) - ctx := context.Background() + ctx := t.Context() // This test creates two distinct attestations, neither of which contain the validator's index, // index 0. This test should choose the most bits attestation, att1. @@ -414,7 +414,7 @@ func TestSubmitSignedAggregateSelectionProof_ZeroHashesSignatures(t *testing.T) }, }, } - _, err := aggregatorServer.SubmitSignedAggregateSelectionProof(context.Background(), req) + _, err := aggregatorServer.SubmitSignedAggregateSelectionProof(t.Context(), req) require.ErrorContains(t, "signed signatures can't be zero hashes", err) req = ðpb.SignedAggregateSubmitRequest{ @@ -428,7 +428,7 @@ func TestSubmitSignedAggregateSelectionProof_ZeroHashesSignatures(t *testing.T) }, }, } - _, err = aggregatorServer.SubmitSignedAggregateSelectionProof(context.Background(), req) + _, err = aggregatorServer.SubmitSignedAggregateSelectionProof(t.Context(), req) require.ErrorContains(t, "signed signatures can't be zero hashes", err) } @@ -450,7 +450,7 @@ func TestSubmitSignedAggregateSelectionProof_InvalidSlot(t *testing.T) { }, }, } - _, err := aggregatorServer.SubmitSignedAggregateSelectionProof(context.Background(), req) + _, err := aggregatorServer.SubmitSignedAggregateSelectionProof(t.Context(), req) require.ErrorContains(t, "attestation slot is no longer valid from current time", err) } @@ -475,7 +475,7 @@ func TestSubmitSignedAggregateSelectionProofElectra_ZeroHashesSignatures(t *test }, }, } - 
_, err := aggregatorServer.SubmitSignedAggregateSelectionProofElectra(context.Background(), req) + _, err := aggregatorServer.SubmitSignedAggregateSelectionProofElectra(t.Context(), req) require.ErrorContains(t, "signed signatures can't be zero hashes", err) req = ðpb.SignedAggregateSubmitElectraRequest{ @@ -489,7 +489,7 @@ func TestSubmitSignedAggregateSelectionProofElectra_ZeroHashesSignatures(t *test }, }, } - _, err = aggregatorServer.SubmitSignedAggregateSelectionProofElectra(context.Background(), req) + _, err = aggregatorServer.SubmitSignedAggregateSelectionProofElectra(t.Context(), req) require.ErrorContains(t, "signed signatures can't be zero hashes", err) } @@ -516,7 +516,7 @@ func TestSubmitSignedAggregateSelectionProofElectra_InvalidSlot(t *testing.T) { }, }, } - _, err := aggregatorServer.SubmitSignedAggregateSelectionProofElectra(context.Background(), req) + _, err := aggregatorServer.SubmitSignedAggregateSelectionProofElectra(t.Context(), req) require.ErrorContains(t, "attestation slot is no longer valid from current time", err) } diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/attester_mainnet_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/attester_mainnet_test.go index 2edf21597d..39e620c576 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/attester_mainnet_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/attester_mainnet_test.go @@ -1,7 +1,6 @@ package validator import ( - "context" "testing" "time" @@ -77,7 +76,7 @@ func TestAttestationDataAtSlot_HandlesFarAwayJustifiedEpoch(t *testing.T) { CommitteeIndex: 0, Slot: 10000, } - res, err := attesterServer.GetAttestationData(context.Background(), req) + res, err := attesterServer.GetAttestationData(t.Context(), req) require.NoError(t, err, "Could not get attestation info at slot") expectedInfo := ðpb.AttestationData{ diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/attester_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/attester_test.go index d59c9b24cd..6a59eabe06 
100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/attester_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/attester_test.go @@ -1,7 +1,6 @@ package validator import ( - "context" "math/rand" "sync" "testing" @@ -74,7 +73,7 @@ func TestProposeAttestation(t *testing.T) { Target: ðpb.Checkpoint{Root: make([]byte, 32)}, }, } - _, err = attesterServer.ProposeAttestation(context.Background(), req) + _, err = attesterServer.ProposeAttestation(t.Context(), req) assert.NoError(t, err) }) t.Run("Phase 0 post electra", func(t *testing.T) { @@ -96,7 +95,7 @@ func TestProposeAttestation(t *testing.T) { Target: ðpb.Checkpoint{Root: make([]byte, 32)}, }, } - _, err = attesterServer.ProposeAttestation(context.Background(), req) + _, err = attesterServer.ProposeAttestation(t.Context(), req) assert.ErrorContains(t, "old attestation format", err) }) t.Run("Electra", func(t *testing.T) { @@ -119,7 +118,7 @@ func TestProposeAttestation(t *testing.T) { Target: ðpb.Checkpoint{Root: make([]byte, 32)}, }, } - _, err = attesterServer.ProposeAttestationElectra(context.Background(), req) + _, err = attesterServer.ProposeAttestationElectra(t.Context(), req) assert.NoError(t, err) }) t.Run("Electra att too early", func(t *testing.T) { @@ -131,7 +130,7 @@ func TestProposeAttestation(t *testing.T) { Target: ðpb.Checkpoint{Root: make([]byte, 32)}, }, } - _, err = attesterServer.ProposeAttestationElectra(context.Background(), req) + _, err = attesterServer.ProposeAttestationElectra(t.Context(), req) assert.ErrorContains(t, "ProposeAttestationElectra not supported yet", err) }) } @@ -146,7 +145,7 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) { req := util.HydrateAttestation(ðpb.Attestation{}) wanted := "Incorrect attestation signature" - _, err := attesterServer.ProposeAttestation(context.Background(), req) + _, err := attesterServer.ProposeAttestation(t.Context(), req) assert.ErrorContains(t, wanted, err) } @@ -193,7 +192,7 @@ func TestGetAttestationData_OK(t 
*testing.T) { CommitteeIndex: 0, Slot: 3*params.BeaconConfig().SlotsPerEpoch + 1, } - res, err := attesterServer.GetAttestationData(context.Background(), req) + res, err := attesterServer.GetAttestationData(t.Context(), req) require.NoError(t, err, "Could not get attestation info at slot") expectedInfo := ðpb.AttestationData{ @@ -262,7 +261,7 @@ func BenchmarkGetAttestationDataConcurrent(b *testing.B) { for j := 0; j < 5000; j++ { go func() { defer wg.Done() - _, err := attesterServer.GetAttestationData(context.Background(), req) + _, err := attesterServer.GetAttestationData(b.Context(), req) require.NoError(b, err, "Could not get attestation info at slot") }() } @@ -276,7 +275,7 @@ func TestGetAttestationData_SyncNotReady(t *testing.T) { as := Server{ SyncChecker: &mockSync.Sync{IsSyncing: true}, } - _, err := as.GetAttestationData(context.Background(), ðpb.AttestationDataRequest{}) + _, err := as.GetAttestationData(t.Context(), ðpb.AttestationDataRequest{}) assert.ErrorContains(t, "Syncing to latest head", err) } @@ -297,7 +296,7 @@ func TestGetAttestationData_Optimistic(t *testing.T) { OptimisticModeFetcher: &mock.ChainService{Optimistic: true}, }, } - _, err := as.GetAttestationData(context.Background(), ðpb.AttestationDataRequest{}) + _, err := as.GetAttestationData(t.Context(), ðpb.AttestationDataRequest{}) s, ok := status.FromError(err) require.Equal(t, true, ok) require.DeepEqual(t, codes.Unavailable, s.Code()) @@ -317,12 +316,12 @@ func TestGetAttestationData_Optimistic(t *testing.T) { OptimisticModeFetcher: &mock.ChainService{Optimistic: false}, }, } - _, err = as.GetAttestationData(context.Background(), ðpb.AttestationDataRequest{}) + _, err = as.GetAttestationData(t.Context(), ðpb.AttestationDataRequest{}) require.NoError(t, err) } func TestServer_GetAttestationData_InvalidRequestSlot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() slot := 3*params.BeaconConfig().SlotsPerEpoch + 1 offset := 
int64(slot.Mul(params.BeaconConfig().SecondsPerSlot)) @@ -344,7 +343,7 @@ func TestServer_GetAttestationData_InvalidRequestSlot(t *testing.T) { } func TestServer_GetAttestationData_RequestSlotIsDifferentThanCurrentSlot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := dbutil.SetupDB(t) slot := 3*params.BeaconConfig().SlotsPerEpoch + 1 @@ -434,7 +433,7 @@ func TestGetAttestationData_SucceedsInFirstEpoch(t *testing.T) { CommitteeIndex: 0, Slot: 5, } - res, err := attesterServer.GetAttestationData(context.Background(), req) + res, err := attesterServer.GetAttestationData(t.Context(), req) require.NoError(t, err, "Could not get attestation info at slot") expectedInfo := ðpb.AttestationData{ @@ -503,7 +502,7 @@ func TestGetAttestationData_CommitteeIndexIsZeroPostElectra(t *testing.T) { CommitteeIndex: 123, // set non-zero committee index Slot: 3*params.BeaconConfig().SlotsPerEpoch + 1, } - res, err := attesterServer.GetAttestationData(context.Background(), req) + res, err := attesterServer.GetAttestationData(t.Context(), req) require.NoError(t, err) expected := ðpb.AttestationData{ @@ -531,7 +530,7 @@ func TestServer_SubscribeCommitteeSubnets_NoSlots(t *testing.T) { OperationNotifier: (&mock.ChainService{}).OperationNotifier(), } - _, err := attesterServer.SubscribeCommitteeSubnets(context.Background(), ðpb.CommitteeSubnetsSubscribeRequest{ + _, err := attesterServer.SubscribeCommitteeSubnets(t.Context(), ðpb.CommitteeSubnetsSubscribeRequest{ Slots: nil, CommitteeIds: nil, IsAggregator: nil, @@ -564,7 +563,7 @@ func TestServer_SubscribeCommitteeSubnets_DifferentLengthSlots(t *testing.T) { ss = append(ss, 321) - _, err := attesterServer.SubscribeCommitteeSubnets(context.Background(), ðpb.CommitteeSubnetsSubscribeRequest{ + _, err := attesterServer.SubscribeCommitteeSubnets(t.Context(), ðpb.CommitteeSubnetsSubscribeRequest{ Slots: ss, CommitteeIds: comIdxs, IsAggregator: isAggregator, @@ -607,7 +606,7 @@ func 
TestServer_SubscribeCommitteeSubnets_MultipleSlots(t *testing.T) { isAggregator = append(isAggregator, boolVal) } - _, err = attesterServer.SubscribeCommitteeSubnets(context.Background(), ðpb.CommitteeSubnetsSubscribeRequest{ + _, err = attesterServer.SubscribeCommitteeSubnets(t.Context(), ðpb.CommitteeSubnetsSubscribeRequest{ Slots: ss, CommitteeIds: comIdxs, IsAggregator: isAggregator, diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/blocks_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/blocks_test.go index 2247c317d7..ba080992f3 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/blocks_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/blocks_test.go @@ -18,10 +18,12 @@ import ( "github.com/OffchainLabs/prysm/v6/testing/require" "github.com/OffchainLabs/prysm/v6/testing/util" "go.uber.org/mock/gomock" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) func TestServer_StreamAltairBlocksVerified_ContextCanceled(t *testing.T) { - ctx := context.Background() + ctx := t.Context() chainService := &chainMock.ChainService{} ctx, cancel := context.WithCancel(ctx) @@ -47,7 +49,7 @@ func TestServer_StreamAltairBlocksVerified_ContextCanceled(t *testing.T) { } func TestServer_StreamAltairBlocks_ContextCanceled(t *testing.T) { - ctx := context.Background() + ctx := t.Context() chainService := &chainMock.ChainService{} ctx, cancel := context.WithCancel(ctx) @@ -73,7 +75,7 @@ func TestServer_StreamAltairBlocks_ContextCanceled(t *testing.T) { func TestServer_StreamAltairBlocks_OnHeadUpdated(t *testing.T) { params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.BeaconConfig()) - ctx := context.Background() + ctx := t.Context() beaconState, privs := util.DeterministicGenesisStateAltair(t, 64) c, err := altair.NextSyncCommittee(ctx, beaconState) require.NoError(t, err) @@ -98,7 +100,10 @@ func TestServer_StreamAltairBlocks_OnHeadUpdated(t *testing.T) { mockStream.EXPECT().Context().Return(ctx).AnyTimes() go func(tt *testing.T) { - 
assert.NoError(tt, server.StreamBlocksAltair(ðpb.StreamBlocksRequest{}, mockStream), "Could not call RPC method") + err := server.StreamBlocksAltair(ðpb.StreamBlocksRequest{}, mockStream) + if s, _ := status.FromError(err); s.Code() != codes.Canceled { + assert.NoError(tt, err) + } }(t) wrappedBlk, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) @@ -115,7 +120,7 @@ func TestServer_StreamAltairBlocks_OnHeadUpdated(t *testing.T) { func TestServer_StreamCapellaBlocks_OnHeadUpdated(t *testing.T) { params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.BeaconConfig()) - ctx := context.Background() + ctx := t.Context() beaconState, privs := util.DeterministicGenesisStateCapella(t, 64) c, err := altair.NextSyncCommittee(ctx, beaconState) require.NoError(t, err) @@ -140,7 +145,10 @@ func TestServer_StreamCapellaBlocks_OnHeadUpdated(t *testing.T) { mockStream.EXPECT().Context().Return(ctx).AnyTimes() go func(tt *testing.T) { - assert.NoError(tt, server.StreamBlocksAltair(ðpb.StreamBlocksRequest{}, mockStream), "Could not call RPC method") + err := server.StreamBlocksAltair(ðpb.StreamBlocksRequest{}, mockStream) + if s, _ := status.FromError(err); s.Code() != codes.Canceled { + assert.NoError(tt, err) + } }(t) wrappedBlk, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) @@ -156,7 +164,7 @@ func TestServer_StreamCapellaBlocks_OnHeadUpdated(t *testing.T) { func TestServer_StreamAltairBlocksVerified_OnHeadUpdated(t *testing.T) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() beaconState, privs := util.DeterministicGenesisStateAltair(t, 32) c, err := altair.NextSyncCommittee(ctx, beaconState) require.NoError(t, err) @@ -183,9 +191,10 @@ func TestServer_StreamAltairBlocksVerified_OnHeadUpdated(t *testing.T) { mockStream.EXPECT().Context().Return(ctx).AnyTimes() go func(tt *testing.T) { - assert.NoError(tt, server.StreamBlocksAltair(ðpb.StreamBlocksRequest{ - VerifiedOnly: true, - }, mockStream), "Could not call 
RPC method") + err := server.StreamBlocksAltair(ðpb.StreamBlocksRequest{VerifiedOnly: true}, mockStream) + if s, _ := status.FromError(err); s.Code() != codes.Canceled { + assert.NoError(tt, err) + } }(t) // Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed). for sent := 0; sent == 0; { @@ -199,7 +208,7 @@ func TestServer_StreamAltairBlocksVerified_OnHeadUpdated(t *testing.T) { func TestServer_StreamCapellaBlocksVerified_OnHeadUpdated(t *testing.T) { db := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() beaconState, privs := util.DeterministicGenesisStateCapella(t, 32) c, err := altair.NextSyncCommittee(ctx, beaconState) require.NoError(t, err) @@ -226,9 +235,10 @@ func TestServer_StreamCapellaBlocksVerified_OnHeadUpdated(t *testing.T) { mockStream.EXPECT().Context().Return(ctx).AnyTimes() go func(tt *testing.T) { - assert.NoError(tt, server.StreamBlocksAltair(ðpb.StreamBlocksRequest{ - VerifiedOnly: true, - }, mockStream), "Could not call RPC method") + err := server.StreamBlocksAltair(ðpb.StreamBlocksRequest{VerifiedOnly: true}, mockStream) + if s, _ := status.FromError(err); s.Code() != codes.Canceled { + assert.NoError(tt, err) + } }(t) // Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed). 
for sent := 0; sent == 0; { @@ -241,7 +251,7 @@ func TestServer_StreamCapellaBlocksVerified_OnHeadUpdated(t *testing.T) { } func TestServer_StreamSlotsVerified_ContextCanceled(t *testing.T) { - ctx := context.Background() + ctx := t.Context() chainService := &chainMock.ChainService{} ctx, cancel := context.WithCancel(ctx) @@ -267,7 +277,7 @@ func TestServer_StreamSlotsVerified_ContextCanceled(t *testing.T) { } func TestServer_StreamSlots_ContextCanceled(t *testing.T) { - ctx := context.Background() + ctx := t.Context() chainService := &chainMock.ChainService{} ctx, cancel := context.WithCancel(ctx) @@ -293,7 +303,7 @@ func TestServer_StreamSlots_ContextCanceled(t *testing.T) { func TestServer_StreamSlots_OnHeadUpdated(t *testing.T) { params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.BeaconConfig()) - ctx := context.Background() + ctx := t.Context() chainService := &chainMock.ChainService{} server := &Server{ @@ -316,7 +326,10 @@ func TestServer_StreamSlots_OnHeadUpdated(t *testing.T) { mockStream.EXPECT().Context().Return(ctx).AnyTimes() go func(tt *testing.T) { - assert.NoError(tt, server.StreamSlots(ðpb.StreamSlotsRequest{}, mockStream), "Could not call RPC method") + err := server.StreamSlots(ðpb.StreamSlotsRequest{}, mockStream) + if s, _ := status.FromError(err); s.Code() != codes.Canceled { + assert.NoError(tt, err) + } }(t) wrappedBlk, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 123, Body: ðpb.BeaconBlockBody{}}}) require.NoError(t, err) @@ -331,7 +344,7 @@ func TestServer_StreamSlots_OnHeadUpdated(t *testing.T) { } func TestServer_StreamSlotsVerified_OnHeadUpdated(t *testing.T) { - ctx := context.Background() + ctx := t.Context() chainService := &chainMock.ChainService{} server := &Server{ Ctx: ctx, @@ -352,9 +365,10 @@ func TestServer_StreamSlotsVerified_OnHeadUpdated(t *testing.T) { mockStream.EXPECT().Context().Return(ctx).AnyTimes() go func(tt *testing.T) { - assert.NoError(tt, 
server.StreamSlots(ðpb.StreamSlotsRequest{ - VerifiedOnly: true, - }, mockStream), "Could not call RPC method") + err := server.StreamSlots(ðpb.StreamSlotsRequest{VerifiedOnly: true}, mockStream) + if s, _ := status.FromError(err); s.Code() != codes.Canceled { + assert.NoError(tt, err) + } }(t) wrappedBlk, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 123, Body: ðpb.BeaconBlockBody{}}}) require.NoError(t, err) diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/duties_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/duties_test.go index ce13bd0e7f..4de0d87098 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/duties_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/duties_test.go @@ -1,7 +1,6 @@ package validator import ( - "context" "encoding/binary" "testing" "time" @@ -39,7 +38,7 @@ func TestGetDuties_OK(t *testing.T) { require.NoError(t, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - bs, err := transition.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err, "Could not setup genesis bs") genesisRoot, err := genesis.Block.HashTreeRoot() require.NoError(t, err, "Could not get signing root") @@ -66,7 +65,7 @@ func TestGetDuties_OK(t *testing.T) { req := ðpb.DutiesRequest{ PublicKeys: [][]byte{deposits[0].Data.PublicKey}, } - res, err := vs.GetDuties(context.Background(), req) + res, err := vs.GetDuties(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch { t.Errorf("Assigned slot %d can't be higher than %d", @@ -78,7 +77,7 @@ func TestGetDuties_OK(t *testing.T) { req = ðpb.DutiesRequest{ PublicKeys: [][]byte{deposits[lastValidatorIndex].Data.PublicKey}, } - res, err = vs.GetDuties(context.Background(), req) + res, err = 
vs.GetDuties(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch { t.Errorf("Assigned slot %d can't be higher than %d", @@ -90,7 +89,7 @@ func TestGetDuties_OK(t *testing.T) { PublicKeys: pubKeys, Epoch: 0, } - res, err = vs.GetDuties(context.Background(), req) + res, err = vs.GetDuties(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") for i := 0; i < len(res.CurrentEpochDuties); i++ { assert.Equal(t, primitives.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex) @@ -108,7 +107,7 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) { require.NoError(t, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - bs, err := util.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := util.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err, "Could not setup genesis bs") h := ðpb.BeaconBlockHeader{ StateRoot: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength), @@ -119,7 +118,7 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) { genesisRoot, err := genesis.Block.HashTreeRoot() require.NoError(t, err, "Could not get signing root") - syncCommittee, err := altair.NextSyncCommittee(context.Background(), bs) + syncCommittee, err := altair.NextSyncCommittee(t.Context(), bs) require.NoError(t, err) require.NoError(t, bs.SetCurrentSyncCommittee(syncCommittee)) pubKeys := make([][]byte, len(deposits)) @@ -153,7 +152,7 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) { req := ðpb.DutiesRequest{ PublicKeys: [][]byte{deposits[0].Data.PublicKey}, } - res, err := vs.GetDuties(context.Background(), req) + res, err := vs.GetDuties(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch { 
t.Errorf("Assigned slot %d can't be higher than %d", @@ -165,7 +164,7 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) { req = ðpb.DutiesRequest{ PublicKeys: [][]byte{deposits[lastValidatorIndex].Data.PublicKey}, } - res, err = vs.GetDuties(context.Background(), req) + res, err = vs.GetDuties(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch { t.Errorf("Assigned slot %d can't be higher than %d", @@ -177,7 +176,7 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) { PublicKeys: pubKeys, Epoch: 0, } - res, err = vs.GetDuties(context.Background(), req) + res, err = vs.GetDuties(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") for i := 0; i < len(res.CurrentEpochDuties); i++ { require.Equal(t, primitives.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex) @@ -193,7 +192,7 @@ func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) { PublicKeys: pubKeys, Epoch: params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1, } - res, err = vs.GetDuties(context.Background(), req) + res, err = vs.GetDuties(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") for i := 0; i < len(res.CurrentEpochDuties); i++ { require.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee) @@ -212,7 +211,7 @@ func TestGetBellatrixDuties_SyncCommitteeOK(t *testing.T) { require.NoError(t, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - bs, err := util.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := util.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) h := ðpb.BeaconBlockHeader{ StateRoot: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength), ParentRoot: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength), @@ -223,7 +222,7 @@ func 
TestGetBellatrixDuties_SyncCommitteeOK(t *testing.T) { genesisRoot, err := genesis.Block.HashTreeRoot() require.NoError(t, err, "Could not get signing root") - syncCommittee, err := altair.NextSyncCommittee(context.Background(), bs) + syncCommittee, err := altair.NextSyncCommittee(t.Context(), bs) require.NoError(t, err) require.NoError(t, bs.SetCurrentSyncCommittee(syncCommittee)) pubKeys := make([][]byte, len(deposits)) @@ -260,7 +259,7 @@ func TestGetBellatrixDuties_SyncCommitteeOK(t *testing.T) { req := ðpb.DutiesRequest{ PublicKeys: [][]byte{deposits[0].Data.PublicKey}, } - res, err := vs.GetDuties(context.Background(), req) + res, err := vs.GetDuties(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch { t.Errorf("Assigned slot %d can't be higher than %d", @@ -272,7 +271,7 @@ func TestGetBellatrixDuties_SyncCommitteeOK(t *testing.T) { req = ðpb.DutiesRequest{ PublicKeys: [][]byte{deposits[lastValidatorIndex].Data.PublicKey}, } - res, err = vs.GetDuties(context.Background(), req) + res, err = vs.GetDuties(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch { t.Errorf("Assigned slot %d can't be higher than %d", @@ -284,7 +283,7 @@ func TestGetBellatrixDuties_SyncCommitteeOK(t *testing.T) { PublicKeys: pubKeys, Epoch: 0, } - res, err = vs.GetDuties(context.Background(), req) + res, err = vs.GetDuties(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") for i := 0; i < len(res.CurrentEpochDuties); i++ { assert.Equal(t, primitives.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex) @@ -300,7 +299,7 @@ func TestGetBellatrixDuties_SyncCommitteeOK(t *testing.T) { PublicKeys: pubKeys, Epoch: params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1, } - res, err = 
vs.GetDuties(context.Background(), req) + res, err = vs.GetDuties(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") for i := 0; i < len(res.CurrentEpochDuties); i++ { require.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee) @@ -318,7 +317,7 @@ func TestGetAltairDuties_UnknownPubkey(t *testing.T) { require.NoError(t, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - bs, err := util.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := util.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err) h := ðpb.BeaconBlockHeader{ StateRoot: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength), @@ -354,7 +353,7 @@ func TestGetAltairDuties_UnknownPubkey(t *testing.T) { req := ðpb.DutiesRequest{ PublicKeys: [][]byte{unknownPubkey}, } - res, err := vs.GetDuties(context.Background(), req) + res, err := vs.GetDuties(t.Context(), req) require.NoError(t, err) assert.Equal(t, false, res.CurrentEpochDuties[0].IsSyncCommittee) assert.Equal(t, false, res.NextEpochDuties[0].IsSyncCommittee) @@ -371,7 +370,7 @@ func TestGetDuties_SlotOutOfUpperBound(t *testing.T) { req := ðpb.DutiesRequest{ Epoch: primitives.Epoch(chain.CurrentSlot()/params.BeaconConfig().SlotsPerEpoch + 2), } - _, err := vs.duties(context.Background(), req) + _, err := vs.duties(t.Context(), req) require.ErrorContains(t, "can not be greater than next epoch", err) } @@ -382,7 +381,7 @@ func TestGetDuties_CurrentEpoch_ShouldNotFail(t *testing.T) { require.NoError(t, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - bState, err := transition.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bState, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err, "Could not setup genesis state") // Set state to non-epoch start slot. 
require.NoError(t, bState.SetSlot(5)) @@ -412,7 +411,7 @@ func TestGetDuties_CurrentEpoch_ShouldNotFail(t *testing.T) { req := ðpb.DutiesRequest{ PublicKeys: [][]byte{deposits[0].Data.PublicKey}, } - res, err := vs.GetDuties(context.Background(), req) + res, err := vs.GetDuties(t.Context(), req) require.NoError(t, err) assert.Equal(t, 1, len(res.CurrentEpochDuties), "Expected 1 assignment") } @@ -425,7 +424,7 @@ func TestGetDuties_MultipleKeys_OK(t *testing.T) { require.NoError(t, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - bs, err := transition.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err, "Could not setup genesis bs") genesisRoot, err := genesis.Block.HashTreeRoot() require.NoError(t, err, "Could not get signing root") @@ -455,7 +454,7 @@ func TestGetDuties_MultipleKeys_OK(t *testing.T) { req := ðpb.DutiesRequest{ PublicKeys: [][]byte{pubkey0, pubkey1}, } - res, err := vs.GetDuties(context.Background(), req) + res, err := vs.GetDuties(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") assert.Equal(t, 2, len(res.CurrentEpochDuties)) assert.Equal(t, primitives.Slot(4), res.CurrentEpochDuties[0].AttesterSlot) @@ -466,7 +465,7 @@ func TestGetDuties_SyncNotReady(t *testing.T) { vs := &Server{ SyncChecker: &mockSync.Sync{IsSyncing: true}, } - _, err := vs.GetDuties(context.Background(), ðpb.DutiesRequest{}) + _, err := vs.GetDuties(t.Context(), ðpb.DutiesRequest{}) assert.ErrorContains(t, "Syncing to latest head", err) } @@ -478,7 +477,7 @@ func BenchmarkCommitteeAssignment(b *testing.B) { require.NoError(b, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(b, err) - bs, err := transition.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := transition.GenesisBeaconState(b.Context(), deposits, 0, eth1Data) 
require.NoError(b, err, "Could not setup genesis bs") genesisRoot, err := genesis.Block.HashTreeRoot() require.NoError(b, err, "Could not get signing root") @@ -508,7 +507,7 @@ func BenchmarkCommitteeAssignment(b *testing.B) { } b.ResetTimer() for i := 0; i < b.N; i++ { - _, err := vs.GetDuties(context.Background(), req) + _, err := vs.GetDuties(b.Context(), req) assert.NoError(b, err) } } diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/duties_v2_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/duties_v2_test.go index 08e79ccbcd..efc689bf43 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/duties_v2_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/duties_v2_test.go @@ -1,7 +1,6 @@ package validator import ( - "context" "testing" "time" @@ -33,7 +32,7 @@ func TestGetDutiesV2_OK(t *testing.T) { require.NoError(t, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - bs, err := transition.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err, "Could not setup genesis bs") genesisRoot, err := genesis.Block.HashTreeRoot() require.NoError(t, err, "Could not get signing root") @@ -60,7 +59,7 @@ func TestGetDutiesV2_OK(t *testing.T) { req := ðpb.DutiesRequest{ PublicKeys: [][]byte{deposits[0].Data.PublicKey}, } - res, err := vs.GetDutiesV2(context.Background(), req) + res, err := vs.GetDutiesV2(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch { t.Errorf("Assigned slot %d can't be higher than %d", @@ -72,7 +71,7 @@ func TestGetDutiesV2_OK(t *testing.T) { req = ðpb.DutiesRequest{ PublicKeys: [][]byte{deposits[lastValidatorIndex].Data.PublicKey}, } - res, err = vs.GetDutiesV2(context.Background(), req) + res, err = vs.GetDutiesV2(t.Context(), req) require.NoError(t, err, "Could not 
call epoch committee assignment") if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch { t.Errorf("Assigned slot %d can't be higher than %d", @@ -84,7 +83,7 @@ func TestGetDutiesV2_OK(t *testing.T) { PublicKeys: pubKeys, Epoch: 0, } - res, err = vs.GetDutiesV2(context.Background(), req) + res, err = vs.GetDutiesV2(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") for i := 0; i < len(res.CurrentEpochDuties); i++ { assert.Equal(t, primitives.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex) @@ -102,7 +101,7 @@ func TestGetAltairDutiesV2_SyncCommitteeOK(t *testing.T) { require.NoError(t, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - bs, err := util.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := util.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err, "Could not setup genesis bs") h := ðpb.BeaconBlockHeader{ StateRoot: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength), @@ -113,7 +112,7 @@ func TestGetAltairDutiesV2_SyncCommitteeOK(t *testing.T) { genesisRoot, err := genesis.Block.HashTreeRoot() require.NoError(t, err, "Could not get signing root") - syncCommittee, err := altair.NextSyncCommittee(context.Background(), bs) + syncCommittee, err := altair.NextSyncCommittee(t.Context(), bs) require.NoError(t, err) require.NoError(t, bs.SetCurrentSyncCommittee(syncCommittee)) pubKeys := make([][]byte, len(deposits)) @@ -147,7 +146,7 @@ func TestGetAltairDutiesV2_SyncCommitteeOK(t *testing.T) { req := ðpb.DutiesRequest{ PublicKeys: [][]byte{deposits[0].Data.PublicKey}, } - res, err := vs.GetDutiesV2(context.Background(), req) + res, err := vs.GetDutiesV2(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch { t.Errorf("Assigned slot %d can't be higher than %d", @@ 
-159,7 +158,7 @@ func TestGetAltairDutiesV2_SyncCommitteeOK(t *testing.T) { req = ðpb.DutiesRequest{ PublicKeys: [][]byte{deposits[lastValidatorIndex].Data.PublicKey}, } - res, err = vs.GetDutiesV2(context.Background(), req) + res, err = vs.GetDutiesV2(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch { t.Errorf("Assigned slot %d can't be higher than %d", @@ -171,7 +170,7 @@ func TestGetAltairDutiesV2_SyncCommitteeOK(t *testing.T) { PublicKeys: pubKeys, Epoch: 0, } - res, err = vs.GetDutiesV2(context.Background(), req) + res, err = vs.GetDutiesV2(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") for i := 0; i < len(res.CurrentEpochDuties); i++ { require.Equal(t, primitives.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex) @@ -187,7 +186,7 @@ func TestGetAltairDutiesV2_SyncCommitteeOK(t *testing.T) { PublicKeys: pubKeys, Epoch: params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1, } - res, err = vs.GetDutiesV2(context.Background(), req) + res, err = vs.GetDutiesV2(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") for i := 0; i < len(res.CurrentEpochDuties); i++ { require.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee) @@ -206,7 +205,7 @@ func TestGetBellatrixDutiesV2_SyncCommitteeOK(t *testing.T) { require.NoError(t, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - bs, err := util.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := util.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) h := ðpb.BeaconBlockHeader{ StateRoot: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength), ParentRoot: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength), @@ -217,7 +216,7 @@ func TestGetBellatrixDutiesV2_SyncCommitteeOK(t *testing.T) { genesisRoot, err 
:= genesis.Block.HashTreeRoot() require.NoError(t, err, "Could not get signing root") - syncCommittee, err := altair.NextSyncCommittee(context.Background(), bs) + syncCommittee, err := altair.NextSyncCommittee(t.Context(), bs) require.NoError(t, err) require.NoError(t, bs.SetCurrentSyncCommittee(syncCommittee)) pubKeys := make([][]byte, len(deposits)) @@ -254,7 +253,7 @@ func TestGetBellatrixDutiesV2_SyncCommitteeOK(t *testing.T) { req := ðpb.DutiesRequest{ PublicKeys: [][]byte{deposits[0].Data.PublicKey}, } - res, err := vs.GetDutiesV2(context.Background(), req) + res, err := vs.GetDutiesV2(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch { t.Errorf("Assigned slot %d can't be higher than %d", @@ -266,7 +265,7 @@ func TestGetBellatrixDutiesV2_SyncCommitteeOK(t *testing.T) { req = ðpb.DutiesRequest{ PublicKeys: [][]byte{deposits[lastValidatorIndex].Data.PublicKey}, } - res, err = vs.GetDutiesV2(context.Background(), req) + res, err = vs.GetDutiesV2(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch { t.Errorf("Assigned slot %d can't be higher than %d", @@ -278,7 +277,7 @@ func TestGetBellatrixDutiesV2_SyncCommitteeOK(t *testing.T) { PublicKeys: pubKeys, Epoch: 0, } - res, err = vs.GetDutiesV2(context.Background(), req) + res, err = vs.GetDutiesV2(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") for i := 0; i < len(res.CurrentEpochDuties); i++ { assert.Equal(t, primitives.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex) @@ -294,7 +293,7 @@ func TestGetBellatrixDutiesV2_SyncCommitteeOK(t *testing.T) { PublicKeys: pubKeys, Epoch: params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1, } - res, err = vs.GetDutiesV2(context.Background(), req) + res, err = 
vs.GetDutiesV2(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") for i := 0; i < len(res.CurrentEpochDuties); i++ { require.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee) @@ -312,7 +311,7 @@ func TestGetAltairDutiesV2_UnknownPubkey(t *testing.T) { require.NoError(t, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - bs, err := util.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := util.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err) h := ðpb.BeaconBlockHeader{ StateRoot: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength), @@ -349,7 +348,7 @@ func TestGetAltairDutiesV2_UnknownPubkey(t *testing.T) { req := ðpb.DutiesRequest{ PublicKeys: [][]byte{unknownPubkey}, } - res, err := vs.GetDutiesV2(context.Background(), req) + res, err := vs.GetDutiesV2(t.Context(), req) require.NoError(t, err) assert.Equal(t, false, res.CurrentEpochDuties[0].IsSyncCommittee) assert.Equal(t, false, res.NextEpochDuties[0].IsSyncCommittee) @@ -391,7 +390,7 @@ func TestGetDutiesV2_StateAdvancement(t *testing.T) { } // Verify state processing occurs - res, err := vs.GetDutiesV2(context.Background(), req) + res, err := vs.GetDutiesV2(t.Context(), req) require.NoError(t, err) require.NotNil(t, res) } @@ -408,7 +407,7 @@ func TestGetDutiesV2_SlotOutOfUpperBound(t *testing.T) { req := ðpb.DutiesRequest{ Epoch: primitives.Epoch(chain.CurrentSlot()/params.BeaconConfig().SlotsPerEpoch + 2), } - _, err := vs.GetDutiesV2(context.Background(), req) + _, err := vs.GetDutiesV2(t.Context(), req) require.ErrorContains(t, "can not be greater than next epoch", err) } @@ -419,7 +418,7 @@ func TestGetDutiesV2_CurrentEpoch_ShouldNotFail(t *testing.T) { require.NoError(t, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - bState, err := transition.GenesisBeaconState(context.Background(), 
deposits, 0, eth1Data) + bState, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err, "Could not setup genesis state") // Set state to non-epoch start slot. require.NoError(t, bState.SetSlot(5)) @@ -449,7 +448,7 @@ func TestGetDutiesV2_CurrentEpoch_ShouldNotFail(t *testing.T) { req := ðpb.DutiesRequest{ PublicKeys: [][]byte{deposits[0].Data.PublicKey}, } - res, err := vs.GetDutiesV2(context.Background(), req) + res, err := vs.GetDutiesV2(t.Context(), req) require.NoError(t, err) assert.Equal(t, 1, len(res.CurrentEpochDuties), "Expected 1 assignment") } @@ -462,7 +461,7 @@ func TestGetDutiesV2_MultipleKeys_OK(t *testing.T) { require.NoError(t, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - bs, err := transition.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err, "Could not setup genesis bs") genesisRoot, err := genesis.Block.HashTreeRoot() require.NoError(t, err, "Could not get signing root") @@ -492,7 +491,7 @@ func TestGetDutiesV2_MultipleKeys_OK(t *testing.T) { req := ðpb.DutiesRequest{ PublicKeys: [][]byte{pubkey0, pubkey1}, } - res, err := vs.GetDutiesV2(context.Background(), req) + res, err := vs.GetDutiesV2(t.Context(), req) require.NoError(t, err, "Could not call epoch committee assignment") assert.Equal(t, 2, len(res.CurrentEpochDuties)) assert.Equal(t, primitives.Slot(4), res.CurrentEpochDuties[0].AttesterSlot) @@ -515,10 +514,10 @@ func TestGetDutiesV2_NextSyncCommitteePeriod(t *testing.T) { require.NoError(t, err) eth1Data, err := util.DeterministicEth1Data(len(deposits)) require.NoError(t, err) - st, err := util.GenesisBeaconState(context.Background(), deposits, 0, eth1Data) + st, err := util.GenesisBeaconState(t.Context(), deposits, 0, eth1Data) require.NoError(t, err) - syncCommittee, err := altair.NextSyncCommittee(context.Background(), st) + 
syncCommittee, err := altair.NextSyncCommittee(t.Context(), st) require.NoError(t, err) require.NoError(t, st.SetCurrentSyncCommittee(syncCommittee)) require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*primitives.Slot(boundaryEpoch))) @@ -543,7 +542,7 @@ func TestGetDutiesV2_NextSyncCommitteePeriod(t *testing.T) { SyncChecker: &mockSync.Sync{IsSyncing: false}, } - res, err := vs.GetDutiesV2(context.Background(), req) + res, err := vs.GetDutiesV2(t.Context(), req) require.NoError(t, err) //Verify next epoch duties have updated sync committee status @@ -557,6 +556,6 @@ func TestGetDutiesV2_SyncNotReady(t *testing.T) { vs := &Server{ SyncChecker: &mockSync.Sync{IsSyncing: true}, } - _, err := vs.GetDutiesV2(context.Background(), ðpb.DutiesRequest{}) + _, err := vs.GetDutiesV2(t.Context(), ðpb.DutiesRequest{}) assert.ErrorContains(t, "Syncing to latest head", err) } diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/exit_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/exit_test.go index 6370719db9..89ff60bdaf 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/exit_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/exit_test.go @@ -1,7 +1,6 @@ package validator import ( - "context" "testing" "time" @@ -24,7 +23,7 @@ import ( ) func TestProposeExit_Notification(t *testing.T) { - ctx := context.Background() + ctx := t.Context() deposits, keys, err := util.DeterministicDepositsAndKeys(params.BeaconConfig().MinGenesisActiveValidatorCount) require.NoError(t, err) @@ -66,7 +65,7 @@ func TestProposeExit_Notification(t *testing.T) { req.Signature, err = signing.ComputeDomainAndSign(beaconState, epoch, req.Exit, params.BeaconConfig().DomainVoluntaryExit, keys[0]) require.NoError(t, err) - resp, err := server.ProposeExit(context.Background(), req) + resp, err := server.ProposeExit(t.Context(), req) require.NoError(t, err) expectedRoot, err := req.Exit.HashTreeRoot() require.NoError(t, err) @@ -91,7 +90,7 @@ func TestProposeExit_Notification(t 
*testing.T) { } func TestProposeExit_NoPanic(t *testing.T) { - ctx := context.Background() + ctx := t.Context() deposits, keys, err := util.DeterministicDepositsAndKeys(params.BeaconConfig().MinGenesisActiveValidatorCount) require.NoError(t, err) @@ -123,7 +122,7 @@ func TestProposeExit_NoPanic(t *testing.T) { defer opSub.Unsubscribe() req := ðpb.SignedVoluntaryExit{} - _, err = server.ProposeExit(context.Background(), req) + _, err = server.ProposeExit(t.Context(), req) require.ErrorContains(t, "voluntary exit does not exist", err, "Expected error for no exit existing") // Send the request, expect a result on the state feed. @@ -135,15 +134,15 @@ func TestProposeExit_NoPanic(t *testing.T) { }, } - _, err = server.ProposeExit(context.Background(), req) + _, err = server.ProposeExit(t.Context(), req) require.ErrorContains(t, "invalid signature provided", err, "Expected error for no signature exists") req.Signature = bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}) - _, err = server.ProposeExit(context.Background(), req) + _, err = server.ProposeExit(t.Context(), req) require.ErrorContains(t, "invalid signature provided", err, "Expected error for invalid signature length") req.Signature, err = signing.ComputeDomainAndSign(beaconState, epoch, req.Exit, params.BeaconConfig().DomainVoluntaryExit, keys[0]) require.NoError(t, err) - resp, err := server.ProposeExit(context.Background(), req) + resp, err := server.ProposeExit(t.Context(), req) require.NoError(t, err) expectedRoot, err := req.Exit.HashTreeRoot() require.NoError(t, err) diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_altair_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_altair_test.go index 231ae7a79b..af3ed84549 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_altair_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_altair_test.go @@ -1,7 +1,6 @@ package validator import ( - "context" "testing" 
"github.com/OffchainLabs/prysm/v6/config/params" @@ -15,7 +14,7 @@ func TestServer_SetSyncAggregate_EmptyCase(t *testing.T) { b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockAltair()) require.NoError(t, err) s := &Server{} // Sever is not initialized with sync committee pool. - s.setSyncAggregate(context.Background(), b) + s.setSyncAggregate(t.Context(), b) agg, err := b.Block().Body().SyncAggregate() require.NoError(t, err) diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_attestations_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_attestations_test.go index e9096bc991..d6269acf06 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_attestations_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_attestations_test.go @@ -2,7 +2,6 @@ package validator import ( "bytes" - "context" "math/rand" "sort" "strconv" @@ -448,7 +447,7 @@ func TestProposer_ProposerAtts_dedup(t *testing.T) { } func Test_packAttestations(t *testing.T) { - ctx := context.Background() + ctx := t.Context() phase0Att := ðpb.Attestation{ AggregationBits: bitfield.Bitlist{0b11111}, Data: ðpb.AttestationData{ @@ -530,7 +529,7 @@ func Test_packAttestations(t *testing.T) { } func TestPackAttestations_ElectraOnChainAggregates(t *testing.T) { - ctx := context.Background() + ctx := t.Context() params.SetupTestConfigCleanup(t) cfg := params.BeaconConfig().Copy() @@ -684,7 +683,7 @@ func sliceCast(atts []*ethpb.AttestationElectra) []ethpb.Att { } func Benchmark_packAttestations_Electra(b *testing.B) { - ctx := context.Background() + ctx := b.Context() params.SetupTestConfigCleanup(b) cfg := params.MainnetConfig() @@ -801,7 +800,7 @@ func Test_filterBatchSignature(t *testing.T) { aBad := util.NewAttestation() pa := proposerAtts(aGood) pa = append(pa, aBad) - aFiltered := pa.filterBatchSignature(context.Background(), st) + aFiltered := pa.filterBatchSignature(t.Context(), st) assert.Equal(t, 1, len(aFiltered)) assert.DeepEqual(t, aGood[0], 
aFiltered[0]) } @@ -922,7 +921,7 @@ func Test_filterCurrentEpochAttestationByForkchoice(t *testing.T) { }, } - ctx := context.Background() + ctx := t.Context() got, err := s.filterCurrentEpochAttestationByForkchoice(ctx, a, epoch) require.NoError(t, err) require.Equal(t, false, got) diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_bellatrix_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_bellatrix_test.go index a53de8d29a..52559233d5 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_bellatrix_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_bellatrix_test.go @@ -1,7 +1,6 @@ package validator import ( - "context" "math" "math/big" "testing" @@ -37,7 +36,7 @@ import ( func TestServer_setExecutionData(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() cfg := params.BeaconConfig().Copy() cfg.BellatrixForkEpoch = 0 cfg.CapellaForkEpoch = 0 @@ -52,11 +51,11 @@ func TestServer_setExecutionData(t *testing.T) { b2pbCapella := util.NewBeaconBlockCapella() b2rCapella, err := b2pbCapella.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, b2pbCapella) + util.SaveBlock(t, t.Context(), beaconDB, b2pbCapella) require.NoError(t, capellaTransitionState.SetFinalizedCheckpoint(ðpb.Checkpoint{ Root: b2rCapella[:], })) - require.NoError(t, beaconDB.SaveFeeRecipientsByValidatorIDs(context.Background(), []primitives.ValidatorIndex{0}, []common.Address{{}})) + require.NoError(t, beaconDB.SaveFeeRecipientsByValidatorIDs(t.Context(), []primitives.ValidatorIndex{0}, []common.Address{{}})) denebTransitionState, _ := util.DeterministicGenesisStateDeneb(t, 1) wrappedHeaderDeneb, err := blocks.WrappedExecutionPayloadHeaderDeneb(&v1.ExecutionPayloadHeaderDeneb{BlockNumber: 2}) @@ -65,7 +64,7 @@ func TestServer_setExecutionData(t *testing.T) { b2pbDeneb := util.NewBeaconBlockDeneb() b2rDeneb, err := b2pbDeneb.Block.HashTreeRoot() require.NoError(t, err) 
- util.SaveBlock(t, context.Background(), beaconDB, b2pbDeneb) + util.SaveBlock(t, t.Context(), beaconDB, b2pbDeneb) require.NoError(t, denebTransitionState.SetFinalizedCheckpoint(ðpb.Checkpoint{ Root: b2rDeneb[:], })) @@ -103,7 +102,7 @@ func TestServer_setExecutionData(t *testing.T) { builderBid, err := vs.getBuilderPayloadAndBlobs(ctx, b.Slot(), b.ProposerIndex(), gasLimit) require.NoError(t, err) require.IsNil(t, builderBid) - _, bundle, err := setExecutionData(context.Background(), blk, res, builderBid, defaultBuilderBoostFactor) + _, bundle, err := setExecutionData(t.Context(), blk, res, builderBid, defaultBuilderBoostFactor) require.NoError(t, err) require.IsNil(t, bundle) e, err := blk.Block().Body().Execution() @@ -172,7 +171,7 @@ func TestServer_setExecutionData(t *testing.T) { require.NoError(t, err) _, err = builderBid.Header() require.NoError(t, err) - _, bundle, err := setExecutionData(context.Background(), blk, res, builderBid, defaultBuilderBoostFactor) + _, bundle, err := setExecutionData(t.Context(), blk, res, builderBid, defaultBuilderBoostFactor) require.NoError(t, err) require.IsNil(t, bundle) e, err := blk.Block().Body().Execution() @@ -244,7 +243,7 @@ func TestServer_setExecutionData(t *testing.T) { require.NoError(t, err) _, err = builderBid.Header() require.NoError(t, err) - _, bundle, err := setExecutionData(context.Background(), blk, res, builderBid, defaultBuilderBoostFactor) + _, bundle, err := setExecutionData(t.Context(), blk, res, builderBid, defaultBuilderBoostFactor) require.NoError(t, err) require.IsNil(t, bundle) e, err := blk.Block().Body().Execution() @@ -315,7 +314,7 @@ func TestServer_setExecutionData(t *testing.T) { require.NoError(t, err) _, err = builderBid.Header() require.NoError(t, err) - _, bundle, err := setExecutionData(context.Background(), blk, res, builderBid, math.MaxUint64) + _, bundle, err := setExecutionData(t.Context(), blk, res, builderBid, math.MaxUint64) require.NoError(t, err) require.IsNil(t, bundle) e, 
err := blk.Block().Body().Execution() @@ -386,7 +385,7 @@ func TestServer_setExecutionData(t *testing.T) { require.NoError(t, err) _, err = builderBid.Header() require.NoError(t, err) - _, bundle, err := setExecutionData(context.Background(), blk, res, builderBid, 0) + _, bundle, err := setExecutionData(t.Context(), blk, res, builderBid, 0) require.NoError(t, err) require.IsNil(t, bundle) e, err := blk.Block().Body().Execution() @@ -407,7 +406,7 @@ func TestServer_setExecutionData(t *testing.T) { require.NoError(t, err) _, err = builderBid.Header() require.NoError(t, err) - _, bundle, err := setExecutionData(context.Background(), blk, res, builderBid, defaultBuilderBoostFactor) + _, bundle, err := setExecutionData(t.Context(), blk, res, builderBid, defaultBuilderBoostFactor) require.NoError(t, err) require.IsNil(t, bundle) e, err := blk.Block().Body().Execution() @@ -434,7 +433,7 @@ func TestServer_setExecutionData(t *testing.T) { require.NoError(t, err) _, err = builderBid.Header() require.NoError(t, err) - _, bundle, err := setExecutionData(context.Background(), blk, res, builderBid, defaultBuilderBoostFactor) + _, bundle, err := setExecutionData(t.Context(), blk, res, builderBid, defaultBuilderBoostFactor) require.NoError(t, err) require.IsNil(t, bundle) e, err := blk.Block().Body().Execution() @@ -464,7 +463,7 @@ func TestServer_setExecutionData(t *testing.T) { require.NoError(t, err) _, err = builderBid.Header() require.NoError(t, err) - _, bundle, err := setExecutionData(context.Background(), blk, res, builderBid, defaultBuilderBoostFactor) + _, bundle, err := setExecutionData(t.Context(), blk, res, builderBid, defaultBuilderBoostFactor) require.NoError(t, err) require.IsNil(t, bundle) e, err := blk.Block().Body().Execution() @@ -490,7 +489,7 @@ func TestServer_setExecutionData(t *testing.T) { builderBid, err := vs.getBuilderPayloadAndBlobs(ctx, b.Slot(), b.ProposerIndex(), gasLimit) require.ErrorIs(t, consensus_types.ErrNilObjectWrapped, err) // Builder 
returns fault. Use local block require.IsNil(t, builderBid) - _, bundle, err := setExecutionData(context.Background(), blk, res, nil, defaultBuilderBoostFactor) + _, bundle, err := setExecutionData(t.Context(), blk, res, nil, defaultBuilderBoostFactor) require.NoError(t, err) require.IsNil(t, bundle) e, err := blk.Block().Body().Execution() @@ -620,7 +619,7 @@ func TestServer_setExecutionData(t *testing.T) { res, err := vs.getLocalPayload(ctx, blk.Block(), denebTransitionState) require.NoError(t, err) - _, bundle, err := setExecutionData(context.Background(), blk, res, builderBid, defaultBuilderBoostFactor) + _, bundle, err := setExecutionData(t.Context(), blk, res, builderBid, defaultBuilderBoostFactor) require.NoError(t, err) require.IsNil(t, bundle) @@ -744,7 +743,7 @@ func TestServer_setExecutionData(t *testing.T) { res, err := vs.getLocalPayload(ctx, blk.Block(), denebTransitionState) require.NoError(t, err) - _, bundle, err := setExecutionData(context.Background(), blk, res, builderBid, defaultBuilderBoostFactor) + _, bundle, err := setExecutionData(t.Context(), blk, res, builderBid, defaultBuilderBoostFactor) require.NoError(t, err) require.IsNil(t, bundle) @@ -1013,7 +1012,7 @@ func TestServer_getPayloadHeader(t *testing.T) { Genesis: genesis, }} regCache := cache.NewRegistrationCache() - regCache.UpdateIndexToRegisteredMap(context.Background(), map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1{ + regCache.UpdateIndexToRegisteredMap(t.Context(), map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1{ 0: { GasLimit: gasLimit, FeeRecipient: make([]byte, 20), @@ -1021,9 +1020,9 @@ func TestServer_getPayloadHeader(t *testing.T) { }, }) tc.mock.RegistrationCache = regCache - hb, err := vs.HeadFetcher.HeadBlock(context.Background()) + hb, err := vs.HeadFetcher.HeadBlock(t.Context()) require.NoError(t, err) - bid, err := vs.getPayloadHeaderFromBuilder(context.Background(), hb.Block().Slot(), 0, 30000000) + bid, err := 
vs.getPayloadHeaderFromBuilder(t.Context(), hb.Block().Slot(), 0, 30000000) if tc.err != "" { require.ErrorContains(t, tc.err, err) } else { diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_builder_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_builder_test.go index ef5234b116..387f3fcc99 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_builder_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_builder_test.go @@ -1,7 +1,6 @@ package validator import ( - "context" "testing" "time" @@ -41,7 +40,7 @@ func TestServer_circuitBreakBuilder(t *testing.T) { ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} - ctx := context.Background() + ctx := t.Context() st, blkRoot, err := createState(1, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc) require.NoError(t, err) require.NoError(t, s.ForkchoiceFetcher.InsertNode(ctx, st, blkRoot)) @@ -73,18 +72,18 @@ func TestServer_circuitBreakBuilder(t *testing.T) { } func TestServer_validatorRegistered(t *testing.T) { - b, err := builder.NewService(context.Background()) + b, err := builder.NewService(t.Context()) require.NoError(t, err) proposerServer := &Server{ BlockBuilder: b, } - ctx := context.Background() + ctx := t.Context() reg, err := proposerServer.validatorRegistered(ctx, 0) require.ErrorContains(t, "nil beacon db", err) require.Equal(t, false, reg) db := dbTest.SetupDB(t) - realBuilder, err := builder.NewService(context.Background(), builder.WithDatabase(db)) + realBuilder, err := builder.NewService(t.Context(), builder.WithDatabase(db)) require.NoError(t, err) proposerServer.BlockBuilder = realBuilder reg, err = proposerServer.validatorRegistered(ctx, 0) @@ -111,11 +110,11 @@ func TestServer_canUseBuilder(t *testing.T) { HasConfigured: false, }, } - reg, err := proposerServer.canUseBuilder(context.Background(), 0, 0) + reg, err := proposerServer.canUseBuilder(t.Context(), 
0, 0) require.NoError(t, err) require.Equal(t, false, reg) - ctx := context.Background() + ctx := t.Context() proposerServer.ForkchoiceFetcher = &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New()} proposerServer.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix())) diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits_test.go index 99e4db8deb..6f51c55782 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits_test.go @@ -1,7 +1,6 @@ package validator import ( - "context" "math/big" "testing" @@ -73,7 +72,7 @@ func TestShouldFallback(t *testing.T) { func TestProposer_PendingDeposits_Electra(t *testing.T) { // Electra continues to pack deposits while the state eth1deposit index is less than the eth1depositIndexLimit - ctx := context.Background() + ctx := t.Context() height := big.NewInt(int64(params.BeaconConfig().Eth1FollowDistance)) newHeight := big.NewInt(height.Int64() + 11000) diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_execution_payload_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_execution_payload_test.go index 0fa0e9353e..62be55ecfa 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_execution_payload_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_execution_payload_test.go @@ -42,7 +42,7 @@ func TestServer_getExecutionPayload(t *testing.T) { b1pb := util.NewBeaconBlock() b1r, err := b1pb.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, b1pb) + util.SaveBlock(t, t.Context(), beaconDB, b1pb) require.NoError(t, nonTransitionSt.SetFinalizedCheckpoint(ðpb.Checkpoint{ Root: b1r[:], })) @@ -54,7 +54,7 @@ func TestServer_getExecutionPayload(t *testing.T) { b2pb := util.NewBeaconBlockBellatrix() b2r, err := b2pb.Block.HashTreeRoot() 
require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, b2pb) + util.SaveBlock(t, t.Context(), beaconDB, b2pb) require.NoError(t, transitionSt.SetFinalizedCheckpoint(ðpb.Checkpoint{ Root: b2r[:], })) @@ -66,7 +66,7 @@ func TestServer_getExecutionPayload(t *testing.T) { b2pbCapella := util.NewBeaconBlockCapella() b2rCapella, err := b2pbCapella.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, b2pbCapella) + util.SaveBlock(t, t.Context(), beaconDB, b2pbCapella) require.NoError(t, capellaTransitionState.SetFinalizedCheckpoint(ðpb.Checkpoint{ Root: b2rCapella[:], })) @@ -165,7 +165,7 @@ func TestServer_getExecutionPayload(t *testing.T) { blk.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32) b, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - res, err := vs.getLocalPayload(context.Background(), b.Block(), tt.st) + res, err := vs.getLocalPayload(t.Context(), b.Block(), tt.st) if tt.errString != "" { require.ErrorContains(t, tt.errString, err) } else { @@ -182,12 +182,12 @@ func TestServer_getExecutionPayloadContextTimeout(t *testing.T) { b1pb := util.NewBeaconBlock() b1r, err := b1pb.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, b1pb) + util.SaveBlock(t, t.Context(), beaconDB, b1pb) require.NoError(t, nonTransitionSt.SetFinalizedCheckpoint(ðpb.Checkpoint{ Root: b1r[:], })) - require.NoError(t, beaconDB.SaveFeeRecipientsByValidatorIDs(context.Background(), []primitives.ValidatorIndex{0}, []common.Address{{}})) + require.NoError(t, beaconDB.SaveFeeRecipientsByValidatorIDs(t.Context(), []primitives.ValidatorIndex{0}, []common.Address{{}})) cfg := params.BeaconConfig().Copy() cfg.TerminalBlockHash = common.Hash{'a'} @@ -211,7 +211,7 @@ func TestServer_getExecutionPayloadContextTimeout(t *testing.T) { blk.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32) b, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - _, err = 
vs.getLocalPayload(context.Background(), b.Block(), nonTransitionSt) + _, err = vs.getLocalPayload(t.Context(), b.Block(), nonTransitionSt) require.NoError(t, err) } @@ -222,7 +222,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) { b1pb := util.NewBeaconBlock() b1r, err := b1pb.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, b1pb) + util.SaveBlock(t, t.Context(), beaconDB, b1pb) require.NoError(t, nonTransitionSt.SetFinalizedCheckpoint(ðpb.Checkpoint{ Root: b1r[:], })) @@ -234,7 +234,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) { b2pb := util.NewBeaconBlockBellatrix() b2r, err := b2pb.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, b2pb) + util.SaveBlock(t, t.Context(), beaconDB, b2pb) require.NoError(t, transitionSt.SetFinalizedCheckpoint(ðpb.Checkpoint{ Root: b2r[:], })) @@ -268,7 +268,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) { blk.Block.ParentRoot = bytesutil.PadTo([]byte{}, 32) b, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - res, err := vs.getLocalPayload(context.Background(), b.Block(), transitionSt) + res, err := vs.getLocalPayload(t.Context(), b.Block(), transitionSt) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, common.Address(res.ExecutionData.FeeRecipient()), feeRecipient) @@ -281,7 +281,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) { payload.FeeRecipient = evilRecipientAddress[:] vs.PayloadIDCache = cache.NewPayloadIDCache() - res, err = vs.getLocalPayload(context.Background(), b.Block(), transitionSt) + res, err = vs.getLocalPayload(t.Context(), b.Block(), transitionSt) require.NoError(t, err) require.NotNil(t, res) @@ -375,7 +375,7 @@ func TestServer_getTerminalBlockHashIfExists(t *testing.T) { BlockByHashMap: m, }, } - b, e, err := vs.getTerminalBlockHashIfExists(context.Background(), 1) 
+ b, e, err := vs.getTerminalBlockHashIfExists(t.Context(), 1) if tt.errString != "" { require.ErrorContains(t, tt.errString, err) require.DeepEqual(t, tt.wantExists, e) diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_slashings_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_slashings_test.go index d5103b9f17..a90ac8c8aa 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_slashings_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_slashings_test.go @@ -1,7 +1,6 @@ package validator import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings" @@ -24,7 +23,7 @@ func TestServer_getSlashings(t *testing.T) { proposerSlashing, err := util.GenerateProposerSlashingForValidator(beaconState, privKeys[i], i) require.NoError(t, err) proposerSlashings[i] = proposerSlashing - err = proposerServer.SlashingsPool.InsertProposerSlashing(context.Background(), beaconState, proposerSlashing) + err = proposerServer.SlashingsPool.InsertProposerSlashing(t.Context(), beaconState, proposerSlashing) require.NoError(t, err) } @@ -37,11 +36,11 @@ func TestServer_getSlashings(t *testing.T) { ) require.NoError(t, err) attSlashings[i] = attesterSlashing - err = proposerServer.SlashingsPool.InsertAttesterSlashing(context.Background(), beaconState, attesterSlashing) + err = proposerServer.SlashingsPool.InsertAttesterSlashing(t.Context(), beaconState, attesterSlashing) require.NoError(t, err) } - p, a := proposerServer.getSlashings(context.Background(), beaconState) + p, a := proposerServer.getSlashings(t.Context(), beaconState) require.Equal(t, len(p), int(params.BeaconConfig().MaxProposerSlashings)) require.Equal(t, len(a), int(params.BeaconConfig().MaxAttesterSlashings)) require.DeepEqual(t, p, proposerSlashings) diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_test.go index f9fef13cd3..aa9b0ca1de 100644 --- 
a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_test.go @@ -59,7 +59,7 @@ import ( func TestServer_GetBeaconBlock_Phase0(t *testing.T) { db := dbutil.SetupDB(t) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisState(t, 64) stateRoot, err := beaconState.HashTreeRoot(ctx) @@ -119,7 +119,7 @@ func TestServer_GetBeaconBlock_Phase0(t *testing.T) { func TestServer_GetBeaconBlock_Altair(t *testing.T) { db := dbutil.SetupDB(t) - ctx := context.Background() + ctx := t.Context() params.SetupTestConfigCleanup(t) cfg := params.BeaconConfig().Copy() @@ -193,7 +193,7 @@ func TestServer_GetBeaconBlock_Altair(t *testing.T) { func TestServer_GetBeaconBlock_Bellatrix(t *testing.T) { db := dbutil.SetupDB(t) - ctx := context.Background() + ctx := t.Context() terminalBlockHash := bytesutil.PadTo([]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 32) @@ -320,7 +320,7 @@ func TestServer_GetBeaconBlock_Bellatrix(t *testing.T) { func TestServer_GetBeaconBlock_Capella(t *testing.T) { db := dbutil.SetupDB(t) - ctx := context.Background() + ctx := t.Context() transition.SkipSlotCache.Disable() params.SetupTestConfigCleanup(t) @@ -435,7 +435,7 @@ func TestServer_GetBeaconBlock_Capella(t *testing.T) { func TestServer_GetBeaconBlock_Deneb(t *testing.T) { db := dbutil.SetupDB(t) - ctx := context.Background() + ctx := t.Context() transition.SkipSlotCache.Disable() params.SetupTestConfigCleanup(t) @@ -558,7 +558,7 @@ func TestServer_GetBeaconBlock_Deneb(t *testing.T) { func TestServer_GetBeaconBlock_Electra(t *testing.T) { db := dbutil.SetupDB(t) - ctx := context.Background() + ctx := t.Context() transition.SkipSlotCache.Disable() params.SetupTestConfigCleanup(t) @@ -686,7 +686,7 @@ func TestServer_GetBeaconBlock_Electra(t 
*testing.T) { func TestServer_GetBeaconBlock_Fulu(t *testing.T) { db := dbutil.SetupDB(t) - ctx := context.Background() + ctx := t.Context() transition.SkipSlotCache.Disable() params.SetupTestConfigCleanup(t) @@ -833,7 +833,7 @@ func TestServer_GetBeaconBlock_Optimistic(t *testing.T) { req := ðpb.BlockRequest{ Slot: bellatrixSlot + 1, } - _, err = proposerServer.GetBeaconBlock(context.Background(), req) + _, err = proposerServer.GetBeaconBlock(t.Context(), req) s, ok := status.FromError(err) require.Equal(t, true, ok) require.DeepEqual(t, codes.Unavailable, s.Code()) @@ -876,7 +876,7 @@ func injectSlashings(t *testing.T, st state.BeaconState, keys []bls.SecretKey, s proposerSlashing, err := util.GenerateProposerSlashingForValidator(st, keys[i], i /* validator index */) require.NoError(t, err) proposerSlashings[i] = proposerSlashing - err = server.SlashingsPool.InsertProposerSlashing(context.Background(), st, proposerSlashing) + err = server.SlashingsPool.InsertProposerSlashing(t.Context(), st, proposerSlashing) require.NoError(t, err) } @@ -887,7 +887,7 @@ func injectSlashings(t *testing.T, st state.BeaconState, keys []bls.SecretKey, s attesterSlashing, ok := generatedAttesterSlashing.(*ethpb.AttesterSlashing) require.Equal(t, true, ok, "Attester slashing has the wrong type (expected %T, got %T)", ðpb.AttesterSlashing{}, generatedAttesterSlashing) attSlashings[i] = attesterSlashing - err = server.SlashingsPool.InsertAttesterSlashing(context.Background(), st, generatedAttesterSlashing.(*ethpb.AttesterSlashing)) + err = server.SlashingsPool.InsertAttesterSlashing(t.Context(), st, generatedAttesterSlashing.(*ethpb.AttesterSlashing)) require.NoError(t, err) } return proposerSlashings, attSlashings @@ -1102,7 +1102,7 @@ func TestProposer_ProposeBlock_OK(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numDeposits := uint64(64) beaconState, _ := util.DeterministicGenesisState(t, numDeposits) 
@@ -1122,7 +1122,7 @@ func TestProposer_ProposeBlock_OK(t *testing.T) { OperationNotifier: c.OperationNotifier(), } blockToPropose := tt.block(bsRoot) - res, err := proposerServer.ProposeBeaconBlock(context.Background(), blockToPropose) + res, err := proposerServer.ProposeBeaconBlock(t.Context(), blockToPropose) if tt.err != "" { // Expecting an error require.ErrorContains(t, tt.err, err) } else { @@ -1137,7 +1137,7 @@ func TestProposer_ProposeBlock_OK(t *testing.T) { func TestProposer_ComputeStateRoot_OK(t *testing.T) { db := dbutil.SetupDB(t) - ctx := context.Background() + ctx := t.Context() beaconState, parentRoot, privKeys := util.DeterministicGenesisStateWithGenesisBlock(t, ctx, db, 100) @@ -1164,12 +1164,12 @@ func TestProposer_ComputeStateRoot_OK(t *testing.T) { wsb, err := blocks.NewSignedBeaconBlock(req) require.NoError(t, err) - _, err = proposerServer.computeStateRoot(context.Background(), wsb) + _, err = proposerServer.computeStateRoot(t.Context(), wsb) require.NoError(t, err) } func TestProposer_PendingDeposits_Eth1DataVoteOK(t *testing.T) { - ctx := context.Background() + ctx := t.Context() height := big.NewInt(int64(params.BeaconConfig().Eth1FollowDistance)) newHeight := big.NewInt(height.Int64() + 11000) @@ -1249,7 +1249,7 @@ func TestProposer_PendingDeposits_Eth1DataVoteOK(t *testing.T) { } func TestProposer_PendingDeposits_OutsideEth1FollowWindow(t *testing.T) { - ctx := context.Background() + ctx := t.Context() height := big.NewInt(int64(params.BeaconConfig().Eth1FollowDistance)) p := &mockExecution.Chain{ @@ -1367,7 +1367,7 @@ func TestProposer_PendingDeposits_OutsideEth1FollowWindow(t *testing.T) { } func TestProposer_PendingDeposits_FollowsCorrectEth1Block(t *testing.T) { - ctx := context.Background() + ctx := t.Context() height := big.NewInt(int64(params.BeaconConfig().Eth1FollowDistance)) newHeight := big.NewInt(height.Int64() + 11000) @@ -1501,7 +1501,7 @@ func TestProposer_PendingDeposits_FollowsCorrectEth1Block(t *testing.T) { } func 
TestProposer_PendingDeposits_CantReturnBelowStateEth1DepositIndex(t *testing.T) { - ctx := context.Background() + ctx := t.Context() height := big.NewInt(int64(params.BeaconConfig().Eth1FollowDistance)) p := &mockExecution.Chain{ LatestBlockNumber: height, @@ -1600,7 +1600,7 @@ func TestProposer_PendingDeposits_CantReturnBelowStateEth1DepositIndex(t *testin } func TestProposer_PendingDeposits_CantReturnMoreThanMax(t *testing.T) { - ctx := context.Background() + ctx := t.Context() height := big.NewInt(int64(params.BeaconConfig().Eth1FollowDistance)) p := &mockExecution.Chain{ @@ -1698,7 +1698,7 @@ func TestProposer_PendingDeposits_CantReturnMoreThanMax(t *testing.T) { } func TestProposer_PendingDeposits_CantReturnMoreThanDepositCount(t *testing.T) { - ctx := context.Background() + ctx := t.Context() height := big.NewInt(int64(params.BeaconConfig().Eth1FollowDistance)) p := &mockExecution.Chain{ @@ -1796,7 +1796,7 @@ func TestProposer_PendingDeposits_CantReturnMoreThanDepositCount(t *testing.T) { } func TestProposer_DepositTrie_UtilizesCachedFinalizedDeposits(t *testing.T) { - ctx := context.Background() + ctx := t.Context() height := big.NewInt(int64(params.BeaconConfig().Eth1FollowDistance)) p := &mockExecution.Chain{ @@ -1912,7 +1912,7 @@ func TestProposer_DepositTrie_UtilizesCachedFinalizedDeposits(t *testing.T) { } func TestProposer_DepositTrie_RebuildTrie(t *testing.T) { - ctx := context.Background() + ctx := t.Context() height := big.NewInt(int64(params.BeaconConfig().Eth1FollowDistance)) p := &mockExecution.Chain{ @@ -2116,7 +2116,7 @@ func TestProposer_ValidateDepositTrie(t *testing.T) { } func TestProposer_Eth1Data_MajorityVote_SpansGenesis(t *testing.T) { - ctx := context.Background() + ctx := t.Context() // Voting period will span genesis, causing the special case for pre-mined genesis to kick in. 
// In other words some part of the valid time range is before genesis, so querying the block cache would fail // without the special case added to allow this for testnets. @@ -2173,7 +2173,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { require.NoError(t, err) root, err := depositTrie.HashTreeRoot() require.NoError(t, err) - assert.NoError(t, depositCache.InsertDeposit(context.Background(), dc.Deposit, dc.Eth1BlockHeight, dc.Index, root)) + assert.NoError(t, depositCache.InsertDeposit(t.Context(), dc.Deposit, dc.Eth1BlockHeight, dc.Index, root)) t.Run("choose highest count", func(t *testing.T) { t.Skip() @@ -2202,7 +2202,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { HeadFetcher: &mock.ChainService{ETH1Data: ðpb.Eth1Data{DepositCount: 1}}, } - ctx := context.Background() + ctx := t.Context() majorityVoteEth1Data, err := ps.eth1DataMajorityVote(ctx, beaconState) require.NoError(t, err) @@ -2238,7 +2238,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { HeadFetcher: &mock.ChainService{ETH1Data: ðpb.Eth1Data{DepositCount: 1}}, } - ctx := context.Background() + ctx := t.Context() majorityVoteEth1Data, err := ps.eth1DataMajorityVote(ctx, beaconState) require.NoError(t, err) @@ -2274,7 +2274,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { HeadFetcher: &mock.ChainService{ETH1Data: ðpb.Eth1Data{DepositCount: 1}}, } - ctx := context.Background() + ctx := t.Context() majorityVoteEth1Data, err := ps.eth1DataMajorityVote(ctx, beaconState) require.NoError(t, err) @@ -2311,7 +2311,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { HeadFetcher: &mock.ChainService{ETH1Data: ðpb.Eth1Data{DepositCount: 1}}, } - ctx := context.Background() + ctx := t.Context() majorityVoteEth1Data, err := ps.eth1DataMajorityVote(ctx, beaconState) require.NoError(t, err) @@ -2348,7 +2348,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { HeadFetcher: &mock.ChainService{ETH1Data: ðpb.Eth1Data{DepositCount: 1}}, } - ctx := 
context.Background() + ctx := t.Context() majorityVoteEth1Data, err := ps.eth1DataMajorityVote(ctx, beaconState) require.NoError(t, err) @@ -2385,7 +2385,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { HeadFetcher: &mock.ChainService{ETH1Data: ðpb.Eth1Data{DepositCount: 1}}, } - ctx := context.Background() + ctx := t.Context() majorityVoteEth1Data, err := ps.eth1DataMajorityVote(ctx, beaconState) require.NoError(t, err) @@ -2415,7 +2415,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { HeadFetcher: &mock.ChainService{ETH1Data: currentEth1Data}, } - ctx := context.Background() + ctx := t.Context() majorityVoteEth1Data, err := ps.eth1DataMajorityVote(ctx, beaconState) require.NoError(t, err) @@ -2450,7 +2450,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { HeadFetcher: &mock.ChainService{ETH1Data: ðpb.Eth1Data{DepositCount: 1}}, } - ctx := context.Background() + ctx := t.Context() majorityVoteEth1Data, err := ps.eth1DataMajorityVote(ctx, beaconState) require.NoError(t, err) @@ -2480,7 +2480,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { HeadFetcher: &mock.ChainService{ETH1Data: ðpb.Eth1Data{DepositCount: 1}}, } - ctx := context.Background() + ctx := t.Context() majorityVoteEth1Data, err := ps.eth1DataMajorityVote(ctx, beaconState) require.NoError(t, err) @@ -2512,7 +2512,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { HeadFetcher: &mock.ChainService{ETH1Data: currentEth1Data}, } - ctx := context.Background() + ctx := t.Context() majorityVoteEth1Data, err := ps.eth1DataMajorityVote(ctx, beaconState) require.NoError(t, err) @@ -2548,7 +2548,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { HeadFetcher: &mock.ChainService{ETH1Data: ðpb.Eth1Data{DepositCount: 1}}, } - ctx := context.Background() + ctx := t.Context() majorityVoteEth1Data, err := ps.eth1DataMajorityVote(ctx, beaconState) require.NoError(t, err) @@ -2585,7 +2585,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { HeadFetcher: 
&mock.ChainService{ETH1Data: ðpb.Eth1Data{DepositCount: 1}}, } - ctx := context.Background() + ctx := t.Context() majorityVoteEth1Data, err := ps.eth1DataMajorityVote(ctx, beaconState) require.NoError(t, err) @@ -2616,7 +2616,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { HeadFetcher: &mock.ChainService{ETH1Data: ðpb.Eth1Data{DepositCount: 1}}, } - ctx := context.Background() + ctx := t.Context() majorityVoteEth1Data, err := ps.eth1DataMajorityVote(ctx, beaconState) require.NoError(t, err) @@ -2650,7 +2650,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { HeadFetcher: &mock.ChainService{ETH1Data: ðpb.Eth1Data{DepositCount: 1}}, } - ctx := context.Background() + ctx := t.Context() majorityVoteEth1Data, err := ps.eth1DataMajorityVote(ctx, beaconState) require.NoError(t, err) @@ -2689,7 +2689,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { HeadFetcher: &mock.ChainService{ETH1Data: ðpb.Eth1Data{DepositCount: 0}}, } - ctx := context.Background() + ctx := t.Context() majorityVoteEth1Data, err := ps.eth1DataMajorityVote(ctx, beaconState) require.NoError(t, err) @@ -2724,7 +2724,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) { DepositFetcher: depositCache, } - ctx := context.Background() + ctx := t.Context() majorityVoteEth1Data, err := ps.eth1DataMajorityVote(ctx, beaconState) require.NoError(t, err) @@ -2789,7 +2789,7 @@ func TestProposer_FilterAttestation(t *testing.T) { }, AggregationBits: bitfield.Bitlist{0b00010010}, }) - committee, err := helpers.BeaconCommitteeFromState(context.Background(), st, atts[i].GetData().Slot, atts[i].GetData().CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), st, atts[i].GetData().Slot, atts[i].GetData().CommitteeIndex) assert.NoError(t, err) attestingIndices, err := attestation.AttestingIndices(atts[i], committee) require.NoError(t, err) @@ -2823,14 +2823,14 @@ func TestProposer_FilterAttestation(t *testing.T) { HeadFetcher: &mock.ChainService{State: st, 
Root: genesisRoot[:]}, } atts := tt.inputAtts() - received := proposerServer.validateAndDeleteAttsInPool(context.Background(), st, atts) + received := proposerServer.validateAndDeleteAttsInPool(t.Context(), st, atts) assert.DeepEqual(t, tt.expectedAtts(atts), received) }) } } func TestProposer_Deposits_ReturnsEmptyList_IfLatestEth1DataEqGenesisEth1Block(t *testing.T) { - ctx := context.Background() + ctx := t.Context() height := big.NewInt(int64(params.BeaconConfig().Eth1FollowDistance)) p := &mockExecution.Chain{ @@ -2947,7 +2947,7 @@ func TestProposer_DeleteAttsInPool_Aggregated(t *testing.T) { aa, err := attaggregation.Aggregate(aggregatedAtts) require.NoError(t, err) - require.NoError(t, s.deleteAttsInPool(context.Background(), append(aa, unaggregatedAtts...))) + require.NoError(t, s.deleteAttsInPool(t.Context(), append(aa, unaggregatedAtts...))) assert.Equal(t, 0, len(s.AttPool.AggregatedAttestations()), "Did not delete aggregated attestation") atts := s.AttPool.UnaggregatedAttestations() assert.Equal(t, 0, len(atts), "Did not delete unaggregated attestation") @@ -2983,15 +2983,15 @@ func TestProposer_GetSyncAggregate_OK(t *testing.T) { require.NoError(t, proposerServer.SyncCommitteePool.SaveSyncCommitteeContribution(cont)) } - aggregate, err := proposerServer.getSyncAggregate(context.Background(), 1, bytesutil.ToBytes32(conts[0].BlockRoot)) + aggregate, err := proposerServer.getSyncAggregate(t.Context(), 1, bytesutil.ToBytes32(conts[0].BlockRoot)) require.NoError(t, err) require.DeepEqual(t, bitfield.Bitvector32{0xf, 0xf, 0xf, 0xf}, aggregate.SyncCommitteeBits) - aggregate, err = proposerServer.getSyncAggregate(context.Background(), 2, bytesutil.ToBytes32(conts[0].BlockRoot)) + aggregate, err = proposerServer.getSyncAggregate(t.Context(), 2, bytesutil.ToBytes32(conts[0].BlockRoot)) require.NoError(t, err) require.DeepEqual(t, bitfield.Bitvector32{0xaa, 0xaa, 0xaa, 0xaa}, aggregate.SyncCommitteeBits) - aggregate, err = 
proposerServer.getSyncAggregate(context.Background(), 3, bytesutil.ToBytes32(conts[0].BlockRoot)) + aggregate, err = proposerServer.getSyncAggregate(t.Context(), 3, bytesutil.ToBytes32(conts[0].BlockRoot)) require.NoError(t, err) require.DeepEqual(t, bitfield.NewBitvector32(), aggregate.SyncCommitteeBits) } @@ -3037,7 +3037,7 @@ func TestProposer_PrepareBeaconProposer(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { db := dbutil.SetupDB(t) - ctx := context.Background() + ctx := t.Context() proposerServer := &Server{ BeaconDB: db, TrackedValidatorsCache: cache.NewTrackedValidatorsCache(), @@ -3062,7 +3062,7 @@ func TestProposer_PrepareBeaconProposer(t *testing.T) { func TestProposer_PrepareBeaconProposerOverlapping(t *testing.T) { hook := logTest.NewGlobal() db := dbutil.SetupDB(t) - ctx := context.Background() + ctx := t.Context() proposerServer := &Server{ BeaconDB: db, TrackedValidatorsCache: cache.NewTrackedValidatorsCache(), @@ -3119,7 +3119,7 @@ func TestProposer_PrepareBeaconProposerOverlapping(t *testing.T) { func BenchmarkServer_PrepareBeaconProposer(b *testing.B) { db := dbutil.SetupDB(b) - ctx := context.Background() + ctx := b.Context() proposerServer := &Server{ BeaconDB: db, TrackedValidatorsCache: cache.NewTrackedValidatorsCache(), @@ -3143,7 +3143,7 @@ func BenchmarkServer_PrepareBeaconProposer(b *testing.B) { } func TestProposer_SubmitValidatorRegistrations(t *testing.T) { - ctx := context.Background() + ctx := t.Context() proposerServer := &Server{} reg := ðpb.SignedValidatorRegistrationsV1{} _, err := proposerServer.SubmitValidatorRegistrations(ctx, reg) @@ -3170,7 +3170,7 @@ func majorityVoteBoundaryTime(slot primitives.Slot) (uint64, uint64) { func TestProposer_GetFeeRecipientByPubKey(t *testing.T) { db := dbutil.SetupDB(t) - ctx := context.Background() + ctx := t.Context() numDeposits := uint64(64) beaconState, _ := util.DeterministicGenesisState(t, numDeposits) bsRoot, err := beaconState.HashTreeRoot(ctx) @@ 
-3210,7 +3210,7 @@ func TestProposer_GetFeeRecipientByPubKey(t *testing.T) { func TestProposer_GetParentHeadState(t *testing.T) { db := dbutil.SetupDB(t) - ctx := context.Background() + ctx := t.Context() parentState, parentRoot, _ := util.DeterministicGenesisStateWithGenesisBlock(t, ctx, db, 100) headState, headRoot, _ := util.DeterministicGenesisStateWithGenesisBlock(t, ctx, db, 50) diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/server_mainnet_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/server_mainnet_test.go index 6837a4f6de..69093b533f 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/server_mainnet_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/server_mainnet_test.go @@ -1,7 +1,6 @@ package validator import ( - "context" "testing" mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing" @@ -25,7 +24,7 @@ func TestWaitForActivation_ValidatorOriginallyExists(t *testing.T) { // This test breaks if it doesn't use mainnet config params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.MainnetConfig()) - ctx := context.Background() + ctx := t.Context() priv1, err := bls.RandKey() require.NoError(t, err) @@ -74,7 +73,7 @@ func TestWaitForActivation_ValidatorOriginallyExists(t *testing.T) { s, err := state_native.InitializeFromProtoUnsafePhase0(beaconState) require.NoError(t, err) vs := &Server{ - Ctx: context.Background(), + Ctx: t.Context(), ChainStartFetcher: &mockExecution.Chain{}, BlockFetcher: &mockExecution.Chain{}, Eth1InfoFetcher: &mockExecution.Chain{}, @@ -88,7 +87,7 @@ func TestWaitForActivation_ValidatorOriginallyExists(t *testing.T) { defer ctrl.Finish() mockChainStream := mock.NewMockBeaconNodeValidator_WaitForActivationServer(ctrl) - mockChainStream.EXPECT().Context().Return(context.Background()) + mockChainStream.EXPECT().Context().Return(t.Context()) mockChainStream.EXPECT().Send( ðpb.ValidatorActivationResponse{ Statuses: []*ethpb.ValidatorActivationResponse_Status{ diff --git 
a/beacon-chain/rpc/prysm/v1alpha1/validator/server_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/server_test.go index 6b5f59a1ff..2e1dece7d7 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/server_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/server_test.go @@ -24,6 +24,8 @@ import ( "github.com/OffchainLabs/prysm/v6/testing/util" logTest "github.com/sirupsen/logrus/hooks/test" "go.uber.org/mock/gomock" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/emptypb" ) @@ -43,7 +45,7 @@ func TestValidatorIndex_OK(t *testing.T) { req := ðpb.ValidatorIndexRequest{ PublicKey: pubKey, } - _, err = Server.ValidatorIndex(context.Background(), req) + _, err = Server.ValidatorIndex(t.Context(), req) assert.NoError(t, err, "Could not get validator index") } @@ -55,7 +57,7 @@ func TestValidatorIndex_StateEmpty(t *testing.T) { req := ðpb.ValidatorIndexRequest{ PublicKey: pubKey, } - _, err := Server.ValidatorIndex(context.Background(), req) + _, err := Server.ValidatorIndex(t.Context(), req) assert.ErrorContains(t, "head state is empty", err) } @@ -69,7 +71,7 @@ func TestWaitForActivation_ContextClosed(t *testing.T) { genesisRoot, err := block.Block.HashTreeRoot() require.NoError(t, err, "Could not get signing root") - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) depositCache, err := depositsnapshot.New() require.NoError(t, err) @@ -88,9 +90,9 @@ func TestWaitForActivation_ContextClosed(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockChainStream := mock.NewMockBeaconNodeValidator_WaitForActivationServer(ctrl) - mockChainStream.EXPECT().Context().Return(context.Background()) + mockChainStream.EXPECT().Context().Return(t.Context()) mockChainStream.EXPECT().Send(gomock.Any()).Return(nil) - mockChainStream.EXPECT().Context().Return(context.Background()) + mockChainStream.EXPECT().Context().Return(t.Context()) exitRoutine := 
make(chan bool) go func(tt *testing.T) { want := "context canceled" @@ -141,7 +143,7 @@ func TestWaitForActivation_MultipleStatuses(t *testing.T) { s, err := state_native.InitializeFromProtoUnsafePhase0(beaconState) require.NoError(t, err) vs := &Server{ - Ctx: context.Background(), + Ctx: t.Context(), ChainStartFetcher: &mockExecution.Chain{}, HeadFetcher: &mockChain.ChainService{State: s, Root: genesisRoot[:]}, } @@ -152,7 +154,7 @@ func TestWaitForActivation_MultipleStatuses(t *testing.T) { defer ctrl.Finish() mockChainStream := mock.NewMockBeaconNodeValidator_WaitForActivationServer(ctrl) - mockChainStream.EXPECT().Context().Return(context.Background()) + mockChainStream.EXPECT().Context().Return(t.Context()) mockChainStream.EXPECT().Send( ðpb.ValidatorActivationResponse{ Statuses: []*ethpb.ValidatorActivationResponse_Status{ @@ -188,7 +190,7 @@ func TestWaitForActivation_MultipleStatuses(t *testing.T) { } func TestWaitForChainStart_ContextClosed(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) chainService := &mockChain.ChainService{} server := &Server{ Ctx: ctx, @@ -223,7 +225,7 @@ func TestWaitForChainStart_AlreadyStarted(t *testing.T) { chainService := &mockChain.ChainService{State: st, ValidatorsRoot: genesisValidatorsRoot} Server := &Server{ - Ctx: context.Background(), + Ctx: t.Context(), ChainStartFetcher: &mockExecution.Chain{ ChainFeed: new(event.Feed), }, @@ -240,7 +242,7 @@ func TestWaitForChainStart_AlreadyStarted(t *testing.T) { GenesisValidatorsRoot: genesisValidatorsRoot[:], }, ).Return(nil) - mockStream.EXPECT().Context().Return(context.Background()) + mockStream.EXPECT().Context().Return(t.Context()) assert.NoError(t, Server.WaitForChainStart(&emptypb.Empty{}, mockStream), "Could not call RPC method") } @@ -249,7 +251,7 @@ func TestWaitForChainStart_HeadStateDoesNotExist(t *testing.T) { chainService := &mockChain.ChainService{State: nil} gs := 
startup.NewClockSynchronizer() Server := &Server{ - Ctx: context.Background(), + Ctx: t.Context(), ChainStartFetcher: &mockExecution.Chain{ ChainFeed: new(event.Feed), }, @@ -260,12 +262,15 @@ func TestWaitForChainStart_HeadStateDoesNotExist(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockStream := mock.NewMockBeaconNodeValidator_WaitForChainStartServer(ctrl) - mockStream.EXPECT().Context().Return(context.Background()) + mockStream.EXPECT().Context().Return(t.Context()) wg := new(sync.WaitGroup) wg.Add(1) go func() { - assert.NoError(t, Server.WaitForChainStart(&emptypb.Empty{}, mockStream), "Could not call RPC method") + err := Server.WaitForChainStart(&emptypb.Empty{}, mockStream) + if s, _ := status.FromError(err); s.Code() != codes.Canceled { + assert.NoError(t, err) + } wg.Done() }() @@ -280,7 +285,7 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) { gs := startup.NewClockSynchronizer() Server := &Server{ - Ctx: context.Background(), + Ctx: t.Context(), ChainStartFetcher: &mockExecution.FaultyExecutionChain{ ChainFeed: new(event.Feed), }, @@ -299,7 +304,7 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) { GenesisValidatorsRoot: genesisValidatorsRoot[:], }, ).Return(nil) - mockStream.EXPECT().Context().Return(context.Background()) + mockStream.EXPECT().Context().Return(t.Context()) go func(tt *testing.T) { assert.NoError(tt, Server.WaitForChainStart(&emptypb.Empty{}, mockStream)) <-exitRoutine @@ -332,12 +337,12 @@ func TestServer_DomainData_Exits(t *testing.T) { s, err := state_native.InitializeFromProtoUnsafeBellatrix(beaconState) require.NoError(t, err) vs := &Server{ - Ctx: context.Background(), + Ctx: t.Context(), ChainStartFetcher: &mockExecution.Chain{}, HeadFetcher: &mockChain.ChainService{State: s, Root: genesisRoot[:]}, } - reqDomain, err := vs.DomainData(context.Background(), ðpb.DomainRequest{ + reqDomain, err := vs.DomainData(t.Context(), ðpb.DomainRequest{ Epoch: 100, Domain: 
params.BeaconConfig().DomainDeposit[:], }) @@ -353,7 +358,7 @@ func TestServer_DomainData_Exits(t *testing.T) { require.NoError(t, err) vs.HeadFetcher = &mockChain.ChainService{State: s, Root: genesisRoot[:]} - reqDomain, err = vs.DomainData(context.Background(), ðpb.DomainRequest{ + reqDomain, err = vs.DomainData(t.Context(), ðpb.DomainRequest{ Epoch: 100, Domain: params.BeaconConfig().DomainVoluntaryExit[:], }) diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/status_mainnet_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/status_mainnet_test.go index 95bf007773..04c40a09b6 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/status_mainnet_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/status_mainnet_test.go @@ -1,7 +1,6 @@ package validator import ( - "context" "encoding/binary" "testing" "time" @@ -25,7 +24,7 @@ func TestValidatorStatus_Active(t *testing.T) { // This test breaks if it doesn't use mainnet config params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.MainnetConfig()) - ctx := context.Background() + ctx := t.Context() pubkey := generatePubkey(1) @@ -82,7 +81,7 @@ func TestValidatorStatus_Active(t *testing.T) { req := ðpb.ValidatorStatusRequest{ PublicKey: pubkey, } - resp, err := vs.ValidatorStatus(context.Background(), req) + resp, err := vs.ValidatorStatus(t.Context(), req) require.NoError(t, err, "Could not get validator status") expected := ðpb.ValidatorStatusResponse{ diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/status_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/status_test.go index c328915fb6..3299d07563 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/status_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/status_test.go @@ -1,7 +1,6 @@ package validator import ( - "context" "reflect" "testing" "time" @@ -31,7 +30,7 @@ import ( ) func TestValidatorStatus_DepositedEth1(t *testing.T) { - ctx := context.Background() + ctx := t.Context() deposits, _, err := 
util.DeterministicDepositsAndKeys(1) require.NoError(t, err, "Could not generate deposits and keys") deposit := deposits[0] @@ -63,13 +62,13 @@ func TestValidatorStatus_DepositedEth1(t *testing.T) { req := ðpb.ValidatorStatusRequest{ PublicKey: pubKey1, } - resp, err := vs.ValidatorStatus(context.Background(), req) + resp, err := vs.ValidatorStatus(t.Context(), req) require.NoError(t, err, "Could not get validator status") assert.Equal(t, ethpb.ValidatorStatus_DEPOSITED, resp.Status) } func TestValidatorStatus_Deposited(t *testing.T) { - ctx := context.Background() + ctx := t.Context() deps, keys, err := util.DeterministicDepositsAndKeys(1) require.NoError(t, err) @@ -105,13 +104,13 @@ func TestValidatorStatus_Deposited(t *testing.T) { req := ðpb.ValidatorStatusRequest{ PublicKey: pubKey1, } - resp, err := vs.ValidatorStatus(context.Background(), req) + resp, err := vs.ValidatorStatus(t.Context(), req) require.NoError(t, err, "Could not get validator status") assert.Equal(t, ethpb.ValidatorStatus_DEPOSITED, resp.Status) } func TestValidatorStatus_PartiallyDeposited(t *testing.T) { - ctx := context.Background() + ctx := t.Context() pubKey1 := pubKey(1) depData := ðpb.Deposit_Data{ @@ -158,13 +157,13 @@ func TestValidatorStatus_PartiallyDeposited(t *testing.T) { req := ðpb.ValidatorStatusRequest{ PublicKey: pubKey1, } - resp, err := vs.ValidatorStatus(context.Background(), req) + resp, err := vs.ValidatorStatus(t.Context(), req) require.NoError(t, err, "Could not get validator status") assert.Equal(t, ethpb.ValidatorStatus_PARTIALLY_DEPOSITED, resp.Status) } func TestValidatorStatus_Pending_MultipleDeposits(t *testing.T) { - ctx := context.Background() + ctx := t.Context() pubKey1 := pubKey(1) depData := ðpb.Deposit_Data{ @@ -217,13 +216,13 @@ func TestValidatorStatus_Pending_MultipleDeposits(t *testing.T) { req := ðpb.ValidatorStatusRequest{ PublicKey: pubKey1, } - resp, err := vs.ValidatorStatus(context.Background(), req) + resp, err := 
vs.ValidatorStatus(t.Context(), req) require.NoError(t, err, "Could not get validator status") assert.Equal(t, ethpb.ValidatorStatus_PENDING, resp.Status) } func TestValidatorStatus_Pending(t *testing.T) { - ctx := context.Background() + ctx := t.Context() pubKey := pubKey(1) block := util.NewBeaconBlock() @@ -278,13 +277,13 @@ func TestValidatorStatus_Pending(t *testing.T) { req := ðpb.ValidatorStatusRequest{ PublicKey: pubKey, } - resp, err := vs.ValidatorStatus(context.Background(), req) + resp, err := vs.ValidatorStatus(t.Context(), req) require.NoError(t, err, "Could not get validator status") assert.Equal(t, ethpb.ValidatorStatus_PENDING, resp.Status) } func TestValidatorStatus_Exiting(t *testing.T) { - ctx := context.Background() + ctx := t.Context() pubKey := pubKey(1) @@ -340,13 +339,13 @@ func TestValidatorStatus_Exiting(t *testing.T) { req := ðpb.ValidatorStatusRequest{ PublicKey: pubKey, } - resp, err := vs.ValidatorStatus(context.Background(), req) + resp, err := vs.ValidatorStatus(t.Context(), req) require.NoError(t, err, "Could not get validator status") assert.Equal(t, ethpb.ValidatorStatus_EXITING, resp.Status) } func TestValidatorStatus_Slashing(t *testing.T) { - ctx := context.Background() + ctx := t.Context() pubKey := pubKey(1) @@ -399,13 +398,13 @@ func TestValidatorStatus_Slashing(t *testing.T) { req := ðpb.ValidatorStatusRequest{ PublicKey: pubKey, } - resp, err := vs.ValidatorStatus(context.Background(), req) + resp, err := vs.ValidatorStatus(t.Context(), req) require.NoError(t, err, "Could not get validator status") assert.Equal(t, ethpb.ValidatorStatus_EXITED, resp.Status) } func TestValidatorStatus_Exited(t *testing.T) { - ctx := context.Background() + ctx := t.Context() pubKey := pubKey(1) @@ -457,7 +456,7 @@ func TestValidatorStatus_Exited(t *testing.T) { req := ðpb.ValidatorStatusRequest{ PublicKey: pubKey, } - resp, err := vs.ValidatorStatus(context.Background(), req) + resp, err := vs.ValidatorStatus(t.Context(), req) 
require.NoError(t, err, "Could not get validator status") assert.Equal(t, ethpb.ValidatorStatus_EXITED, resp.Status) } @@ -481,13 +480,13 @@ func TestValidatorStatus_UnknownStatus(t *testing.T) { req := ðpb.ValidatorStatusRequest{ PublicKey: pubKey, } - resp, err := vs.ValidatorStatus(context.Background(), req) + resp, err := vs.ValidatorStatus(t.Context(), req) require.NoError(t, err, "Could not get validator status") assert.Equal(t, ethpb.ValidatorStatus_UNKNOWN_STATUS, resp.Status) } func TestActivationStatus_OK(t *testing.T) { - ctx := context.Background() + ctx := t.Context() deposits, _, err := util.DeterministicDepositsAndKeys(4) require.NoError(t, err) @@ -531,17 +530,17 @@ func TestActivationStatus_OK(t *testing.T) { assert.NoError(t, depositTrie.Insert(dep.Data.Signature, 15)) root, err = depositTrie.HashTreeRoot() require.NoError(t, err) - assert.NoError(t, depositCache.InsertDeposit(context.Background(), dep, 0, 1, root)) + assert.NoError(t, depositCache.InsertDeposit(t.Context(), dep, 0, 1, root)) vs := &Server{ - Ctx: context.Background(), + Ctx: t.Context(), ChainStartFetcher: &mockExecution.Chain{}, BlockFetcher: &mockExecution.Chain{}, Eth1InfoFetcher: &mockExecution.Chain{}, DepositFetcher: depositCache, HeadFetcher: &mockChain.ChainService{State: stateObj, Root: genesisRoot[:]}, } - activeExists, response, err := vs.activationStatus(context.Background(), pubKeys) + activeExists, response, err := vs.activationStatus(t.Context(), pubKeys) require.NoError(t, err) require.Equal(t, true, activeExists, "No activated validator exists when there was supposed to be 2") if response[0].Status.Status != ethpb.ValidatorStatus_ACTIVE { @@ -580,7 +579,7 @@ func TestActivationStatus_OK(t *testing.T) { func TestOptimisticStatus(t *testing.T) { params.SetupTestConfigCleanup(t) server := &Server{OptimisticModeFetcher: &mockChain.ChainService{}, TimeFetcher: &mockChain.ChainService{}} - err := server.optimisticStatus(context.Background()) + err := 
server.optimisticStatus(t.Context()) require.NoError(t, err) cfg := params.BeaconConfig().Copy() @@ -588,19 +587,19 @@ func TestOptimisticStatus(t *testing.T) { params.OverrideBeaconConfig(cfg) server = &Server{OptimisticModeFetcher: &mockChain.ChainService{Optimistic: true}, TimeFetcher: &mockChain.ChainService{}} - err = server.optimisticStatus(context.Background()) + err = server.optimisticStatus(t.Context()) s, ok := status.FromError(err) require.Equal(t, true, ok) require.DeepEqual(t, codes.Unavailable, s.Code()) require.ErrorContains(t, errOptimisticMode.Error(), err) server = &Server{OptimisticModeFetcher: &mockChain.ChainService{Optimistic: false}, TimeFetcher: &mockChain.ChainService{}} - err = server.optimisticStatus(context.Background()) + err = server.optimisticStatus(t.Context()) require.NoError(t, err) } func TestValidatorStatus_CorrectActivationQueue(t *testing.T) { - ctx := context.Background() + ctx := t.Context() pbKey := pubKey(5) block := util.NewBeaconBlock() @@ -694,14 +693,14 @@ func TestValidatorStatus_CorrectActivationQueue(t *testing.T) { req := ðpb.ValidatorStatusRequest{ PublicKey: pbKey, } - resp, err := vs.ValidatorStatus(context.Background(), req) + resp, err := vs.ValidatorStatus(t.Context(), req) require.NoError(t, err, "Could not get validator status") assert.Equal(t, ethpb.ValidatorStatus_PENDING, resp.Status) assert.Equal(t, uint64(2), resp.PositionInActivationQueue, "Unexpected position in activation queue") } func TestMultipleValidatorStatus_Pubkeys(t *testing.T) { - ctx := context.Background() + ctx := t.Context() deposits, _, err := util.DeterministicDepositsAndKeys(6) require.NoError(t, err) @@ -762,10 +761,10 @@ func TestMultipleValidatorStatus_Pubkeys(t *testing.T) { assert.NoError(t, depositTrie.Insert(dep.Data.Signature, 15)) root, err = depositTrie.HashTreeRoot() require.NoError(t, err) - assert.NoError(t, depositCache.InsertDeposit(context.Background(), dep, 0, 1, root)) + assert.NoError(t, 
depositCache.InsertDeposit(t.Context(), dep, 0, 1, root)) vs := &Server{ - Ctx: context.Background(), + Ctx: t.Context(), ChainStartFetcher: &mockExecution.Chain{}, BlockFetcher: &mockExecution.Chain{}, Eth1InfoFetcher: &mockExecution.Chain{}, @@ -797,7 +796,7 @@ } req := &ethpb.MultipleValidatorStatusRequest{PublicKeys: pubKeys} - response, err := vs.MultipleValidatorStatus(context.Background(), req) + response, err := vs.MultipleValidatorStatus(t.Context(), req) require.NoError(t, err) assert.Equal(t, len(response.PublicKeys), len(pubKeys)) @@ -860,7 +859,7 @@ func TestMultipleValidatorStatus_Indices(t *testing.T) { require.NoError(t, err, "Could not get signing root") vs := &Server{ - Ctx: context.Background(), + Ctx: t.Context(), ChainStartFetcher: &mockExecution.Chain{}, BlockFetcher: &mockExecution.Chain{}, Eth1InfoFetcher: &mockExecution.Chain{}, @@ -891,7 +890,7 @@ // Note: Index 6 should be skipped. 
req := &ethpb.MultipleValidatorStatusRequest{Indices: []int64{0, 1, 2, 3, 4, 5, 6}} - response, err := vs.MultipleValidatorStatus(context.Background(), req) + response, err := vs.MultipleValidatorStatus(t.Context(), req) require.NoError(t, err) assert.Equal(t, len(beaconState.Validators), len(response.PublicKeys)) @@ -908,7 +907,7 @@ } func TestValidatorStatus_Invalid(t *testing.T) { - ctx := context.Background() + ctx := t.Context() deposits, _, err := util.DeterministicDepositsAndKeys(1) require.NoError(t, err, "Could not generate deposits and keys") deposit := deposits[0] @@ -941,7 +940,7 @@ req := &ethpb.ValidatorStatusRequest{ PublicKey: pubKey1, } - resp, err := vs.ValidatorStatus(context.Background(), req) + resp, err := vs.ValidatorStatus(t.Context(), req) require.NoError(t, err, "Could not get validator status") assert.Equal(t, ethpb.ValidatorStatus_INVALID, resp.Status) } @@ -1251,7 +1250,7 @@ func TestServer_CheckDoppelGanger(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { vs, req, resp := tt.svSetup(t) - got, err := vs.CheckDoppelGanger(context.Background(), req) + got, err := vs.CheckDoppelGanger(t.Context(), req) if (err != nil) != tt.wantErr { t.Errorf("CheckDoppelGanger() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/sync_committee_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/sync_committee_test.go index 7677206930..35c5e6eaa6 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/sync_committee_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/sync_committee_test.go @@ -1,7 +1,6 @@ package validator import ( - "context" "testing" "time" @@ -31,7 +30,7 @@ func TestGetSyncMessageBlockRoot_OK(t *testing.T) { HeadFetcher: &mock.ChainService{Root: r}, TimeFetcher: &mock.ChainService{Genesis: time.Now()}, } - res, err := 
server.GetSyncMessageBlockRoot(context.Background(), &emptypb.Empty{}) + res, err := server.GetSyncMessageBlockRoot(t.Context(), &emptypb.Empty{}) require.NoError(t, err) require.DeepEqual(t, r, res.Root) } @@ -47,7 +46,7 @@ func TestGetSyncMessageBlockRoot_Optimistic(t *testing.T) { TimeFetcher: &mock.ChainService{Genesis: time.Now()}, OptimisticModeFetcher: &mock.ChainService{Optimistic: true}, } - _, err := server.GetSyncMessageBlockRoot(context.Background(), &emptypb.Empty{}) + _, err := server.GetSyncMessageBlockRoot(t.Context(), &emptypb.Empty{}) s, ok := status.FromError(err) require.Equal(t, true, ok) require.DeepEqual(t, codes.Unavailable, s.Code()) @@ -58,7 +57,7 @@ func TestGetSyncMessageBlockRoot_Optimistic(t *testing.T) { TimeFetcher: &mock.ChainService{Genesis: time.Now()}, OptimisticModeFetcher: &mock.ChainService{Optimistic: false}, } - _, err = server.GetSyncMessageBlockRoot(context.Background(), &emptypb.Empty{}) + _, err = server.GetSyncMessageBlockRoot(t.Context(), &emptypb.Empty{}) require.NoError(t, err) } @@ -77,7 +76,7 @@ func TestSubmitSyncMessage_OK(t *testing.T) { Slot: 1, ValidatorIndex: 2, } - _, err := server.SubmitSyncMessage(context.Background(), msg) + _, err := server.SubmitSyncMessage(t.Context(), msg) require.NoError(t, err) savedMsgs, err := server.CoreService.SyncCommitteePool.SyncCommitteeMessages(1) require.NoError(t, err) @@ -95,7 +94,7 @@ func TestGetSyncSubcommitteeIndex_Ok(t *testing.T) { } var pubKey [fieldparams.BLSPubkeyLength]byte // Request slot 0, should get the index 0 for validator 0. 
- res, err := server.GetSyncSubcommitteeIndex(context.Background(), &ethpb.SyncSubcommitteeIndexRequest{ + res, err := server.GetSyncSubcommitteeIndex(t.Context(), &ethpb.SyncSubcommitteeIndexRequest{ PublicKey: pubKey[:], Slot: primitives.Slot(0), }) require.NoError(t, err) @@ -129,14 +128,14 @@ func TestGetSyncCommitteeContribution_FiltersDuplicates(t *testing.T) { BlockRoot: make([]byte, 32), Signature: sig, } - _, err = server.SubmitSyncMessage(context.Background(), msg) + _, err = server.SubmitSyncMessage(t.Context(), msg) require.NoError(t, err) - _, err = server.SubmitSyncMessage(context.Background(), msg) + _, err = server.SubmitSyncMessage(t.Context(), msg) require.NoError(t, err) val, err := st.ValidatorAtIndex(2) require.NoError(t, err) - contr, err := server.GetSyncCommitteeContribution(context.Background(), + contr, err := server.GetSyncCommitteeContribution(t.Context(), &ethpb.SyncCommitteeContributionRequest{ Slot: 1, PublicKey: val.PublicKey, @@ -161,7 +160,7 @@ func TestSubmitSignedContributionAndProof_OK(t *testing.T) { }, }, } - _, err := server.SubmitSignedContributionAndProof(context.Background(), contribution) + _, err := server.SubmitSignedContributionAndProof(t.Context(), contribution) require.NoError(t, err) savedMsgs, err := server.CoreService.SyncCommitteePool.SyncCommitteeContributions(1) require.NoError(t, err) @@ -190,7 +189,7 @@ func TestSubmitSignedContributionAndProof_Notification(t *testing.T) { }, }, } - _, err := server.SubmitSignedContributionAndProof(context.Background(), contribution) + _, err := server.SubmitSignedContributionAndProof(t.Context(), contribution) require.NoError(t, err) // Ensure the state notification was broadcast. 
diff --git a/beacon-chain/rpc/prysm/validator/handlers_test.go b/beacon-chain/rpc/prysm/validator/handlers_test.go index 4eb5f9b190..dcc4f8ef13 100644 --- a/beacon-chain/rpc/prysm/validator/handlers_test.go +++ b/beacon-chain/rpc/prysm/validator/handlers_test.go @@ -2,7 +2,6 @@ package validator import ( "bytes" - "context" "encoding/binary" "encoding/json" "fmt" @@ -81,7 +80,7 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpoch(t *testing.T) { helpers.ClearCache() beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() validatorCount := uint64(32) validators := make([]*ethpb.Validator, validatorCount) @@ -185,7 +184,7 @@ func TestServer_GetValidatorParticipation_OrphanedUntilGenesis(t *testing.T) { params.OverrideBeaconConfig(params.BeaconConfig()) beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() validatorCount := uint64(100) validators := make([]*ethpb.Validator, validatorCount) @@ -287,7 +286,7 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpochWithBits(t *testing validatorCount := uint64(32) genState, _ := util.DeterministicGenesisStateAltair(t, validatorCount) - c, err := altair.NextSyncCommittee(context.Background(), genState) + c, err := altair.NextSyncCommittee(t.Context(), genState) require.NoError(t, err) require.NoError(t, genState.SetCurrentSyncCommittee(c)) @@ -305,7 +304,7 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpochWithBits(t *testing t.Run("bellatrix", func(t *testing.T) { validatorCount := uint64(32) genState, _ := util.DeterministicGenesisStateBellatrix(t, validatorCount) - c, err := altair.NextSyncCommittee(context.Background(), genState) + c, err := altair.NextSyncCommittee(t.Context(), genState) require.NoError(t, err) require.NoError(t, genState.SetCurrentSyncCommittee(c)) @@ -323,7 +322,7 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpochWithBits(t *testing t.Run("capella", func(t *testing.T) { validatorCount := uint64(32) 
genState, _ := util.DeterministicGenesisStateCapella(t, validatorCount) - c, err := altair.NextSyncCommittee(context.Background(), genState) + c, err := altair.NextSyncCommittee(t.Context(), genState) require.NoError(t, err) require.NoError(t, genState.SetCurrentSyncCommittee(c)) @@ -343,7 +342,7 @@ func runGetValidatorParticipationCurrentEpoch(t *testing.T, genState state.Beaco helpers.ClearCache() beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() validatorCount := uint64(32) gsr, err := genState.HashTreeRoot(ctx) @@ -441,7 +440,7 @@ func TestServer_GetValidatorActiveSetChanges_NoState(t *testing.T) { func TestServer_GetValidatorActiveSetChanges(t *testing.T) { beaconDB := dbTest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() validators := make([]*ethpb.Validator, 8) headState, err := util.NewBeaconState() require.NoError(t, err) diff --git a/beacon-chain/rpc/prysm/validator/validator_performance_test.go b/beacon-chain/rpc/prysm/validator/validator_performance_test.go index c63bff7ad9..0420132692 100644 --- a/beacon-chain/rpc/prysm/validator/validator_performance_test.go +++ b/beacon-chain/rpc/prysm/validator/validator_performance_test.go @@ -2,7 +2,6 @@ package validator import ( "bytes" - "context" "encoding/json" "io" "net/http" @@ -104,7 +103,7 @@ func TestServer_GetValidatorPerformance(t *testing.T) { require.DeepEqual(t, want, response) }) t.Run("Indices", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() publicKeys := [][48]byte{ bytesutil.ToBytes48([]byte{1}), bytesutil.ToBytes48([]byte{2}), @@ -169,7 +168,7 @@ func TestServer_GetValidatorPerformance(t *testing.T) { require.DeepEqual(t, want, response) }) t.Run("Indices Pubkeys", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() publicKeys := [][48]byte{ bytesutil.ToBytes48([]byte{1}), bytesutil.ToBytes48([]byte{2}), diff --git a/beacon-chain/rpc/service_test.go b/beacon-chain/rpc/service_test.go index 
4db2d792e4..d9a7b6da68 100644 --- a/beacon-chain/rpc/service_test.go +++ b/beacon-chain/rpc/service_test.go @@ -1,7 +1,6 @@ package rpc import ( - "context" "errors" "io" "net/http" @@ -28,7 +27,7 @@ func TestLifecycle_OK(t *testing.T) { chainService := &mock.ChainService{ Genesis: time.Now(), } - rpcService := NewService(context.Background(), &Config{ + rpcService := NewService(t.Context(), &Config{ Port: "7348", SyncService: &mockSync.Sync{IsSyncing: false}, BlockReceiver: chainService, @@ -70,7 +69,7 @@ func TestStatus_Optimistic(t *testing.T) { func TestRPC_InsecureEndpoint(t *testing.T) { hook := logTest.NewGlobal() chainService := &mock.ChainService{Genesis: time.Now()} - rpcService := NewService(context.Background(), &Config{ + rpcService := NewService(t.Context(), &Config{ Port: "7777", SyncService: &mockSync.Sync{IsSyncing: false}, BlockReceiver: chainService, diff --git a/beacon-chain/slasher/chunks_test.go b/beacon-chain/slasher/chunks_test.go index 5bd45c33c6..d139456a65 100644 --- a/beacon-chain/slasher/chunks_test.go +++ b/beacon-chain/slasher/chunks_test.go @@ -1,7 +1,6 @@ package slasher import ( - "context" "math" "reflect" "testing" @@ -83,7 +82,7 @@ func TestMaxSpanChunksSlice_MaxChunkSpanFrom(t *testing.T) { } func TestMinSpanChunksSlice_CheckSlashable(t *testing.T) { - ctx := context.Background() + ctx := t.Context() for _, v := range []int{version.Phase0, version.Electra} { t.Run(version.String(v), func(t *testing.T) { @@ -176,7 +175,7 @@ func TestMinSpanChunksSlice_CheckSlashable(t *testing.T) { } func TestMinSpanChunksSlice_CheckSlashable_DifferentVersions(t *testing.T) { - ctx := context.Background() + ctx := t.Context() slasherDB := dbtest.SetupSlasherDB(t) params := &Parameters{ chunkSize: 3, @@ -221,7 +220,7 @@ func TestMinSpanChunksSlice_CheckSlashable_DifferentVersions(t *testing.T) { } func TestMaxSpanChunksSlice_CheckSlashable(t *testing.T) { - ctx := context.Background() + ctx := t.Context() for _, v := range []int{version.Phase0, 
version.Electra} { t.Run(version.String(v), func(t *testing.T) { @@ -317,7 +316,7 @@ func TestMaxSpanChunksSlice_CheckSlashable(t *testing.T) { } func TestMaxSpanChunksSlice_CheckSlashable_DifferentVersions(t *testing.T) { - ctx := context.Background() + ctx := t.Context() slasherDB := dbtest.SetupSlasherDB(t) params := &Parameters{ chunkSize: 4, diff --git a/beacon-chain/slasher/detect_attestations_test.go b/beacon-chain/slasher/detect_attestations_test.go index 499c1b7963..6801b2368a 100644 --- a/beacon-chain/slasher/detect_attestations_test.go +++ b/beacon-chain/slasher/detect_attestations_test.go @@ -623,7 +623,7 @@ func Test_processAttestations(t *testing.T) { name := version.String(tt.ver) + ": " + tt.name t.Run(name, func(t *testing.T) { // Create context. - ctx := context.Background() + ctx := t.Context() // Configure logging. hook := logTest.NewGlobal() @@ -651,7 +651,7 @@ func Test_processAttestations(t *testing.T) { } // Create the slasher service. - slasherService, err := New(context.Background(), serviceConfig) + slasherService, err := New(t.Context(), serviceConfig) require.NoError(t, err) // Initialize validators in the state. @@ -791,7 +791,7 @@ func Test_processQueuedAttestations_MultipleChunkIndices(t *testing.T) { defer hook.Reset() slasherDB := dbtest.SetupSlasherDB(t) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) slasherParams := DefaultParams() // We process submit attestations from chunk index 0 to chunk index 1. 
@@ -812,7 +812,7 @@ func Test_processQueuedAttestations_MultipleChunkIndices(t *testing.T) { State: beaconState, } - s, err := New(context.Background(), + s, err := New(t.Context(), &ServiceConfig{ Database: slasherDB, StateNotifier: &mock.MockStateNotifier{}, @@ -861,7 +861,7 @@ func Test_processQueuedAttestations_OverlappingChunkIndices(t *testing.T) { defer hook.Reset() slasherDB := dbtest.SetupSlasherDB(t) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) slasherParams := DefaultParams() startEpoch := primitives.Epoch(slasherParams.chunkSize) @@ -877,7 +877,7 @@ func Test_processQueuedAttestations_OverlappingChunkIndices(t *testing.T) { State: beaconState, } - s, err := New(context.Background(), + s, err := New(t.Context(), &ServiceConfig{ Database: slasherDB, StateNotifier: &mock.MockStateNotifier{}, @@ -1136,7 +1136,7 @@ func Test_updatedChunkByChunkIndex(t *testing.T) { for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { // Create context. - ctx := context.Background() + ctx := t.Context() // Initialize the slasher database. 
slasherDB := dbtest.SetupSlasherDB(t) @@ -1215,9 +1215,9 @@ func Test_updatedChunkByChunkIndex(t *testing.T) { } func Test_applyAttestationForValidator_MinSpanChunk(t *testing.T) { - ctx := context.Background() + ctx := t.Context() slasherDB := dbtest.SetupSlasherDB(t) - srv, err := New(context.Background(), + srv, err := New(t.Context(), &ServiceConfig{ Database: slasherDB, StateNotifier: &mock.MockStateNotifier{}, @@ -1272,9 +1272,9 @@ func Test_applyAttestationForValidator_MinSpanChunk(t *testing.T) { } func Test_applyAttestationForValidator_MaxSpanChunk(t *testing.T) { - ctx := context.Background() + ctx := t.Context() slasherDB := dbtest.SetupSlasherDB(t) - srv, err := New(context.Background(), + srv, err := New(t.Context(), &ServiceConfig{ Database: slasherDB, StateNotifier: &mock.MockStateNotifier{}, @@ -1338,10 +1338,10 @@ func Test_loadChunks_MaxSpans(t *testing.T) { func testLoadChunks(t *testing.T, kind slashertypes.ChunkKind) { slasherDB := dbtest.SetupSlasherDB(t) - ctx := context.Background() + ctx := t.Context() // Check if the chunk at chunk index already exists in-memory. 
- s, err := New(context.Background(), + s, err := New(t.Context(), &ServiceConfig{ Database: slasherDB, StateNotifier: &mock.MockStateNotifier{}, @@ -1419,7 +1419,7 @@ func TestService_processQueuedAttestations(t *testing.T) { Slot: &slot, } - s, err := New(context.Background(), + s, err := New(t.Context(), &ServiceConfig{ Database: slasherDB, StateNotifier: &mock.MockStateNotifier{}, @@ -1431,7 +1431,7 @@ func TestService_processQueuedAttestations(t *testing.T) { s.attsQueue.extend([]*slashertypes.IndexedAttestationWrapper{ createAttestationWrapperEmptySig(t, version.Phase0, 0, 1, []uint64{0, 1} /* indices */, nil /* signingRoot */), }) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) tickerChan := make(chan primitives.Slot) s.wg.Add(1) go func() { @@ -1458,7 +1458,7 @@ func Benchmark_saveChunksToDisk(b *testing.B) { params := DefaultParams() // Get a context. - ctx := context.Background() + ctx := b.Context() chunkByChunkIndexByValidatorChunkIndex := make(map[uint64]map[uint64]Chunker, validatorsChunksCount) @@ -1503,7 +1503,7 @@ func BenchmarkCheckSlashableAttestations(b *testing.B) { Slot: &slot, } - s, err := New(context.Background(), &ServiceConfig{ + s, err := New(b.Context(), &ServiceConfig{ Database: slasherDB, StateNotifier: &mock.MockStateNotifier{}, HeadStateFetcher: mockChain, @@ -1578,7 +1578,7 @@ func runAttestationsBenchmark(b *testing.B, s *Service, numAtts, numValidators u s.genesisTime = genesisTime epoch := slots.EpochsSinceGenesis(genesisTime) - _, err := s.checkSlashableAttestations(context.Background(), epoch, atts) + _, err := s.checkSlashableAttestations(b.Context(), epoch, atts) require.NoError(b, err) } } @@ -1595,7 +1595,7 @@ func Benchmark_checkSurroundVotes(b *testing.B) { currentEpoch = 43 ) // Create a context. - ctx := context.Background() + ctx := b.Context() // Initialize the slasher database. 
slasherDB := dbtest.SetupSlasherDB(b) diff --git a/beacon-chain/slasher/detect_blocks_test.go b/beacon-chain/slasher/detect_blocks_test.go index ee2d924910..a90967ef3d 100644 --- a/beacon-chain/slasher/detect_blocks_test.go +++ b/beacon-chain/slasher/detect_blocks_test.go @@ -71,7 +71,7 @@ func Test_processQueuedBlocks_DetectsDoubleProposals(t *testing.T) { hook := logTest.NewGlobal() beaconDB := dbtest.SetupDB(t) slasherDB := dbtest.SetupSlasherDB(t) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) beaconState, err := util.NewBeaconState() require.NoError(t, err) @@ -160,7 +160,7 @@ func Test_processQueuedBlocks_DetectsDoubleProposals(t *testing.T) { func Test_processQueuedBlocks_NotSlashable(t *testing.T) { hook := logTest.NewGlobal() slasherDB := dbtest.SetupSlasherDB(t) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) beaconState, err := util.NewBeaconState() require.NoError(t, err) diff --git a/beacon-chain/slasher/process_slashings_test.go b/beacon-chain/slasher/process_slashings_test.go index babd81b698..658008c927 100644 --- a/beacon-chain/slasher/process_slashings_test.go +++ b/beacon-chain/slasher/process_slashings_test.go @@ -1,7 +1,6 @@ package slasher import ( - "context" "testing" mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing" @@ -21,7 +20,7 @@ import ( ) func TestService_processAttesterSlashings(t *testing.T) { - ctx := context.Background() + ctx := t.Context() slasherDB := dbtest.SetupSlasherDB(t) beaconDB := dbtest.SetupDB(t) @@ -143,7 +142,7 @@ func TestService_processAttesterSlashings(t *testing.T) { } func TestService_processProposerSlashings(t *testing.T) { - ctx := context.Background() + ctx := t.Context() slasherDB := dbtest.SetupSlasherDB(t) beaconDB := dbtest.SetupDB(t) diff --git a/beacon-chain/slasher/receive_test.go b/beacon-chain/slasher/receive_test.go index af9a41f824..b059cf31ed 100644 --- 
a/beacon-chain/slasher/receive_test.go +++ b/beacon-chain/slasher/receive_test.go @@ -21,7 +21,7 @@ import ( ) func TestSlasher_receiveAttestations_OK(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) s := &Service{ serviceCfg: &ServiceConfig{ IndexedAttestationsFeed: new(event.Feed), @@ -55,7 +55,7 @@ func TestSlasher_receiveAttestations_OK(t *testing.T) { } func TestService_pruneSlasherDataWithinSlidingWindow_AttestationsPruned(t *testing.T) { - ctx := context.Background() + ctx := t.Context() params := DefaultParams() params.historyLength = 4 // 4 epochs worth of history. slasherDB := dbtest.SetupSlasherDB(t) @@ -127,7 +127,7 @@ func TestService_pruneSlasherDataWithinSlidingWindow_AttestationsPruned(t *testi } func TestService_pruneSlasherDataWithinSlidingWindow_ProposalsPruned(t *testing.T) { - ctx := context.Background() + ctx := t.Context() // Override beacon config to 1 slot per epoch for easier testing. params2.SetupTestConfigCleanup(t) @@ -206,7 +206,7 @@ func TestService_pruneSlasherDataWithinSlidingWindow_ProposalsPruned(t *testing. 
} func TestSlasher_receiveAttestations_OnlyValidAttestations(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) s := &Service{ serviceCfg: &ServiceConfig{ IndexedAttestationsFeed: new(event.Feed), @@ -246,7 +246,7 @@ func TestSlasher_receiveAttestations_OnlyValidAttestations(t *testing.T) { } func TestSlasher_receiveBlocks_OK(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) s := &Service{ serviceCfg: &ServiceConfig{ BeaconBlockHeadersFeed: new(event.Feed), @@ -301,7 +301,7 @@ func TestService_processQueuedBlocks(t *testing.T) { s.blksQueue.extend([]*slashertypes.SignedBlockHeaderWrapper{ createProposalWrapper(t, 0, 1, nil), }) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) tickerChan := make(chan primitives.Slot) s.wg.Add(1) go func() { diff --git a/beacon-chain/slasher/service_test.go b/beacon-chain/slasher/service_test.go index 4357bc4518..46a5ea0a17 100644 --- a/beacon-chain/slasher/service_test.go +++ b/beacon-chain/slasher/service_test.go @@ -1,7 +1,6 @@ package slasher import ( - "context" "io" "os" "testing" @@ -39,7 +38,7 @@ func TestService_StartStop_ChainInitialized(t *testing.T) { Slot: &currentSlot, } gs := startup.NewClockSynchronizer() - srv, err := New(context.Background(), &ServiceConfig{ + srv, err := New(t.Context(), &ServiceConfig{ IndexedAttestationsFeed: new(event.Feed), BeaconBlockHeadersFeed: new(event.Feed), StateNotifier: &mock.MockStateNotifier{}, diff --git a/beacon-chain/startup/synchronizer_test.go b/beacon-chain/startup/synchronizer_test.go index 993c7ed82f..630510955e 100644 --- a/beacon-chain/startup/synchronizer_test.go +++ b/beacon-chain/startup/synchronizer_test.go @@ -16,7 +16,7 @@ func TestSynchronizerErrOnSecondSet(t *testing.T) { } func TestWaitForClockCanceled(t *testing.T) { - ctx, cancel := 
context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) cancel() s := NewClockSynchronizer() c, err := s.WaitForClock(ctx) @@ -25,7 +25,7 @@ func TestWaitForClockCanceled(t *testing.T) { } func TestWaitForClock(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() s := NewClockSynchronizer() var vr [32]byte diff --git a/beacon-chain/state/state-native/hasher_test.go b/beacon-chain/state/state-native/hasher_test.go index 933d8d0965..b3870f9829 100644 --- a/beacon-chain/state/state-native/hasher_test.go +++ b/beacon-chain/state/state-native/hasher_test.go @@ -1,7 +1,6 @@ package state_native_test import ( - "context" "testing" statenative "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native" @@ -52,7 +51,7 @@ func TestComputeFieldRootsWithHasher_Phase0(t *testing.T) { require.NoError(t, err) s, ok := initState.(*statenative.BeaconState) require.Equal(t, true, ok) - root, err := statenative.ComputeFieldRootsWithHasher(context.Background(), s) + root, err := statenative.ComputeFieldRootsWithHasher(t.Context(), s) require.NoError(t, err) expected := [][]byte{ {0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, @@ -119,7 +118,7 @@ func TestComputeFieldRootsWithHasher_Altair(t *testing.T) { s, ok := initState.(*statenative.BeaconState) require.Equal(t, true, ok) - root, err := statenative.ComputeFieldRootsWithHasher(context.Background(), s) + root, err := statenative.ComputeFieldRootsWithHasher(t.Context(), s) require.NoError(t, err) expected := [][]byte{ {0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, @@ -192,7 +191,7 @@ func TestComputeFieldRootsWithHasher_Bellatrix(t *testing.T) { s, ok := 
initState.(*statenative.BeaconState) require.Equal(t, true, ok) - root, err := statenative.ComputeFieldRootsWithHasher(context.Background(), s) + root, err := statenative.ComputeFieldRootsWithHasher(t.Context(), s) require.NoError(t, err) expected := [][]byte{ {0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, @@ -272,7 +271,7 @@ func TestComputeFieldRootsWithHasher_Capella(t *testing.T) { s, ok := initState.(*statenative.BeaconState) require.Equal(t, true, ok) - root, err := statenative.ComputeFieldRootsWithHasher(context.Background(), s) + root, err := statenative.ComputeFieldRootsWithHasher(t.Context(), s) require.NoError(t, err) expected := [][]byte{ {0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, diff --git a/beacon-chain/state/state-native/proofs_test.go b/beacon-chain/state/state-native/proofs_test.go index 4c9c1884df..b35627ebb8 100644 --- a/beacon-chain/state/state-native/proofs_test.go +++ b/beacon-chain/state/state-native/proofs_test.go @@ -1,7 +1,6 @@ package state_native_test import ( - "context" "testing" statenative "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native" @@ -12,7 +11,7 @@ import ( ) func TestBeaconStateMerkleProofs_phase0_notsupported(t *testing.T) { - ctx := context.Background() + ctx := t.Context() st, _ := util.DeterministicGenesisState(t, 256) t.Run("current sync committee", func(t *testing.T) { _, err := st.CurrentSyncCommitteeProof(ctx) @@ -28,7 +27,7 @@ func TestBeaconStateMerkleProofs_phase0_notsupported(t *testing.T) { }) } func TestBeaconStateMerkleProofs_altair(t *testing.T) { - ctx := context.Background() + ctx := t.Context() altair, err := util.NewBeaconStateAltair() require.NoError(t, err) htr, err := altair.HashTreeRoot(ctx) @@ -97,7 +96,7 @@ func TestBeaconStateMerkleProofs_altair(t 
*testing.T) { } func TestBeaconStateMerkleProofs_bellatrix(t *testing.T) { - ctx := context.Background() + ctx := t.Context() bellatrix, err := util.NewBeaconStateBellatrix() require.NoError(t, err) htr, err := bellatrix.HashTreeRoot(ctx) diff --git a/beacon-chain/state/state-native/setters_attestation_test.go b/beacon-chain/state/state-native/setters_attestation_test.go index b291909d6a..12144219d1 100644 --- a/beacon-chain/state/state-native/setters_attestation_test.go +++ b/beacon-chain/state/state-native/setters_attestation_test.go @@ -1,7 +1,6 @@ package state_native import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/types" @@ -53,14 +52,14 @@ func TestAppendBeyondIndicesLimit(t *testing.T) { RandaoMixes: mockrandaoMixes, }) require.NoError(t, err) - _, err = st.HashTreeRoot(context.Background()) + _, err = st.HashTreeRoot(t.Context()) require.NoError(t, err) s, ok := st.(*BeaconState) require.Equal(t, true, ok) for i := types.FieldIndex(0); i < types.FieldIndex(params.BeaconConfig().BeaconStateFieldCount); i++ { s.dirtyFields[i] = true } - _, err = st.HashTreeRoot(context.Background()) + _, err = st.HashTreeRoot(t.Context()) require.NoError(t, err) for i := 0; i < 10; i++ { assert.NoError(t, st.AppendValidator(&ethpb.Validator{})) diff --git a/beacon-chain/state/state-native/state_fuzz_test.go b/beacon-chain/state/state-native/state_fuzz_test.go index 55e4aebf0e..00bebc68cb 100644 --- a/beacon-chain/state/state-native/state_fuzz_test.go +++ b/beacon-chain/state/state-native/state_fuzz_test.go @@ -1,7 +1,6 @@ package state_native_test import ( - "context" "testing" coreState "github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition" @@ -45,11 +44,11 @@ func FuzzPhase0StateHashTreeRoot(f *testing.F) { stateObj, err := native.InitializeFromProtoUnsafePhase0(pbState) assert.NoError(t, err) for stateObj.Slot() < primitives.Slot(slotsToTransition) { - stateObj, err = coreState.ProcessSlots(context.Background(),
stateObj, stateObj.Slot()+1) + stateObj, err = coreState.ProcessSlots(t.Context(), stateObj, stateObj.Slot()+1) assert.NoError(t, err) stateObj.Copy() - nativeState, err = coreState.ProcessSlots(context.Background(), nativeState, nativeState.Slot()+1) + nativeState, err = coreState.ProcessSlots(t.Context(), nativeState, nativeState.Slot()+1) assert.NoError(t, err) nativeState.Copy() } @@ -60,9 +59,9 @@ func FuzzPhase0StateHashTreeRoot(f *testing.F) { newState, err := native.InitializeFromProtoUnsafePhase0(innerState) assert.NoError(t, err) - newRt, newErr := newState.HashTreeRoot(context.Background()) - rt, err := stateObj.HashTreeRoot(context.Background()) - nativeRt, nativeErr := nativeState.HashTreeRoot(context.Background()) + newRt, newErr := newState.HashTreeRoot(t.Context()) + rt, err := stateObj.HashTreeRoot(t.Context()) + nativeRt, nativeErr := nativeState.HashTreeRoot(t.Context()) assert.Equal(t, newErr != nil, err != nil) assert.Equal(t, newErr != nil, nativeErr != nil) @@ -116,11 +115,11 @@ func FuzzAltairStateHashTreeRoot(f *testing.F) { stateObj, err := native.InitializeFromProtoUnsafeAltair(pbState) assert.NoError(t, err) for stateObj.Slot() < primitives.Slot(slotsToTransition) { - stateObj, err = coreState.ProcessSlots(context.Background(), stateObj, stateObj.Slot()+1) + stateObj, err = coreState.ProcessSlots(t.Context(), stateObj, stateObj.Slot()+1) assert.NoError(t, err) stateObj.Copy() - nativeState, err = coreState.ProcessSlots(context.Background(), nativeState, nativeState.Slot()+1) + nativeState, err = coreState.ProcessSlots(t.Context(), nativeState, nativeState.Slot()+1) assert.NoError(t, err) nativeState.Copy() } @@ -131,9 +130,9 @@ func FuzzAltairStateHashTreeRoot(f *testing.F) { newState, err := native.InitializeFromProtoUnsafeAltair(innerState) assert.NoError(t, err) - newRt, newErr := newState.HashTreeRoot(context.Background()) - rt, err := stateObj.HashTreeRoot(context.Background()) - nativeRt, nativeErr := 
nativeState.HashTreeRoot(context.Background()) + newRt, newErr := newState.HashTreeRoot(t.Context()) + rt, err := stateObj.HashTreeRoot(t.Context()) + nativeRt, nativeErr := nativeState.HashTreeRoot(t.Context()) assert.Equal(t, newErr != nil, err != nil) assert.Equal(t, newErr != nil, nativeErr != nil) if err == nil { @@ -186,11 +185,11 @@ func FuzzBellatrixStateHashTreeRoot(f *testing.F) { stateObj, err := native.InitializeFromProtoUnsafeBellatrix(pbState) assert.NoError(t, err) for stateObj.Slot() < primitives.Slot(slotsToTransition) { - stateObj, err = coreState.ProcessSlots(context.Background(), stateObj, stateObj.Slot()+1) + stateObj, err = coreState.ProcessSlots(t.Context(), stateObj, stateObj.Slot()+1) assert.NoError(t, err) stateObj.Copy() - nativeState, err = coreState.ProcessSlots(context.Background(), nativeState, nativeState.Slot()+1) + nativeState, err = coreState.ProcessSlots(t.Context(), nativeState, nativeState.Slot()+1) assert.NoError(t, err) nativeState.Copy() } @@ -201,9 +200,9 @@ func FuzzBellatrixStateHashTreeRoot(f *testing.F) { newState, err := native.InitializeFromProtoUnsafeBellatrix(innerState) assert.NoError(t, err) - newRt, newErr := newState.HashTreeRoot(context.Background()) - rt, err := stateObj.HashTreeRoot(context.Background()) - nativeRt, nativeErr := nativeState.HashTreeRoot(context.Background()) + newRt, newErr := newState.HashTreeRoot(t.Context()) + rt, err := stateObj.HashTreeRoot(t.Context()) + nativeRt, nativeErr := nativeState.HashTreeRoot(t.Context()) assert.Equal(t, newErr != nil, err != nil) assert.Equal(t, newErr != nil, nativeErr != nil) if err == nil { @@ -256,11 +255,11 @@ func FuzzCapellaStateHashTreeRoot(f *testing.F) { stateObj, err := native.InitializeFromProtoUnsafeCapella(pbState) assert.NoError(t, err) for stateObj.Slot() < primitives.Slot(slotsToTransition) { - stateObj, err = coreState.ProcessSlots(context.Background(), stateObj, stateObj.Slot()+1) + stateObj, err = coreState.ProcessSlots(t.Context(), 
stateObj, stateObj.Slot()+1) assert.NoError(t, err) stateObj.Copy() - nativeState, err = coreState.ProcessSlots(context.Background(), nativeState, nativeState.Slot()+1) + nativeState, err = coreState.ProcessSlots(t.Context(), nativeState, nativeState.Slot()+1) assert.NoError(t, err) nativeState.Copy() } @@ -271,9 +270,9 @@ func FuzzCapellaStateHashTreeRoot(f *testing.F) { newState, err := native.InitializeFromProtoUnsafeCapella(innerState) assert.NoError(t, err) - newRt, newErr := newState.HashTreeRoot(context.Background()) - rt, err := stateObj.HashTreeRoot(context.Background()) - nativeRt, nativeErr := nativeState.HashTreeRoot(context.Background()) + newRt, newErr := newState.HashTreeRoot(t.Context()) + rt, err := stateObj.HashTreeRoot(t.Context()) + nativeRt, nativeErr := nativeState.HashTreeRoot(t.Context()) assert.Equal(t, newErr != nil, err != nil) assert.Equal(t, newErr != nil, nativeErr != nil) if err == nil { @@ -326,11 +325,11 @@ func FuzzDenebStateHashTreeRoot(f *testing.F) { stateObj, err := native.InitializeFromProtoUnsafeDeneb(pbState) assert.NoError(t, err) for stateObj.Slot() < primitives.Slot(slotsToTransition) { - stateObj, err = coreState.ProcessSlots(context.Background(), stateObj, stateObj.Slot()+1) + stateObj, err = coreState.ProcessSlots(t.Context(), stateObj, stateObj.Slot()+1) assert.NoError(t, err) stateObj.Copy() - nativeState, err = coreState.ProcessSlots(context.Background(), nativeState, nativeState.Slot()+1) + nativeState, err = coreState.ProcessSlots(t.Context(), nativeState, nativeState.Slot()+1) assert.NoError(t, err) nativeState.Copy() } @@ -341,9 +340,9 @@ func FuzzDenebStateHashTreeRoot(f *testing.F) { newState, err := native.InitializeFromProtoUnsafeDeneb(innerState) assert.NoError(t, err) - newRt, newErr := newState.HashTreeRoot(context.Background()) - rt, err := stateObj.HashTreeRoot(context.Background()) - nativeRt, nativeErr := nativeState.HashTreeRoot(context.Background()) + newRt, newErr := 
newState.HashTreeRoot(t.Context()) + rt, err := stateObj.HashTreeRoot(t.Context()) + nativeRt, nativeErr := nativeState.HashTreeRoot(t.Context()) assert.Equal(t, newErr != nil, err != nil) assert.Equal(t, newErr != nil, nativeErr != nil) if err == nil { diff --git a/beacon-chain/state/state-native/state_test.go b/beacon-chain/state/state-native/state_test.go index 48e17db0b5..57f04eef63 100644 --- a/beacon-chain/state/state-native/state_test.go +++ b/beacon-chain/state/state-native/state_test.go @@ -1,7 +1,6 @@ package state_native import ( - "context" "fmt" "strconv" "sync" @@ -305,7 +304,7 @@ func TestBeaconState_AppendBalanceWithTrie(t *testing.T) { newState := generateState(t) st, ok := newState.(*BeaconState) require.Equal(t, true, ok) - _, err := st.HashTreeRoot(context.Background()) + _, err := st.HashTreeRoot(t.Context()) assert.NoError(t, err) for i := 0; i < 100; i++ { @@ -316,7 +315,7 @@ func TestBeaconState_AppendBalanceWithTrie(t *testing.T) { assert.NoError(t, st.AppendBalance(1000)) } } - _, err = st.HashTreeRoot(context.Background()) + _, err = st.HashTreeRoot(t.Context()) assert.NoError(t, err) newRt := bytesutil.ToBytes32(st.merkleLayers[0][types.Balances]) wantedRt, err := stateutil.Uint64ListRootWithRegistryLimit(st.Balances()) @@ -342,13 +341,13 @@ func TestBeaconState_ModifyCurrentParticipationBits(t *testing.T) { func TestCopyAllTries(t *testing.T) { newState := generateState(t) - _, err := newState.HashTreeRoot(context.Background()) + _, err := newState.HashTreeRoot(t.Context()) assert.NoError(t, err) assert.NoError(t, newState.UpdateBalancesAtIndex(0, 10000)) assert.NoError(t, newState.UpdateBlockRootAtIndex(0, [32]byte{'a'})) - _, err = newState.HashTreeRoot(context.Background()) + _, err = newState.HashTreeRoot(t.Context()) assert.NoError(t, err) st, ok := newState.(*BeaconState) @@ -377,7 +376,7 @@ func TestCopyAllTries(t *testing.T) { assert.NoError(t, nState.UpdateBalancesAtIndex(20, 10000)) - _, err = 
nState.HashTreeRoot(context.Background()) + _, err = nState.HashTreeRoot(t.Context()) assert.NoError(t, err) rt, err := st.stateFieldLeaves[types.Balances].TrieRoot() diff --git a/beacon-chain/state/state-native/state_trie_test.go b/beacon-chain/state/state-native/state_trie_test.go index 6766b918f7..e45459727f 100644 --- a/beacon-chain/state/state-native/state_trie_test.go +++ b/beacon-chain/state/state-native/state_trie_test.go @@ -2,7 +2,6 @@ package state_native_test import ( "bytes" - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/state" @@ -484,7 +483,7 @@ func TestBeaconState_HashTreeRoot(t *testing.T) { t.Run(tt.name, func(t *testing.T) { testState, err = tt.stateModify(testState) assert.NoError(t, err) - root, err := testState.HashTreeRoot(context.Background()) + root, err := testState.HashTreeRoot(t.Context()) if err == nil && tt.error != "" { t.Errorf("Expected error, expected %v, received %v", tt.error, err) } @@ -512,7 +511,7 @@ func BenchmarkBeaconState(b *testing.B) { b.Run("Vectorized SHA256", func(b *testing.B) { st, err := statenative.InitializeFromProtoUnsafePhase0(pbState) require.NoError(b, err) - _, err = st.HashTreeRoot(context.Background()) + _, err = st.HashTreeRoot(b.Context()) assert.NoError(b, err) }) @@ -571,7 +570,7 @@ func TestBeaconState_HashTreeRoot_FieldTrie(t *testing.T) { t.Run(tt.name, func(t *testing.T) { testState, err = tt.stateModify(testState) assert.NoError(t, err) - root, err := testState.HashTreeRoot(context.Background()) + root, err := testState.HashTreeRoot(t.Context()) if err == nil && tt.error != "" { t.Errorf("Expected error, expected %v, received %v", tt.error, err) } @@ -611,12 +610,12 @@ func TestBeaconState_ValidatorMutation_Phase0(t *testing.T) { testState, err = statenative.InitializeFromProtoPhase0(pbState) require.NoError(t, err) - _, err = testState.HashTreeRoot(context.Background()) + _, err = testState.HashTreeRoot(t.Context()) require.NoError(t, err) // Reset tries require.NoError(t, 
testState.UpdateValidatorAtIndex(200, new(ethpb.Validator))) - _, err = testState.HashTreeRoot(context.Background()) + _, err = testState.HashTreeRoot(t.Context()) require.NoError(t, err) newState1 := testState.Copy() @@ -633,7 +632,7 @@ func TestBeaconState_ValidatorMutation_Phase0(t *testing.T) { WithdrawableEpoch: 1117, })) - rt, err := testState.HashTreeRoot(context.Background()) + rt, err := testState.HashTreeRoot(t.Context()) require.NoError(t, err) pbState, err = statenative.ProtobufBeaconStatePhase0(testState.ToProtoUnsafe()) require.NoError(t, err) @@ -641,7 +640,7 @@ func TestBeaconState_ValidatorMutation_Phase0(t *testing.T) { copiedTestState, err := statenative.InitializeFromProtoPhase0(pbState) require.NoError(t, err) - rt2, err := copiedTestState.HashTreeRoot(context.Background()) + rt2, err := copiedTestState.HashTreeRoot(t.Context()) require.NoError(t, err) assert.Equal(t, rt, rt2) @@ -657,7 +656,7 @@ func TestBeaconState_ValidatorMutation_Phase0(t *testing.T) { WithdrawableEpoch: 2117, })) - rt, err = newState1.HashTreeRoot(context.Background()) + rt, err = newState1.HashTreeRoot(t.Context()) require.NoError(t, err) pbState, err = statenative.ProtobufBeaconStatePhase0(newState1.ToProtoUnsafe()) require.NoError(t, err) @@ -665,7 +664,7 @@ func TestBeaconState_ValidatorMutation_Phase0(t *testing.T) { copiedTestState, err = statenative.InitializeFromProtoPhase0(pbState) require.NoError(t, err) - rt2, err = copiedTestState.HashTreeRoot(context.Background()) + rt2, err = copiedTestState.HashTreeRoot(t.Context()) require.NoError(t, err) assert.Equal(t, rt, rt2) @@ -678,12 +677,12 @@ func TestBeaconState_ValidatorMutation_Altair(t *testing.T) { testState, err = statenative.InitializeFromProtoAltair(pbState) require.NoError(t, err) - _, err = testState.HashTreeRoot(context.Background()) + _, err = testState.HashTreeRoot(t.Context()) require.NoError(t, err) // Reset tries require.NoError(t, testState.UpdateValidatorAtIndex(200, new(ethpb.Validator))) - _, 
err = testState.HashTreeRoot(context.Background()) + _, err = testState.HashTreeRoot(t.Context()) require.NoError(t, err) newState1 := testState.Copy() @@ -700,7 +699,7 @@ func TestBeaconState_ValidatorMutation_Altair(t *testing.T) { WithdrawableEpoch: 1117, })) - rt, err := testState.HashTreeRoot(context.Background()) + rt, err := testState.HashTreeRoot(t.Context()) require.NoError(t, err) pbState, err = statenative.ProtobufBeaconStateAltair(testState.ToProtoUnsafe()) require.NoError(t, err) @@ -708,7 +707,7 @@ func TestBeaconState_ValidatorMutation_Altair(t *testing.T) { copiedTestState, err := statenative.InitializeFromProtoAltair(pbState) require.NoError(t, err) - rt2, err := copiedTestState.HashTreeRoot(context.Background()) + rt2, err := copiedTestState.HashTreeRoot(t.Context()) require.NoError(t, err) assert.Equal(t, rt, rt2) @@ -724,7 +723,7 @@ func TestBeaconState_ValidatorMutation_Altair(t *testing.T) { WithdrawableEpoch: 2117, })) - rt, err = newState1.HashTreeRoot(context.Background()) + rt, err = newState1.HashTreeRoot(t.Context()) require.NoError(t, err) pbState, err = statenative.ProtobufBeaconStateAltair(newState1.ToProtoUnsafe()) require.NoError(t, err) @@ -732,7 +731,7 @@ func TestBeaconState_ValidatorMutation_Altair(t *testing.T) { copiedTestState, err = statenative.InitializeFromProtoAltair(pbState) require.NoError(t, err) - rt2, err = copiedTestState.HashTreeRoot(context.Background()) + rt2, err = copiedTestState.HashTreeRoot(t.Context()) require.NoError(t, err) assert.Equal(t, rt, rt2) @@ -745,12 +744,12 @@ func TestBeaconState_ValidatorMutation_Bellatrix(t *testing.T) { testState, err = statenative.InitializeFromProtoBellatrix(pbState) require.NoError(t, err) - _, err = testState.HashTreeRoot(context.Background()) + _, err = testState.HashTreeRoot(t.Context()) require.NoError(t, err) // Reset tries require.NoError(t, testState.UpdateValidatorAtIndex(200, new(ethpb.Validator))) - _, err = testState.HashTreeRoot(context.Background()) + _, err = 
testState.HashTreeRoot(t.Context()) require.NoError(t, err) newState1 := testState.Copy() @@ -767,7 +766,7 @@ func TestBeaconState_ValidatorMutation_Bellatrix(t *testing.T) { WithdrawableEpoch: 1117, })) - rt, err := testState.HashTreeRoot(context.Background()) + rt, err := testState.HashTreeRoot(t.Context()) require.NoError(t, err) pbState, err = statenative.ProtobufBeaconStateBellatrix(testState.ToProtoUnsafe()) require.NoError(t, err) @@ -775,7 +774,7 @@ func TestBeaconState_ValidatorMutation_Bellatrix(t *testing.T) { copiedTestState, err := statenative.InitializeFromProtoBellatrix(pbState) require.NoError(t, err) - rt2, err := copiedTestState.HashTreeRoot(context.Background()) + rt2, err := copiedTestState.HashTreeRoot(t.Context()) require.NoError(t, err) assert.Equal(t, rt, rt2) @@ -791,7 +790,7 @@ func TestBeaconState_ValidatorMutation_Bellatrix(t *testing.T) { WithdrawableEpoch: 2117, })) - rt, err = newState1.HashTreeRoot(context.Background()) + rt, err = newState1.HashTreeRoot(t.Context()) require.NoError(t, err) pbState, err = statenative.ProtobufBeaconStateBellatrix(newState1.ToProtoUnsafe()) require.NoError(t, err) @@ -799,7 +798,7 @@ func TestBeaconState_ValidatorMutation_Bellatrix(t *testing.T) { copiedTestState, err = statenative.InitializeFromProtoBellatrix(pbState) require.NoError(t, err) - rt2, err = copiedTestState.HashTreeRoot(context.Background()) + rt2, err = copiedTestState.HashTreeRoot(t.Context()) require.NoError(t, err) assert.Equal(t, rt, rt2) @@ -811,7 +810,7 @@ func TestBeaconState_InitializeInactivityScoresCorrectly_Deneb(t *testing.T) { }) defer resetCfg() st, _ := util.DeterministicGenesisStateDeneb(t, 200) - _, err := st.HashTreeRoot(context.Background()) + _, err := st.HashTreeRoot(t.Context()) require.NoError(t, err) ic, err := st.InactivityScores() require.NoError(t, err) @@ -821,13 +820,13 @@ func TestBeaconState_InitializeInactivityScoresCorrectly_Deneb(t *testing.T) { err = st.SetInactivityScores(ic) require.NoError(t, err) - 
_, err = st.HashTreeRoot(context.Background()) + _, err = st.HashTreeRoot(t.Context()) require.NoError(t, err) ic[150] = 2390239 err = st.SetInactivityScores(ic) require.NoError(t, err) - rt, err := st.HashTreeRoot(context.Background()) + rt, err := st.HashTreeRoot(t.Context()) require.NoError(t, err) copiedSt, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateDeneb) @@ -837,7 +836,7 @@ func TestBeaconState_InitializeInactivityScoresCorrectly_Deneb(t *testing.T) { newSt, err := statenative.InitializeFromProtoUnsafeDeneb(copiedSt) require.NoError(t, err) - newRt, err := newSt.HashTreeRoot(context.Background()) + newRt, err := newSt.HashTreeRoot(t.Context()) require.NoError(t, err) require.DeepSSZEqual(t, rt, newRt) diff --git a/beacon-chain/state/state-native/types_test.go b/beacon-chain/state/state-native/types_test.go index ee1e8ed77f..accf2ed18e 100644 --- a/beacon-chain/state/state-native/types_test.go +++ b/beacon-chain/state/state-native/types_test.go @@ -1,7 +1,6 @@ package state_native_test import ( - "context" "reflect" "strconv" "testing" @@ -20,7 +19,7 @@ import ( func TestBeaconState_ProtoBeaconStateCompatibility(t *testing.T) { params.SetupTestConfigCleanup(t) - ctx := context.Background() + ctx := t.Context() genesis := setupGenesisState(t, 64) customState, err := statenative.InitializeFromProtoPhase0(genesis) require.NoError(t, err) @@ -33,7 +32,7 @@ func TestBeaconState_ProtoBeaconStateCompatibility(t *testing.T) { require.NoError(t, err) beaconState, err := statenative.InitializeFromProtoPhase0(genesis) require.NoError(t, err) - r2, err := beaconState.HashTreeRoot(context.Background()) + r2, err := beaconState.HashTreeRoot(t.Context()) require.NoError(t, err) assert.Equal(t, r1, r2, "Mismatched roots") @@ -46,14 +45,14 @@ func TestBeaconState_ProtoBeaconStateCompatibility(t *testing.T) { genesis.Balances = balances beaconState, err = statenative.InitializeFromProtoPhase0(genesis) require.NoError(t, err) - r2, err = 
beaconState.HashTreeRoot(context.Background()) + r2, err = beaconState.HashTreeRoot(t.Context()) require.NoError(t, err) assert.Equal(t, r1, r2, "Mismatched roots") } -func setupGenesisState(tb testing.TB, count uint64) *ethpb.BeaconState { - genesisState, _, err := interop.GenerateGenesisState(context.Background(), 0, count) - require.NoError(tb, err, "Could not generate genesis beacon state") +func setupGenesisState(t testing.TB, count uint64) *ethpb.BeaconState { + genesisState, _, err := interop.GenerateGenesisState(t.Context(), 0, count) + require.NoError(t, err, "Could not generate genesis beacon state") for i := uint64(1); i < count; i++ { var someRoot [32]byte var someKey [fieldparams.BLSPubkeyLength]byte diff --git a/beacon-chain/state/stategen/getter_test.go b/beacon-chain/state/stategen/getter_test.go index f502a2b36b..e73f3bd6a0 100644 --- a/beacon-chain/state/stategen/getter_test.go +++ b/beacon-chain/state/stategen/getter_test.go @@ -21,7 +21,7 @@ import ( ) func TestStateByRoot_GenesisState(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -38,7 +38,7 @@ func TestStateByRoot_GenesisState(t *testing.T) { } func TestStateByRoot_ColdState(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -77,7 +77,7 @@ func TestStateByRoot_ColdState(t *testing.T) { } func TestStateByRootIfCachedNoCopy_HotState(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -92,7 +92,7 @@ func TestStateByRootIfCachedNoCopy_HotState(t *testing.T) { } func TestStateByRootIfCachedNoCopy_ColdState(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -115,7 +115,7 @@ func 
TestStateByRootIfCachedNoCopy_ColdState(t *testing.T) { } func TestDeleteStateFromCaches(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -186,7 +186,7 @@ type testSetupSlots struct { } func TestLoadStateByRoot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() persistEpochBoundary := func(r testChain, slot primitives.Slot) { require.NoError(t, r.srv.epochBoundaryStateCache.put(r.blockRoot(r.t, slot), r.state(t, slot))) } @@ -401,9 +401,9 @@ func TestLoadStateByRoot(t *testing.T) { require.NoError(t, err) // make state at slot 10 by transitioning a copy of st9 with ib10 (aka blk10) - st10, err = executeStateTransitionStateGen(context.Background(), st10, ib10) + st10, err = executeStateTransitionStateGen(t.Context(), st10, ib10) require.NoError(t, err) - st10Root, err := st10.HashTreeRoot(context.Background()) + st10Root, err := st10.HashTreeRoot(t.Context()) require.NoError(t, err) // update state root for block 10 now that its been through stf blk10.Block.StateRoot = st10Root[:] @@ -416,7 +416,7 @@ func TestLoadStateByRoot(t *testing.T) { blk11 := util.NewBeaconBlock() blk11.Block.Slot = 11 blk11.Block.ParentRoot = rob10.RootSlice() - idx11, err := helpers.BeaconProposerIndexAtSlot(context.Background(), st10, blk11.Block.Slot) + idx11, err := helpers.BeaconProposerIndexAtSlot(t.Context(), st10, blk11.Block.Slot) require.NoError(t, err) blk11.Block.ProposerIndex = idx11 ib11, err := blt.NewSignedBeaconBlock(blk11) @@ -424,9 +424,9 @@ func TestLoadStateByRoot(t *testing.T) { // same steps as 9->10; stf 10->11, then block update st11 := st10.Copy() - st11, err = executeStateTransitionStateGen(context.Background(), st11, ib11) + st11, err = executeStateTransitionStateGen(t.Context(), st11, ib11) require.NoError(t, err) - st11Root, err := st11.HashTreeRoot(context.Background()) + st11Root, err := st11.HashTreeRoot(t.Context()) require.NoError(t, err) // 
update state root for block 11 now that its been through stf blk11.Block.StateRoot = st11Root[:] @@ -489,7 +489,7 @@ func TestLoadStateByRoot(t *testing.T) { } func TestLastAncestorState_CanGetUsingDB(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -529,7 +529,7 @@ func TestLastAncestorState_CanGetUsingDB(t *testing.T) { } func TestLastAncestorState_CanGetUsingCache(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -569,7 +569,7 @@ func TestLastAncestorState_CanGetUsingCache(t *testing.T) { } func TestState_HasState(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) s, err := util.NewBeaconState() @@ -601,7 +601,7 @@ func TestState_HasState(t *testing.T) { } func TestState_HasStateInCache(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) s, err := util.NewBeaconState() diff --git a/beacon-chain/state/stategen/history_test.go b/beacon-chain/state/stategen/history_test.go index 9e7d7d9909..94d798e42e 100644 --- a/beacon-chain/state/stategen/history_test.go +++ b/beacon-chain/state/stategen/history_test.go @@ -18,7 +18,7 @@ func TestBlockForSlotFuture(t *testing.T) { ch := &CanonicalHistory{ cs: &mockCurrentSlotter{Slot: 0}, } - _, err := ch.BlockRootForSlot(context.Background(), 1) + _, err := ch.BlockRootForSlot(t.Context(), 1) require.ErrorIs(t, err, ErrFutureSlotRequested) } @@ -26,7 +26,7 @@ func TestChainForSlotFuture(t *testing.T) { ch := &CanonicalHistory{ cs: &mockCurrentSlotter{Slot: 0}, } - _, _, err := ch.chainForSlot(context.Background(), 1) + _, _, err := ch.chainForSlot(t.Context(), 1) require.ErrorIs(t, err, ErrFutureSlotRequested) } @@ -88,7 +88,7 @@ func 
TestBestForSlot(t *testing.T) { chk = c.cc } ch := &CanonicalHistory{cc: chk} - r, err := ch.bestForSlot(context.Background(), c.roots) + r, err := ch.bestForSlot(t.Context(), c.roots) if c.err == nil { require.NoError(t, err) require.Equal(t, c.root, r) @@ -101,7 +101,7 @@ func TestBestForSlot(t *testing.T) { // happy path tests func TestCanonicalBlockForSlotHappy(t *testing.T) { - ctx := context.Background() + ctx := t.Context() var begin, middle, end primitives.Slot = 100, 150, 155 specs := []mockHistorySpec{ {slot: begin}, @@ -142,7 +142,7 @@ func TestCanonicalBlockForSlotHappy(t *testing.T) { } func TestCanonicalBlockForSlotNonHappy(t *testing.T) { - ctx := context.Background() + ctx := t.Context() var begin, middle, end primitives.Slot = 100, 150, 155 specs := []mockHistorySpec{ {slot: begin}, @@ -271,7 +271,7 @@ func (c *mockCurrentSlotter) CurrentSlot() primitives.Slot { var _ CurrentSlotter = &mockCurrentSlotter{} func TestAncestorChainCache(t *testing.T) { - ctx := context.Background() + ctx := t.Context() var begin, middle, end primitives.Slot = 100, 150, 155 specs := []mockHistorySpec{ {slot: begin, canonicalBlock: true}, @@ -344,7 +344,7 @@ func TestAncestorChainCache(t *testing.T) { } func TestAncestorChainOK(t *testing.T) { - ctx := context.Background() + ctx := t.Context() var begin, middle, end primitives.Slot = 100, 150, 155 specs := []mockHistorySpec{ {slot: begin}, @@ -377,7 +377,7 @@ func TestAncestorChainOK(t *testing.T) { } func TestChainForSlot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() var zero, one, two, three primitives.Slot = 50, 51, 150, 151 specs := []mockHistorySpec{ {slot: zero, canonicalBlock: true, savedState: true}, @@ -446,7 +446,7 @@ func TestChainForSlot(t *testing.T) { } func TestAncestorChainOrdering(t *testing.T) { - ctx := context.Background() + ctx := t.Context() var zero, one, two, three, four, five primitives.Slot = 50, 51, 150, 151, 152, 200 specs := []mockHistorySpec{ {slot: zero}, diff --git 
a/beacon-chain/state/stategen/migrate_test.go b/beacon-chain/state/stategen/migrate_test.go index 2eecfc9e32..d8878c1c5b 100644 --- a/beacon-chain/state/stategen/migrate_test.go +++ b/beacon-chain/state/stategen/migrate_test.go @@ -1,7 +1,6 @@ package stategen import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks" @@ -17,7 +16,7 @@ import ( ) func TestMigrateToCold_CanSaveFinalizedInfo(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) beaconState, _ := util.DeterministicGenesisState(t, 32) @@ -41,7 +40,7 @@ func TestMigrateToCold_CanSaveFinalizedInfo(t *testing.T) { func TestMigrateToCold_HappyPath(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -71,7 +70,7 @@ func TestMigrateToCold_HappyPath(t *testing.T) { func TestMigrateToCold_RegeneratePath(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -121,7 +120,7 @@ func TestMigrateToCold_RegeneratePath(t *testing.T) { func TestMigrateToCold_StateExistsInDB(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -145,7 +144,7 @@ func TestMigrateToCold_StateExistsInDB(t *testing.T) { func TestMigrateToCold_ParallelCalls(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) diff --git a/beacon-chain/state/stategen/mock_test.go b/beacon-chain/state/stategen/mock_test.go index cfce6ed522..e3d9f25bbf 100644 --- a/beacon-chain/state/stategen/mock_test.go +++ b/beacon-chain/state/stategen/mock_test.go @@ -21,7 
+21,7 @@ import ( ) func TestMockHistoryStates(t *testing.T) { - ctx := context.Background() + ctx := t.Context() var begin, middle, end primitives.Slot = 100, 150, 155 specs := []mockHistorySpec{ {slot: begin}, @@ -60,7 +60,7 @@ func TestMockHistoryStates(t *testing.T) { } func TestMockHistoryParentRoot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() var begin, middle, end primitives.Slot = 100, 150, 155 specs := []mockHistorySpec{ {slot: begin}, @@ -108,9 +108,9 @@ func (m slotList) Swap(i, j int) { var errFallThroughOverride = errors.New("override yielding control back to real HighestRootsBelowSlot") -func (m *mockHistory) HighestRootsBelowSlot(_ context.Context, slot primitives.Slot) (primitives.Slot, [][32]byte, error) { +func (m *mockHistory) HighestRootsBelowSlot(ctx context.Context, slot primitives.Slot) (primitives.Slot, [][32]byte, error) { if m.overrideHighestSlotBlocksBelow != nil { - s, r, err := m.overrideHighestSlotBlocksBelow(context.Background(), slot) + s, r, err := m.overrideHighestSlotBlocksBelow(ctx, slot) if !errors.Is(err, errFallThroughOverride) { return s, r, err } @@ -196,7 +196,7 @@ func (h *mockHistory) validateRoots() error { } func newMockHistory(t *testing.T, hist []mockHistorySpec, current primitives.Slot) *mockHistory { - ctx := context.Background() + ctx := t.Context() mh := &mockHistory{ blocks: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{}, canonical: map[[32]byte]bool{}, diff --git a/beacon-chain/state/stategen/replay_test.go b/beacon-chain/state/stategen/replay_test.go index 01957cd9b4..8361cbed8b 100644 --- a/beacon-chain/state/stategen/replay_test.go +++ b/beacon-chain/state/stategen/replay_test.go @@ -1,7 +1,6 @@ package stategen import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks" @@ -49,7 +48,7 @@ func TestReplayBlocks_AllSkipSlots(t *testing.T) { service := New(beaconDB, doublylinkedtree.New()) targetSlot := params.BeaconConfig().SlotsPerEpoch - 1 - newState, 
err := service.replayBlocks(context.Background(), beaconState, []interfaces.ReadOnlySignedBeaconBlock{}, targetSlot) + newState, err := service.replayBlocks(t.Context(), beaconState, []interfaces.ReadOnlySignedBeaconBlock{}, targetSlot) require.NoError(t, err) assert.Equal(t, targetSlot, newState.Slot(), "Did not advance slots") } @@ -78,7 +77,7 @@ func TestReplayBlocks_SameSlot(t *testing.T) { service := New(beaconDB, doublylinkedtree.New()) targetSlot := beaconState.Slot() - newState, err := service.replayBlocks(context.Background(), beaconState, []interfaces.ReadOnlySignedBeaconBlock{}, targetSlot) + newState, err := service.replayBlocks(t.Context(), beaconState, []interfaces.ReadOnlySignedBeaconBlock{}, targetSlot) require.NoError(t, err) assert.Equal(t, targetSlot, newState.Slot(), "Did not advance slots") } @@ -112,7 +111,7 @@ func TestReplayBlocks_LowerSlotBlock(t *testing.T) { b.Block.Slot = beaconState.Slot() - 1 wsb, err := consensusblocks.NewSignedBeaconBlock(b) require.NoError(t, err) - newState, err := service.replayBlocks(context.Background(), beaconState, []interfaces.ReadOnlySignedBeaconBlock{wsb}, targetSlot) + newState, err := service.replayBlocks(t.Context(), beaconState, []interfaces.ReadOnlySignedBeaconBlock{wsb}, targetSlot) require.NoError(t, err) assert.Equal(t, targetSlot, newState.Slot(), "Did not advance slots") } @@ -138,7 +137,7 @@ func TestReplayBlocks_ThroughForkBoundary(t *testing.T) { service := New(testDB.SetupDB(t), doublylinkedtree.New()) targetSlot := params.BeaconConfig().SlotsPerEpoch - newState, err := service.replayBlocks(context.Background(), beaconState, []interfaces.ReadOnlySignedBeaconBlock{}, targetSlot) + newState, err := service.replayBlocks(t.Context(), beaconState, []interfaces.ReadOnlySignedBeaconBlock{}, targetSlot) require.NoError(t, err) // Verify state is version Altair. 
@@ -176,28 +175,28 @@ func TestReplayBlocks_ThroughFutureForkBoundaries(t *testing.T) { service := New(testDB.SetupDB(t), doublylinkedtree.New()) targetSlot := params.BeaconConfig().SlotsPerEpoch * 2 - newState, err := service.replayBlocks(context.Background(), beaconState, []interfaces.ReadOnlySignedBeaconBlock{}, targetSlot) + newState, err := service.replayBlocks(t.Context(), beaconState, []interfaces.ReadOnlySignedBeaconBlock{}, targetSlot) require.NoError(t, err) // Verify state is version Bellatrix. assert.Equal(t, version.Bellatrix, newState.Version()) targetSlot = params.BeaconConfig().SlotsPerEpoch * 3 - newState, err = service.replayBlocks(context.Background(), newState, []interfaces.ReadOnlySignedBeaconBlock{}, targetSlot) + newState, err = service.replayBlocks(t.Context(), newState, []interfaces.ReadOnlySignedBeaconBlock{}, targetSlot) require.NoError(t, err) // Verify state is version Capella. assert.Equal(t, version.Capella, newState.Version()) targetSlot = params.BeaconConfig().SlotsPerEpoch * 4 - newState, err = service.replayBlocks(context.Background(), newState, []interfaces.ReadOnlySignedBeaconBlock{}, targetSlot) + newState, err = service.replayBlocks(t.Context(), newState, []interfaces.ReadOnlySignedBeaconBlock{}, targetSlot) require.NoError(t, err) // Verify state is version Deneb. assert.Equal(t, version.Deneb, newState.Version()) targetSlot = params.BeaconConfig().SlotsPerEpoch * 5 - newState, err = service.replayBlocks(context.Background(), newState, []interfaces.ReadOnlySignedBeaconBlock{}, targetSlot) + newState, err = service.replayBlocks(t.Context(), newState, []interfaces.ReadOnlySignedBeaconBlock{}, targetSlot) require.NoError(t, err) // Verify state is version Electra. 
@@ -251,7 +250,7 @@ func TestReplayBlocks_ProcessEpoch_Electra(t *testing.T) { require.Equal(t, params.BeaconConfig().MinActivationBalance, beaconState.Balances()[0]) service := New(testDB.SetupDB(t), doublylinkedtree.New()) targetSlot := (params.BeaconConfig().SlotsPerEpoch * 2) - 1 - newState, err := service.replayBlocks(context.Background(), beaconState, []interfaces.ReadOnlySignedBeaconBlock{}, targetSlot) + newState, err := service.replayBlocks(t.Context(), beaconState, []interfaces.ReadOnlySignedBeaconBlock{}, targetSlot) require.NoError(t, err) require.Equal(t, version.Electra, newState.Version()) @@ -268,7 +267,7 @@ func TestReplayBlocks_ProcessEpoch_Electra(t *testing.T) { func TestLoadBlocks_FirstBranch(t *testing.T) { beaconDB := testDB.SetupDB(t) - ctx := context.Background() + ctx := t.Context() s := &State{ beaconDB: beaconDB, } @@ -300,7 +299,7 @@ func TestLoadBlocks_FirstBranch(t *testing.T) { func TestLoadBlocks_SecondBranch(t *testing.T) { beaconDB := testDB.SetupDB(t) - ctx := context.Background() + ctx := t.Context() s := &State{ beaconDB: beaconDB, } @@ -329,7 +328,7 @@ func TestLoadBlocks_SecondBranch(t *testing.T) { func TestLoadBlocks_ThirdBranch(t *testing.T) { beaconDB := testDB.SetupDB(t) - ctx := context.Background() + ctx := t.Context() s := &State{ beaconDB: beaconDB, } @@ -362,7 +361,7 @@ func TestLoadBlocks_ThirdBranch(t *testing.T) { func TestLoadBlocks_SameSlots(t *testing.T) { beaconDB := testDB.SetupDB(t) - ctx := context.Background() + ctx := t.Context() s := &State{ beaconDB: beaconDB, } @@ -392,7 +391,7 @@ func TestLoadBlocks_SameSlots(t *testing.T) { func TestLoadBlocks_SameEndSlots(t *testing.T) { beaconDB := testDB.SetupDB(t) - ctx := context.Background() + ctx := t.Context() s := &State{ beaconDB: beaconDB, } @@ -421,7 +420,7 @@ func TestLoadBlocks_SameEndSlots(t *testing.T) { func TestLoadBlocks_SameEndSlotsWith2blocks(t *testing.T) { beaconDB := testDB.SetupDB(t) - ctx := context.Background() + ctx := t.Context() s := 
&State{ beaconDB: beaconDB, } @@ -526,10 +525,10 @@ func tree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32) wsb, err := consensusblocks.NewSignedBeaconBlock(beaconBlock) require.NoError(t, err) - if err := beaconDB.SaveBlock(context.Background(), wsb); err != nil { + if err := beaconDB.SaveBlock(t.Context(), wsb); err != nil { return nil, nil, err } - if err := beaconDB.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil { + if err := beaconDB.SaveState(t.Context(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil { return nil, nil, err } returnedBlocks = append(returnedBlocks, beaconBlock) @@ -609,10 +608,10 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, beaconBlock.Block.StateRoot = bytesutil.PadTo(b.Block.StateRoot, 32) wsb, err := consensusblocks.NewSignedBeaconBlock(beaconBlock) require.NoError(t, err) - if err := beaconDB.SaveBlock(context.Background(), wsb); err != nil { + if err := beaconDB.SaveBlock(t.Context(), wsb); err != nil { return nil, nil, err } - if err := beaconDB.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil { + if err := beaconDB.SaveState(t.Context(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil { return nil, nil, err } returnedBlocks = append(returnedBlocks, beaconBlock) @@ -685,10 +684,10 @@ func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, beaconBlock.Block.StateRoot = bytesutil.PadTo(b.Block.StateRoot, 32) wsb, err := consensusblocks.NewSignedBeaconBlock(beaconBlock) require.NoError(t, err) - if err := beaconDB.SaveBlock(context.Background(), wsb); err != nil { + if err := beaconDB.SaveBlock(t.Context(), wsb); err != nil { return nil, nil, err } - if err := beaconDB.SaveState(context.Background(), st.Copy(), 
bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil { + if err := beaconDB.SaveState(t.Context(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil { return nil, nil, err } returnedBlocks = append(returnedBlocks, beaconBlock) @@ -755,10 +754,10 @@ func tree4(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, beaconBlock.Block.StateRoot = bytesutil.PadTo(b.Block.StateRoot, 32) wsb, err := consensusblocks.NewSignedBeaconBlock(beaconBlock) require.NoError(t, err) - if err := beaconDB.SaveBlock(context.Background(), wsb); err != nil { + if err := beaconDB.SaveBlock(t.Context(), wsb); err != nil { return nil, nil, err } - if err := beaconDB.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil { + if err := beaconDB.SaveState(t.Context(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil { return nil, nil, err } returnedBlocks = append(returnedBlocks, beaconBlock) @@ -769,7 +768,7 @@ func tree4(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, func TestLoadFinalizedBlocks(t *testing.T) { beaconDB := testDB.SetupDB(t) - ctx := context.Background() + ctx := t.Context() s := &State{ beaconDB: beaconDB, } diff --git a/beacon-chain/state/stategen/replayer_test.go b/beacon-chain/state/stategen/replayer_test.go index 10c4dacb9d..4bbabfe395 100644 --- a/beacon-chain/state/stategen/replayer_test.go +++ b/beacon-chain/state/stategen/replayer_test.go @@ -1,7 +1,6 @@ package stategen import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces" @@ -30,7 +29,7 @@ func headerFromBlock(b interfaces.ReadOnlySignedBeaconBlock) (*ethpb.BeaconBlock func TestReplayBlocks_ZeroDiff(t *testing.T) { logHook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() specs := []mockHistorySpec{{slot: 0}} hist := newMockHistory(t, specs, 0) ch := NewCanonicalHistory(hist, hist, hist) @@ -40,7 +39,7 @@ func 
TestReplayBlocks_ZeroDiff(t *testing.T) { } func TestReplayBlocks(t *testing.T) { - ctx := context.Background() + ctx := t.Context() var zero, one, two, three, four, five primitives.Slot = 50, 51, 150, 151, 152, 200 specs := []mockHistorySpec{ {slot: zero}, @@ -89,7 +88,7 @@ func TestReplayBlocks(t *testing.T) { } func TestReplayToSlot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() var zero, one, two, three, four, five primitives.Slot = 50, 51, 150, 151, 152, 200 specs := []mockHistorySpec{ {slot: zero}, diff --git a/beacon-chain/state/stategen/service_test.go b/beacon-chain/state/stategen/service_test.go index 2efd4e6eaa..98f060f185 100644 --- a/beacon-chain/state/stategen/service_test.go +++ b/beacon-chain/state/stategen/service_test.go @@ -1,7 +1,6 @@ package stategen import ( - "context" "testing" testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing" @@ -14,7 +13,7 @@ import ( ) func TestResume(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) diff --git a/beacon-chain/state/stategen/setter_test.go b/beacon-chain/state/stategen/setter_test.go index c669251753..9d320ba8ae 100644 --- a/beacon-chain/state/stategen/setter_test.go +++ b/beacon-chain/state/stategen/setter_test.go @@ -1,7 +1,6 @@ package stategen import ( - "context" "testing" testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing" @@ -15,7 +14,7 @@ import ( ) func TestSaveState_HotStateCanBeSaved(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -36,7 +35,7 @@ func TestSaveState_HotStateCanBeSaved(t *testing.T) { func TestSaveState_HotStateCached(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -56,7 +55,7 @@ func 
TestSaveState_HotStateCached(t *testing.T) { } func TestState_ForceCheckpoint_SavesStateToDatabase(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) svc := New(beaconDB, doublylinkedtree.New()) @@ -76,7 +75,7 @@ func TestState_ForceCheckpoint_SavesStateToDatabase(t *testing.T) { func TestSaveState_Alreadyhas(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -95,7 +94,7 @@ func TestSaveState_Alreadyhas(t *testing.T) { } func TestSaveState_CanSaveOnEpochBoundary(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -116,7 +115,7 @@ func TestSaveState_CanSaveOnEpochBoundary(t *testing.T) { func TestSaveState_NoSaveNotEpochBoundary(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -139,7 +138,7 @@ func TestSaveState_NoSaveNotEpochBoundary(t *testing.T) { } func TestSaveState_RecoverForEpochBoundary(t *testing.T) { - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -168,7 +167,7 @@ func TestSaveState_RecoverForEpochBoundary(t *testing.T) { func TestSaveState_CanSaveHotStateToDB(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) service.EnableSaveHotStateToDB(ctx) @@ -185,7 +184,7 @@ func TestSaveState_CanSaveHotStateToDB(t *testing.T) { func TestEnableSaveHotStateToDB_Enabled(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) @@ -196,7 +195,7 @@ func 
TestEnableSaveHotStateToDB_Enabled(t *testing.T) { func TestEnableSaveHotStateToDB_AlreadyEnabled(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) service.saveHotStateDB.enabled = true @@ -207,7 +206,7 @@ func TestEnableSaveHotStateToDB_AlreadyEnabled(t *testing.T) { func TestEnableSaveHotStateToDB_Disabled(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) service.saveHotStateDB.enabled = true @@ -224,7 +223,7 @@ func TestEnableSaveHotStateToDB_Disabled(t *testing.T) { func TestEnableSaveHotStateToDB_AlreadyDisabled(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() beaconDB := testDB.SetupDB(t) service := New(beaconDB, doublylinkedtree.New()) require.NoError(t, service.DisableSaveHotStateToDB(ctx)) diff --git a/beacon-chain/state/stateutil/state_root_test.go b/beacon-chain/state/stateutil/state_root_test.go index 37e6b8f3b6..88f93f799f 100644 --- a/beacon-chain/state/stateutil/state_root_test.go +++ b/beacon-chain/state/stateutil/state_root_test.go @@ -1,7 +1,6 @@ package stateutil_test import ( - "context" "reflect" "strconv" "testing" @@ -59,9 +58,9 @@ func BenchmarkHashTreeRoot_Generic_300000(b *testing.B) { } } -func setupGenesisState(tb testing.TB, count uint64) *ethpb.BeaconState { - genesisState, _, err := interop.GenerateGenesisState(context.Background(), 0, 1) - require.NoError(tb, err, "Could not generate genesis beacon state") +func setupGenesisState(t testing.TB, count uint64) *ethpb.BeaconState { + genesisState, _, err := interop.GenerateGenesisState(t.Context(), 0, 1) + require.NoError(t, err, "Could not generate genesis beacon state") for i := uint64(1); i < count; i++ { var someRoot [32]byte var someKey [fieldparams.BLSPubkeyLength]byte diff --git 
a/beacon-chain/sync/backfill/batch_test.go b/beacon-chain/sync/backfill/batch_test.go index ad25c4d2fb..01f9b3c1cd 100644 --- a/beacon-chain/sync/backfill/batch_test.go +++ b/beacon-chain/sync/backfill/batch_test.go @@ -35,7 +35,7 @@ func TestWaitUntilReady(t *testing.T) { } // retries counter and timestamp are set when we mark the batch for sequencing, if it is in the retry state b = b.withState(batchSequenced) - require.ErrorIs(t, b.waitUntilReady(context.Background()), errDerp) + require.ErrorIs(t, b.waitUntilReady(t.Context()), errDerp) require.Equal(t, true, retryDelay-time.Until(b.retryAfter) < time.Millisecond) require.Equal(t, true, got < retryDelay && got > retryDelay-time.Millisecond) require.Equal(t, 1, b.retries) diff --git a/beacon-chain/sync/backfill/pool_test.go b/beacon-chain/sync/backfill/pool_test.go index 586c69c6bc..e5df3fbd67 100644 --- a/beacon-chain/sync/backfill/pool_test.go +++ b/beacon-chain/sync/backfill/pool_test.go @@ -40,7 +40,7 @@ func mockNewBlobVerifier(_ blocks.ROBlob, _ []verification.Requirement) verifica func TestPoolDetectAllEnded(t *testing.T) { nw := 5 p2p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() ma := &mockAssigner{} pool := newP2PBatchWorkerPool(p2p, nw) st, err := util.NewBeaconState() diff --git a/beacon-chain/sync/backfill/service_test.go b/beacon-chain/sync/backfill/service_test.go index f85a2333d2..d6ba959837 100644 --- a/beacon-chain/sync/backfill/service_test.go +++ b/beacon-chain/sync/backfill/service_test.go @@ -34,7 +34,7 @@ func (*mockInitalizerWaiter) WaitForInitializer(_ context.Context) (*verificatio } func TestServiceInit(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*300) + ctx, cancel := context.WithTimeout(t.Context(), time.Second*300) defer cancel() db := &mockBackfillDB{} su, err := NewUpdater(ctx, db) diff --git a/beacon-chain/sync/backfill/status_test.go b/beacon-chain/sync/backfill/status_test.go index be988c69ed..70280b1998 
100644 --- a/beacon-chain/sync/backfill/status_test.go +++ b/beacon-chain/sync/backfill/status_test.go @@ -130,7 +130,7 @@ func TestSlotCovered(t *testing.T) { } func TestStatusUpdater_FillBack(t *testing.T) { - ctx := context.Background() + ctx := t.Context() mdb := &mockBackfillDB{} b, err := setupTestBlock(90) require.NoError(t, err) @@ -159,7 +159,7 @@ func setupTestBlock(slot primitives.Slot) (interfaces.ReadOnlySignedBeaconBlock, } func TestNewUpdater(t *testing.T) { - ctx := context.Background() + ctx := t.Context() originSlot := primitives.Slot(100) var originRoot [32]byte diff --git a/beacon-chain/sync/batch_verifier_test.go b/beacon-chain/sync/batch_verifier_test.go index d068fe3fd2..23f8478db5 100644 --- a/beacon-chain/sync/batch_verifier_test.go +++ b/beacon-chain/sync/batch_verifier_test.go @@ -64,7 +64,7 @@ func TestValidateWithBatchVerifier(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) svc := &Service{ ctx: ctx, cancel: cancel, @@ -74,7 +74,7 @@ func TestValidateWithBatchVerifier(t *testing.T) { for _, st := range tt.preFilledSets { svc.signatureChan <- &signatureVerifier{set: st, resChan: make(chan error, 10)} } - got, err := svc.validateWithBatchVerifier(context.Background(), tt.message, tt.set) + got, err := svc.validateWithBatchVerifier(t.Context(), tt.message, tt.set) if got != tt.want { t.Errorf("validateWithBatchVerifier() = %v, want %v", got, tt.want) } diff --git a/beacon-chain/sync/blobs_test.go b/beacon-chain/sync/blobs_test.go index 22a94c2b90..c1abc5af07 100644 --- a/beacon-chain/sync/blobs_test.go +++ b/beacon-chain/sync/blobs_test.go @@ -1,7 +1,6 @@ package sync import ( - "context" "encoding/binary" "math" "math/big" @@ -208,7 +207,7 @@ func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func()) root, err := block.Block.HashTreeRoot() require.NoError(t, err) sidecars = 
append(sidecars, bsc...) - util.SaveBlock(t, context.Background(), d, block) + util.SaveBlock(t, t.Context(), d, block) parentRoot = root } @@ -301,7 +300,7 @@ func defaultMockChain(t *testing.T) (*mock.ChainService, *startup.Clock) { } func TestTestcaseSetup_BlocksAndBlobs(t *testing.T) { - ctx := context.Background() + ctx := t.Context() nblocks := 10 c := &blobsTestCase{nblocks: nblocks} c.oldestSlot = c.defaultOldestSlotByRoot diff --git a/beacon-chain/sync/broadcast_bls_changes_test.go b/beacon-chain/sync/broadcast_bls_changes_test.go index 02750a216b..c3bf726a95 100644 --- a/beacon-chain/sync/broadcast_bls_changes_test.go +++ b/beacon-chain/sync/broadcast_bls_changes_test.go @@ -1,7 +1,6 @@ package sync import ( - "context" "testing" "time" @@ -33,7 +32,7 @@ func TestBroadcastBLSChanges(t *testing.T) { Genesis: time.Now(), ValidatorsRoot: [32]byte{'A'}, } - s := NewService(context.Background(), + s := NewService(t.Context(), WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), @@ -66,7 +65,7 @@ func TestRateBLSChanges(t *testing.T) { ValidatorsRoot: [32]byte{'A'}, } p1 := mockp2p.NewTestP2P(t) - s := NewService(context.Background(), + s := NewService(t.Context(), WithP2P(p1), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), @@ -135,7 +134,7 @@ func TestBroadcastBLSBatch_changes_slice(t *testing.T) { Genesis: time.Now(), ValidatorsRoot: [32]byte{'A'}, } - s := NewService(context.Background(), + s := NewService(t.Context(), WithP2P(p1), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), diff --git a/beacon-chain/sync/checkpoint/api_test.go b/beacon-chain/sync/checkpoint/api_test.go index 0e022c7410..ebe6d2e5c4 100644 --- a/beacon-chain/sync/checkpoint/api_test.go +++ b/beacon-chain/sync/checkpoint/api_test.go @@ -2,7 +2,6 @@ package checkpoint import ( "bytes" - "context" "io" "net/http" "testing" @@ -21,7 +20,7 @@ import ( ) func 
TestDownloadFinalizedData(t *testing.T) { - ctx := context.Background() + ctx := t.Context() cfg := params.MainnetConfig() // avoid the altair zone because genesis tests are easier to set up diff --git a/beacon-chain/sync/checkpoint/weak-subjectivity_test.go b/beacon-chain/sync/checkpoint/weak-subjectivity_test.go index 28d1c80a3c..9973d768a1 100644 --- a/beacon-chain/sync/checkpoint/weak-subjectivity_test.go +++ b/beacon-chain/sync/checkpoint/weak-subjectivity_test.go @@ -2,7 +2,6 @@ package checkpoint import ( "bytes" - "context" "encoding/json" "fmt" "io" @@ -75,7 +74,7 @@ func TestFname(t *testing.T) { } func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) { - ctx := context.Background() + ctx := t.Context() cfg := params.MainnetConfig() epoch := cfg.AltairForkEpoch - 1 @@ -171,7 +170,7 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) { // runs computeBackwardsCompatible directly // and via ComputeWeakSubjectivityCheckpoint with a round tripper that triggers the backwards compatible code path func TestDownloadBackwardsCompatibleCombined(t *testing.T) { - ctx := context.Background() + ctx := t.Context() cfg := params.MainnetConfig() st, expectedEpoch := defaultTestHeadState(t, cfg) @@ -275,7 +274,7 @@ func TestGetWeakSubjectivityEpochFromHead(t *testing.T) { }} c, err := beacon.NewClient("http://localhost:3500", client.WithRoundTripper(trans)) require.NoError(t, err) - actualEpoch, err := getWeakSubjectivityEpochFromHead(context.Background(), c) + actualEpoch, err := getWeakSubjectivityEpochFromHead(t.Context(), c) require.NoError(t, err) require.Equal(t, expectedEpoch, actualEpoch) } diff --git a/beacon-chain/sync/context_test.go b/beacon-chain/sync/context_test.go index c7fa6694d0..0ecb33d0e3 100644 --- a/beacon-chain/sync/context_test.go +++ b/beacon-chain/sync/context_test.go @@ -1,7 +1,6 @@ package sync import ( - "context" "sync" "testing" "time" @@ -27,7 +26,7 @@ func TestContextWrite_NoWrites(t *testing.T) { wg.Done() // no-op }) - strm, 
err := p1.BHost.NewStream(context.Background(), nPeer.PeerID(), p2p.RPCPingTopicV1) + strm, err := p1.BHost.NewStream(t.Context(), nPeer.PeerID(), p2p.RPCPingTopicV1) assert.NoError(t, err) // Nothing will be written to the stream @@ -61,7 +60,7 @@ func TestContextRead_NoReads(t *testing.T) { wg.Done() }) - strm, err := p1.BHost.NewStream(context.Background(), nPeer.PeerID(), p2p.RPCPingTopicV1) + strm, err := p1.BHost.NewStream(t.Context(), nPeer.PeerID(), p2p.RPCPingTopicV1) assert.NoError(t, err) n, err := strm.Write(wantedData) diff --git a/beacon-chain/sync/fork_watcher_test.go b/beacon-chain/sync/fork_watcher_test.go index 05da5cb055..afa28810da 100644 --- a/beacon-chain/sync/fork_watcher_test.go +++ b/beacon-chain/sync/fork_watcher_test.go @@ -37,7 +37,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) { Genesis: gt, ValidatorsRoot: vr, } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) r := &Service{ ctx: ctx, cancel: cancel, @@ -72,7 +72,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) { bCfg.AltairForkEpoch = 5 params.OverrideBeaconConfig(bCfg) params.BeaconConfig().InitializeForkSchedule() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) r := &Service{ ctx: ctx, cancel: cancel, @@ -116,7 +116,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) { bCfg.BellatrixForkEpoch = 5 params.OverrideBeaconConfig(bCfg) params.BeaconConfig().InitializeForkSchedule() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) r := &Service{ ctx: ctx, cancel: cancel, @@ -158,7 +158,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) { bCfg.DenebForkEpoch = 5 params.OverrideBeaconConfig(bCfg) params.BeaconConfig().InitializeForkSchedule() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) r := &Service{ ctx: ctx, cancel: cancel, 
@@ -202,7 +202,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) { bCfg.ElectraForkEpoch = 5 params.OverrideBeaconConfig(bCfg) params.BeaconConfig().InitializeForkSchedule() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) r := &Service{ ctx: ctx, cancel: cancel, @@ -246,7 +246,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) { bCfg.FuluForkEpoch = 5 params.OverrideBeaconConfig(bCfg) params.BeaconConfig().InitializeForkSchedule() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) r := &Service{ ctx: ctx, cancel: cancel, @@ -306,7 +306,7 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) { ValidatorsRoot: [32]byte{'A'}, } clock := startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) r := &Service{ ctx: ctx, cancel: cancel, @@ -352,7 +352,7 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) { bCfg.AltairForkEpoch = 3 params.OverrideBeaconConfig(bCfg) params.BeaconConfig().InitializeForkSchedule() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) r := &Service{ ctx: ctx, cancel: cancel, @@ -439,7 +439,7 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) { bCfg.BellatrixForkEpoch = 3 params.OverrideBeaconConfig(bCfg) params.BeaconConfig().InitializeForkSchedule() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) r := &Service{ ctx: ctx, cancel: cancel, diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_peers_test.go b/beacon-chain/sync/initial-sync/blocks_fetcher_peers_test.go index 06f260ab6c..4bf2a91de8 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher_peers_test.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher_peers_test.go @@ -23,7 +23,7 @@ func 
TestBlocksFetcher_selectFailOverPeer(t *testing.T) { excludedPID peer.ID peers []peer.ID } - fetcher := newBlocksFetcher(context.Background(), &blocksFetcherConfig{}) + fetcher := newBlocksFetcher(t.Context(), &blocksFetcherConfig{}) tests := []struct { name string args args @@ -231,7 +231,7 @@ func TestBlocksFetcher_filterPeers(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mc, p2p, _ := initializeTestServices(t, []primitives.Slot{}, []*peerData{}) - fetcher := newBlocksFetcher(context.Background(), &blocksFetcherConfig{ + fetcher := newBlocksFetcher(t.Context(), &blocksFetcherConfig{ chain: mc, p2p: p2p, peerFilterCapacityWeight: tt.args.capacityWeight, @@ -253,7 +253,7 @@ func TestBlocksFetcher_filterPeers(t *testing.T) { var filteredPIDs []peer.ID var err error for i := 0; i < 1000; i++ { - filteredPIDs = fetcher.filterPeers(context.Background(), peerIDs, tt.args.peersPercentage) + filteredPIDs = fetcher.filterPeers(t.Context(), peerIDs, tt.args.peersPercentage) if len(filteredPIDs) <= 1 { break } @@ -399,7 +399,7 @@ func TestBlocksFetcher_removeStalePeerLocks(t *testing.T) { }, } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{}) diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go index 73b34d5c3e..db7e527ae4 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go @@ -39,7 +39,7 @@ import ( func TestBlocksFetcher_InitStartStop(t *testing.T) { mc, p2p, _ := initializeTestServices(t, []primitives.Slot{}, []*peerData{}) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher( ctx, @@ -66,7 +66,7 @@ func TestBlocksFetcher_InitStartStop(t *testing.T) { t.Run("multiple stopping 
attempts", func(t *testing.T) { fetcher := newBlocksFetcher( - context.Background(), + t.Context(), &blocksFetcherConfig{ chain: mc, p2p: p2p, @@ -77,7 +77,7 @@ func TestBlocksFetcher_InitStartStop(t *testing.T) { }) t.Run("cancellation", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) fetcher := newBlocksFetcher( ctx, &blocksFetcherConfig{ @@ -90,7 +90,7 @@ func TestBlocksFetcher_InitStartStop(t *testing.T) { }) t.Run("peer filter capacity weight", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher( ctx, @@ -272,7 +272,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) { genesisRoot := cache.rootCache[0] cache.RUnlock() - util.SaveBlock(t, context.Background(), beaconDB, util.NewBeaconBlock()) + util.SaveBlock(t, t.Context(), beaconDB, util.NewBeaconBlock()) st, err := util.NewBeaconState() require.NoError(t, err) @@ -291,7 +291,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) { ValidatorsRoot: [32]byte{}, } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{ chain: mc, p2p: p, @@ -340,7 +340,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) { maxExpectedBlocks := uint64(0) for _, requestParams := range tt.requests { - err = fetcher.scheduleRequest(context.Background(), requestParams.start, requestParams.count) + err = fetcher.scheduleRequest(t.Context(), requestParams.start, requestParams.count) assert.NoError(t, err) maxExpectedBlocks += requestParams.count } @@ -378,16 +378,16 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) { func TestBlocksFetcher_scheduleRequest(t *testing.T) { blockBatchLimit := uint64(flags.Get().BlockBatchLimit) t.Run("context cancellation", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + 
ctx, cancel := context.WithCancel(t.Context()) fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{}) cancel() assert.ErrorContains(t, "context canceled", fetcher.scheduleRequest(ctx, 1, blockBatchLimit)) }) t.Run("unblock on context cancellation", func(t *testing.T) { - fetcher := newBlocksFetcher(context.Background(), &blocksFetcherConfig{}) + fetcher := newBlocksFetcher(t.Context(), &blocksFetcherConfig{}) for i := 0; i < maxPendingRequests; i++ { - assert.NoError(t, fetcher.scheduleRequest(context.Background(), 1, blockBatchLimit)) + assert.NoError(t, fetcher.scheduleRequest(t.Context(), 1, blockBatchLimit)) } // Will block on next request (and wait until requests are either processed or context is closed). @@ -395,7 +395,7 @@ func TestBlocksFetcher_scheduleRequest(t *testing.T) { fetcher.cancel() }() assert.ErrorContains(t, errFetcherCtxIsDone.Error(), - fetcher.scheduleRequest(context.Background(), 1, blockBatchLimit)) + fetcher.scheduleRequest(t.Context(), 1, blockBatchLimit)) }) } func TestBlocksFetcher_handleRequest(t *testing.T) { @@ -424,7 +424,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) { mc.Genesis = time.Now() t.Run("context cancellation", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{ chain: mc, p2p: p2p, @@ -437,7 +437,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) { }) t.Run("receive blocks", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{ chain: mc, @@ -445,7 +445,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) { clock: startup.NewClock(mc.Genesis, mc.ValidatorsRoot), }) - requestCtx, reqCancel := context.WithTimeout(context.Background(), 2*time.Second) + requestCtx, reqCancel := context.WithTimeout(t.Context(), 2*time.Second) defer 
reqCancel() go func() { response := fetcher.handleRequest(requestCtx, 1 /* start */, uint64(blockBatchLimit) /* count */) @@ -503,7 +503,7 @@ func TestBlocksFetcher_requestBeaconBlocksByRange(t *testing.T) { } mc, p2p, _ := initializeTestServices(t, chainConfig.expectedBlockSlots, chainConfig.peers) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher( @@ -524,7 +524,7 @@ func TestBlocksFetcher_requestBeaconBlocksByRange(t *testing.T) { assert.Equal(t, uint64(blockBatchLimit), uint64(len(blocks)), "Incorrect number of blocks returned") // Test context cancellation. - ctx, cancel = context.WithCancel(context.Background()) + ctx, cancel = context.WithCancel(t.Context()) cancel() _, err = fetcher.requestBlocks(ctx, req, peerIDs[0]) assert.ErrorContains(t, "context canceled", err) @@ -553,7 +553,7 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) { burstFactor := uint64(flags.Get().BlockBatchLimitBurstFactor) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{p2p: p1}) fetcher.rateLimiter = leakybucket.NewCollector(float64(req.Count), int64(req.Count*burstFactor), 1*time.Second, false) @@ -619,7 +619,7 @@ func TestBlocksFetcher_WaitForBandwidth(t *testing.T) { burstFactor := uint64(flags.Get().BlockBatchLimitBurstFactor) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{p2p: p1}) fetcher.rateLimiter = leakybucket.NewCollector(float64(req.Count), int64(req.Count*burstFactor), 5*time.Second, false) @@ -886,7 +886,7 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) topic := p2pm.RPCBlocksByRangeTopicV1 protocol := libp2pcore.ProtocolID(topic + p1.Encoding().ProtocolSuffix()) - 
ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{p2p: p1, chain: &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}}) fetcher.rateLimiter = leakybucket.NewCollector(0.000001, 640, 1*time.Second, false) @@ -1285,7 +1285,7 @@ func TestBatchLimit(t *testing.T) { } func TestBlockFetcher_HasSufficientBandwidth(t *testing.T) { - bf := newBlocksFetcher(context.Background(), &blocksFetcherConfig{}) + bf := newBlocksFetcher(t.Context(), &blocksFetcherConfig{}) currCap := bf.rateLimiter.Capacity() wantedAmt := currCap - 100 bf.rateLimiter.Add(peer.ID("a").String(), wantedAmt) diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_utils_test.go b/beacon-chain/sync/initial-sync/blocks_fetcher_utils_test.go index f66669b18b..e27e5543f0 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher_utils_test.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher_utils_test.go @@ -51,7 +51,7 @@ func TestBlocksFetcher_nonSkippedSlotAfter(t *testing.T) { } mc, p2p, _ := initializeTestServices(t, []primitives.Slot{}, chainConfig.peers) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher( @@ -165,7 +165,7 @@ func TestBlocksFetcher_findFork(t *testing.T) { finalizedEpoch := slots.ToEpoch(finalizedSlot) genesisBlock := chain1[0] - util.SaveBlock(t, context.Background(), beaconDB, genesisBlock) + util.SaveBlock(t, t.Context(), beaconDB, genesisBlock) genesisRoot, err := genesisBlock.Block.HashTreeRoot() require.NoError(t, err) @@ -183,7 +183,7 @@ func TestBlocksFetcher_findFork(t *testing.T) { ValidatorsRoot: [32]byte{}, } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher( ctx, @@ -337,7 +337,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) { 
knownBlocks := extendBlockSequence(t, []*ethpb.SignedBeaconBlock{}, 128) genesisBlock := knownBlocks[0] - util.SaveBlock(t, context.Background(), beaconDB, genesisBlock) + util.SaveBlock(t, t.Context(), beaconDB, genesisBlock) genesisRoot, err := genesisBlock.Block.HashTreeRoot() require.NoError(t, err) @@ -351,7 +351,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) { ValidatorsRoot: [32]byte{}, } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher( ctx, @@ -475,7 +475,7 @@ func TestBlocksFetcher_findAncestor(t *testing.T) { finalizedEpoch := slots.ToEpoch(finalizedSlot) genesisBlock := knownBlocks[0] - util.SaveBlock(t, context.Background(), beaconDB, genesisBlock) + util.SaveBlock(t, t.Context(), beaconDB, genesisBlock) genesisRoot, err := genesisBlock.Block.HashTreeRoot() require.NoError(t, err) @@ -493,7 +493,7 @@ func TestBlocksFetcher_findAncestor(t *testing.T) { ValidatorsRoot: [32]byte{}, } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher( ctx, @@ -612,7 +612,7 @@ func TestBlocksFetcher_currentHeadAndTargetEpochs(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mc, p2p, _ := initializeTestServices(t, []primitives.Slot{}, tt.peers) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher( ctx, diff --git a/beacon-chain/sync/initial-sync/blocks_queue_test.go b/beacon-chain/sync/initial-sync/blocks_queue_test.go index 9aeaba962d..adffc2e8dd 100644 --- a/beacon-chain/sync/initial-sync/blocks_queue_test.go +++ b/beacon-chain/sync/initial-sync/blocks_queue_test.go @@ -33,7 +33,7 @@ func TestBlocksQueue_InitStartStop(t *testing.T) { blockBatchLimit := flags.Get().BlockBatchLimit mc, p2p, _ := initializeTestServices(t, []primitives.Slot{}, 
[]*peerData{}) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{ chain: mc, @@ -123,7 +123,7 @@ func TestBlocksQueue_InitStartStop(t *testing.T) { }) t.Run("cancellation", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) queue := newBlocksQueue(ctx, &blocksQueueConfig{ blocksFetcher: fetcher, chain: mc, @@ -249,7 +249,7 @@ func TestBlocksQueue_Loop(t *testing.T) { t.Run(tt.name, func(t *testing.T) { mc, p2p, beaconDB := initializeTestServices(t, tt.expectedBlockSlots, tt.peers) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{ @@ -308,7 +308,7 @@ func TestBlocksQueue_onScheduleEvent(t *testing.T) { blockBatchLimit := flags.Get().BlockBatchLimit mc, p2p, _ := initializeTestServices(t, []primitives.Slot{}, []*peerData{}) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{ chain: mc, @@ -374,7 +374,7 @@ func TestBlocksQueue_onScheduleEvent(t *testing.T) { highestExpectedSlot: primitives.Slot(blockBatchLimit), }) // Cancel to make fetcher spit error when trying to schedule next FSM. 
- requestCtx, requestCtxCancel := context.WithCancel(context.Background()) + requestCtx, requestCtxCancel := context.WithCancel(t.Context()) requestCtxCancel() handlerFn := queue.onScheduleEvent(requestCtx) updatedState, err := handlerFn(&stateMachine{ @@ -403,7 +403,7 @@ func TestBlocksQueue_onDataReceivedEvent(t *testing.T) { blockBatchLimit := flags.Get().BlockBatchLimit mc, p2p, _ := initializeTestServices(t, []primitives.Slot{}, []*peerData{}) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{ chain: mc, @@ -562,7 +562,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) { blockBatchLimit := flags.Get().BlockBatchLimit mc, p2p, _ := initializeTestServices(t, []primitives.Slot{}, []*peerData{}) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{ chain: mc, @@ -720,7 +720,7 @@ func TestBlocksQueue_onProcessSkippedEvent(t *testing.T) { blockBatchLimit := flags.Get().BlockBatchLimit mc, p2p, _ := initializeTestServices(t, []primitives.Slot{}, []*peerData{}) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{ chain: mc, @@ -973,7 +973,7 @@ func TestBlocksQueue_onProcessSkippedEvent(t *testing.T) { } func TestBlocksQueue_onCheckStaleEvent(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() t.Run("expired context", func(t *testing.T) { @@ -1037,7 +1037,7 @@ func TestBlocksQueue_stuckInUnfavourableFork(t *testing.T) { finalizedEpoch := slots.ToEpoch(finalizedSlot) genesisBlock := chain1[0] - util.SaveBlock(t, context.Background(), beaconDB, genesisBlock) + util.SaveBlock(t, t.Context(), beaconDB, 
genesisBlock) genesisRoot, err := genesisBlock.Block.HashTreeRoot() require.NoError(t, err) @@ -1055,7 +1055,7 @@ func TestBlocksQueue_stuckInUnfavourableFork(t *testing.T) { ValidatorsRoot: [32]byte{}, } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() fetcher := newBlocksFetcher( ctx, @@ -1229,7 +1229,7 @@ func TestBlocksQueue_stuckInUnfavourableFork(t *testing.T) { } func TestBlocksQueue_stuckWhenHeadIsSetToOrphanedBlock(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() beaconDB := dbtest.SetupDB(t) @@ -1240,7 +1240,7 @@ func TestBlocksQueue_stuckWhenHeadIsSetToOrphanedBlock(t *testing.T) { finalizedEpoch := slots.ToEpoch(finalizedSlot) genesisBlock := chain[0] - util.SaveBlock(t, context.Background(), beaconDB, genesisBlock) + util.SaveBlock(t, t.Context(), beaconDB, genesisBlock) genesisRoot, err := genesisBlock.Block.HashTreeRoot() require.NoError(t, err) diff --git a/beacon-chain/sync/initial-sync/downscore_test.go b/beacon-chain/sync/initial-sync/downscore_test.go index be5ebda2d0..a6b21b4102 100644 --- a/beacon-chain/sync/initial-sync/downscore_test.go +++ b/beacon-chain/sync/initial-sync/downscore_test.go @@ -1,7 +1,6 @@ package initialsync import ( - "context" "testing" "time" @@ -152,7 +151,7 @@ func TestOnDataReceivedDownscore(t *testing.T) { if c.downPeer == testDownscoreBlob { require.Equal(t, true, verification.IsBlobValidationFailure(c.err)) } - ctx := context.Background() + ctx := t.Context() p2p := p2pt.NewTestP2P(t) mc := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{ @@ -167,7 +166,7 @@ func TestOnDataReceivedDownscore(t *testing.T) { chain: mc}) sm := q.smm.addStateMachine(0) sm.state = stateScheduled - handle := q.onDataReceivedEvent(context.Background()) + handle := q.onDataReceivedEvent(t.Context()) 
endState, err := handle(sm, data) if c.err != nil { require.ErrorIs(t, err, c.err) diff --git a/beacon-chain/sync/initial-sync/initial_sync_test.go b/beacon-chain/sync/initial-sync/initial_sync_test.go index 3750a09368..006c375a6c 100644 --- a/beacon-chain/sync/initial-sync/initial_sync_test.go +++ b/beacon-chain/sync/initial-sync/initial_sync_test.go @@ -1,7 +1,6 @@ package initialsync import ( - "context" "fmt" "io" "os" @@ -84,7 +83,7 @@ func initializeTestServices(t *testing.T, slots []primitives.Slot, peers []*peer genesisRoot := cache.rootCache[0] cache.RUnlock() - util.SaveBlock(t, context.Background(), beaconDB, util.NewBeaconBlock()) + util.SaveBlock(t, t.Context(), beaconDB, util.NewBeaconBlock()) st, err := util.NewBeaconState() require.NoError(t, err) diff --git a/beacon-chain/sync/initial-sync/round_robin_test.go b/beacon-chain/sync/initial-sync/round_robin_test.go index 69e8536273..68ee064ad3 100644 --- a/beacon-chain/sync/initial-sync/round_robin_test.go +++ b/beacon-chain/sync/initial-sync/round_robin_test.go @@ -290,7 +290,7 @@ func TestService_roundRobinSync(t *testing.T) { genesisRoot := cache.rootCache[0] cache.RUnlock() - util.SaveBlock(t, context.Background(), beaconDB, util.NewBeaconBlock()) + util.SaveBlock(t, t.Context(), beaconDB, util.NewBeaconBlock()) st, err := util.NewBeaconState() require.NoError(t, err) @@ -308,7 +308,7 @@ func TestService_roundRobinSync(t *testing.T) { } // no-op mock clock := startup.NewClock(gt, vr) s := &Service{ - ctx: context.Background(), + ctx: t.Context(), cfg: &Config{Chain: mc, P2P: p, DB: beaconDB}, synced: abool.New(), chainStarted: abool.NewBool(true), @@ -337,10 +337,10 @@ func TestService_processBlock(t *testing.T) { genesisBlk := util.NewBeaconBlock() genesisBlkRoot, err := genesisBlk.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, genesisBlk) + util.SaveBlock(t, t.Context(), beaconDB, genesisBlk) st, err := util.NewBeaconState() require.NoError(t, 
err) - s := NewService(context.Background(), &Config{ + s := NewService(t.Context(), &Config{ P2P: p2pt.NewTestP2P(t), DB: beaconDB, Chain: &mock.ChainService{ @@ -355,7 +355,7 @@ func TestService_processBlock(t *testing.T) { }, StateNotifier: &mock.MockStateNotifier{}, }) - ctx := context.Background() + ctx := t.Context() genesis := makeGenesisTime(32) t.Run("process duplicate block", func(t *testing.T) { @@ -411,10 +411,10 @@ func TestService_processBlockBatch(t *testing.T) { genesisBlk := util.NewBeaconBlock() genesisBlkRoot, err := genesisBlk.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, genesisBlk) + util.SaveBlock(t, t.Context(), beaconDB, genesisBlk) st, err := util.NewBeaconState() require.NoError(t, err) - s := NewService(context.Background(), &Config{ + s := NewService(t.Context(), &Config{ P2P: p2pt.NewTestP2P(t), DB: beaconDB, Chain: &mock.ChainService{ @@ -427,7 +427,7 @@ func TestService_processBlockBatch(t *testing.T) { }, StateNotifier: &mock.MockStateNotifier{}, }) - ctx := context.Background() + ctx := t.Context() genesis := makeGenesisTime(32) s.genesisTime = genesis @@ -441,7 +441,7 @@ func TestService_processBlockBatch(t *testing.T) { blk1.Block.ParentRoot = parentRoot[:] blk1Root, err := blk1.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, blk1) + util.SaveBlock(t, t.Context(), beaconDB, blk1) wsb, err := blocks.NewSignedBeaconBlock(blk1) require.NoError(t, err) rowsb, err := blocks.NewROBlock(wsb) @@ -458,7 +458,7 @@ func TestService_processBlockBatch(t *testing.T) { blk1.Block.ParentRoot = parentRoot[:] blk1Root, err := blk1.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, blk1) + util.SaveBlock(t, t.Context(), beaconDB, blk1) wsb, err := blocks.NewSignedBeaconBlock(blk1) require.NoError(t, err) rowsb, err := blocks.NewROBlock(wsb) @@ -548,7 +548,7 @@ func TestService_blockProviderScoring(t 
*testing.T) { genesisRoot := cache.rootCache[0] cache.RUnlock() - util.SaveBlock(t, context.Background(), beaconDB, util.NewBeaconBlock()) + util.SaveBlock(t, t.Context(), beaconDB, util.NewBeaconBlock()) st, err := util.NewBeaconState() require.NoError(t, err) @@ -568,7 +568,7 @@ func TestService_blockProviderScoring(t *testing.T) { } // no-op mock clock := startup.NewClock(gt, vr) s := &Service{ - ctx: context.Background(), + ctx: t.Context(), cfg: &Config{Chain: mc, P2P: p, DB: beaconDB}, synced: abool.New(), chainStarted: abool.NewBool(true), @@ -618,7 +618,7 @@ func TestService_syncToFinalizedEpoch(t *testing.T) { genesisRoot := cache.rootCache[0] cache.RUnlock() - util.SaveBlock(t, context.Background(), beaconDB, util.NewBeaconBlock()) + util.SaveBlock(t, t.Context(), beaconDB, util.NewBeaconBlock()) st, err := util.NewBeaconState() require.NoError(t, err) @@ -637,7 +637,7 @@ func TestService_syncToFinalizedEpoch(t *testing.T) { ValidatorsRoot: vr, } s := &Service{ - ctx: context.Background(), + ctx: t.Context(), cfg: &Config{Chain: mc, P2P: p, DB: beaconDB}, synced: abool.New(), chainStarted: abool.NewBool(true), @@ -656,7 +656,7 @@ func TestService_syncToFinalizedEpoch(t *testing.T) { }, p.Peers()) genesis := makeGenesisTime(currentSlot) s.genesisTime = genesis - assert.NoError(t, s.syncToFinalizedEpoch(context.Background())) + assert.NoError(t, s.syncToFinalizedEpoch(t.Context())) if s.cfg.Chain.HeadSlot() < currentSlot { t.Errorf("Head slot (%d) is less than expected currentSlot (%d)", s.cfg.Chain.HeadSlot(), currentSlot) } @@ -674,7 +674,7 @@ func TestService_syncToFinalizedEpoch(t *testing.T) { // Try to re-sync, should be exited immediately (node is already synced to finalized epoch). 
hook.Reset() s.genesisTime = genesis - assert.NoError(t, s.syncToFinalizedEpoch(context.Background())) + assert.NoError(t, s.syncToFinalizedEpoch(t.Context())) assert.LogsContain(t, hook, "Already synced to finalized epoch") } @@ -683,7 +683,7 @@ func TestService_ValidUnprocessed(t *testing.T) { genesisBlk := util.NewBeaconBlock() genesisBlkRoot, err := genesisBlk.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, genesisBlk) + util.SaveBlock(t, t.Context(), beaconDB, genesisBlk) var batch []blocks.BlockWithROBlobs currBlockRoot := genesisBlkRoot @@ -694,7 +694,7 @@ func TestService_ValidUnprocessed(t *testing.T) { blk1.Block.ParentRoot = parentRoot[:] blk1Root, err := blk1.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), beaconDB, blk1) + util.SaveBlock(t, t.Context(), beaconDB, blk1) wsb, err := blocks.NewSignedBeaconBlock(blk1) require.NoError(t, err) rowsb, err := blocks.NewROBlock(wsb) @@ -703,7 +703,7 @@ func TestService_ValidUnprocessed(t *testing.T) { currBlockRoot = blk1Root } - retBlocks, err := validUnprocessed(context.Background(), batch, 2, func(ctx context.Context, block blocks.ROBlock) bool { + retBlocks, err := validUnprocessed(t.Context(), batch, 2, func(ctx context.Context, block blocks.ROBlock) bool { // Ignore first 2 blocks in the batch. 
return block.Block().Slot() <= 2 }) diff --git a/beacon-chain/sync/initial-sync/service_test.go b/beacon-chain/sync/initial-sync/service_test.go index b09ca0e058..c687049a13 100644 --- a/beacon-chain/sync/initial-sync/service_test.go +++ b/beacon-chain/sync/initial-sync/service_test.go @@ -146,7 +146,7 @@ func TestService_InitStartStop(t *testing.T) { } t.Run(tt.name, func(t *testing.T) { defer hook.Reset() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() mc := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} // Allow overriding with customized chain service. @@ -211,7 +211,7 @@ func TestService_waitForStateInitialization(t *testing.T) { t.Run("no state and context close", func(t *testing.T) { defer hook.Reset() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() s, _ := newService(ctx, &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}) @@ -237,7 +237,7 @@ func TestService_waitForStateInitialization(t *testing.T) { t.Run("no state and state init event received", func(t *testing.T) { defer hook.Reset() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() st, err := util.NewBeaconState() @@ -270,7 +270,7 @@ func TestService_waitForStateInitialization(t *testing.T) { t.Run("no state and state init event received and service start", func(t *testing.T) { defer hook.Reset() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() s, gs := newService(ctx, &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}) // Initialize mock feed @@ -299,7 +299,7 @@ func TestService_waitForStateInitialization(t *testing.T) { func TestService_markSynced(t *testing.T) { mc := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}} - ctx, cancel := 
context.WithTimeout(context.Background(), time.Second) + ctx, cancel := context.WithTimeout(t.Context(), time.Second) defer cancel() s := NewService(ctx, &Config{ Chain: mc, @@ -334,7 +334,7 @@ func TestService_Resync(t *testing.T) { }, p.Peers()) cache.initializeRootCache(makeSequence(1, 160), t) beaconDB := dbtest.SetupDB(t) - util.SaveBlock(t, context.Background(), beaconDB, util.NewBeaconBlock()) + util.SaveBlock(t, t.Context(), beaconDB, util.NewBeaconBlock()) cache.RLock() genesisRoot := cache.rootCache[0] cache.RUnlock() @@ -377,7 +377,7 @@ func TestService_Resync(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { defer hook.Reset() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() mc := &mock.ChainService{} // Allow overriding with customized chain service. @@ -407,7 +407,7 @@ func TestService_Resync(t *testing.T) { } func TestService_Initialized(t *testing.T) { - s := NewService(context.Background(), &Config{ + s := NewService(t.Context(), &Config{ StateNotifier: &mock.MockStateNotifier{}, }) s.chainStarted.Set() @@ -417,7 +417,7 @@ func TestService_Initialized(t *testing.T) { } func TestService_Synced(t *testing.T) { - s := NewService(context.Background(), &Config{}) + s := NewService(t.Context(), &Config{}) s.synced.UnSet() assert.Equal(t, false, s.Synced()) s.synced.Set() @@ -491,7 +491,7 @@ func TestMissingBlobRequest(t *testing.T) { } func TestOriginOutsideRetention(t *testing.T) { - ctx := context.Background() + ctx := t.Context() bdb := dbtest.SetupDB(t) genesis := time.Unix(0, 0) secsPerEpoch := params.BeaconConfig().SecondsPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch) diff --git a/beacon-chain/sync/pending_attestations_queue_test.go b/beacon-chain/sync/pending_attestations_queue_test.go index ba83225193..c9e3c8975f 100644 --- a/beacon-chain/sync/pending_attestations_queue_test.go +++ b/beacon-chain/sync/pending_attestations_queue_test.go @@ 
-60,7 +60,7 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) { a := &ethpb.AggregateAttestationAndProof{Aggregate: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}} r.blkRootToPendingAtts[[32]byte{'A'}] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: a}} - require.NoError(t, r.processPendingAtts(context.Background())) + require.NoError(t, r.processPendingAtts(t.Context())) require.LogsContain(t, hook, "Requesting block by root") } @@ -73,7 +73,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) { beaconState, privKeys := util.DeterministicGenesisState(t, validators) sb := util.NewBeaconBlock() - util.SaveBlock(t, context.Background(), db, sb) + util.SaveBlock(t, t.Context(), db, sb) root, err := sb.Block.HashTreeRoot() require.NoError(t, err) @@ -88,7 +88,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) { AggregationBits: aggBits, } - committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, att.Data.CommitteeIndex) assert.NoError(t, err) attestingIndices, err := attestation.AttestingIndices(att, committee) require.NoError(t, err) @@ -119,7 +119,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) { opn := mock.NewEventFeedWrapper() sub := opn.Subscribe(done) defer sub.Unsubscribe() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) r := &Service{ ctx: ctx, cfg: &config{ @@ -138,10 +138,10 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) { s, err := util.NewBeaconState() require.NoError(t, err) - require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, root)) + require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root)) r.blkRootToPendingAtts[root]
= []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof}} - require.NoError(t, r.processPendingAtts(context.Background())) + require.NoError(t, r.processPendingAtts(t.Context())) var wg sync.WaitGroup wg.Add(1) @@ -176,7 +176,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra(t *testing.T) { beaconState, privKeys := util.DeterministicGenesisStateElectra(t, validators) sb := util.NewBeaconBlockElectra() - util.SaveBlock(t, context.Background(), db, sb) + util.SaveBlock(t, t.Context(), db, sb) root, err := sb.Block.HashTreeRoot() require.NoError(t, err) @@ -191,7 +191,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra(t *testing.T) { Aggregate: att, } - committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, att.Data.CommitteeIndex) assert.NoError(t, err) att.AttesterIndex = committee[0] attesterDomain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot()) @@ -214,7 +214,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra(t *testing.T) { opn := mock.NewEventFeedWrapper() sub := opn.Subscribe(done) defer sub.Unsubscribe() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) r := &Service{ ctx: ctx, cfg: &config{ @@ -233,10 +233,10 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra(t *testing.T) { s, err := util.NewBeaconStateElectra() require.NoError(t, err) - require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, root)) + require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root)) r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProofSingle{Message: aggregateAndProof}} - require.NoError(t,
r.processPendingAtts(context.Background())) + require.NoError(t, r.processPendingAtts(t.Context())) var wg sync.WaitGroup wg.Add(1) go func() { @@ -289,7 +289,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen // Create and save a new Beacon block. sb := util.NewBeaconBlockElectra() - util.SaveBlock(t, context.Background(), db, sb) + util.SaveBlock(t, t.Context(), db, sb) // Save state with block root. root, err := sb.Block.HashTreeRoot() @@ -311,7 +311,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen } // Retrieve the beacon committee and set the attester index. - committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.CommitteeId) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, att.CommitteeId) assert.NoError(t, err) att.AttesterIndex = committee[0] @@ -343,7 +343,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen defer sub.Unsubscribe() // Create context and service configuration. - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() r := &Service{ ctx: ctx, @@ -365,13 +365,13 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen // Save a new beacon state and link it with the block root. s, err := util.NewBeaconStateElectra() require.NoError(t, err) - require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, root)) + require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root)) // Add the pending attestation. r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{ &ethpb.SignedAggregateAttestationAndProofSingle{Message: aggregateAndProof}, } - require.NoError(t, r.processPendingAtts(context.Background())) + require.NoError(t, r.processPendingAtts(t.Context())) // Verify that the event feed receives the expected attestation.
var wg sync.WaitGroup @@ -456,11 +456,11 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) { b := util.NewBeaconBlock() r32, err := b.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b) - require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, r32)) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b) + require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, r32)) r.blkRootToPendingAtts[r32] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: a, Signature: make([]byte, fieldparams.BLSSignatureLength)}} - require.NoError(t, r.processPendingAtts(context.Background())) + require.NoError(t, r.processPendingAtts(t.Context())) assert.Equal(t, false, p1.BroadcastCalled.Load(), "Broadcasted bad aggregate") // Clear pool. @@ -480,7 +480,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) { }, AggregationBits: aggBits, } - committee, err := helpers.BeaconCommitteeFromState(context.Background(), s, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), s, att.Data.Slot, att.Data.CommitteeIndex) assert.NoError(t, err) attestingIndices, err := attestation.AttestingIndices(att, committee) require.NoError(t, err) @@ -506,7 +506,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) { require.NoError(t, err) require.NoError(t, s.SetGenesisTime(uint64(time.Now().Unix()))) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) chain2 := &mock.ChainService{Genesis: time.Now(), State: s, FinalizedCheckPoint: ðpb.Checkpoint{ @@ -530,7 +530,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) { go r.verifierRoutine() r.blkRootToPendingAtts[r32] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}} - require.NoError(t, 
r.processPendingAtts(context.Background())) + require.NoError(t, r.processPendingAtts(t.Context())) assert.Equal(t, true, p1.BroadcastCalled.Load(), "Could not broadcast the good aggregate") cancel() @@ -545,7 +545,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) { beaconState, privKeys := util.DeterministicGenesisState(t, validators) sb := util.NewBeaconBlock() - util.SaveBlock(t, context.Background(), db, sb) + util.SaveBlock(t, t.Context(), db, sb) root, err := sb.Block.HashTreeRoot() require.NoError(t, err) @@ -561,7 +561,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) { AggregationBits: aggBits, } - committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, att.Data.CommitteeIndex) assert.NoError(t, err) attestingIndices, err := attestation.AttestingIndices(att, committee) require.NoError(t, err) @@ -598,7 +598,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) { Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot, Epoch: 0, }} - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) r := &Service{ ctx: ctx, cfg: &config{ @@ -615,10 +615,10 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) { go r.verifierRoutine() s, err := util.NewBeaconState() require.NoError(t, err) - require.NoError(t, r.cfg.beaconDB.SaveState(context.Background(), s, root)) + require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root)) r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}} - require.NoError(t, r.processPendingAtts(context.Background())) + require.NoError(t, r.processPendingAtts(t.Context())) assert.Equal(t, 1, len(r.cfg.attPool.AggregatedAttestations()), "Did not save 
aggregated att") assert.DeepEqual(t, att, r.cfg.attPool.AggregatedAttestations()[0], "Incorrect saved att") @@ -661,13 +661,13 @@ func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) { assert.Equal(t, 100, len(s.blkRootToPendingAtts[r3]), "Did not save pending atts") // Set current slot to 50, it should prune 19 attestations. (50 - 31) - s.validatePendingAtts(context.Background(), 50) + s.validatePendingAtts(t.Context(), 50) assert.Equal(t, 81, len(s.blkRootToPendingAtts[r1]), "Did not delete pending atts") assert.Equal(t, 81, len(s.blkRootToPendingAtts[r2]), "Did not delete pending atts") assert.Equal(t, 81, len(s.blkRootToPendingAtts[r3]), "Did not delete pending atts") // Set current slot to 100 + slot_duration, it should prune all the attestations. - s.validatePendingAtts(context.Background(), 100+params.BeaconConfig().SlotsPerEpoch) + s.validatePendingAtts(t.Context(), 100+params.BeaconConfig().SlotsPerEpoch) assert.Equal(t, 0, len(s.blkRootToPendingAtts[r1]), "Did not delete pending atts") assert.Equal(t, 0, len(s.blkRootToPendingAtts[r2]), "Did not delete pending atts") assert.Equal(t, 0, len(s.blkRootToPendingAtts[r3]), "Did not delete pending atts") diff --git a/beacon-chain/sync/pending_blocks_queue_test.go b/beacon-chain/sync/pending_blocks_queue_test.go index 35b5ffc156..25a0dfc746 100644 --- a/beacon-chain/sync/pending_blocks_queue_test.go +++ b/beacon-chain/sync/pending_blocks_queue_test.go @@ -1,7 +1,6 @@ package sync import ( - "context" "math" "sync" "testing" @@ -62,13 +61,13 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) { r.initCaches() b0 := util.NewBeaconBlock() - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b0) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b0) b0Root, err := b0.Block.HashTreeRoot() require.NoError(t, err) b3 := util.NewBeaconBlock() b3.Block.Slot = 3 b3.Block.ParentRoot = b0Root[:] - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b3) + util.SaveBlock(t, 
t.Context(), r.cfg.beaconDB, b3) // Incomplete block link b1 := util.NewBeaconBlock() b1.Block.Slot = 1 @@ -86,7 +85,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) { require.NoError(t, err) require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, wsb, b2Root)) - require.NoError(t, r.processPendingBlocks(context.Background())) + require.NoError(t, r.processPendingBlocks(t.Context())) assert.Equal(t, 1, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache") assert.Equal(t, 1, len(r.seenPendingBlocks), "Incorrect size for seen pending block") @@ -94,7 +93,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) { wsb, err = blocks.NewSignedBeaconBlock(b1) require.NoError(t, err) require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wsb, b1Root)) - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b1) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b1) nBlock := util.NewBeaconBlock() nBlock.Block.Slot = b1.Block.Slot @@ -105,8 +104,8 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) { wsb, err = blocks.NewSignedBeaconBlock(nBlock) require.NoError(t, err) require.NoError(t, r.insertBlockToPendingQueue(nBlock.Block.Slot, wsb, nRoot)) - require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad - require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run + require.NoError(t, r.processPendingBlocks(t.Context())) // Marks a block as bad + require.NoError(t, r.processPendingBlocks(t.Context())) // Bad block removed on second run assert.Equal(t, 2, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache") assert.Equal(t, 2, len(r.seenPendingBlocks), "Incorrect size for seen pending block") @@ -135,13 +134,13 @@ func TestRegularSyncBeaconBlockSubscriber_OptimisticStatus(t *testing.T) { r.initCaches() b0 := util.NewBeaconBlock() - 
util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b0) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b0) b0Root, err := b0.Block.HashTreeRoot() require.NoError(t, err) b3 := util.NewBeaconBlock() b3.Block.Slot = 3 b3.Block.ParentRoot = b0Root[:] - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b3) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b3) // Incomplete block link b1 := util.NewBeaconBlock() b1.Block.Slot = 1 @@ -159,7 +158,7 @@ func TestRegularSyncBeaconBlockSubscriber_OptimisticStatus(t *testing.T) { require.NoError(t, err) require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, wsb, b2Root)) - require.NoError(t, r.processPendingBlocks(context.Background())) + require.NoError(t, r.processPendingBlocks(t.Context())) assert.Equal(t, 1, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache") assert.Equal(t, 1, len(r.seenPendingBlocks), "Incorrect size for seen pending block") @@ -167,7 +166,7 @@ func TestRegularSyncBeaconBlockSubscriber_OptimisticStatus(t *testing.T) { wsb, err = blocks.NewSignedBeaconBlock(b1) require.NoError(t, err) require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wsb, b1Root)) - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b1) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b1) nBlock := util.NewBeaconBlock() nBlock.Block.Slot = b1.Block.Slot @@ -178,8 +177,8 @@ func TestRegularSyncBeaconBlockSubscriber_OptimisticStatus(t *testing.T) { wsb, err = blocks.NewSignedBeaconBlock(nBlock) require.NoError(t, err) require.NoError(t, r.insertBlockToPendingQueue(nBlock.Block.Slot, wsb, nRoot)) - require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad - require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run + require.NoError(t, r.processPendingBlocks(t.Context())) // Marks a block as bad + require.NoError(t, r.processPendingBlocks(t.Context())) // Bad block removed on second run assert.Equal(t, 
2, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache") assert.Equal(t, 2, len(r.seenPendingBlocks), "Incorrect size for seen pending block") @@ -209,13 +208,13 @@ func TestRegularSyncBeaconBlockSubscriber_ExecutionEngineTimesOut(t *testing.T) r.initCaches() b0 := util.NewBeaconBlock() - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b0) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b0) b0Root, err := b0.Block.HashTreeRoot() require.NoError(t, err) b3 := util.NewBeaconBlock() b3.Block.Slot = 3 b3.Block.ParentRoot = b0Root[:] - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b3) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b3) // Incomplete block link b1 := util.NewBeaconBlock() b1.Block.Slot = 1 @@ -233,7 +232,7 @@ func TestRegularSyncBeaconBlockSubscriber_ExecutionEngineTimesOut(t *testing.T) require.NoError(t, err) require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, wsb, b2Root)) - require.NoError(t, r.processPendingBlocks(context.Background())) + require.NoError(t, r.processPendingBlocks(t.Context())) assert.Equal(t, 1, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache") assert.Equal(t, 1, len(r.seenPendingBlocks), "Incorrect size for seen pending block") @@ -241,7 +240,7 @@ func TestRegularSyncBeaconBlockSubscriber_ExecutionEngineTimesOut(t *testing.T) wsb, err = blocks.NewSignedBeaconBlock(b1) require.NoError(t, err) require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wsb, b1Root)) - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b1) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b1) nBlock := util.NewBeaconBlock() nBlock.Block.Slot = b1.Block.Slot @@ -252,8 +251,8 @@ func TestRegularSyncBeaconBlockSubscriber_ExecutionEngineTimesOut(t *testing.T) wsb, err = blocks.NewSignedBeaconBlock(nBlock) require.NoError(t, err) require.NoError(t, r.insertBlockToPendingQueue(nBlock.Block.Slot, wsb, nRoot)) - require.NoError(t, 
r.processPendingBlocks(context.Background())) // Marks a block as bad - require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run + require.NoError(t, r.processPendingBlocks(t.Context())) // Marks a block as bad + require.NoError(t, r.processPendingBlocks(t.Context())) // Bad block removed on second run assert.Equal(t, 2, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache") assert.Equal(t, 2, len(r.seenPendingBlocks), "Incorrect size for seen pending block") @@ -283,7 +282,7 @@ func TestRegularSync_InsertDuplicateBlocks(t *testing.T) { b0 := util.NewBeaconBlock() b0r := [32]byte{'a'} - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b0) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b0) b0Root, err := b0.Block.HashTreeRoot() require.NoError(t, err) b1 := util.NewBeaconBlock() @@ -337,7 +336,7 @@ func TestRegularSyncBeaconBlockSubscriber_DoNotReprocessBlock(t *testing.T) { r.initCaches() b0 := util.NewBeaconBlock() - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b0) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b0) b0Root, err := b0.Block.HashTreeRoot() require.NoError(t, err) b3 := util.NewBeaconBlock() @@ -346,14 +345,14 @@ func TestRegularSyncBeaconBlockSubscriber_DoNotReprocessBlock(t *testing.T) { b3Root, err := b3.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b3) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b3) // Add b3 to the cache wsb, err := blocks.NewSignedBeaconBlock(b3) require.NoError(t, err) require.NoError(t, r.insertBlockToPendingQueue(b3.Block.Slot, wsb, b3Root)) - require.NoError(t, r.processPendingBlocks(context.Background())) + require.NoError(t, r.processPendingBlocks(t.Context())) assert.Equal(t, 0, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache") assert.Equal(t, 0, len(r.seenPendingBlocks), "Incorrect size for seen pending block") } @@ -410,13 
+409,13 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin p1.Peers().SetChainState(p2.PeerID(), ðpb.Status{}) b0 := util.NewBeaconBlock() - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b0) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b0) b0Root, err := b0.Block.HashTreeRoot() require.NoError(t, err) b1 := util.NewBeaconBlock() b1.Block.Slot = 1 b1.Block.ParentRoot = b0Root[:] - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b1) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b1) b1Root, err := b1.Block.HashTreeRoot() require.NoError(t, err) @@ -449,8 +448,8 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin require.NoError(t, err) require.NoError(t, r.insertBlockToPendingQueue(b5.Block.Slot, wsb, b5Root)) - require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad - require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run + require.NoError(t, r.processPendingBlocks(t.Context())) // Marks a block as bad + require.NoError(t, r.processPendingBlocks(t.Context())) // Bad block removed on second run assert.Equal(t, 2, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache") assert.Equal(t, 2, len(r.seenPendingBlocks), "Incorrect size for seen pending block") @@ -459,10 +458,10 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin wsb, err = blocks.NewSignedBeaconBlock(b3) require.NoError(t, err) require.NoError(t, r.insertBlockToPendingQueue(b3.Block.Slot, wsb, b3Root)) - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b3) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b3) - require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad - require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run + require.NoError(t, r.processPendingBlocks(t.Context())) // Marks a block as 
bad + require.NoError(t, r.processPendingBlocks(t.Context())) // Bad block removed on second run assert.Equal(t, 2, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache") assert.Equal(t, 2, len(r.seenPendingBlocks), "Incorrect size for seen pending block") @@ -472,10 +471,10 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin require.NoError(t, err) require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, wsb, b2Root)) - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b2) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b2) - require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad - require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run + require.NoError(t, r.processPendingBlocks(t.Context())) // Marks a block as bad + require.NoError(t, r.processPendingBlocks(t.Context())) // Bad block removed on second run assert.Equal(t, 2, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache") assert.Equal(t, 2, len(r.seenPendingBlocks), "Incorrect size for seen pending block") @@ -509,13 +508,13 @@ func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) { p1.Peers().SetChainState(p1.PeerID(), ðpb.Status{}) b0 := util.NewBeaconBlock() - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b0) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b0) b0Root, err := b0.Block.HashTreeRoot() require.NoError(t, err) b1 := util.NewBeaconBlock() b1.Block.Slot = 1 b1.Block.ParentRoot = b0Root[:] - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b1) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b1) b1Root, err := b1.Block.HashTreeRoot() require.NoError(t, err) @@ -554,7 +553,7 @@ func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) { require.NoError(t, err) require.NoError(t, r.insertBlockToPendingQueue(b5.Block.Slot, wsb, b5Root)) - 
require.NoError(t, r.processPendingBlocks(context.Background())) + require.NoError(t, r.processPendingBlocks(t.Context())) assert.Equal(t, 0, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache") assert.Equal(t, 0, len(r.seenPendingBlocks), "Incorrect size for seen pending block") } @@ -615,13 +614,13 @@ func TestService_BatchRootRequest(t *testing.T) { p1.Peers().SetChainState(p2.PeerID(), ðpb.Status{FinalizedEpoch: 2}) b0 := util.NewBeaconBlock() - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b0) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b0) b0Root, err := b0.Block.HashTreeRoot() require.NoError(t, err) b1 := util.NewBeaconBlock() b1.Block.Slot = 1 b1.Block.ParentRoot = b0Root[:] - util.SaveBlock(t, context.Background(), r.cfg.beaconDB, b1) + util.SaveBlock(t, t.Context(), r.cfg.beaconDB, b1) b1Root, err := b1.Block.HashTreeRoot() require.NoError(t, err) @@ -668,7 +667,7 @@ func TestService_BatchRootRequest(t *testing.T) { assert.NoError(t, stream.Close()) }) - require.NoError(t, r.sendBatchRootRequest(context.Background(), sentRoots, rand.NewGenerator())) + require.NoError(t, r.sendBatchRootRequest(t.Context(), sentRoots, rand.NewGenerator())) if util.WaitTimeout(&wg, 1*time.Second) { t.Fatal("Did not receive stream within 1 sec") @@ -707,7 +706,7 @@ func TestService_AddPendingBlockToQueueOverMax(t *testing.T) { } func TestService_ProcessPendingBlockOnCorrectSlot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := dbtest.SetupDB(t) p1 := p2ptest.NewTestP2P(t) @@ -783,12 +782,12 @@ func TestService_ProcessPendingBlockOnCorrectSlot(t *testing.T) { // processPendingBlocks should process only blocks of the current slot. i.e. slot 1. // Then check if the other two blocks are still in the pendingQueue. 
- require.NoError(t, r.processPendingBlocks(context.Background())) + require.NoError(t, r.processPendingBlocks(t.Context())) assert.Equal(t, 2, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache") } func TestService_ProcessBadPendingBlocks(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := dbtest.SetupDB(t) p1 := p2ptest.NewTestP2P(t) @@ -849,7 +848,7 @@ func TestService_ProcessBadPendingBlocks(t *testing.T) { } func TestAlreadySyncingBlock(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := dbtest.SetupDB(t) hook := logTest.NewGlobal() @@ -886,7 +885,7 @@ func TestAlreadySyncingBlock(t *testing.T) { } func TestExpirationCache_PruneOldBlocksCorrectly(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := dbtest.SetupDB(t) mockChain := &mock.ChainService{ diff --git a/beacon-chain/sync/rate_limiter_test.go b/beacon-chain/sync/rate_limiter_test.go index 434b5dd7aa..4d0511720c 100644 --- a/beacon-chain/sync/rate_limiter_test.go +++ b/beacon-chain/sync/rate_limiter_test.go @@ -1,7 +1,6 @@ package sync import ( - "context" "sync" "testing" "time" @@ -45,7 +44,7 @@ func TestRateLimiter_ExceedCapacity(t *testing.T) { assert.Equal(t, p2ptypes.ErrRateLimited.Error(), errMsg, "not equal errors") }) wg.Add(1) - stream, err := p1.BHost.NewStream(context.Background(), p2.PeerID(), protocol.ID(topic)) + stream, err := p1.BHost.NewStream(t.Context(), p2.PeerID(), protocol.ID(topic)) require.NoError(t, err, "could not create stream") err = rlimiter.validateRequest(stream, 64) @@ -82,7 +81,7 @@ func TestRateLimiter_ExceedRawCapacity(t *testing.T) { assert.Equal(t, p2ptypes.ErrRateLimited.Error(), errMsg, "not equal errors") }) wg.Add(1) - stream, err := p1.BHost.NewStream(context.Background(), p2.PeerID(), protocol.ID(topic)) + stream, err := p1.BHost.NewStream(t.Context(), p2.PeerID(), protocol.ID(topic)) require.NoError(t, err, "could not create stream") for i := 0; i < 
2*defaultBurstLimit; i++ { diff --git a/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go b/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go index e6d2b45e7c..1e58011511 100644 --- a/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go +++ b/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go @@ -1,7 +1,6 @@ package sync import ( - "context" "io" "math/big" "sync" @@ -60,7 +59,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsBlocks(t *testing.T) { copy(blk.Block.ParentRoot, prevRoot[:]) prevRoot, err = blk.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), d, blk) + util.SaveBlock(t, t.Context(), d, blk) } clock := startup.NewClock(time.Unix(0, 0), [32]byte{}) @@ -83,10 +82,10 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsBlocks(t *testing.T) { } }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) - err = r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1) + err = r.beaconBlocksByRangeRPCHandler(t.Context(), req, stream1) require.NoError(t, err) // Make sure that rate limiter doesn't limit capacity exceedingly. @@ -122,9 +121,9 @@ func TestRPCBeaconBlocksByRange_ReturnCorrectNumberBack(t *testing.T) { require.NoError(t, err) genRoot = rt } - util.SaveBlock(t, context.Background(), d, blk) + util.SaveBlock(t, t.Context(), d, blk) } - require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), genRoot)) + require.NoError(t, d.SaveGenesisBlockRoot(t.Context(), genRoot)) clock := startup.NewClock(time.Unix(0, 0), [32]byte{}) // Start service with 160 as allowed blocks capacity (and almost zero capacity recovery). 
@@ -154,10 +153,10 @@ func TestRPCBeaconBlocksByRange_ReturnCorrectNumberBack(t *testing.T) { } }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) - err = r.beaconBlocksByRangeRPCHandler(context.Background(), newReq, stream1) + err = r.beaconBlocksByRangeRPCHandler(t.Context(), newReq, stream1) require.NoError(t, err) if util.WaitTimeout(&wg, 1*time.Second) { @@ -231,9 +230,9 @@ func TestRPCBeaconBlocksByRange_ReconstructsPayloads(t *testing.T) { require.NoError(t, err) genRoot = rt } - util.SaveBlock(t, context.Background(), d, blk) + util.SaveBlock(t, t.Context(), d, blk) } - require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), genRoot)) + require.NoError(t, d.SaveGenesisBlockRoot(t.Context(), genRoot)) clock := startup.NewClock(time.Unix(0, 0), [32]byte{}) // Start service with 160 as allowed blocks capacity (and almost zero capacity recovery). @@ -274,10 +273,10 @@ func TestRPCBeaconBlocksByRange_ReconstructsPayloads(t *testing.T) { require.Equal(t, uint64(1), mockEngine.NumReconstructedPayloads) }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) - err = r.beaconBlocksByRangeRPCHandler(context.Background(), newReq, stream1) + err = r.beaconBlocksByRangeRPCHandler(t.Context(), newReq, stream1) require.NoError(t, err) if util.WaitTimeout(&wg, 1*time.Second) { @@ -310,7 +309,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks(t *testing.T) { require.NoError(t, err) expectedRoots[j] = rt prevRoot = rt - util.SaveBlock(t, context.Background(), d, blk) + util.SaveBlock(t, t.Context(), d, blk) j++ } @@ -342,9 +341,9 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks(t *testing.T) { } }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := 
p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) - require.NoError(t, r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1)) + require.NoError(t, r.beaconBlocksByRangeRPCHandler(t.Context(), req, stream1)) if util.WaitTimeout(&wg, 1*time.Second) { t.Fatal("Did not receive stream within 1 sec") @@ -375,9 +374,9 @@ func TestRPCBeaconBlocksByRange_ReturnsGenesisBlock(t *testing.T) { // Save genesis block if i == 0 { - require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), rt)) + require.NoError(t, d.SaveGenesisBlockRoot(t.Context(), rt)) } - util.SaveBlock(t, context.Background(), d, blk) + util.SaveBlock(t, t.Context(), d, blk) prevRoot = rt } @@ -403,9 +402,9 @@ func TestRPCBeaconBlocksByRange_ReturnsGenesisBlock(t *testing.T) { } }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) - require.NoError(t, r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1)) + require.NoError(t, r.beaconBlocksByRangeRPCHandler(t.Context(), req, stream1)) if util.WaitTimeout(&wg, 1*time.Second) { t.Fatal("Did not receive stream within 1 sec") @@ -425,7 +424,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) { if req.Step == 1 { block.Block.ParentRoot = parentRoot[:] } - util.SaveBlock(t, context.Background(), d, block) + util.SaveBlock(t, t.Context(), d, block) rt, err := block.Block.HashTreeRoot() require.NoError(t, err) parentRoot = rt @@ -454,9 +453,9 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) { } } }) - stream, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) - if err := r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream); err != nil { + if err := r.beaconBlocksByRangeRPCHandler(t.Context(), req, stream); err != 
nil { return err } time.Sleep(100 * time.Millisecond) @@ -678,7 +677,7 @@ func TestRPCBeaconBlocksByRange_EnforceResponseInvariants(t *testing.T) { block := util.NewBeaconBlock() block.Block.Slot = i block.Block.ParentRoot = parentRoot[:] - util.SaveBlock(t, context.Background(), d, block) + util.SaveBlock(t, t.Context(), d, block) rt, err := block.Block.HashTreeRoot() require.NoError(t, err) parentRoot = rt @@ -703,9 +702,9 @@ func TestRPCBeaconBlocksByRange_EnforceResponseInvariants(t *testing.T) { } processBlocks(blocks) }) - stream, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) - if err := r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream); err != nil { + if err := r.beaconBlocksByRangeRPCHandler(t.Context(), req, stream); err != nil { return err } if util.WaitTimeout(&wg, 1*time.Second) { @@ -754,8 +753,8 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) { previousRoot, err := blk.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), d, blk) - require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), previousRoot)) + util.SaveBlock(t, t.Context(), d, blk) + require.NoError(t, d.SaveGenesisBlockRoot(t.Context(), previousRoot)) blks := make([]*ethpb.SignedBeaconBlock, req.Count) // Populate the database with blocks that would match the request. 
for i, j := req.StartSlot, 0; i < req.StartSlot.Add(req.Step*req.Count); i += primitives.Slot(req.Step) { @@ -769,7 +768,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) { require.NoError(t, err) previousRoot, err = blks[j].Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), d, blks[j]) + util.SaveBlock(t, t.Context(), d, blks[j]) j++ } stateSummaries := make([]*ethpb.StateSummary, len(blks)) @@ -787,8 +786,8 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) { } chain.CanonicalRoots[bRoot] = true } - require.NoError(t, d.SaveStateSummaries(context.Background(), stateSummaries)) - require.NoError(t, d.SaveFinalizedCheckpoint(context.Background(), ðpb.Checkpoint{ + require.NoError(t, d.SaveStateSummaries(t.Context(), stateSummaries)) + require.NoError(t, d.SaveFinalizedCheckpoint(t.Context(), ðpb.Checkpoint{ Epoch: slots.ToEpoch(stateSummaries[len(stateSummaries)-1].Slot), Root: stateSummaries[len(stateSummaries)-1].Root, })) @@ -802,8 +801,8 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) { require.NoError(t, err) genRoot := previousRoot - util.SaveBlock(t, context.Background(), d, blk) - require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), previousRoot)) + util.SaveBlock(t, t.Context(), d, blk) + require.NoError(t, d.SaveGenesisBlockRoot(t.Context(), previousRoot)) blks := make([]*ethpb.SignedBeaconBlock, req.Count) // Populate the database with blocks with non linear roots. 
for i, j := req.StartSlot, 0; i < req.StartSlot.Add(req.Step*req.Count); i += primitives.Slot(req.Step) { @@ -821,7 +820,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) { require.NoError(t, err) previousRoot, err = blks[j].Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), d, blks[j]) + util.SaveBlock(t, t.Context(), d, blks[j]) j++ } stateSummaries := make([]*ethpb.StateSummary, len(blks)) @@ -838,8 +837,8 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) { } chain.CanonicalRoots[bRoot] = true } - require.NoError(t, d.SaveStateSummaries(context.Background(), stateSummaries)) - require.NoError(t, d.SaveFinalizedCheckpoint(context.Background(), ðpb.Checkpoint{ + require.NoError(t, d.SaveStateSummaries(t.Context(), stateSummaries)) + require.NoError(t, d.SaveFinalizedCheckpoint(t.Context(), ðpb.Checkpoint{ Epoch: slots.ToEpoch(stateSummaries[len(stateSummaries)-1].Slot), Root: stateSummaries[len(stateSummaries)-1].Root, })) @@ -870,9 +869,9 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) { } processBlocks(blocks) }) - stream, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) - if err := r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream); err != nil { + if err := r.beaconBlocksByRangeRPCHandler(t.Context(), req, stream); err != nil { return err } if util.WaitTimeout(&wg, 1*time.Second) { @@ -1096,7 +1095,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks_PreviousRoot(t *testing.T) { chain := &chainMock.ChainService{} cf := canonicalFilter{canonical: chain.IsCanonical} - seq, nseq, err := cf.filter(context.Background(), blks) + seq, nseq, err := cf.filter(t.Context(), blks) require.NoError(t, err) require.Equal(t, len(blks), len(seq)) require.Equal(t, 0, len(nseq)) diff --git a/beacon-chain/sync/rpc_beacon_blocks_by_root_test.go 
b/beacon-chain/sync/rpc_beacon_blocks_by_root_test.go index ed7c0ec5c3..00ff5af73f 100644 --- a/beacon-chain/sync/rpc_beacon_blocks_by_root_test.go +++ b/beacon-chain/sync/rpc_beacon_blocks_by_root_test.go @@ -1,7 +1,6 @@ package sync import ( - "context" "math/big" "sync" "testing" @@ -51,7 +50,7 @@ func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks(t *testing.T) { blk.Block.Slot = i root, err := blk.Block.HashTreeRoot() require.NoError(t, err) - util.SaveBlock(t, context.Background(), d, blk) + util.SaveBlock(t, t.Context(), d, blk) blkRoots = append(blkRoots, root) } @@ -75,9 +74,9 @@ func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks(t *testing.T) { } }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) - err = r.beaconBlocksRootRPCHandler(context.Background(), &blkRoots, stream1) + err = r.beaconBlocksRootRPCHandler(t.Context(), &blkRoots, stream1) assert.NoError(t, err) if util.WaitTimeout(&wg, 1*time.Second) { @@ -140,7 +139,7 @@ func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks_ReconstructsPayload(t *testi require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - require.NoError(t, d.SaveBlock(context.Background(), wsb)) + require.NoError(t, d.SaveBlock(t.Context(), wsb)) blkRoots = append(blkRoots, root) } @@ -175,9 +174,9 @@ func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks_ReconstructsPayload(t *testi require.Equal(t, uint64(10), mockEngine.NumReconstructedPayloads) }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) - err = r.beaconBlocksRootRPCHandler(context.Background(), &blkRoots, stream1) + err = r.beaconBlocksRootRPCHandler(t.Context(), &blkRoots, stream1) assert.NoError(t, err) if util.WaitTimeout(&wg, 1*time.Second) { @@ -199,7 +198,7 @@ func 
TestRecentBeaconBlocks_RPCRequestSent(t *testing.T) { require.NoError(t, err) blockBRoot, err := blockB.Block.HashTreeRoot() require.NoError(t, err) - genesisState, err := transition.GenesisBeaconState(context.Background(), nil, 0, &ethpb.Eth1Data{}) + genesisState, err := transition.GenesisBeaconState(t.Context(), nil, 0, &ethpb.Eth1Data{}) require.NoError(t, err) require.NoError(t, genesisState.SetSlot(111)) require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%uint64(params.BeaconConfig().SlotsPerHistoricalRoot), blockARoot)) @@ -225,7 +224,7 @@ func TestRecentBeaconBlocks_RPCRequestSent(t *testing.T) { }, slotToPendingBlocks: gcache.New(time.Second, 2*time.Second), seenPendingBlocks: make(map[[32]byte]bool), - ctx: context.Background(), + ctx: t.Context(), rateLimiter: newRateLimiter(p1), } @@ -252,7 +251,7 @@ func TestRecentBeaconBlocks_RPCRequestSent(t *testing.T) { }) p1.Connect(p2) - require.NoError(t, r.sendBeaconBlocksRequest(context.Background(), &expectedRoots, p2.PeerID())) + require.NoError(t, r.sendBeaconBlocksRequest(t.Context(), &expectedRoots, p2.PeerID())) if util.WaitTimeout(&wg, 1*time.Second) { t.Fatal("Did not receive stream within 1 sec") @@ -273,7 +272,7 @@ func TestRecentBeaconBlocks_RPCRequestSent_IncorrectRoot(t *testing.T) { require.NoError(t, err) blockBRoot, err := blockB.Block.HashTreeRoot() require.NoError(t, err) - genesisState, err := transition.GenesisBeaconState(context.Background(), nil, 0, &ethpb.Eth1Data{}) + genesisState, err := transition.GenesisBeaconState(t.Context(), nil, 0, &ethpb.Eth1Data{}) require.NoError(t, err) require.NoError(t, genesisState.SetSlot(111)) require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%uint64(params.BeaconConfig().SlotsPerHistoricalRoot), blockARoot)) @@ -299,7 +298,7 @@ func TestRecentBeaconBlocks_RPCRequestSent_IncorrectRoot(t *testing.T) { }, slotToPendingBlocks: gcache.New(time.Second, 2*time.Second), seenPendingBlocks: make(map[[32]byte]bool), - ctx: context.Background(), + ctx: 
t.Context(), rateLimiter: newRateLimiter(p1), } @@ -327,7 +326,7 @@ func TestRecentBeaconBlocks_RPCRequestSent_IncorrectRoot(t *testing.T) { }) p1.Connect(p2) - require.ErrorContains(t, "received unexpected block with root", r.sendBeaconBlocksRequest(context.Background(), &expectedRoots, p2.PeerID())) + require.ErrorContains(t, "received unexpected block with root", r.sendBeaconBlocksRequest(t.Context(), &expectedRoots, p2.PeerID())) } func TestRecentBeaconBlocksRPCHandler_HandleZeroBlocks(t *testing.T) { @@ -349,9 +348,9 @@ func TestRecentBeaconBlocksRPCHandler_HandleZeroBlocks(t *testing.T) { expectFailure(t, 1, "no block roots provided in request", stream) }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) - err = r.beaconBlocksRootRPCHandler(context.Background(), &p2pTypes.BeaconBlockByRootsReq{}, stream1) + err = r.beaconBlocksRootRPCHandler(t.Context(), &p2pTypes.BeaconBlockByRootsReq{}, stream1) assert.ErrorContains(t, "no block roots provided", err) if util.WaitTimeout(&wg, 1*time.Second) { t.Fatal("Did not receive stream within 1 sec") @@ -371,14 +370,14 @@ func TestRequestPendingBlobs(t *testing.T) { require.NoError(t, err) request, err := s.pendingBlobsRequestForBlock([32]byte{}, b) require.NoError(t, err) - require.NoError(t, s.sendAndSaveBlobSidecars(context.Background(), request, "test", b)) + require.NoError(t, s.sendAndSaveBlobSidecars(t.Context(), request, "test", b)) }) t.Run("empty commitment block should not fail", func(t *testing.T) { b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock()) require.NoError(t, err) request, err := s.pendingBlobsRequestForBlock([32]byte{}, b) require.NoError(t, err) - require.NoError(t, s.sendAndSaveBlobSidecars(context.Background(), request, "test", b)) + require.NoError(t, s.sendAndSaveBlobSidecars(t.Context(), request, "test", b)) }) t.Run("unsupported protocol", func(t *testing.T) { 
p1 := p2ptest.NewTestP2P(t) @@ -411,7 +410,7 @@ func TestRequestPendingBlobs(t *testing.T) { require.NoError(t, err) request, err := s.pendingBlobsRequestForBlock([32]byte{}, b1) require.NoError(t, err) - require.ErrorContains(t, "protocols not supported", s.sendAndSaveBlobSidecars(context.Background(), request, p2.PeerID(), b1)) + require.ErrorContains(t, "protocols not supported", s.sendAndSaveBlobSidecars(t.Context(), request, p2.PeerID(), b1)) }) } diff --git a/beacon-chain/sync/rpc_goodbye_test.go b/beacon-chain/sync/rpc_goodbye_test.go index b10114be64..baf48c3e61 100644 --- a/beacon-chain/sync/rpc_goodbye_test.go +++ b/beacon-chain/sync/rpc_goodbye_test.go @@ -1,7 +1,6 @@ package sync import ( - "context" "sync" "testing" "time" @@ -52,11 +51,11 @@ func TestGoodByeRPCHandler_Disconnects_With_Peer(t *testing.T) { defer wg.Done() expectResetStream(t, stream) }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) failureCode := p2ptypes.GoodbyeCodeClientShutdown - assert.NoError(t, r.goodbyeRPCHandler(context.Background(), &failureCode, stream1)) + assert.NoError(t, r.goodbyeRPCHandler(t.Context(), &failureCode, stream1)) if util.WaitTimeout(&wg, 1*time.Second) { t.Fatal("Did not receive stream within 1 sec") @@ -97,11 +96,11 @@ func TestGoodByeRPCHandler_BackOffPeer(t *testing.T) { defer wg.Done() expectResetStream(t, stream) }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) failureCode := p2ptypes.GoodbyeCodeClientShutdown - assert.NoError(t, r.goodbyeRPCHandler(context.Background(), &failureCode, stream1)) + assert.NoError(t, r.goodbyeRPCHandler(t.Context(), &failureCode, stream1)) if util.WaitTimeout(&wg, 1*time.Second) { t.Fatal("Did not receive stream within 1 sec") @@ -124,11 +123,11 @@ func 
TestGoodByeRPCHandler_BackOffPeer(t *testing.T) { expectResetStream(t, stream) }) - stream2, err := p1.BHost.NewStream(context.Background(), p3.BHost.ID(), pcl) + stream2, err := p1.BHost.NewStream(t.Context(), p3.BHost.ID(), pcl) require.NoError(t, err) failureCode = p2ptypes.GoodbyeCodeBanned - assert.NoError(t, r.goodbyeRPCHandler(context.Background(), &failureCode, stream2)) + assert.NoError(t, r.goodbyeRPCHandler(t.Context(), &failureCode, stream2)) if util.WaitTimeout(&wg, 1*time.Second) { t.Fatal("Did not receive stream within 1 sec") @@ -180,7 +179,7 @@ func TestSendGoodbye_SendsMessage(t *testing.T) { assert.NoError(t, stream.Close()) }) - err := r.sendGoodByeMessage(context.Background(), failureCode, p2.BHost.ID()) + err := r.sendGoodByeMessage(t.Context(), failureCode, p2.BHost.ID()) assert.NoError(t, err) if util.WaitTimeout(&wg, 1*time.Second) { @@ -227,7 +226,7 @@ func TestSendGoodbye_DisconnectWithPeer(t *testing.T) { assert.NoError(t, stream.Close()) }) - assert.NoError(t, r.sendGoodByeAndDisconnect(context.Background(), failureCode, p2.BHost.ID())) + assert.NoError(t, r.sendGoodByeAndDisconnect(t.Context(), failureCode, p2.BHost.ID())) conns := p1.BHost.Network().ConnsToPeer(p2.BHost.ID()) if len(conns) > 0 { t.Error("Peer is still not disconnected despite sending a goodbye message") diff --git a/beacon-chain/sync/rpc_handler_test.go b/beacon-chain/sync/rpc_handler_test.go index d8b079c286..b08f478eec 100644 --- a/beacon-chain/sync/rpc_handler_test.go +++ b/beacon-chain/sync/rpc_handler_test.go @@ -21,7 +21,7 @@ type rpcHandlerTest struct { } func (rt *rpcHandlerTest) testHandler(nh network.StreamHandler, rh rpcHandler, rhi interface{}) { - ctx, cancel := context.WithTimeout(context.Background(), rt.timeout) + ctx, cancel := context.WithTimeout(rt.t.Context(), rt.timeout) defer func() { cancel() }() diff --git a/beacon-chain/sync/rpc_light_client_test.go b/beacon-chain/sync/rpc_light_client_test.go index 6e5922bf72..8a24a737c3 100644 --- 
a/beacon-chain/sync/rpc_light_client_test.go +++ b/beacon-chain/sync/rpc_light_client_test.go @@ -1,7 +1,6 @@ package sync import ( - "context" "sync" "testing" "time" @@ -32,7 +31,7 @@ func TestRPC_LightClientBootstrap(t *testing.T) { }) defer resetFn() - ctx := context.Background() + ctx := t.Context() p2pService := p2ptest.NewTestP2P(t) p1 := p2ptest.NewTestP2P(t) p2 := p2ptest.NewTestP2P(t) @@ -135,7 +134,7 @@ func TestRPC_LightClientBootstrap(t *testing.T) { require.DeepSSZEqual(t, resSSZ, bootstrapSSZ) }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) err = r.lightClientBootstrapRPCHandler(ctx, &blockRoot, stream1) require.NoError(t, err) @@ -154,7 +153,7 @@ func TestRPC_LightClientOptimisticUpdate(t *testing.T) { }) defer resetFn() - ctx := context.Background() + ctx := t.Context() p2pService := p2ptest.NewTestP2P(t) p1 := p2ptest.NewTestP2P(t) p2 := p2ptest.NewTestP2P(t) @@ -256,7 +255,7 @@ func TestRPC_LightClientOptimisticUpdate(t *testing.T) { require.DeepSSZEqual(t, resSSZ, updateSSZ) }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) err = r.lightClientOptimisticUpdateRPCHandler(ctx, nil, stream1) require.NoError(t, err) @@ -274,7 +273,7 @@ func TestRPC_LightClientFinalityUpdate(t *testing.T) { }) defer resetFn() - ctx := context.Background() + ctx := t.Context() p2pService := p2ptest.NewTestP2P(t) p1 := p2ptest.NewTestP2P(t) p2 := p2ptest.NewTestP2P(t) @@ -376,7 +375,7 @@ func TestRPC_LightClientFinalityUpdate(t *testing.T) { require.DeepSSZEqual(t, resSSZ, updateSSZ) }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) err = r.lightClientFinalityUpdateRPCHandler(ctx, nil, stream1) 
require.NoError(t, err) @@ -394,7 +393,7 @@ func TestRPC_LightClientUpdatesByRange(t *testing.T) { }) defer resetFn() - ctx := context.Background() + ctx := t.Context() p2pService := p2ptest.NewTestP2P(t) p1 := p2ptest.NewTestP2P(t) p2 := p2ptest.NewTestP2P(t) @@ -502,7 +501,7 @@ func TestRPC_LightClientUpdatesByRange(t *testing.T) { responseCounter++ }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) msg := pb.LightClientUpdatesByRangeRequest{ diff --git a/beacon-chain/sync/rpc_metadata_test.go b/beacon-chain/sync/rpc_metadata_test.go index 7ca05fe905..10c0b03c9a 100644 --- a/beacon-chain/sync/rpc_metadata_test.go +++ b/beacon-chain/sync/rpc_metadata_test.go @@ -1,7 +1,6 @@ package sync import ( - "context" "sync" "testing" "time" @@ -62,10 +61,10 @@ func TestMetaDataRPCHandler_ReceivesMetadata(t *testing.T) { assert.NoError(t, r.cfg.p2p.Encoding().DecodeWithMaxLength(stream, out)) assert.DeepEqual(t, p1.LocalMetadata.InnerObject(), out, "MetadataV0 unequal") }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) - assert.NoError(t, r.metaDataHandler(context.Background(), new(interface{}), stream1)) + assert.NoError(t, r.metaDataHandler(t.Context(), new(interface{}), stream1)) if util.WaitTimeout(&wg, 1*time.Second) { t.Fatal("Did not receive stream within 1 sec") @@ -120,10 +119,10 @@ func TestMetadataRPCHandler_SendsMetadata(t *testing.T) { wg.Add(1) p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) { defer wg.Done() - assert.NoError(t, r2.metaDataHandler(context.Background(), new(interface{}), stream)) + assert.NoError(t, r2.metaDataHandler(t.Context(), new(interface{}), stream)) }) - md, err := r.sendMetaDataRequest(context.Background(), p2.BHost.ID()) + md, err := r.sendMetaDataRequest(t.Context(), 
p2.BHost.ID()) assert.NoError(t, err) if !equality.DeepEqual(md.InnerObject(), p2.LocalMetadata.InnerObject()) { @@ -191,11 +190,11 @@ func TestMetadataRPCHandler_SendsMetadataAltair(t *testing.T) { wg.Add(1) p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) { defer wg.Done() - err := r2.metaDataHandler(context.Background(), new(interface{}), stream) + err := r2.metaDataHandler(t.Context(), new(interface{}), stream) assert.NoError(t, err) }) - _, err := r.sendMetaDataRequest(context.Background(), p2.BHost.ID()) + _, err := r.sendMetaDataRequest(t.Context(), p2.BHost.ID()) assert.NoError(t, err) if util.WaitTimeout(&wg, 1*time.Second) { @@ -212,10 +211,10 @@ func TestMetadataRPCHandler_SendsMetadataAltair(t *testing.T) { wg.Add(1) p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) { defer wg.Done() - assert.NoError(t, r2.metaDataHandler(context.Background(), new(interface{}), stream)) + assert.NoError(t, r2.metaDataHandler(t.Context(), new(interface{}), stream)) }) - md, err := r.sendMetaDataRequest(context.Background(), p2.BHost.ID()) + md, err := r.sendMetaDataRequest(t.Context(), p2.BHost.ID()) assert.NoError(t, err) if !equality.DeepEqual(md.InnerObject(), p2.LocalMetadata.InnerObject()) { @@ -283,11 +282,11 @@ func TestMetadataRPCHandler_SendsMetadataQUIC(t *testing.T) { wg.Add(1) p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) { defer wg.Done() - err := r2.metaDataHandler(context.Background(), new(interface{}), stream) + err := r2.metaDataHandler(t.Context(), new(interface{}), stream) assert.NoError(t, err) }) - _, err := r.sendMetaDataRequest(context.Background(), p2.BHost.ID()) + _, err := r.sendMetaDataRequest(t.Context(), p2.BHost.ID()) assert.NoError(t, err) if util.WaitTimeout(&wg, 1*time.Second) { @@ -304,10 +303,10 @@ func TestMetadataRPCHandler_SendsMetadataQUIC(t *testing.T) { wg.Add(1) p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) { defer wg.Done() - assert.NoError(t, r2.metaDataHandler(context.Background(), 
new(interface{}), stream)) + assert.NoError(t, r2.metaDataHandler(t.Context(), new(interface{}), stream)) }) - md, err := r.sendMetaDataRequest(context.Background(), p2.BHost.ID()) + md, err := r.sendMetaDataRequest(t.Context(), p2.BHost.ID()) assert.NoError(t, err) if !equality.DeepEqual(md.InnerObject(), p2.LocalMetadata.InnerObject()) { diff --git a/beacon-chain/sync/rpc_ping_test.go b/beacon-chain/sync/rpc_ping_test.go index ba708fe5b3..9bccb2d008 100644 --- a/beacon-chain/sync/rpc_ping_test.go +++ b/beacon-chain/sync/rpc_ping_test.go @@ -1,7 +1,6 @@ package sync import ( - "context" "sync" "testing" "time" @@ -65,11 +64,11 @@ func TestPingRPCHandler_ReceivesPing(t *testing.T) { assert.NoError(t, r.cfg.p2p.Encoding().DecodeWithMaxLength(stream, out)) assert.Equal(t, uint64(2), uint64(*out)) }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) seqNumber := primitives.SSZUint64(2) - assert.NoError(t, r.pingHandler(context.Background(), &seqNumber, stream1)) + assert.NoError(t, r.pingHandler(t.Context(), &seqNumber, stream1)) if util.WaitTimeout(&wg, 1*time.Second) { t.Fatal("Did not receive stream within 1 sec") @@ -137,10 +136,10 @@ func TestPingRPCHandler_SendsPing(t *testing.T) { out := new(primitives.SSZUint64) assert.NoError(t, r2.cfg.p2p.Encoding().DecodeWithMaxLength(stream, out)) assert.Equal(t, uint64(2), uint64(*out)) - assert.NoError(t, r2.pingHandler(context.Background(), out, stream)) + assert.NoError(t, r2.pingHandler(t.Context(), out, stream)) }) - assert.NoError(t, r.sendPingRequest(context.Background(), p2.BHost.ID())) + assert.NoError(t, r.sendPingRequest(t.Context(), p2.BHost.ID())) if util.WaitTimeout(&wg, 1*time.Second) { t.Fatal("Did not receive stream within 1 sec") @@ -196,11 +195,11 @@ func TestPingRPCHandler_BadSequenceNumber(t *testing.T) { expectFailure(t, responseCodeInvalidRequest, 
p2ptypes.ErrInvalidSequenceNum.Error(), stream) }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) wantedSeq := primitives.SSZUint64(p2.LocalMetadata.SequenceNumber()) - err = r.pingHandler(context.Background(), &wantedSeq, stream1) + err = r.pingHandler(t.Context(), &wantedSeq, stream1) assert.ErrorContains(t, p2ptypes.ErrInvalidSequenceNum.Error(), err) if util.WaitTimeout(&wg, 1*time.Second) { diff --git a/beacon-chain/sync/rpc_send_request_test.go b/beacon-chain/sync/rpc_send_request_test.go index 3f42a6fbab..d28d094bd0 100644 --- a/beacon-chain/sync/rpc_send_request_test.go +++ b/beacon-chain/sync/rpc_send_request_test.go @@ -28,7 +28,7 @@ import ( ) func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() pcl := fmt.Sprintf("%s/ssz_snappy", p2p.RPCBlocksByRangeTopicV1) @@ -300,7 +300,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) { } func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() pcl := fmt.Sprintf("%s/ssz_snappy", p2p.RPCBlocksByRootTopicV1) @@ -696,7 +696,7 @@ func TestSeqBlobValid(t *testing.T) { func TestSendBlobsByRangeRequest(t *testing.T) { topic := fmt.Sprintf("%s/ssz_snappy", p2p.RPCBlobSidecarsByRangeTopicV1) - ctx := context.Background() + ctx := t.Context() t.Run("single blob - Deneb", func(t *testing.T) { // Setup genesis such that we are currently in deneb. 
diff --git a/beacon-chain/sync/rpc_status_test.go b/beacon-chain/sync/rpc_status_test.go index a0e65d50a8..dba254d46a 100644 --- a/beacon-chain/sync/rpc_status_test.go +++ b/beacon-chain/sync/rpc_status_test.go @@ -94,9 +94,9 @@ func TestStatusRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) { assert.NoError(t, stream.Close()) }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) - assert.NoError(t, r.statusRPCHandler(context.Background(), &ethpb.Status{ForkDigest: bytesutil.PadTo([]byte("f"), 4), HeadRoot: make([]byte, 32), FinalizedRoot: make([]byte, 32)}, stream1)) + assert.NoError(t, r.statusRPCHandler(t.Context(), &ethpb.Status{ForkDigest: bytesutil.PadTo([]byte("f"), 4), HeadRoot: make([]byte, 32), FinalizedRoot: make([]byte, 32)}, stream1)) if util.WaitTimeout(&wg, 1*time.Second) { t.Fatal("Did not receive stream within 1 sec") @@ -151,12 +151,12 @@ func TestStatusRPCHandler_ConnectsOnGenesis(t *testing.T) { assert.DeepEqual(t, root[:], out.FinalizedRoot) }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) digest, err := r.currentForkDigest() require.NoError(t, err) - err = r.statusRPCHandler(context.Background(), &ethpb.Status{ForkDigest: digest[:], FinalizedRoot: params.BeaconConfig().ZeroHash[:]}, stream1) + err = r.statusRPCHandler(t.Context(), &ethpb.Status{ForkDigest: digest[:], FinalizedRoot: params.BeaconConfig().ZeroHash[:]}, stream1) assert.NoError(t, err) if util.WaitTimeout(&wg, 1*time.Second) { @@ -183,12 +183,12 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) { finalized.Block.Slot = blkSlot finalizedRoot, err := finalized.Block.HashTreeRoot() require.NoError(t, err) - genesisState, err := transition.GenesisBeaconState(context.Background(), nil, 0, &ethpb.Eth1Data{}) + genesisState, err := 
transition.GenesisBeaconState(t.Context(), nil, 0, &ethpb.Eth1Data{}) require.NoError(t, err) require.NoError(t, genesisState.SetSlot(111)) require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%uint64(params.BeaconConfig().SlotsPerHistoricalRoot), headRoot)) - util.SaveBlock(t, context.Background(), db, finalized) - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), finalizedRoot)) + util.SaveBlock(t, t.Context(), db, finalized) + require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), finalizedRoot)) finalizedCheckpt := &ethpb.Checkpoint{ Epoch: 3, Root: finalizedRoot[:], @@ -245,10 +245,10 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) { t.Errorf("Did not receive expected message. Got %+v wanted %+v", out, expected) } }) - stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl) + stream1, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl) require.NoError(t, err) - err = r.statusRPCHandler(context.Background(), &ethpb.Status{ + err = r.statusRPCHandler(t.Context(), &ethpb.Status{ ForkDigest: digest[:], FinalizedRoot: finalizedRoot[:], FinalizedEpoch: 3, @@ -261,7 +261,7 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) { } func TestHandshakeHandlers_Roundtrip(t *testing.T) { - ctx := context.Background() + ctx := t.Context() ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -437,7 +437,7 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) { finalized.Block.Slot = 40 finalizedRoot, err := finalized.Block.HashTreeRoot() require.NoError(t, err) - genesisState, err := transition.GenesisBeaconState(context.Background(), nil, 0, &ethpb.Eth1Data{}) + genesisState, err := transition.GenesisBeaconState(t.Context(), nil, 0, &ethpb.Eth1Data{}) require.NoError(t, err) require.NoError(t, genesisState.SetSlot(111)) require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%uint64(params.BeaconConfig().SlotsPerHistoricalRoot), headRoot)) @@ -463,7 +463,7 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) { 
chain: chain, clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), }, - ctx: context.Background(), + ctx: t.Context(), rateLimiter: newRateLimiter(p1), } @@ -516,14 +516,14 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) { finalized.Block.Slot = blkSlot finalizedRoot, err := finalized.Block.HashTreeRoot() require.NoError(t, err) - genesisState, err := transition.GenesisBeaconState(context.Background(), nil, 0, &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)}) + genesisState, err := transition.GenesisBeaconState(t.Context(), nil, 0, &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)}) require.NoError(t, err) require.NoError(t, genesisState.SetSlot(111)) require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%uint64(params.BeaconConfig().SlotsPerHistoricalRoot), headRoot)) blk := util.NewBeaconBlock() blk.Block.Slot = blkSlot - util.SaveBlock(t, context.Background(), db, blk) - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), finalizedRoot)) + util.SaveBlock(t, t.Context(), db, blk) + require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), finalizedRoot)) finalizedCheckpt := &ethpb.Checkpoint{ Epoch: 3, Root: finalizedRoot[:], @@ -551,7 +551,7 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) { clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), stateNotifier: chain.StateNotifier(), }, - ctx: context.Background(), + ctx: t.Context(), rateLimiter: newRateLimiter(p1), } chain2 := &mock.ChainService{ @@ -576,7 +576,7 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) { beaconDB: db, stateNotifier: chain.StateNotifier(), }, - ctx: context.Background(), + ctx: t.Context(), rateLimiter: newRateLimiter(p1), } @@ -590,7 +590,7 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) { defer wg.Done() out := &ethpb.Status{} assert.NoError(t, r.cfg.p2p.Encoding().DecodeWithMaxLength(stream, out)) - assert.NoError(t, 
r2.validateStatusMessage(context.Background(), out)) + assert.NoError(t, r2.validateStatusMessage(t.Context(), out)) }) p1.AddConnectionHandler(r.sendRPCStatusRequest, nil) @@ -604,9 +604,9 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) { } func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) { - db, err := kv.NewKVStore(context.Background(), t.TempDir()) + db, err := kv.NewKVStore(t.Context(), t.TempDir()) require.NoError(t, err) - bState, err := transition.GenesisBeaconState(context.Background(), nil, 0, &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)}) + bState, err := transition.GenesisBeaconState(t.Context(), nil, 0, &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)}) require.NoError(t, err) blk := util.NewBeaconBlock() @@ -616,10 +616,10 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) { wsb, err := consensusblocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - require.NoError(t, db.SaveBlock(context.Background(), wsb)) - require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genRoot)) + require.NoError(t, db.SaveBlock(t.Context(), wsb)) + require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genRoot)) blocksTillHead := makeBlocks(t, 1, 1000, genRoot) - require.NoError(t, db.SaveBlocks(context.Background(), blocksTillHead)) + require.NoError(t, db.SaveBlocks(t.Context(), blocksTillHead)) stateSummaries := make([]*ethpb.StateSummary, len(blocksTillHead)) for i, b := range blocksTillHead { @@ -630,7 +630,7 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) { Root: bRoot[:], } } - require.NoError(t, db.SaveStateSummaries(context.Background(), stateSummaries)) + require.NoError(t, db.SaveStateSummaries(t.Context(), stateSummaries)) rootFetcher := func(slot primitives.Slot) [32]byte { rt, err := blocksTillHead[slot-1].Block().HashTreeRoot() @@ -711,7 +711,7 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) { 
Epoch: tt.remoteFinalizedEpoch, Root: tt.remoteFinalizedRoot[:], } - require.NoError(t, db.SaveFinalizedCheckpoint(context.Background(), finalizedCheckpt)) + require.NoError(t, db.SaveFinalizedCheckpoint(t.Context(), finalizedCheckpt)) epoch := expectedFinalizedEpoch.Add(2) totalSec := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch) * params.BeaconConfig().SecondsPerSlot)) @@ -739,7 +739,7 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) { clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), stateNotifier: chain.StateNotifier(), }, - ctx: context.Background(), + ctx: t.Context(), rateLimiter: newRateLimiter(p1), } chain2 := &mock.ChainService{ @@ -766,7 +766,7 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) { stateNotifier: chain.StateNotifier(), }, - ctx: context.Background(), + ctx: t.Context(), rateLimiter: newRateLimiter(p1), } @@ -780,7 +780,7 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) { defer wg.Done() out := &ethpb.Status{} assert.NoError(t, r.cfg.p2p.Encoding().DecodeWithMaxLength(stream, out)) - assert.Equal(t, tt.expectError, r2.validateStatusMessage(context.Background(), out) != nil) + assert.Equal(t, tt.expectError, r2.validateStatusMessage(t.Context(), out) != nil) }) p1.AddConnectionHandler(r.sendRPCStatusRequest, nil) @@ -797,7 +797,7 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) { } func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) { - ctx := context.Background() + ctx := t.Context() ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -903,7 +903,7 @@ func TestStatusRPC_ValidGenesisMessage(t *testing.T) { finalized.Block.Slot = blkSlot finalizedRoot, err := finalized.Block.HashTreeRoot() require.NoError(t, err) - genesisState, err := transition.GenesisBeaconState(context.Background(), nil, 0, &ethpb.Eth1Data{}) + genesisState, err := transition.GenesisBeaconState(t.Context(), nil, 0, &ethpb.Eth1Data{}) require.NoError(t, err) 
require.NoError(t, genesisState.SetSlot(111)) require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%uint64(params.BeaconConfig().SlotsPerHistoricalRoot), headRoot)) @@ -928,7 +928,7 @@ func TestStatusRPC_ValidGenesisMessage(t *testing.T) { clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), stateNotifier: chain.StateNotifier(), }, - ctx: context.Background(), + ctx: t.Context(), } digest, err := r.currentForkDigest() require.NoError(t, err) @@ -993,7 +993,7 @@ func TestShouldResync(t *testing.T) { }, } for _, tt := range tests { - headState, err := transition.GenesisBeaconState(context.Background(), nil, 0, &ethpb.Eth1Data{}) + headState, err := transition.GenesisBeaconState(t.Context(), nil, 0, &ethpb.Eth1Data{}) require.NoError(t, err) require.NoError(t, headState.SetSlot(tt.args.headSlot)) chain := &mock.ChainService{ @@ -1007,7 +1007,7 @@ func TestShouldResync(t *testing.T) { initialSync: &mockSync.Sync{IsSyncing: tt.args.syncing}, stateNotifier: chain.StateNotifier(), }, - ctx: context.Background(), + ctx: t.Context(), } t.Run(tt.name, func(t *testing.T) { if got := r.shouldReSync(); got != tt.want { diff --git a/beacon-chain/sync/rpc_test.go b/beacon-chain/sync/rpc_test.go index 4b4f4ab4de..ca3f1e319e 100644 --- a/beacon-chain/sync/rpc_test.go +++ b/beacon-chain/sync/rpc_test.go @@ -51,7 +51,7 @@ func expectResetStream(t *testing.T, stream network.Stream) { func TestRegisterRPC_ReceivesValidMessage(t *testing.T) { p2p := p2ptest.NewTestP2P(t) r := &Service{ - ctx: context.Background(), + ctx: t.Context(), cfg: &config{p2p: p2p}, rateLimiter: newRateLimiter(p2p), } @@ -89,7 +89,7 @@ func TestRPC_ReceivesInvalidMessage(t *testing.T) { remotePeer.Connect(p2p) r := &Service{ - ctx: context.Background(), + ctx: t.Context(), cfg: &config{p2p: p2p}, rateLimiter: newRateLimiter(p2p), } @@ -112,7 +112,7 @@ func TestRPC_ReceivesInvalidMessage(t *testing.T) { }() r.registerRPC(topic, handler) - stream, err := remotePeer.Host().NewStream(context.Background(), 
p2p.BHost.ID(), protocol.ID(topic+p2p.Encoding().ProtocolSuffix())) + stream, err := remotePeer.Host().NewStream(t.Context(), p2p.BHost.ID(), protocol.ID(topic+p2p.Encoding().ProtocolSuffix())) require.NoError(t, err) // Write invalid SSZ object to peer. _, err = stream.Write([]byte("JUNK MESSAGE")) diff --git a/beacon-chain/sync/service_test.go b/beacon-chain/sync/service_test.go index 6bb7086682..e2da1e1512 100644 --- a/beacon-chain/sync/service_test.go +++ b/beacon-chain/sync/service_test.go @@ -51,7 +51,7 @@ func TestSyncHandlers_WaitToSync(t *testing.T) { } gs := startup.NewClockSynchronizer() r := Service{ - ctx: context.Background(), + ctx: t.Context(), cfg: &config{ p2p: p2p, chain: chainService, @@ -89,7 +89,7 @@ func TestSyncHandlers_WaitForChainStart(t *testing.T) { } gs := startup.NewClockSynchronizer() r := Service{ - ctx: context.Background(), + ctx: t.Context(), cfg: &config{ p2p: p2p, chain: chainService, @@ -113,7 +113,7 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) { Genesis: time.Now(), ValidatorsRoot: [32]byte{'A'}, } - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) defer cancel() gs := startup.NewClockSynchronizer() r := Service{ @@ -184,7 +184,7 @@ func TestSyncService_StopCleanly(t *testing.T) { Genesis: time.Now(), ValidatorsRoot: [32]byte{'A'}, } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) gs := startup.NewClockSynchronizer() r := Service{ ctx: ctx, diff --git a/beacon-chain/sync/subscriber_beacon_aggregate_proof_test.go b/beacon-chain/sync/subscriber_beacon_aggregate_proof_test.go index 51285d8fed..8e1f9199dc 100644 --- a/beacon-chain/sync/subscriber_beacon_aggregate_proof_test.go +++ b/beacon-chain/sync/subscriber_beacon_aggregate_proof_test.go @@ -1,7 +1,6 @@ package sync import ( - "context" "testing" mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing" @@ 
-33,7 +32,7 @@ func TestBeaconAggregateProofSubscriber_CanSaveAggregatedAttestation(t *testing. }, Signature: make([]byte, fieldparams.BLSSignatureLength), } - require.NoError(t, r.beaconAggregateProofSubscriber(context.Background(), a)) + require.NoError(t, r.beaconAggregateProofSubscriber(t.Context(), a)) assert.DeepSSZEqual(t, []ethpb.Att{a.Message.Aggregate}, r.cfg.attPool.AggregatedAttestations(), "Did not save aggregated attestation") } @@ -55,7 +54,7 @@ func TestBeaconAggregateProofSubscriber_CanSaveUnaggregatedAttestation(t *testin AggregatorIndex: 100, }, } - require.NoError(t, r.beaconAggregateProofSubscriber(context.Background(), a)) + require.NoError(t, r.beaconAggregateProofSubscriber(t.Context(), a)) atts := r.cfg.attPool.UnaggregatedAttestations() assert.DeepEqual(t, []ethpb.Att{a.Message.Aggregate}, atts, "Did not save unaggregated attestation") diff --git a/beacon-chain/sync/subscriber_beacon_blocks_test.go b/beacon-chain/sync/subscriber_beacon_blocks_test.go index 0fcc6067b7..2636c5779a 100644 --- a/beacon-chain/sync/subscriber_beacon_blocks_test.go +++ b/beacon-chain/sync/subscriber_beacon_blocks_test.go @@ -1,7 +1,6 @@ package sync import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain" @@ -86,7 +85,7 @@ func TestService_beaconBlockSubscriber(t *testing.T) { } } // Perform method under test call. 
- err := s.beaconBlockSubscriber(context.Background(), tt.args.msg) + err := s.beaconBlockSubscriber(t.Context(), tt.args.msg) if tt.wantedErr != "" { assert.ErrorContains(t, tt.wantedErr, err) } else { @@ -109,7 +108,7 @@ func TestService_BeaconBlockSubscribe_ExecutionEngineTimesOut(t *testing.T) { seenBlockCache: lruwrpr.New(10), badBlockCache: lruwrpr.New(10), } - require.ErrorIs(t, execution.ErrHTTPTimeout, s.beaconBlockSubscriber(context.Background(), util.NewBeaconBlock())) + require.ErrorIs(t, execution.ErrHTTPTimeout, s.beaconBlockSubscriber(t.Context(), util.NewBeaconBlock())) require.Equal(t, 0, len(s.badBlockCache.Keys())) require.Equal(t, 1, len(s.seenBlockCache.Keys())) } @@ -127,7 +126,7 @@ func TestService_BeaconBlockSubscribe_UndefinedEeError(t *testing.T) { seenBlockCache: lruwrpr.New(10), badBlockCache: lruwrpr.New(10), } - require.ErrorIs(t, s.beaconBlockSubscriber(context.Background(), util.NewBeaconBlock()), blockchain.ErrUndefinedExecutionEngineError) + require.ErrorIs(t, s.beaconBlockSubscriber(t.Context(), util.NewBeaconBlock()), blockchain.ErrUndefinedExecutionEngineError) require.Equal(t, 0, len(s.badBlockCache.Keys())) require.Equal(t, 1, len(s.seenBlockCache.Keys())) } @@ -188,7 +187,7 @@ func TestReconstructAndBroadcastBlobs(t *testing.T) { }, seenBlobCache: lruwrpr.New(1), } - s.reconstructAndBroadcastBlobs(context.Background(), sb) + s.reconstructAndBroadcastBlobs(t.Context(), sb) require.Equal(t, tt.expectedBlobCount, len(chainService.Blobs)) }) } diff --git a/beacon-chain/sync/subscriber_test.go b/beacon-chain/sync/subscriber_test.go index dc867ab7c8..04156dc6af 100644 --- a/beacon-chain/sync/subscriber_test.go +++ b/beacon-chain/sync/subscriber_test.go @@ -45,7 +45,7 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) { gt := time.Now() vr := [32]byte{'A'} r := Service{ - ctx: context.Background(), + ctx: t.Context(), cfg: &config{ p2p: p2pService, initialSync: &mockSync.Sync{IsSyncing: false}, @@ -88,7 +88,7 @@ func 
TestSubscribe_UnsubscribeTopic(t *testing.T) { gt := time.Now() vr := [32]byte{'A'} r := Service{ - ctx: context.Background(), + ctx: t.Context(), cfg: &config{ p2p: p2pService, initialSync: &mockSync.Sync{IsSyncing: false}, @@ -130,7 +130,7 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) { params.OverrideBeaconConfig(cfg) p2pService := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() d := db.SetupDB(t) gt := time.Now() vr := [32]byte{'A'} @@ -185,7 +185,7 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) { func TestSubscribe_ReceivesProposerSlashing(t *testing.T) { p2pService := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() chainService := &mockChain.ChainService{ ValidatorsRoot: [32]byte{'A'}, Genesis: time.Now(), @@ -244,7 +244,7 @@ func TestSubscribe_HandlesPanic(t *testing.T) { ValidatorsRoot: [32]byte{'A'}, } r := Service{ - ctx: context.Background(), + ctx: t.Context(), cfg: &config{ chain: chain, clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), @@ -281,7 +281,7 @@ func TestRevalidateSubscription_CorrectlyFormatsTopic(t *testing.T) { ValidatorsRoot: [32]byte{'A'}, } r := Service{ - ctx: context.Background(), + ctx: t.Context(), cfg: &config{ chain: chain, clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), @@ -420,7 +420,7 @@ func Test_wrapAndReportValidation(t *testing.T) { subHandler: newSubTopicHandler(), } _, v := s.wrapAndReportValidation(tt.args.topic, tt.args.v) - got := v(context.Background(), tt.args.pid, tt.args.msg) + got := v(t.Context(), tt.args.pid, tt.args.msg) if got != tt.want { t.Errorf("wrapAndReportValidation() got = %v, want %v", got, tt.want) } @@ -440,7 +440,7 @@ func TestFilterSubnetPeers(t *testing.T) { // Reset config. 
defer flags.Init(new(flags.GlobalFlags)) p := p2ptest.NewTestP2P(t) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() currSlot := primitives.Slot(100) @@ -517,7 +517,7 @@ func TestSubscribeWithSyncSubnets_DynamicOK(t *testing.T) { params.OverrideBeaconConfig(cfg) p := p2ptest.NewTestP2P(t) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) gt := time.Now() vr := [32]byte{'A'} r := Service{ @@ -564,7 +564,7 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) { cfg.SlotsPerEpoch = 4 params.OverrideBeaconConfig(cfg) params.BeaconConfig().InitializeForkSchedule() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) currSlot := primitives.Slot(100) gt := time.Now().Add(-time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) vr := [32]byte{'A'} @@ -650,7 +650,7 @@ func TestSubscribe_ReceivesLCOptimisticUpdate(t *testing.T) { params.SetupTestConfigCleanup(t) p2pService := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() cfg := params.BeaconConfig().Copy() cfg.AltairForkEpoch = 1 cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1 @@ -717,7 +717,7 @@ func TestSubscribe_ReceivesLCFinalityUpdate(t *testing.T) { params.SetupTestConfigCleanup(t) p2pService := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() cfg := params.BeaconConfig().Copy() cfg.AltairForkEpoch = 1 cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1 diff --git a/beacon-chain/sync/sync_fuzz_test.go b/beacon-chain/sync/sync_fuzz_test.go index 8877770d22..0703d8dd67 100644 --- a/beacon-chain/sync/sync_fuzz_test.go +++ b/beacon-chain/sync/sync_fuzz_test.go @@ -4,7 +4,6 @@ package sync import ( "bytes" - "context" "reflect" "testing" "time" @@ -34,7 +33,7 @@ import ( func FuzzValidateBeaconBlockPubSub_Phase0(f *testing.F) { db := dbtest.SetupDB(f) p := 
p2ptest.NewFuzzTestP2P() - ctx := context.Background() + ctx := f.Context() beaconState, privKeys := util.DeterministicGenesisState(f, 100) parentBlock := util.NewBeaconBlock() util.SaveBlock(f, ctx, db, parentBlock) @@ -117,7 +116,7 @@ func FuzzValidateBeaconBlockPubSub_Phase0(f *testing.F) { func FuzzValidateBeaconBlockPubSub_Altair(f *testing.F) { db := dbtest.SetupDB(f) p := p2ptest.NewFuzzTestP2P() - ctx := context.Background() + ctx := f.Context() beaconState, privKeys := util.DeterministicGenesisStateAltair(f, 100) parentBlock := util.NewBeaconBlockAltair() util.SaveBlock(f, ctx, db, parentBlock) @@ -201,7 +200,7 @@ func FuzzValidateBeaconBlockPubSub_Altair(f *testing.F) { func FuzzValidateBeaconBlockPubSub_Bellatrix(f *testing.F) { db := dbtest.SetupDB(f) p := p2ptest.NewFuzzTestP2P() - ctx := context.Background() + ctx := f.Context() beaconState, privKeys := util.DeterministicGenesisStateBellatrix(f, 100) parentBlock := util.NewBeaconBlockBellatrix() util.SaveBlock(f, ctx, db, parentBlock) diff --git a/beacon-chain/sync/validate_aggregate_proof_test.go b/beacon-chain/sync/validate_aggregate_proof_test.go index d897bdf886..f3c934943d 100644 --- a/beacon-chain/sync/validate_aggregate_proof_test.go +++ b/beacon-chain/sync/validate_aggregate_proof_test.go @@ -33,7 +33,7 @@ import ( ) func TestVerifyIndexInCommittee_CanVerify(t *testing.T) { - ctx := context.Background() + ctx := t.Context() params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.MinimalSpecConfig()) @@ -46,7 +46,7 @@ func TestVerifyIndexInCommittee_CanVerify(t *testing.T) { bf.SetBitAt(0, true) att := ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: bf} - committee, err := helpers.BeaconCommitteeFromState(context.Background(), s, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), s, att.Data.Slot, att.Data.CommitteeIndex) assert.NoError(t, err) indices, err := attestation.AttestingIndices(att, committee) 
require.NoError(t, err) @@ -62,7 +62,7 @@ func TestVerifyIndexInCommittee_CanVerify(t *testing.T) { } func TestVerifyIndexInCommittee_ExistsInBeaconCommittee(t *testing.T) { - ctx := context.Background() + ctx := t.Context() params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.MinimalSpecConfig()) @@ -72,7 +72,7 @@ func TestVerifyIndexInCommittee_ExistsInBeaconCommittee(t *testing.T) { att := ðpb.Attestation{Data: ðpb.AttestationData{}} - committee, err := helpers.BeaconCommitteeFromState(context.Background(), s, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), s, att.Data.Slot, att.Data.CommitteeIndex) require.NoError(t, err) bl := bitfield.NewBitlist(uint64(len(committee))) @@ -101,7 +101,7 @@ func TestVerifyIndexInCommittee_ExistsInBeaconCommittee(t *testing.T) { } func TestVerifyIndexInCommittee_ExistsInBeaconCommittee_Electra(t *testing.T) { - ctx := context.Background() + ctx := t.Context() params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.MinimalSpecConfig()) @@ -111,7 +111,7 @@ func TestVerifyIndexInCommittee_ExistsInBeaconCommittee_Electra(t *testing.T) { att := ðpb.AttestationElectra{Data: ðpb.AttestationData{}} - committee, err := helpers.BeaconCommitteeFromState(context.Background(), s, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), s, att.Data.Slot, att.Data.CommitteeIndex) require.NoError(t, err) bl := bitfield.NewBitlist(uint64(len(committee))) @@ -159,13 +159,13 @@ func TestVerifyIndexInCommittee_ExistsInBeaconCommittee_Electra(t *testing.T) { } func TestVerifyIndexInCommittee_Electra(t *testing.T) { - ctx := context.Background() + ctx := t.Context() s, _ := util.DeterministicGenesisStateElectra(t, 64) service := &Service{} cb := primitives.NewAttestationCommitteeBits() cb.SetBitAt(0, true) att := ðpb.AttestationElectra{Data: ðpb.AttestationData{}, CommitteeBits: cb} - committee, err := 
helpers.BeaconCommitteeFromState(context.Background(), s, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), s, att.Data.Slot, att.Data.CommitteeIndex) require.NoError(t, err) bl := bitfield.NewBitlist(uint64(len(committee))) bl.SetBitAt(0, true) @@ -177,7 +177,7 @@ func TestVerifyIndexInCommittee_Electra(t *testing.T) { } func TestVerifySelection_NotAnAggregator(t *testing.T) { - ctx := context.Background() + ctx := t.Context() params.SetupTestConfigCleanup(t) params.OverrideBeaconConfig(params.MinimalSpecConfig()) validators := uint64(2048) @@ -236,7 +236,7 @@ func TestValidateAggregateAndProof_NoBlock(t *testing.T) { }, } - if res, err := r.validateAggregateAndProof(context.Background(), "", msg); res == pubsub.ValidationAccept { + if res, err := r.validateAggregateAndProof(t.Context(), "", msg); res == pubsub.ValidationAccept { _ = err t.Error("Expected validate to fail") } @@ -250,12 +250,12 @@ func TestValidateAggregateAndProof_NotWithinSlotRange(t *testing.T) { beaconState, _ := util.DeterministicGenesisState(t, validators) b := util.NewBeaconBlock() - util.SaveBlock(t, context.Background(), db, b) + util.SaveBlock(t, t.Context(), db, b) root, err := b.Block.HashTreeRoot() require.NoError(t, err) s, err := util.NewBeaconState() require.NoError(t, err) - require.NoError(t, db.SaveState(context.Background(), s, root)) + require.NoError(t, db.SaveState(t.Context(), s, root)) aggBits := bitfield.NewBitlist(3) aggBits.SetBitAt(0, true) @@ -306,7 +306,7 @@ func TestValidateAggregateAndProof_NotWithinSlotRange(t *testing.T) { }, } - if res, err := r.validateAggregateAndProof(context.Background(), "", msg); res == pubsub.ValidationAccept { + if res, err := r.validateAggregateAndProof(t.Context(), "", msg); res == pubsub.ValidationAccept { _ = err t.Error("Expected validate to fail") } @@ -323,7 +323,7 @@ func TestValidateAggregateAndProof_NotWithinSlotRange(t *testing.T) { Topic: &topic, }, } - if res, err := 
r.validateAggregateAndProof(context.Background(), "", msg); res == pubsub.ValidationAccept { + if res, err := r.validateAggregateAndProof(t.Context(), "", msg); res == pubsub.ValidationAccept { _ = err t.Error("Expected validate to fail") } @@ -337,7 +337,7 @@ func TestValidateAggregateAndProof_ExistedInPool(t *testing.T) { beaconState, _ := util.DeterministicGenesisState(t, validators) b := util.NewBeaconBlock() - util.SaveBlock(t, context.Background(), db, b) + util.SaveBlock(t, t.Context(), db, b) root, err := b.Block.HashTreeRoot() require.NoError(t, err) @@ -389,7 +389,7 @@ func TestValidateAggregateAndProof_ExistedInPool(t *testing.T) { } require.NoError(t, r.cfg.attPool.SaveBlockAttestation(att)) - if res, err := r.validateAggregateAndProof(context.Background(), "", msg); res == pubsub.ValidationAccept { + if res, err := r.validateAggregateAndProof(t.Context(), "", msg); res == pubsub.ValidationAccept { _ = err t.Error("Expected validate to fail") } @@ -403,12 +403,12 @@ func TestValidateAggregateAndProof_CanValidate(t *testing.T) { beaconState, privKeys := util.DeterministicGenesisState(t, validators) b := util.NewBeaconBlock() - util.SaveBlock(t, context.Background(), db, b) + util.SaveBlock(t, t.Context(), db, b) root, err := b.Block.HashTreeRoot() require.NoError(t, err) s, err := util.NewBeaconState() require.NoError(t, err) - require.NoError(t, db.SaveState(context.Background(), s, root)) + require.NoError(t, db.SaveState(t.Context(), s, root)) aggBits := bitfield.NewBitlist(validators / uint64(params.BeaconConfig().SlotsPerEpoch)) aggBits.SetBitAt(0, true) @@ -422,7 +422,7 @@ func TestValidateAggregateAndProof_CanValidate(t *testing.T) { AggregationBits: aggBits, } - committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, att.Data.CommitteeIndex) assert.NoError(t, err) attestingIndices, err 
:= attestation.AttestingIndices(att, committee) require.NoError(t, err) @@ -451,7 +451,7 @@ func TestValidateAggregateAndProof_CanValidate(t *testing.T) { require.NoError(t, err) require.NoError(t, beaconState.SetGenesisTime(uint64(time.Now().Unix()))) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() chain := &mock.ChainService{Genesis: time.Now().Add(-oneEpoch()), Optimistic: true, @@ -493,7 +493,7 @@ func TestValidateAggregateAndProof_CanValidate(t *testing.T) { Topic: &topic, }, } - res, err := r.validateAggregateAndProof(context.Background(), "", msg) + res, err := r.validateAggregateAndProof(t.Context(), "", msg) assert.NoError(t, err) assert.Equal(t, pubsub.ValidationAccept, res, "Validated status is false") assert.NotNil(t, msg.ValidatorData, "Did not set validator data") @@ -507,12 +507,12 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) { beaconState, privKeys := util.DeterministicGenesisState(t, validators) b := util.NewBeaconBlock() - util.SaveBlock(t, context.Background(), db, b) + util.SaveBlock(t, t.Context(), db, b) root, err := b.Block.HashTreeRoot() require.NoError(t, err) s, err := util.NewBeaconState() require.NoError(t, err) - require.NoError(t, db.SaveState(context.Background(), s, root)) + require.NoError(t, db.SaveState(t.Context(), s, root)) aggBits := bitfield.NewBitlist(validators / uint64(params.BeaconConfig().SlotsPerEpoch)) aggBits.SetBitAt(0, true) @@ -526,7 +526,7 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) { AggregationBits: aggBits, } - committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, att.Data.CommitteeIndex) require.NoError(t, err) attestingIndices, err := attestation.AttestingIndices(att, committee) require.NoError(t, err) @@ -554,7 +554,7 @@ func 
TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) { require.NoError(t, err) require.NoError(t, beaconState.SetGenesisTime(uint64(time.Now().Unix()))) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() chain := &mock.ChainService{Genesis: time.Now().Add(-oneEpoch()), DB: db, @@ -596,7 +596,7 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) { Topic: &topic, }, } - res, err := r.validateAggregateAndProof(context.Background(), "", msg) + res, err := r.validateAggregateAndProof(t.Context(), "", msg) assert.NoError(t, err) require.Equal(t, pubsub.ValidationAccept, res, "Validated status is false") @@ -613,7 +613,7 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) { } time.Sleep(10 * time.Millisecond) // Wait for cached value to pass through buffers. - if res, err := r.validateAggregateAndProof(context.Background(), "", msg); res == pubsub.ValidationAccept { + if res, err := r.validateAggregateAndProof(t.Context(), "", msg); res == pubsub.ValidationAccept { _ = err t.Fatal("Validated status is true") } @@ -632,7 +632,7 @@ func TestValidateAggregateAndProof_BadBlock(t *testing.T) { require.NoError(t, err) s, err := util.NewBeaconState() require.NoError(t, err) - require.NoError(t, db.SaveState(context.Background(), s, root)) + require.NoError(t, db.SaveState(t.Context(), s, root)) aggBits := bitfield.NewBitlist(validators / uint64(params.BeaconConfig().SlotsPerEpoch)) aggBits.SetBitAt(0, true) @@ -645,7 +645,7 @@ func TestValidateAggregateAndProof_BadBlock(t *testing.T) { AggregationBits: aggBits, } - committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, att.Data.CommitteeIndex) assert.NoError(t, err) attestingIndices, err := attestation.AttestingIndices(att, committee) require.NoError(t, 
err) @@ -693,7 +693,7 @@ func TestValidateAggregateAndProof_BadBlock(t *testing.T) { } r.initCaches() // Set beacon block as bad. - r.setBadBlock(context.Background(), root) + r.setBadBlock(t.Context(), root) buf := new(bytes.Buffer) _, err = p.Encoding().EncodeGossip(buf, signedAggregateAndProof) require.NoError(t, err) @@ -705,7 +705,7 @@ func TestValidateAggregateAndProof_BadBlock(t *testing.T) { Topic: &topic, }, } - res, err := r.validateAggregateAndProof(context.Background(), "", msg) + res, err := r.validateAggregateAndProof(t.Context(), "", msg) assert.NotNil(t, err) assert.Equal(t, pubsub.ValidationReject, res, "Validated status is true") } @@ -718,12 +718,12 @@ func TestValidateAggregateAndProof_RejectWhenAttEpochDoesntEqualTargetEpoch(t *t beaconState, privKeys := util.DeterministicGenesisState(t, validators) b := util.NewBeaconBlock() - util.SaveBlock(t, context.Background(), db, b) + util.SaveBlock(t, t.Context(), db, b) root, err := b.Block.HashTreeRoot() require.NoError(t, err) s, err := util.NewBeaconState() require.NoError(t, err) - require.NoError(t, db.SaveState(context.Background(), s, root)) + require.NoError(t, db.SaveState(t.Context(), s, root)) aggBits := bitfield.NewBitlist(validators / uint64(params.BeaconConfig().SlotsPerEpoch)) aggBits.SetBitAt(0, true) @@ -736,7 +736,7 @@ func TestValidateAggregateAndProof_RejectWhenAttEpochDoesntEqualTargetEpoch(t *t AggregationBits: aggBits, } - committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex) + committee, err := helpers.BeaconCommitteeFromState(t.Context(), beaconState, att.Data.Slot, att.Data.CommitteeIndex) assert.NoError(t, err) attestingIndices, err := attestation.AttestingIndices(att, committee) require.NoError(t, err) @@ -795,7 +795,7 @@ func TestValidateAggregateAndProof_RejectWhenAttEpochDoesntEqualTargetEpoch(t *t Topic: &topic, }, } - res, err := r.validateAggregateAndProof(context.Background(), "", msg) + res, err 
:= r.validateAggregateAndProof(t.Context(), "", msg) assert.NotNil(t, err) assert.Equal(t, pubsub.ValidationReject, res) } diff --git a/beacon-chain/sync/validate_attester_slashing_test.go b/beacon-chain/sync/validate_attester_slashing_test.go index 4a28e2273c..0814a40240 100644 --- a/beacon-chain/sync/validate_attester_slashing_test.go +++ b/beacon-chain/sync/validate_attester_slashing_test.go @@ -77,7 +77,7 @@ func setupValidAttesterSlashing(t *testing.T) (*ethpb.AttesterSlashing, state.Be func TestValidateAttesterSlashing_ValidSlashing(t *testing.T) { p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() slashing, s := setupValidAttesterSlashing(t) @@ -118,7 +118,7 @@ func TestValidateAttesterSlashing_ValidSlashing(t *testing.T) { func TestValidateAttesterSlashing_ValidOldSlashing(t *testing.T) { p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() slashing, s := setupValidAttesterSlashing(t) vals := s.Validators() @@ -162,7 +162,7 @@ func TestValidateAttesterSlashing_ValidOldSlashing(t *testing.T) { func TestValidateAttesterSlashing_InvalidSlashing_WithdrawableEpoch(t *testing.T) { p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() slashing, s := setupValidAttesterSlashing(t) // Set only one of the validators as withdrawn @@ -220,7 +220,7 @@ func TestValidateAttesterSlashing_InvalidSlashing_WithdrawableEpoch(t *testing.T func TestValidateAttesterSlashing_CanFilter(t *testing.T) { p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() chain := &mock.ChainService{Genesis: time.Now()} r := &Service{ @@ -290,7 +290,7 @@ func TestValidateAttesterSlashing_ContextTimeout(t *testing.T) { slashing, s := setupValidAttesterSlashing(t) slashing.Attestation_1.Data.Target.Epoch = 100000000 - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + ctx, cancel := context.WithTimeout(t.Context(), 100*time.Millisecond) defer cancel() chain := 
&mock.ChainService{State: s} @@ -323,7 +323,7 @@ func TestValidateAttesterSlashing_ContextTimeout(t *testing.T) { func TestValidateAttesterSlashing_Syncing(t *testing.T) { p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() slashing, s := setupValidAttesterSlashing(t) diff --git a/beacon-chain/sync/validate_beacon_attestation_test.go b/beacon-chain/sync/validate_beacon_attestation_test.go index 7637363809..0b3d9247b9 100644 --- a/beacon-chain/sync/validate_beacon_attestation_test.go +++ b/beacon-chain/sync/validate_beacon_attestation_test.go @@ -39,7 +39,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) { DB: db, Optimistic: true, } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() s := &Service{ ctx: ctx, @@ -78,7 +78,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) { validators := uint64(64) savedState, keys := util.DeterministicGenesisState(t, validators) require.NoError(t, savedState.SetSlot(1)) - require.NoError(t, db.SaveState(context.Background(), savedState, validBlockRoot)) + require.NoError(t, db.SaveState(t.Context(), savedState, validBlockRoot)) chain.State = savedState tests := []struct { @@ -264,7 +264,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) { helpers.ClearCache() chain.ValidAttestation = tt.validAttestationSignature if tt.validAttestationSignature { - com, err := helpers.BeaconCommitteeFromState(context.Background(), savedState, tt.msg.GetData().Slot, tt.msg.GetData().CommitteeIndex) + com, err := helpers.BeaconCommitteeFromState(t.Context(), savedState, tt.msg.GetData().Slot, tt.msg.GetData().CommitteeIndex) require.NoError(t, err) domain, err := signing.Domain(savedState.Fork(), tt.msg.GetData().Target.Epoch, params.BeaconConfig().DomainBeaconAttester, savedState.GenesisValidatorsRoot()) require.NoError(t, err) @@ -331,7 +331,7 @@ func 
TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) { DB: db, Optimistic: true, } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() s := &Service{ ctx: ctx, @@ -367,7 +367,7 @@ func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) { validators := uint64(64) savedState, keys := util.DeterministicGenesisState(t, validators) require.NoError(t, savedState.SetSlot(1)) - require.NoError(t, db.SaveState(context.Background(), savedState, validBlockRoot)) + require.NoError(t, db.SaveState(t.Context(), savedState, validBlockRoot)) chain.State = savedState committee, err := helpers.BeaconCommitteeFromState(ctx, savedState, 1, 0) require.NoError(t, err) @@ -433,7 +433,7 @@ func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { helpers.ClearCache() - com, err := helpers.BeaconCommitteeFromState(context.Background(), savedState, tt.msg.GetData().Slot, tt.msg.GetData().CommitteeIndex) + com, err := helpers.BeaconCommitteeFromState(t.Context(), savedState, tt.msg.GetData().Slot, tt.msg.GetData().CommitteeIndex) require.NoError(t, err) domain, err := signing.Domain(savedState.Fork(), tt.msg.GetData().Target.Epoch, params.BeaconConfig().DomainBeaconAttester, savedState.GenesisValidatorsRoot()) require.NoError(t, err) @@ -467,7 +467,7 @@ func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) { } func TestService_setSeenUnaggregatedAtt(t *testing.T) { - s := NewService(context.Background(), WithP2P(p2ptest.NewTestP2P(t))) + s := NewService(t.Context(), WithP2P(p2ptest.NewTestP2P(t))) t.Run("phase0", func(t *testing.T) { s.initCaches() diff --git a/beacon-chain/sync/validate_beacon_blocks_test.go b/beacon-chain/sync/validate_beacon_blocks_test.go index f4cb574f31..571fad1fea 100644 --- a/beacon-chain/sync/validate_beacon_blocks_test.go +++ 
b/beacon-chain/sync/validate_beacon_blocks_test.go @@ -47,7 +47,7 @@ import ( func TestValidateBeaconBlockPubSub_InvalidSignature(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisState(t, 100) parentBlock := util.NewBeaconBlock() util.SaveBlock(t, ctx, db, parentBlock) @@ -110,13 +110,13 @@ func TestValidateBeaconBlockPubSub_InvalidSignature(t *testing.T) { func TestValidateBeaconBlockPubSub_BlockAlreadyPresentInDB(t *testing.T) { db := dbtest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() p := p2ptest.NewTestP2P(t) msg := util.NewBeaconBlock() msg.Block.Slot = 100 msg.Block.ParentRoot = util.Random32Bytes(t) - util.SaveBlock(t, context.Background(), db, msg) + util.SaveBlock(t, t.Context(), db, msg) chainService := &mock.ChainService{Genesis: time.Now()} r := &Service{ @@ -154,7 +154,7 @@ func TestValidateBeaconBlockPubSub_BlockAlreadyPresentInDB(t *testing.T) { func TestValidateBeaconBlockPubSub_CanRecoverStateSummary(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisState(t, 100) parentBlock := util.NewBeaconBlock() util.SaveBlock(t, ctx, db, parentBlock) @@ -219,7 +219,7 @@ func TestValidateBeaconBlockPubSub_CanRecoverStateSummary(t *testing.T) { func TestValidateBeaconBlockPubSub_IsInCache(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisState(t, 100) parentBlock := util.NewBeaconBlock() bRoot, err := parentBlock.Block.HashTreeRoot() @@ -228,7 +228,7 @@ func TestValidateBeaconBlockPubSub_IsInCache(t *testing.T) { require.NoError(t, db.SaveStateSummary(ctx, ðpb.StateSummary{Root: bRoot[:]})) copied := beaconState.Copy() require.NoError(t, copied.SetSlot(1)) - proposerIdx, err := 
helpers.BeaconProposerIndex(context.Background(), copied) + proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), copied) require.NoError(t, err) msg := util.NewBeaconBlock() msg.Block.ParentRoot = bRoot[:] @@ -285,7 +285,7 @@ func TestValidateBeaconBlockPubSub_IsInCache(t *testing.T) { func TestValidateBeaconBlockPubSub_ValidProposerSignature(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisState(t, 100) parentBlock := util.NewBeaconBlock() util.SaveBlock(t, ctx, db, parentBlock) @@ -351,7 +351,7 @@ func TestValidateBeaconBlockPubSub_ValidProposerSignature(t *testing.T) { func TestValidateBeaconBlockPubSub_WithLookahead(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisState(t, 100) parentBlock := util.NewBeaconBlock() util.SaveBlock(t, ctx, db, parentBlock) @@ -362,7 +362,7 @@ func TestValidateBeaconBlockPubSub_WithLookahead(t *testing.T) { copied := beaconState.Copy() // The next block is only 1 epoch ahead so as to not induce a new seed. 
blkSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(coreTime.NextEpoch(copied))) - copied, err = transition.ProcessSlots(context.Background(), copied, blkSlot) + copied, err = transition.ProcessSlots(t.Context(), copied, blkSlot) require.NoError(t, err) proposerIdx, err := helpers.BeaconProposerIndex(ctx, copied) require.NoError(t, err) @@ -420,7 +420,7 @@ func TestValidateBeaconBlockPubSub_WithLookahead(t *testing.T) { func TestValidateBeaconBlockPubSub_AdvanceEpochsForState(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisState(t, 100) parentBlock := util.NewBeaconBlock() util.SaveBlock(t, ctx, db, parentBlock) @@ -431,7 +431,7 @@ func TestValidateBeaconBlockPubSub_AdvanceEpochsForState(t *testing.T) { copied := beaconState.Copy() // The next block is at least 2 epochs ahead to induce shuffling and a new seed. blkSlot := params.BeaconConfig().SlotsPerEpoch * 2 - copied, err = transition.ProcessSlots(context.Background(), copied, blkSlot) + copied, err = transition.ProcessSlots(t.Context(), copied, blkSlot) require.NoError(t, err) proposerIdx, err := helpers.BeaconProposerIndex(ctx, copied) require.NoError(t, err) @@ -488,7 +488,7 @@ func TestValidateBeaconBlockPubSub_AdvanceEpochsForState(t *testing.T) { func TestValidateBeaconBlockPubSub_Syncing(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() b := []byte("sk") b32 := bytesutil.ToBytes32(b) sk, err := bls.SecretKeyFromBytes(b32[:]) @@ -529,7 +529,7 @@ func TestValidateBeaconBlockPubSub_Syncing(t *testing.T) { func TestValidateBeaconBlockPubSub_IgnoreAndQueueBlocksFromNearFuture(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisState(t, 100) parentBlock := util.NewBeaconBlock() @@ -598,7 +598,7 @@ func 
TestValidateBeaconBlockPubSub_IgnoreAndQueueBlocksFromNearFuture(t *testing func TestValidateBeaconBlockPubSub_RejectBlocksFromFuture(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() b := []byte("sk") b32 := bytesutil.ToBytes32(b) sk, err := bls.SecretKeyFromBytes(b32[:]) @@ -648,7 +648,7 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromThePast(t *testing.T) { b := []byte("sk") b32 := bytesutil.ToBytes32(b) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() sk, err := bls.SecretKeyFromBytes(b32[:]) require.NoError(t, err) msg := util.NewBeaconBlock() @@ -697,7 +697,7 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromThePast(t *testing.T) { func TestValidateBeaconBlockPubSub_SeenProposerSlot(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisState(t, 100) parentBlock := util.NewBeaconBlock() util.SaveBlock(t, ctx, db, parentBlock) @@ -784,7 +784,7 @@ func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) { p := p2ptest.NewTestP2P(t) parent := util.NewBeaconBlock() - util.SaveBlock(t, context.Background(), db, parent) + util.SaveBlock(t, t.Context(), db, parent) parentRoot, err := parent.Block.HashTreeRoot() require.NoError(t, err) chain := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0), @@ -824,7 +824,7 @@ func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) { }, } - res, err := r.validateBeaconBlockPubSub(context.Background(), "", m) + res, err := r.validateBeaconBlockPubSub(t.Context(), "", m) _ = err assert.Equal(t, pubsub.ValidationIgnore, res) @@ -840,7 +840,7 @@ func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) { }, } - res, err = r.validateBeaconBlockPubSub(context.Background(), "", m) + res, err = 
r.validateBeaconBlockPubSub(t.Context(), "", m) assert.NoError(t, err) assert.Equal(t, pubsub.ValidationIgnore, res) } @@ -848,7 +848,7 @@ func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) { func TestValidateBeaconBlockPubSub_ParentNotFinalizedDescendant(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisState(t, 100) parentBlock := util.NewBeaconBlock() util.SaveBlock(t, ctx, db, parentBlock) @@ -915,7 +915,7 @@ func TestValidateBeaconBlockPubSub_ParentNotFinalizedDescendant(t *testing.T) { func TestValidateBeaconBlockPubSub_InvalidParentBlock(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisState(t, 100) parentBlock := util.NewBeaconBlock() util.SaveBlock(t, ctx, db, parentBlock) @@ -1014,7 +1014,7 @@ func TestValidateBeaconBlockPubSub_InvalidParentBlock(t *testing.T) { func TestValidateBeaconBlockPubSub_InsertValidPendingBlock(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisState(t, 100) parentBlock := util.NewBeaconBlock() util.SaveBlock(t, ctx, db, parentBlock) @@ -1078,7 +1078,7 @@ func TestValidateBeaconBlockPubSub_InsertValidPendingBlock(t *testing.T) { func TestValidateBeaconBlockPubSub_RejectBlocksFromBadParent(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisState(t, 100) parentBlock := util.NewBeaconBlock() @@ -1092,7 +1092,7 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromBadParent(t *testing.T) { copied := beaconState.Copy() // The next block is at least 2 epochs ahead to induce shuffling and a new seed. 
blkSlot := params.BeaconConfig().SlotsPerEpoch * 2 - copied, err = transition.ProcessSlots(context.Background(), copied, blkSlot) + copied, err = transition.ProcessSlots(t.Context(), copied, blkSlot) require.NoError(t, err) proposerIdx, err := helpers.BeaconProposerIndex(ctx, copied) require.NoError(t, err) @@ -1162,7 +1162,7 @@ func TestService_setBadBlock_DoesntSetWithContextErr(t *testing.T) { s.initCaches() root := [32]byte{'b', 'a', 'd'} - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) cancel() s.setBadBlock(ctx, root) if s.hasBadBlock(root) { @@ -1189,7 +1189,7 @@ func TestService_isBlockQueueable(t *testing.T) { func TestValidateBeaconBlockPubSub_ValidExecutionPayload(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisStateBellatrix(t, 100) parentBlock := util.NewBeaconBlockBellatrix() util.SaveBlock(t, ctx, db, parentBlock) @@ -1262,7 +1262,7 @@ func TestValidateBeaconBlockPubSub_ValidExecutionPayload(t *testing.T) { func TestValidateBeaconBlockPubSub_InvalidPayloadTimestamp(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() beaconState, privKeys := util.DeterministicGenesisStateBellatrix(t, 100) parentBlock := util.NewBeaconBlockBellatrix() util.SaveBlock(t, ctx, db, parentBlock) @@ -1333,7 +1333,7 @@ func TestValidateBeaconBlockPubSub_InvalidPayloadTimestamp(t *testing.T) { func Test_validateBellatrixBeaconBlock(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() stateGen := stategen.New(db, doublylinkedtree.New()) presentTime := time.Now().Unix() chainService := &mock.ChainService{Genesis: time.Unix(presentTime-int64(params.BeaconConfig().SecondsPerSlot), 0), @@ -1364,7 +1364,7 @@ func Test_validateBellatrixBeaconBlock(t *testing.T) { func 
Test_validateBellatrixBeaconBlockParentValidation(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() stateGen := stategen.New(db, doublylinkedtree.New()) beaconState, privKeys := util.DeterministicGenesisStateBellatrix(t, 100) @@ -1421,7 +1421,7 @@ func Test_validateBellatrixBeaconBlockParentValidation(t *testing.T) { func Test_validateBeaconBlockProcessingWhenParentIsOptimistic(t *testing.T) { db := dbtest.SetupDB(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() stateGen := stategen.New(db, doublylinkedtree.New()) beaconState, privKeys := util.DeterministicGenesisStateBellatrix(t, 100) @@ -1520,7 +1520,7 @@ func Test_validateDenebBeaconBlock(t *testing.T) { } func TestDetectAndBroadcastEquivocation(t *testing.T) { - ctx := context.Background() + ctx := t.Context() p := p2ptest.NewTestP2P(t) beaconState, privKeys := util.DeterministicGenesisState(t, 100) diff --git a/beacon-chain/sync/validate_blob_test.go b/beacon-chain/sync/validate_blob_test.go index 48009a7929..259c35cceb 100644 --- a/beacon-chain/sync/validate_blob_test.go +++ b/beacon-chain/sync/validate_blob_test.go @@ -2,7 +2,6 @@ package sync import ( "bytes" - "context" "reflect" "testing" "time" @@ -28,7 +27,7 @@ import ( ) func TestValidateBlob_FromSelf(t *testing.T) { - ctx := context.Background() + ctx := t.Context() p := p2ptest.NewTestP2P(t) s := &Service{cfg: &config{p2p: p}} result, err := s.validateBlob(ctx, s.cfg.p2p.PeerID(), nil) @@ -37,7 +36,7 @@ func TestValidateBlob_FromSelf(t *testing.T) { } func TestValidateBlob_InitSync(t *testing.T) { - ctx := context.Background() + ctx := t.Context() p := p2ptest.NewTestP2P(t) s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{IsSyncing: true}}} result, err := s.validateBlob(ctx, "", nil) @@ -46,7 +45,7 @@ func TestValidateBlob_InitSync(t *testing.T) { } func TestValidateBlob_InvalidTopic(t *testing.T) { - ctx := context.Background() + ctx := 
t.Context() p := p2ptest.NewTestP2P(t) s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}} result, err := s.validateBlob(ctx, "", &pubsub.Message{ @@ -57,7 +56,7 @@ func TestValidateBlob_InvalidTopic(t *testing.T) { } func TestValidateBlob_InvalidMessageType(t *testing.T) { - ctx := context.Background() + ctx := t.Context() p := p2ptest.NewTestP2P(t) chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0)} s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}} @@ -83,7 +82,7 @@ func TestValidateBlob_InvalidMessageType(t *testing.T) { func TestValidateBlob_AlreadySeenInCache(t *testing.T) { db := dbtest.SetupDB(t) - ctx := context.Background() + ctx := t.Context() p := p2ptest.NewTestP2P(t) chainService := &mock.ChainService{Genesis: time.Now(), FinalizedCheckPoint: ð.Checkpoint{}, DB: db} stateGen := stategen.New(db, doublylinkedtree.New()) @@ -143,7 +142,7 @@ func TestValidateBlob_AlreadySeenInCache(t *testing.T) { } func TestValidateBlob_InvalidTopicIndex(t *testing.T) { - ctx := context.Background() + ctx := t.Context() p := p2ptest.NewTestP2P(t) chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0)} s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}} @@ -255,7 +254,7 @@ func TestValidateBlob_ErrorPathsWithMock(t *testing.T) { } for _, tt := range tests { t.Run(tt.error.Error(), func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() p := p2ptest.NewTestP2P(t) chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0)} s := &Service{ diff --git a/beacon-chain/sync/validate_bls_to_execution_change_test.go b/beacon-chain/sync/validate_bls_to_execution_change_test.go 
index 9ef6f2e738..5b6634f082 100644 --- a/beacon-chain/sync/validate_bls_to_execution_change_test.go +++ b/beacon-chain/sync/validate_bls_to_execution_change_test.go @@ -420,7 +420,7 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() ctx, cancel := context.WithCancel(ctx) defer cancel() diff --git a/beacon-chain/sync/validate_data_column_test.go b/beacon-chain/sync/validate_data_column_test.go index 8cbe0df4a3..4cb5526a86 100644 --- a/beacon-chain/sync/validate_data_column_test.go +++ b/beacon-chain/sync/validate_data_column_test.go @@ -2,7 +2,6 @@ package sync import ( "bytes" - "context" "errors" "reflect" "testing" @@ -27,7 +26,7 @@ import ( ) func TestValidateDataColumn(t *testing.T) { - ctx := context.Background() + ctx := t.Context() t.Run("from self", func(t *testing.T) { p := p2ptest.NewTestP2P(t) diff --git a/beacon-chain/sync/validate_light_client_test.go b/beacon-chain/sync/validate_light_client_test.go index 5dcae775ea..1d986c877b 100644 --- a/beacon-chain/sync/validate_light_client_test.go +++ b/beacon-chain/sync/validate_light_client_test.go @@ -2,7 +2,6 @@ package sync import ( "bytes" - "context" "testing" "time" @@ -23,7 +22,7 @@ import ( func TestValidateLightClientOptimisticUpdate_NilMessageOrTopic(t *testing.T) { params.SetupTestConfigCleanup(t) - ctx := context.Background() + ctx := t.Context() p := p2ptest.NewTestP2P(t) s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}} @@ -98,7 +97,7 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) { for _, test := range tests { for v := 1; v < 6; v++ { t.Run(test.name+"_"+version.String(v), func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() p := p2ptest.NewTestP2P(t) // drift back appropriate number of epochs based on fork + 2 slots for signature slot + time for gossip propagation + any extra drift genesisDrift := 
v*slotsPerEpoch*secondsPerSlot + 2*secondsPerSlot + secondsPerSlot/slotIntervals + test.genesisDrift @@ -145,7 +144,7 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) { func TestValidateLightClientFinalityUpdate_NilMessageOrTopic(t *testing.T) { params.SetupTestConfigCleanup(t) - ctx := context.Background() + ctx := t.Context() p := p2ptest.NewTestP2P(t) s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}} @@ -238,7 +237,7 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) { for _, test := range tests { for v := 1; v < 6; v++ { t.Run(test.name+"_"+version.String(v), func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() p := p2ptest.NewTestP2P(t) // drift back appropriate number of epochs based on fork + 2 slots for signature slot + time for gossip propagation + any extra drift genesisDrift := v*slotsPerEpoch*secondsPerSlot + 2*secondsPerSlot + secondsPerSlot/slotIntervals + test.genesisDrift diff --git a/beacon-chain/sync/validate_proposer_slashing_test.go b/beacon-chain/sync/validate_proposer_slashing_test.go index b2576c6791..7ca4ea5725 100644 --- a/beacon-chain/sync/validate_proposer_slashing_test.go +++ b/beacon-chain/sync/validate_proposer_slashing_test.go @@ -110,7 +110,7 @@ func setupValidProposerSlashing(t *testing.T) (*ethpb.ProposerSlashing, state.Be func TestValidateProposerSlashing_ValidSlashing(t *testing.T) { p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() slashing, s := setupValidProposerSlashing(t) @@ -149,7 +149,7 @@ func TestValidateProposerSlashing_ValidSlashing(t *testing.T) { func TestValidateProposerSlashing_ValidOldSlashing(t *testing.T) { p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() slashing, s := setupValidProposerSlashing(t) val, err := s.ValidatorAtIndex(slashing.Header_2.Header.ProposerIndex) @@ -198,7 +198,7 @@ func TestValidateProposerSlashing_ContextTimeout(t *testing.T) { require.NoError(t, err) err = 
st.SetPreviousJustifiedCheckpoint(ðpb.Checkpoint{Epoch: 0, Root: []byte{}}) require.NoError(t, err) - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + ctx, cancel := context.WithTimeout(t.Context(), 100*time.Millisecond) defer cancel() r := &Service{ @@ -228,7 +228,7 @@ func TestValidateProposerSlashing_ContextTimeout(t *testing.T) { func TestValidateProposerSlashing_Syncing(t *testing.T) { p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() slashing, s := setupValidProposerSlashing(t) diff --git a/beacon-chain/sync/validate_sync_committee_message_test.go b/beacon-chain/sync/validate_sync_committee_message_test.go index 230c817899..7685170fe1 100644 --- a/beacon-chain/sync/validate_sync_committee_message_test.go +++ b/beacon-chain/sync/validate_sync_committee_message_test.go @@ -35,7 +35,7 @@ import ( func TestService_ValidateSyncCommitteeMessage(t *testing.T) { beaconDB := testingdb.SetupDB(t) - headRoot, keys := fillUpBlocksAndState(context.Background(), t, beaconDB) + headRoot, keys := fillUpBlocksAndState(t.Context(), t, beaconDB) defaultTopic := p2p.SyncCommitteeSubnetTopicFormat fakeDigest := []byte{0xAB, 0x00, 0xCC, 0x9E} defaultTopic = defaultTopic + "/" + encoder.ProtocolSuffixSSZSnappy @@ -207,7 +207,7 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { s.cfg.beaconDB = beaconDB s.initCaches() msg.BlockRoot = headRoot[:] - hState, err := beaconDB.State(context.Background(), headRoot) + hState, err := beaconDB.State(t.Context(), headRoot) assert.NoError(t, err) s.cfg.chain = &mockChain.ChainService{ SyncCommitteeIndices: []primitives.CommitteeIndex{0}, @@ -254,7 +254,7 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { s.cfg.beaconDB = beaconDB s.initCaches() msg.BlockRoot = headRoot[:] - hState, err := beaconDB.State(context.Background(), headRoot) + hState, err := beaconDB.State(t.Context(), headRoot) assert.NoError(t, err) s.cfg.chain = &mockChain.ChainService{ Genesis: 
time.Now(), @@ -300,7 +300,7 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { s.cfg.beaconDB = beaconDB s.initCaches() msg.BlockRoot = headRoot[:] - hState, err := beaconDB.State(context.Background(), headRoot) + hState, err := beaconDB.State(t.Context(), headRoot) assert.NoError(t, err) numOfVals := hState.NumValidators() @@ -354,7 +354,7 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { s.cfg.beaconDB = beaconDB s.initCaches() msg.BlockRoot = headRoot[:] - hState, err := beaconDB.State(context.Background(), headRoot) + hState, err := beaconDB.State(t.Context(), headRoot) assert.NoError(t, err) subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount @@ -402,7 +402,7 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -487,8 +487,8 @@ func TestService_ignoreHasSeenSyncMsg(t *testing.T) { cfg: &config{chain: &mockChain.ChainService{}}, } s, _ = tt.setupSvc(s, tt.msg, "") - f := s.ignoreHasSeenSyncMsg(context.Background(), tt.msg, tt.committee) - result, err := f(context.Background()) + f := s.ignoreHasSeenSyncMsg(t.Context(), tt.msg, tt.committee) + result, err := f(t.Context()) _ = err require.Equal(t, tt.want, result) }) @@ -541,7 +541,7 @@ func TestService_rejectIncorrectSyncCommittee(t *testing.T) { } topic := tt.setupTopic(s) f := s.rejectIncorrectSyncCommittee(tt.committeeIndices, topic) - result, err := f(context.Background()) + result, err := f(t.Context()) _ = err require.Equal(t, tt.want, result) }) @@ -573,7 +573,7 @@ func Test_ignoreEmptyCommittee(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { f := ignoreEmptyCommittee(tt.committee) - result, err := f(context.Background()) + result, err := f(t.Context()) _ = err require.Equal(t, tt.want, result) }) diff 
--git a/beacon-chain/sync/validate_sync_contribution_proof_test.go b/beacon-chain/sync/validate_sync_contribution_proof_test.go index a4f1c14f03..3700843361 100644 --- a/beacon-chain/sync/validate_sync_contribution_proof_test.go +++ b/beacon-chain/sync/validate_sync_contribution_proof_test.go @@ -43,7 +43,7 @@ import ( func TestService_ValidateSyncContributionAndProof(t *testing.T) { database := testingdb.SetupDB(t) - headRoot, keys := fillUpBlocksAndState(context.Background(), t, database) + headRoot, keys := fillUpBlocksAndState(t.Context(), t, database) defaultTopic := p2p.SyncContributionAndProofSubnetTopicFormat defaultTopic = fmt.Sprintf(defaultTopic, []byte{0xAB, 0x00, 0xCC, 0x9E}) defaultTopic = defaultTopic + "/" + encoder.ProtocolSuffixSSZSnappy @@ -306,7 +306,7 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { Genesis: time.Now(), } msg.Message.Contribution.BlockRoot = headRoot[:] - hState, err := database.State(context.Background(), headRoot) + hState, err := database.State(t.Context(), headRoot) assert.NoError(t, err) sc, err := hState.CurrentSyncCommittee() assert.NoError(t, err) @@ -362,7 +362,7 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) s.cfg.beaconDB = database msg.Message.Contribution.BlockRoot = headRoot[:] - hState, err := database.State(context.Background(), headRoot) + hState, err := database.State(t.Context(), headRoot) assert.NoError(t, err) sc, err := hState.CurrentSyncCommittee() assert.NoError(t, err) @@ -426,7 +426,7 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { s.cfg.beaconDB = database s.cfg.chain = chainService msg.Message.Contribution.BlockRoot = headRoot[:] - hState, err := database.State(context.Background(), headRoot) + hState, err := database.State(t.Context(), headRoot) assert.NoError(t, err) sc, err := hState.CurrentSyncCommittee() assert.NoError(t, err) @@ -503,7 +503,7 @@ func 
TestService_ValidateSyncContributionAndProof(t *testing.T) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) s.cfg.beaconDB = database msg.Message.Contribution.BlockRoot = headRoot[:] - hState, err := database.State(context.Background(), headRoot) + hState, err := database.State(t.Context(), headRoot) assert.NoError(t, err) sc, err := hState.CurrentSyncCommittee() assert.NoError(t, err) @@ -583,7 +583,7 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) msg.Message.Contribution.BlockRoot = headRoot[:] s.cfg.beaconDB = database - hState, err := database.State(context.Background(), headRoot) + hState, err := database.State(t.Context(), headRoot) assert.NoError(t, err) sc, err := hState.CurrentSyncCommittee() assert.NoError(t, err) @@ -665,7 +665,7 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) msg.Message.Contribution.BlockRoot = headRoot[:] s.cfg.beaconDB = database - hState, err := database.State(context.Background(), headRoot) + hState, err := database.State(t.Context(), headRoot) assert.NoError(t, err) sc, err := hState.CurrentSyncCommittee() assert.NoError(t, err) @@ -759,7 +759,7 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) msg.Message.Contribution.BlockRoot = headRoot[:] s.cfg.beaconDB = database - hState, err := database.State(context.Background(), headRoot) + hState, err := database.State(t.Context(), headRoot) assert.NoError(t, err) sc, err := hState.CurrentSyncCommittee() assert.NoError(t, err) @@ -847,7 +847,7 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() ctx, cancel := context.WithCancel(ctx) defer cancel() cw := startup.NewClockSynchronizer() @@ 
-887,7 +887,7 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) { } func TestValidateSyncContributionAndProof(t *testing.T) { - ctx := context.Background() + ctx := t.Context() database := testingdb.SetupDB(t) headRoot, keys := fillUpBlocksAndState(ctx, t, database) defaultTopic := p2p.SyncContributionAndProofSubnetTopicFormat @@ -913,7 +913,7 @@ func TestValidateSyncContributionAndProof(t *testing.T) { Genesis: time.Now(), ValidatorsRoot: [32]byte{'A'}, } - s := NewService(context.Background(), + s := NewService(t.Context(), WithP2P(mockp2p.NewTestP2P(t)), WithInitialSync(&mockSync.Sync{IsSyncing: false}), WithChainService(chainService), @@ -924,7 +924,7 @@ func TestValidateSyncContributionAndProof(t *testing.T) { s.cfg.stateGen = stategen.New(database, doublylinkedtree.New()) msg.Message.Contribution.BlockRoot = headRoot[:] s.cfg.beaconDB = database - hState, err := database.State(context.Background(), headRoot) + hState, err := database.State(t.Context(), headRoot) assert.NoError(t, err) sc, err := hState.CurrentSyncCommittee() assert.NoError(t, err) @@ -1029,7 +1029,7 @@ func fillUpBlocksAndState(ctx context.Context, t *testing.T, beaconDB db.Databas sCom, err := altair.NextSyncCommittee(ctx, gs) assert.NoError(t, err) assert.NoError(t, gs.SetCurrentSyncCommittee(sCom)) - assert.NoError(t, beaconDB.SaveGenesisData(context.Background(), gs)) + assert.NoError(t, beaconDB.SaveGenesisData(t.Context(), gs)) testState := gs.Copy() var hRoot [32]byte @@ -1061,7 +1061,7 @@ func syncSelectionProofSigningRoot(st state.BeaconState, slot primitives.Slot, c } func TestService_setSyncContributionIndexSlotSeen(t *testing.T) { - s := NewService(context.Background(), WithP2P(mockp2p.NewTestP2P(t))) + s := NewService(t.Context(), WithP2P(mockp2p.NewTestP2P(t))) s.initCaches() // Empty cache diff --git a/beacon-chain/sync/validate_voluntary_exit_test.go b/beacon-chain/sync/validate_voluntary_exit_test.go index bb9db362a2..ce44f273ed 100644 --- 
a/beacon-chain/sync/validate_voluntary_exit_test.go +++ b/beacon-chain/sync/validate_voluntary_exit_test.go @@ -2,7 +2,6 @@ package sync import ( "bytes" - "context" "crypto/rand" "math" "reflect" @@ -79,7 +78,7 @@ func TestValidateVoluntaryExit_ValidExit(t *testing.T) { params.SetupTestConfigCleanup(t) p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() exit, s := setupValidExit(t) @@ -140,7 +139,7 @@ func TestValidateVoluntaryExit_ValidExit(t *testing.T) { func TestValidateVoluntaryExit_InvalidExitSlot(t *testing.T) { p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() exit, s := setupValidExit(t) // Set state slot to 1 to cause exit object fail to verify. @@ -174,7 +173,7 @@ func TestValidateVoluntaryExit_InvalidExitSlot(t *testing.T) { func TestValidateVoluntaryExit_ValidExit_Syncing(t *testing.T) { p := p2ptest.NewTestP2P(t) - ctx := context.Background() + ctx := t.Context() exit, s := setupValidExit(t) diff --git a/beacon-chain/verification/batch_test.go b/beacon-chain/verification/batch_test.go index 19590daf0c..97a743f693 100644 --- a/beacon-chain/verification/batch_test.go +++ b/beacon-chain/verification/batch_test.go @@ -1,7 +1,6 @@ package verification import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/consensus-types/blocks" @@ -12,7 +11,7 @@ import ( ) func TestBatchVerifier(t *testing.T) { - ctx := context.Background() + ctx := t.Context() mockCV := func(err error) roblobCommitmentVerifier { return func(...blocks.ROBlob) error { return err diff --git a/beacon-chain/verification/blob_test.go b/beacon-chain/verification/blob_test.go index f9188ccfe9..25753f290f 100644 --- a/beacon-chain/verification/blob_test.go +++ b/beacon-chain/verification/blob_test.go @@ -128,7 +128,7 @@ func TestSlotAboveFinalized(t *testing.T) { } func TestValidProposerSignature_Cached(t *testing.T) { - ctx := context.Background() + ctx := t.Context() _, blobs := util.GenerateTestDenebBlockWithSidecar(t, 
[32]byte{}, 0, 1) b := blobs[0] expectedSd := blobToSignatureData(b) @@ -165,7 +165,7 @@ func TestValidProposerSignature_Cached(t *testing.T) { } func TestValidProposerSignature_CacheMiss(t *testing.T) { - ctx := context.Background() + ctx := t.Context() _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1) b := blobs[0] expectedSd := blobToSignatureData(b) @@ -446,7 +446,7 @@ func TestSidecarKzgProofVerified(t *testing.T) { } func TestSidecarProposerExpected(t *testing.T) { - ctx := context.Background() + ctx := t.Context() _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 1) b := blobs[0] t.Run("cached, matches", func(t *testing.T) { diff --git a/beacon-chain/verification/cache_test.go b/beacon-chain/verification/cache_test.go index cce92aed23..06fba601dc 100644 --- a/beacon-chain/verification/cache_test.go +++ b/beacon-chain/verification/cache_test.go @@ -1,7 +1,6 @@ package verification import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing" @@ -98,7 +97,7 @@ func (m *mockValidatorAtIndexer) ValidatorAtIndex(idx primitives.ValidatorIndex) var _ validatorAtIndexer = &mockValidatorAtIndexer{} func TestProposerCache(t *testing.T) { - ctx := context.Background() + ctx := t.Context() // 3 validators because that was the first number that produced a non-zero proposer index by default st, _ := util.DeterministicGenesisStateDeneb(t, 3) diff --git a/beacon-chain/verification/data_column_test.go b/beacon-chain/verification/data_column_test.go index d42a191208..d16c4b9a43 100644 --- a/beacon-chain/verification/data_column_test.go +++ b/beacon-chain/verification/data_column_test.go @@ -383,7 +383,7 @@ func TestValidProposerSignature(t *testing.T) { } verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements) - err := verifier.ValidProposerSignature(context.Background()) + err := verifier.ValidProposerSignature(t.Context()) require.Equal(t, true, 
verifier.results.executed(RequireValidProposerSignature)) if tc.isError { @@ -395,7 +395,7 @@ func TestValidProposerSignature(t *testing.T) { require.NoError(t, err) require.NoError(t, verifier.results.result(RequireValidProposerSignature)) - err = verifier.ValidProposerSignature(context.Background()) + err = verifier.ValidProposerSignature(t.Context()) require.NoError(t, err) }) } @@ -914,7 +914,7 @@ func TestDataColumnsSidecarProposerExpected(t *testing.T) { } verifier := initializer.NewDataColumnsVerifier(tc.columns, GossipDataColumnSidecarRequirements) - err := verifier.SidecarProposerExpected(context.Background()) + err := verifier.SidecarProposerExpected(t.Context()) require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected)) @@ -927,7 +927,7 @@ func TestDataColumnsSidecarProposerExpected(t *testing.T) { require.NoError(t, err) require.NoError(t, verifier.results.result(RequireSidecarProposerExpected)) - err = verifier.SidecarProposerExpected(context.Background()) + err = verifier.SidecarProposerExpected(t.Context()) require.NoError(t, err) }) } diff --git a/beacon-chain/verification/initializer_test.go b/beacon-chain/verification/initializer_test.go index ae6b7dbed1..59810159d1 100644 --- a/beacon-chain/verification/initializer_test.go +++ b/beacon-chain/verification/initializer_test.go @@ -2,7 +2,6 @@ package verification import ( "bytes" - "context" "testing" "time" @@ -12,7 +11,7 @@ import ( ) func TestInitializerWaiter(t *testing.T) { - ctx := context.Background() + ctx := t.Context() vr := bytesutil.ToBytes32([]byte{0, 1, 1, 2, 3, 5}) gen := time.Now() c := startup.NewClock(gen, vr) diff --git a/cache/nonblocking/lru_test.go b/cache/nonblocking/lru_test.go index 11042da0c0..f909bf04c8 100644 --- a/cache/nonblocking/lru_test.go +++ b/cache/nonblocking/lru_test.go @@ -16,7 +16,7 @@ func TestLRU_Concurrency(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - ctx, cancel := context.WithTimeout(context.Background(), 
time.Second*2) + ctx, cancel := context.WithTimeout(t.Context(), time.Second*2) defer cancel() for i := 0; i < 100; i++ { go func(j int) { diff --git a/changelog/pvl-testing-context.md b/changelog/pvl-testing-context.md new file mode 100644 index 0000000000..67e1d73a88 --- /dev/null +++ b/changelog/pvl-testing-context.md @@ -0,0 +1,3 @@ +### Ignored + +- Replaced context.Background, etc to use t.Context. This is only in test code and does not change much. diff --git a/cmd/validator/slashing-protection/import_export_test.go b/cmd/validator/slashing-protection/import_export_test.go index 33b7b09174..86c2867e56 100644 --- a/cmd/validator/slashing-protection/import_export_test.go +++ b/cmd/validator/slashing-protection/import_export_test.go @@ -3,6 +3,9 @@ package historycmd import ( "encoding/json" "flag" + "path/filepath" + "testing" + "github.com/OffchainLabs/prysm/v6/cmd" "github.com/OffchainLabs/prysm/v6/cmd/validator/flags" "github.com/OffchainLabs/prysm/v6/io/file" @@ -13,8 +16,6 @@ import ( "github.com/OffchainLabs/prysm/v6/validator/slashing-protection-history/format" mocks "github.com/OffchainLabs/prysm/v6/validator/testing" "github.com/urfave/cli/v2" - "path/filepath" - "testing" ) func setupCliCtx( diff --git a/cmd/validator/wallet/recover_test.go b/cmd/validator/wallet/recover_test.go index 2307ecd028..42c641a1d1 100644 --- a/cmd/validator/wallet/recover_test.go +++ b/cmd/validator/wallet/recover_test.go @@ -1,7 +1,6 @@ package wallet import ( - "context" "flag" "os" "path/filepath" @@ -72,7 +71,7 @@ func TestRecoverDerivedWallet(t *testing.T) { cliCtx := createRecoverCliCtx(t, cfg) require.NoError(t, walletRecover(cliCtx)) - ctx := context.Background() + ctx := t.Context() w, err := wallet.OpenWallet(cliCtx.Context, &wallet.Config{ WalletDir: cfg.walletDir, WalletPassword: password, diff --git a/config/proposer/loader/loader_test.go b/config/proposer/loader/loader_test.go index 190a3551b3..2043b4bc25 100644 --- a/config/proposer/loader/loader_test.go +++ 
b/config/proposer/loader/loader_test.go @@ -1,7 +1,6 @@ package loader import ( - "context" "flag" "fmt" "net/http" @@ -83,7 +82,7 @@ func TestProposerSettingsLoader(t *testing.T) { }, }, } - return db.SaveProposerSettings(context.Background(), settings) + return db.SaveProposerSettings(t.Context(), settings) }, }, { @@ -164,7 +163,7 @@ func TestProposerSettingsLoader(t *testing.T) { }, }, } - return db.SaveProposerSettings(context.Background(), settings) + return db.SaveProposerSettings(t.Context(), settings) }, }, { @@ -218,7 +217,7 @@ func TestProposerSettingsLoader(t *testing.T) { }, }, } - return db.SaveProposerSettings(context.Background(), settings) + return db.SaveProposerSettings(t.Context(), settings) }, validatorRegistrationEnabled: true, }, @@ -731,7 +730,7 @@ func TestProposerSettingsLoader(t *testing.T) { }, }, } - return db.SaveProposerSettings(context.Background(), settings) + return db.SaveProposerSettings(t.Context(), settings) }, }, { @@ -786,7 +785,7 @@ func TestProposerSettingsLoader(t *testing.T) { }, }, } - return db.SaveProposerSettings(context.Background(), settings) + return db.SaveProposerSettings(t.Context(), settings) }, validatorRegistrationEnabled: true, }, @@ -834,7 +833,7 @@ func TestProposerSettingsLoader(t *testing.T) { }, }, } - return db.SaveProposerSettings(context.Background(), settings) + return db.SaveProposerSettings(t.Context(), settings) }, }, { diff --git a/config/util_test.go b/config/util_test.go index c324b1146c..3bf75552c9 100644 --- a/config/util_test.go +++ b/config/util_test.go @@ -1,7 +1,6 @@ package config import ( - "context" "net/http" "net/http/httptest" "os" @@ -23,7 +22,7 @@ func TestUnmarshalFromURL_Success(t *testing.T) { defer server.Close() var result map[string]string - err := UnmarshalFromURL(context.Background(), server.URL, &result) + err := UnmarshalFromURL(t.Context(), server.URL, &result) if err != nil { t.Errorf("UnmarshalFromURL failed: %v", err) } diff --git 
a/consensus-types/blocks/proofs_test.go b/consensus-types/blocks/proofs_test.go index 91b4f4652b..9604cbe739 100644 --- a/consensus-types/blocks/proofs_test.go +++ b/consensus-types/blocks/proofs_test.go @@ -1,7 +1,6 @@ package blocks import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/container/trie" @@ -16,7 +15,7 @@ func TestComputeBlockBodyFieldRoots_Phase0(t *testing.T) { b, ok := i.(*BeaconBlockBody) require.Equal(t, true, ok) - fieldRoots, err := ComputeBlockBodyFieldRoots(context.Background(), b) + fieldRoots, err := ComputeBlockBodyFieldRoots(t.Context(), b) require.NoError(t, err) trie, err := trie.GenerateTrieFromItems(fieldRoots, 3) require.NoError(t, err) @@ -39,7 +38,7 @@ func TestComputeBlockBodyFieldRoots_Altair(t *testing.T) { b, ok := i.(*BeaconBlockBody) require.Equal(t, true, ok) - fieldRoots, err := ComputeBlockBodyFieldRoots(context.Background(), b) + fieldRoots, err := ComputeBlockBodyFieldRoots(t.Context(), b) require.NoError(t, err) trie, err := trie.GenerateTrieFromItems(fieldRoots, 4) require.NoError(t, err) @@ -62,7 +61,7 @@ func TestComputeBlockBodyFieldRoots_Bellatrix(t *testing.T) { b, ok := i.(*BeaconBlockBody) require.Equal(t, true, ok) - fieldRoots, err := ComputeBlockBodyFieldRoots(context.Background(), b) + fieldRoots, err := ComputeBlockBodyFieldRoots(t.Context(), b) require.NoError(t, err) trie, err := trie.GenerateTrieFromItems(fieldRoots, 4) require.NoError(t, err) @@ -85,7 +84,7 @@ func TestComputeBlockBodyFieldRoots_Capella(t *testing.T) { b, ok := i.(*BeaconBlockBody) require.Equal(t, true, ok) - fieldRoots, err := ComputeBlockBodyFieldRoots(context.Background(), b) + fieldRoots, err := ComputeBlockBodyFieldRoots(t.Context(), b) require.NoError(t, err) trie, err := trie.GenerateTrieFromItems(fieldRoots, 4) require.NoError(t, err) @@ -108,7 +107,7 @@ func TestComputeBlockBodyFieldRoots_Deneb(t *testing.T) { b, ok := i.(*BeaconBlockBody) require.Equal(t, true, ok) - fieldRoots, err := 
ComputeBlockBodyFieldRoots(context.Background(), b) + fieldRoots, err := ComputeBlockBodyFieldRoots(t.Context(), b) require.NoError(t, err) trie, err := trie.GenerateTrieFromItems(fieldRoots, 4) require.NoError(t, err) @@ -131,7 +130,7 @@ func TestComputeBlockBodyFieldRoots_Electra(t *testing.T) { b, ok := i.(*BeaconBlockBody) require.Equal(t, true, ok) - fieldRoots, err := ComputeBlockBodyFieldRoots(context.Background(), b) + fieldRoots, err := ComputeBlockBodyFieldRoots(t.Context(), b) require.NoError(t, err) trie, err := trie.GenerateTrieFromItems(fieldRoots, 4) require.NoError(t, err) diff --git a/contracts/deposit/contract_test.go b/contracts/deposit/contract_test.go index 0cb6a4b267..24917cafc9 100644 --- a/contracts/deposit/contract_test.go +++ b/contracts/deposit/contract_test.go @@ -1,7 +1,6 @@ package deposit_test import ( - "context" "encoding/binary" "testing" @@ -67,7 +66,7 @@ func TestValidatorRegister_OK(t *testing.T) { }, } - logs, err := testAccount.Backend.Client().FilterLogs(context.Background(), query) + logs, err := testAccount.Backend.Client().FilterLogs(t.Context(), query) assert.NoError(t, err, "Unable to get logs of deposit contract") merkleTreeIndex := make([]uint64, 5) diff --git a/encoding/ssz/detect/configfork_test.go b/encoding/ssz/detect/configfork_test.go index 99b86fa8bc..cffda3eb24 100644 --- a/encoding/ssz/detect/configfork_test.go +++ b/encoding/ssz/detect/configfork_test.go @@ -1,7 +1,6 @@ package detect import ( - "context" "fmt" "testing" @@ -151,7 +150,7 @@ func stateForVersion(v int) (state.BeaconState, error) { } func TestUnmarshalState(t *testing.T) { - ctx := context.Background() + ctx := t.Context() defer util.HackForksMaxuint(t, []int{version.Electra, version.Fulu})() bc := params.BeaconConfig() diff --git a/proto/prysm/v1alpha1/attestation/attestation_utils_test.go b/proto/prysm/v1alpha1/attestation/attestation_utils_test.go index d8ba8eff3f..44a19d3d21 100644 --- 
a/proto/prysm/v1alpha1/attestation/attestation_utils_test.go +++ b/proto/prysm/v1alpha1/attestation/attestation_utils_test.go @@ -1,7 +1,6 @@ package attestation_test import ( - "context" "testing" fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams" @@ -204,7 +203,7 @@ func TestIsValidAttestationIndices(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := attestation.IsValidAttestationIndices(context.Background(), tt.att, params.BeaconConfig().MaxValidatorsPerCommittee, params.BeaconConfig().MaxCommitteesPerSlot) + err := attestation.IsValidAttestationIndices(t.Context(), tt.att, params.BeaconConfig().MaxValidatorsPerCommittee, params.BeaconConfig().MaxCommitteesPerSlot) if tt.wantedErr != "" { assert.ErrorContains(t, tt.wantedErr, err) } else { @@ -240,7 +239,7 @@ func BenchmarkIsValidAttestationIndices(b *testing.B) { } b.ResetTimer() for i := 0; i < b.N; i++ { - if err := attestation.IsValidAttestationIndices(context.Background(), att, params.BeaconConfig().MaxValidatorsPerCommittee, params.BeaconConfig().MaxCommitteesPerSlot); err != nil { + if err := attestation.IsValidAttestationIndices(b.Context(), att, params.BeaconConfig().MaxValidatorsPerCommittee, params.BeaconConfig().MaxCommitteesPerSlot); err != nil { require.NoError(b, err) } } diff --git a/runtime/interop/generate_genesis_state_bellatrix_test.go b/runtime/interop/generate_genesis_state_bellatrix_test.go index ccc20cf791..fdc285f7ef 100644 --- a/runtime/interop/generate_genesis_state_bellatrix_test.go +++ b/runtime/interop/generate_genesis_state_bellatrix_test.go @@ -1,7 +1,6 @@ package interop import ( - "context" "testing" state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native" @@ -34,7 +33,7 @@ func TestGenerateGenesisStateBellatrix(t *testing.T) { DepositCount: 0, BlockHash: make([]byte, 32), } - g, _, err := GenerateGenesisStateBellatrix(context.Background(), 0, params.BeaconConfig().MinGenesisActiveValidatorCount, ep, 
e1d) + g, _, err := GenerateGenesisStateBellatrix(t.Context(), 0, params.BeaconConfig().MinGenesisActiveValidatorCount, ep, e1d) require.NoError(t, err) tr, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth) diff --git a/runtime/interop/generate_genesis_state_test.go b/runtime/interop/generate_genesis_state_test.go index d8bab7670e..1e51da0554 100644 --- a/runtime/interop/generate_genesis_state_test.go +++ b/runtime/interop/generate_genesis_state_test.go @@ -1,7 +1,6 @@ package interop_test import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition" @@ -25,7 +24,7 @@ func TestGenerateGenesisState(t *testing.T) { require.NoError(t, err) root, err := tr.HashTreeRoot() require.NoError(t, err) - genesisState, err := transition.GenesisBeaconState(context.Background(), deposits, 0, ð.Eth1Data{ + genesisState, err := transition.GenesisBeaconState(t.Context(), deposits, 0, ð.Eth1Data{ DepositRoot: root[:], DepositCount: uint64(len(deposits)), }) diff --git a/runtime/interop/premine-state_test.go b/runtime/interop/premine-state_test.go index f69fe50284..299df9272a 100644 --- a/runtime/interop/premine-state_test.go +++ b/runtime/interop/premine-state_test.go @@ -1,7 +1,6 @@ package interop import ( - "context" "math/big" "testing" @@ -21,6 +20,6 @@ func TestPremineGenesis_Electra(t *testing.T) { ExcessBlobGas: &one, BlobGasUsed: &one, }) - _, err := NewPreminedGenesis(context.Background(), genesis.Time(), 10, 10, version.Electra, genesis) + _, err := NewPreminedGenesis(t.Context(), genesis.Time(), 10, 10, version.Electra, genesis) require.NoError(t, err) } diff --git a/runtime/messagehandler/messagehandler_test.go b/runtime/messagehandler/messagehandler_test.go index 0d2c17791d..365ad8e1df 100644 --- a/runtime/messagehandler/messagehandler_test.go +++ b/runtime/messagehandler/messagehandler_test.go @@ -13,7 +13,7 @@ import ( func TestSafelyHandleMessage(t *testing.T) { hook := logTest.NewGlobal() - 
messagehandler.SafelyHandleMessage(context.Background(), func(_ context.Context, _ *pubsub.Message) error { + messagehandler.SafelyHandleMessage(t.Context(), func(_ context.Context, _ *pubsub.Message) error { panic("bad!") return nil }, &pubsub.Message{}) @@ -24,7 +24,7 @@ func TestSafelyHandleMessage(t *testing.T) { func TestSafelyHandleMessage_NoData(t *testing.T) { hook := logTest.NewGlobal() - messagehandler.SafelyHandleMessage(context.Background(), func(_ context.Context, _ *pubsub.Message) error { + messagehandler.SafelyHandleMessage(t.Context(), func(_ context.Context, _ *pubsub.Message) error { panic("bad!") return nil }, nil) diff --git a/runtime/prereqs/prereq_test.go b/runtime/prereqs/prereq_test.go index f63d2f4736..3275d2e2f1 100644 --- a/runtime/prereqs/prereq_test.go +++ b/runtime/prereqs/prereq_test.go @@ -13,16 +13,16 @@ func TestMeetsMinPlatformReqs(t *testing.T) { // Linux runtimeOS = "linux" runtimeArch = "amd64" - meetsReqs, err := meetsMinPlatformReqs(context.Background()) + meetsReqs, err := meetsMinPlatformReqs(t.Context()) require.Equal(t, true, meetsReqs) require.NoError(t, err) runtimeArch = "arm64" - meetsReqs, err = meetsMinPlatformReqs(context.Background()) + meetsReqs, err = meetsMinPlatformReqs(t.Context()) require.Equal(t, true, meetsReqs) require.NoError(t, err) // mips64 is not supported runtimeArch = "mips64" - meetsReqs, err = meetsMinPlatformReqs(context.Background()) + meetsReqs, err = meetsMinPlatformReqs(t.Context()) require.Equal(t, false, meetsReqs) require.NoError(t, err) @@ -33,7 +33,7 @@ func TestMeetsMinPlatformReqs(t *testing.T) { } runtimeOS = "darwin" runtimeArch = "amd64" - meetsReqs, err = meetsMinPlatformReqs(context.Background()) + meetsReqs, err = meetsMinPlatformReqs(t.Context()) require.Equal(t, false, meetsReqs) require.ErrorContains(t, "error obtaining MacOS version", err) @@ -41,7 +41,7 @@ func TestMeetsMinPlatformReqs(t *testing.T) { execShellOutput = func(ctx context.Context, command string, args 
...string) (string, error) { return "10.4", nil } - meetsReqs, err = meetsMinPlatformReqs(context.Background()) + meetsReqs, err = meetsMinPlatformReqs(t.Context()) require.Equal(t, false, meetsReqs) require.NoError(t, err) @@ -49,7 +49,7 @@ func TestMeetsMinPlatformReqs(t *testing.T) { execShellOutput = func(ctx context.Context, command string, args ...string) (string, error) { return "10.14", nil } - meetsReqs, err = meetsMinPlatformReqs(context.Background()) + meetsReqs, err = meetsMinPlatformReqs(t.Context()) require.Equal(t, true, meetsReqs) require.NoError(t, err) @@ -57,7 +57,7 @@ func TestMeetsMinPlatformReqs(t *testing.T) { execShellOutput = func(ctx context.Context, command string, args ...string) (string, error) { return "10.15.7", nil } - meetsReqs, err = meetsMinPlatformReqs(context.Background()) + meetsReqs, err = meetsMinPlatformReqs(t.Context()) require.Equal(t, true, meetsReqs) require.NoError(t, err) @@ -65,18 +65,18 @@ func TestMeetsMinPlatformReqs(t *testing.T) { execShellOutput = func(ctx context.Context, command string, args ...string) (string, error) { return "tiger.lion", nil } - meetsReqs, err = meetsMinPlatformReqs(context.Background()) + meetsReqs, err = meetsMinPlatformReqs(t.Context()) require.Equal(t, false, meetsReqs) require.ErrorContains(t, "error parsing version", err) // Windows runtimeOS = "windows" runtimeArch = "amd64" - meetsReqs, err = meetsMinPlatformReqs(context.Background()) + meetsReqs, err = meetsMinPlatformReqs(t.Context()) require.Equal(t, true, meetsReqs) require.NoError(t, err) runtimeArch = "arm64" - meetsReqs, err = meetsMinPlatformReqs(context.Background()) + meetsReqs, err = meetsMinPlatformReqs(t.Context()) require.Equal(t, false, meetsReqs) require.NoError(t, err) } @@ -106,7 +106,7 @@ func TestWarnIfNotSupported(t *testing.T) { runtimeOS = "linux" runtimeArch = "amd64" hook := logTest.NewGlobal() - WarnIfPlatformNotSupported(context.Background()) + WarnIfPlatformNotSupported(t.Context()) 
require.LogsDoNotContain(t, hook, "Failed to detect host platform") require.LogsDoNotContain(t, hook, "platform is not supported") @@ -116,13 +116,13 @@ func TestWarnIfNotSupported(t *testing.T) { runtimeOS = "darwin" runtimeArch = "amd64" hook = logTest.NewGlobal() - WarnIfPlatformNotSupported(context.Background()) + WarnIfPlatformNotSupported(t.Context()) require.LogsContain(t, hook, "Failed to detect host platform") require.LogsContain(t, hook, "error parsing version") runtimeOS = "falseOs" runtimeArch = "falseArch" hook = logTest.NewGlobal() - WarnIfPlatformNotSupported(context.Background()) + WarnIfPlatformNotSupported(t.Context()) require.LogsContain(t, hook, "platform is not supported") } diff --git a/testing/endtoend/component_handler_test.go b/testing/endtoend/component_handler_test.go index a10d4e42dd..28ae2158e6 100644 --- a/testing/endtoend/component_handler_test.go +++ b/testing/endtoend/component_handler_test.go @@ -39,7 +39,7 @@ type componentHandler struct { } func NewComponentHandler(cfg *e2etypes.E2EConfig, t *testing.T) *componentHandler { - ctx, done := context.WithCancel(context.Background()) + ctx, done := context.WithCancel(t.Context()) g, ctx := errgroup.WithContext(ctx) return &componentHandler{ diff --git a/testing/endtoend/components/web3remotesigner_test.go b/testing/endtoend/components/web3remotesigner_test.go index b4844281ca..79de5b2f92 100644 --- a/testing/endtoend/components/web3remotesigner_test.go +++ b/testing/endtoend/components/web3remotesigner_test.go @@ -16,7 +16,7 @@ func TestWeb3RemoteSigner_StartsAndReturnsPublicKeys(t *testing.T) { wsc := components.NewWeb3RemoteSigner() - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second) defer cancel() go func() { diff --git a/testing/endtoend/endtoend_test.go b/testing/endtoend/endtoend_test.go index 59ecf30737..cc7e0f1589 100644 --- a/testing/endtoend/endtoend_test.go +++ 
b/testing/endtoend/endtoend_test.go @@ -493,7 +493,7 @@ func (r *testRunner) defaultEndToEndRun() error { // Calculate genesis time. nodeClient := eth.NewNodeClient(conns[0]) - genesis, err := nodeClient.GetGenesis(context.Background(), &emptypb.Empty{}) + genesis, err := nodeClient.GetGenesis(t.Context(), &emptypb.Empty{}) require.NoError(t, err) tickingStartTime := helpers.EpochTickerStartTime(genesis) @@ -592,7 +592,7 @@ func (r *testRunner) scenarioRun() error { // Calculate genesis time. nodeClient := eth.NewNodeClient(conns[0]) - genesis, err := nodeClient.GetGenesis(context.Background(), &emptypb.Empty{}) + genesis, err := nodeClient.GetGenesis(t.Context(), &emptypb.Empty{}) require.NoError(t, err) tickingStartTime := helpers.EpochTickerStartTime(genesis) diff --git a/testing/endtoend/slasher_simulator_e2e_test.go b/testing/endtoend/slasher_simulator_e2e_test.go index e1c216c1a0..30902007c7 100644 --- a/testing/endtoend/slasher_simulator_e2e_test.go +++ b/testing/endtoend/slasher_simulator_e2e_test.go @@ -52,7 +52,7 @@ func TestEndToEnd_SlasherSimulator(t *testing.T) { params.OverrideBeaconConfig(params.E2ETestConfig().Copy()) hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() // Run for 10 epochs if not in long-running to confirm long-running has no issues. 
simulatorParams := slashersimulator.DefaultParams() diff --git a/testing/middleware/engine-api-proxy/proxy_test.go b/testing/middleware/engine-api-proxy/proxy_test.go index 488141c490..f59bae0928 100644 --- a/testing/middleware/engine-api-proxy/proxy_test.go +++ b/testing/middleware/engine-api-proxy/proxy_test.go @@ -1,7 +1,6 @@ package proxy import ( - "context" "encoding/json" "net/http" "net/http/httptest" @@ -21,7 +20,7 @@ func TestProxy(t *testing.T) { t.Run("fails to proxy if destination is down", func(t *testing.T) { logger := logrus.New() hook := logTest.NewLocal(logger) - ctx := context.Background() + ctx := t.Context() r := rand.NewGenerator() proxy, err := New( WithPort(r.Intn(50000)), diff --git a/testing/slasher/simulator/attestation_generator_test.go b/testing/slasher/simulator/attestation_generator_test.go index fb9b5bee79..6bd752ba8f 100644 --- a/testing/slasher/simulator/attestation_generator_test.go +++ b/testing/slasher/simulator/attestation_generator_test.go @@ -1,7 +1,6 @@ package simulator import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/config/params" @@ -11,7 +10,7 @@ import ( ) func TestGenerateAttestationsForSlot_Slashing(t *testing.T) { - ctx := context.Background() + ctx := t.Context() simParams := &Parameters{ SecondsPerSlot: params.BeaconConfig().SecondsPerSlot, SlotsPerEpoch: params.BeaconConfig().SlotsPerEpoch, @@ -37,7 +36,7 @@ func TestGenerateAttestationsForSlot_Slashing(t *testing.T) { } func TestGenerateAttestationsForSlot_CorrectIndices(t *testing.T) { - ctx := context.Background() + ctx := t.Context() simParams := &Parameters{ SecondsPerSlot: params.BeaconConfig().SecondsPerSlot, SlotsPerEpoch: params.BeaconConfig().SlotsPerEpoch, diff --git a/testing/slasher/simulator/block_generator_test.go b/testing/slasher/simulator/block_generator_test.go index 7841db93c5..66e81c0392 100644 --- a/testing/slasher/simulator/block_generator_test.go +++ b/testing/slasher/simulator/block_generator_test.go @@ -2,14 +2,13 @@ 
package simulator import ( "bytes" - "context" "testing" "github.com/OffchainLabs/prysm/v6/testing/require" ) func TestGenerateBlockHeadersForSlot_Slashing(t *testing.T) { - ctx := context.Background() + ctx := t.Context() simParams := &Parameters{ AggregationPercent: 1, NumValidators: 64, diff --git a/testing/util/bellatrix_state_test.go b/testing/util/bellatrix_state_test.go index 9a6b9668d8..5f383a620f 100644 --- a/testing/util/bellatrix_state_test.go +++ b/testing/util/bellatrix_state_test.go @@ -1,7 +1,6 @@ package util import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/config/params" @@ -15,7 +14,7 @@ func TestDeterministicGenesisStateBellatrix(t *testing.T) { } func TestGenesisBeaconStateBellatrix(t *testing.T) { - ctx := context.Background() + ctx := t.Context() deposits, _, err := DeterministicDepositsAndKeys(params.BeaconConfig().MaxCommitteesPerSlot) require.NoError(t, err) eth1Data, err := DeterministicEth1Data(len(deposits)) diff --git a/testing/util/block_test.go b/testing/util/block_test.go index b9fe3de555..4a5eebb0b7 100644 --- a/testing/util/block_test.go +++ b/testing/util/block_test.go @@ -1,7 +1,6 @@ package util import ( - "context" "testing" coreBlock "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks" @@ -26,7 +25,7 @@ func TestGenerateFullBlock_PassesStateTransition(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - _, err = transition.ExecuteStateTransition(context.Background(), beaconState, wsb) + _, err = transition.ExecuteStateTransition(t.Context(), beaconState, wsb) require.NoError(t, err) } @@ -41,7 +40,7 @@ func TestGenerateFullBlock_ThousandValidators(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - _, err = transition.ExecuteStateTransition(context.Background(), beaconState, wsb) + _, err = transition.ExecuteStateTransition(t.Context(), beaconState, wsb) require.NoError(t, err) } @@ 
-60,7 +59,7 @@ func TestGenerateFullBlock_Passes4Epochs(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - beaconState, err = transition.ExecuteStateTransition(context.Background(), beaconState, wsb) + beaconState, err = transition.ExecuteStateTransition(t.Context(), beaconState, wsb) require.NoError(t, err) } @@ -87,7 +86,7 @@ func TestGenerateFullBlock_ValidProposerSlashings(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - beaconState, err = transition.ExecuteStateTransition(context.Background(), beaconState, wsb) + beaconState, err = transition.ExecuteStateTransition(t.Context(), beaconState, wsb) require.NoError(t, err) slashableIndice := block.Block.Body.ProposerSlashings[0].Header_1.Header.ProposerIndex @@ -108,7 +107,7 @@ func TestGenerateFullBlock_ValidAttesterSlashings(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - beaconState, err = transition.ExecuteStateTransition(context.Background(), beaconState, wsb) + beaconState, err = transition.ExecuteStateTransition(t.Context(), beaconState, wsb) require.NoError(t, err) slashableIndices := block.Block.Body.AttesterSlashings[0].Attestation_1.AttestingIndices @@ -130,7 +129,7 @@ func TestGenerateFullBlock_ValidAttestations(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - beaconState, err = transition.ExecuteStateTransition(context.Background(), beaconState, wsb) + beaconState, err = transition.ExecuteStateTransition(t.Context(), beaconState, wsb) require.NoError(t, err) atts, err := beaconState.CurrentEpochAttestations() require.NoError(t, err) @@ -153,7 +152,7 @@ func TestGenerateFullBlock_ValidDeposits(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - beaconState, err = 
transition.ExecuteStateTransition(context.Background(), beaconState, wsb) + beaconState, err = transition.ExecuteStateTransition(t.Context(), beaconState, wsb) require.NoError(t, err) depositedPubkey := block.Block.Body.Deposits[0].Data.PublicKey @@ -181,7 +180,7 @@ func TestGenerateFullBlock_ValidVoluntaryExits(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(block) require.NoError(t, err) - beaconState, err = transition.ExecuteStateTransition(context.Background(), beaconState, wsb) + beaconState, err = transition.ExecuteStateTransition(t.Context(), beaconState, wsb) require.NoError(t, err) exitedIndex := block.Block.Body.VoluntaryExits[0].Exit.ValidatorIndex diff --git a/testing/util/deposits_test.go b/testing/util/deposits_test.go index 66f30578b6..7b023b1e60 100644 --- a/testing/util/deposits_test.go +++ b/testing/util/deposits_test.go @@ -2,7 +2,6 @@ package util import ( "bytes" - "context" "encoding/hex" "testing" @@ -255,7 +254,7 @@ func TestSetupInitialDeposits_1024Entries_PartialDeposits(t *testing.T) { func TestDeterministicGenesisState_100Validators(t *testing.T) { validatorCount := uint64(100) beaconState, privKeys := DeterministicGenesisState(t, validatorCount) - activeValidators, err := helpers.ActiveValidatorCount(context.Background(), beaconState, 0) + activeValidators, err := helpers.ActiveValidatorCount(t.Context(), beaconState, 0) require.NoError(t, err) // lint:ignore uintcast -- test code diff --git a/testing/util/helpers_test.go b/testing/util/helpers_test.go index 2949e34b8e..8573c83d32 100644 --- a/testing/util/helpers_test.go +++ b/testing/util/helpers_test.go @@ -2,7 +2,6 @@ package util import ( "bytes" - "context" "encoding/binary" "testing" @@ -23,7 +22,7 @@ func TestBlockSignature(t *testing.T) { require.NoError(t, err) require.NoError(t, beaconState.SetSlot(beaconState.Slot()+1)) - proposerIdx, err := helpers.BeaconProposerIndex(context.Background(), beaconState) + proposerIdx, err := 
helpers.BeaconProposerIndex(t.Context(), beaconState) assert.NoError(t, err) assert.NoError(t, beaconState.SetSlot(slots.PrevSlot(beaconState.Slot()))) @@ -46,7 +45,7 @@ func TestRandaoReveal(t *testing.T) { randaoReveal, err := RandaoReveal(beaconState, epoch, privKeys) assert.NoError(t, err) - proposerIdx, err := helpers.BeaconProposerIndex(context.Background(), beaconState) + proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), beaconState) assert.NoError(t, err) buf := make([]byte, fieldparams.RootLength) binary.LittleEndian.PutUint64(buf, uint64(epoch)) diff --git a/testing/util/state_test.go b/testing/util/state_test.go index e15617603d..86f5b27e77 100644 --- a/testing/util/state_test.go +++ b/testing/util/state_test.go @@ -1,7 +1,6 @@ package util import ( - "context" "testing" ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1" @@ -72,26 +71,26 @@ func TestNewBeaconStateElectra(t *testing.T) { func TestNewBeaconState_HashTreeRoot(t *testing.T) { st, err := NewBeaconState() require.NoError(t, err) - _, err = st.HashTreeRoot(context.Background()) + _, err = st.HashTreeRoot(t.Context()) require.NoError(t, err) st, err = NewBeaconStateAltair() require.NoError(t, err) - _, err = st.HashTreeRoot(context.Background()) + _, err = st.HashTreeRoot(t.Context()) require.NoError(t, err) st, err = NewBeaconStateBellatrix() require.NoError(t, err) - _, err = st.HashTreeRoot(context.Background()) + _, err = st.HashTreeRoot(t.Context()) require.NoError(t, err) st, err = NewBeaconStateCapella() require.NoError(t, err) - _, err = st.HashTreeRoot(context.Background()) + _, err = st.HashTreeRoot(t.Context()) require.NoError(t, err) st, err = NewBeaconStateDeneb() require.NoError(t, err) - _, err = st.HashTreeRoot(context.Background()) + _, err = st.HashTreeRoot(t.Context()) require.NoError(t, err) st, err = NewBeaconStateElectra() require.NoError(t, err) - _, err = st.HashTreeRoot(context.Background()) + _, err = st.HashTreeRoot(t.Context()) require.NoError(t, 
err) } diff --git a/time/slots/countdown_test.go b/time/slots/countdown_test.go index d6d20d18d8..cccb2c0c6a 100644 --- a/time/slots/countdown_test.go +++ b/time/slots/countdown_test.go @@ -27,7 +27,7 @@ func TestCountdownToGenesis(t *testing.T) { firstStringResult := "1s until chain genesis" genesisReached := "Chain genesis time reached" CountdownToGenesis( - context.Background(), + t.Context(), prysmTime.Now().Add(2*time.Second), params.BeaconConfig().MinGenesisActiveValidatorCount, [32]byte{}, @@ -38,7 +38,7 @@ func TestCountdownToGenesis(t *testing.T) { t.Run("close context", func(t *testing.T) { defer hook.Reset() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) go func() { time.AfterFunc(1500*time.Millisecond, func() { cancel() diff --git a/tools/interop/split-keys/main_test.go b/tools/interop/split-keys/main_test.go index 5ab86cd05b..e1bc657bec 100644 --- a/tools/interop/split-keys/main_test.go +++ b/tools/interop/split-keys/main_test.go @@ -2,7 +2,6 @@ package main import ( "bufio" - "context" "encoding/hex" "fmt" "path/filepath" @@ -69,7 +68,7 @@ func Test_spreadKeysAcrossImportedWallets(t *testing.T) { walletPassword, ) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() for i := 0; i < numWallets; i++ { w, err := wallet.OpenWallet(ctx, &wallet.Config{ WalletDir: filepath.Join(tmpDir, fmt.Sprintf("wallet_%d", i)), diff --git a/validator/accounts/accounts_delete_test.go b/validator/accounts/accounts_delete_test.go index d6acc54061..f68bc0f282 100644 --- a/validator/accounts/accounts_delete_test.go +++ b/validator/accounts/accounts_delete_test.go @@ -2,7 +2,6 @@ package accounts import ( "bytes" - "context" "testing" "github.com/OffchainLabs/prysm/v6/crypto/bls" @@ -16,7 +15,7 @@ import ( func TestDelete(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() // import keys numAccounts := 5 keystores := make([]*keymanager.Keystore, 
numAccounts) diff --git a/validator/accounts/accounts_import_test.go b/validator/accounts/accounts_import_test.go index 772a6d2814..8a38d6efef 100644 --- a/validator/accounts/accounts_import_test.go +++ b/validator/accounts/accounts_import_test.go @@ -1,7 +1,6 @@ package accounts import ( - "context" "encoding/json" "fmt" "os" @@ -46,7 +45,7 @@ func TestImportAccounts_NoPassword(t *testing.T) { require.NoError(t, err) importer, ok := km.(keymanager.Importer) require.Equal(t, true, ok) - resp, err := ImportAccounts(context.Background(), &ImportAccountsConfig{ + resp, err := ImportAccounts(t.Context(), &ImportAccountsConfig{ Keystores: []*keymanager.Keystore{{}}, Importer: importer, AccountPassword: "", diff --git a/validator/accounts/wallet/wallet_test.go b/validator/accounts/wallet/wallet_test.go index 592a621d46..8dd563a061 100644 --- a/validator/accounts/wallet/wallet_test.go +++ b/validator/accounts/wallet/wallet_test.go @@ -1,7 +1,6 @@ package wallet_test import ( - "context" "flag" "io" "os" @@ -66,7 +65,7 @@ func TestWallet_InitializeKeymanager_web3Signer_HappyPath(t *testing.T) { newDir := filepath.Join(t.TempDir(), "new") set.String(flags.WalletDirFlag.Name, newDir, "") w := wallet.NewWalletForWeb3Signer(cli.NewContext(&app, set, nil)) - ctx := context.Background() + ctx := t.Context() root, err := hexutil.Decode("0x270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69") require.NoError(t, err) config := iface.InitKeymanagerConfig{ @@ -87,7 +86,7 @@ func TestWallet_InitializeKeymanager_web3Signer_nilConfig(t *testing.T) { newDir := filepath.Join(t.TempDir(), "new") set.String(flags.WalletDirFlag.Name, newDir, "") w := wallet.NewWalletForWeb3Signer(cli.NewContext(&app, set, nil)) - ctx := context.Background() + ctx := t.Context() config := iface.InitKeymanagerConfig{ ListenForChanges: false, Web3SignerConfig: nil, diff --git a/validator/client/aggregate_test.go b/validator/client/aggregate_test.go index b7cfd0933a..42649b99f7 100644 --- 
a/validator/client/aggregate_test.go +++ b/validator/client/aggregate_test.go @@ -32,7 +32,7 @@ func TestSubmitAggregateAndProof_GetDutiesRequestFailure(t *testing.T) { var pubKey [fieldparams.BLSPubkeyLength]byte copy(pubKey[:], validatorKey.PublicKey().Marshal()) - validator.SubmitAggregateAndProof(context.Background(), 0, pubKey) + validator.SubmitAggregateAndProof(t.Context(), 0, pubKey) require.LogsContain(t, hook, "Could not fetch validator assignment") }) @@ -79,7 +79,7 @@ func TestSubmitAggregateAndProof_SignFails(t *testing.T) { gomock.Any(), // epoch ).Return(ðpb.DomainResponse{SignatureDomain: nil}, errors.New("bad domain root")) - validator.SubmitAggregateAndProof(context.Background(), 0, pubKey) + validator.SubmitAggregateAndProof(t.Context(), 0, pubKey) }) } } @@ -129,7 +129,7 @@ func TestSubmitAggregateAndProof_Ok(t *testing.T) { gomock.AssignableToTypeOf(ðpb.SignedAggregateSubmitRequest{}), ).Return(ðpb.SignedAggregateSubmitResponse{AttestationDataRoot: make([]byte, 32)}, nil) - validator.SubmitAggregateAndProof(context.Background(), 0, pubKey) + validator.SubmitAggregateAndProof(t.Context(), 0, pubKey) }) } for _, isSlashingProtectionMinimal := range [...]bool{false, true} { @@ -182,7 +182,7 @@ func TestSubmitAggregateAndProof_Ok(t *testing.T) { gomock.AssignableToTypeOf(ðpb.SignedAggregateSubmitElectraRequest{}), ).Return(ðpb.SignedAggregateSubmitResponse{AttestationDataRoot: make([]byte, 32)}, nil) - validator.SubmitAggregateAndProof(context.Background(), params.BeaconConfig().SlotsPerEpoch.Mul(electraForkEpoch), pubKey) + validator.SubmitAggregateAndProof(t.Context(), params.BeaconConfig().SlotsPerEpoch.Mul(electraForkEpoch), pubKey) }) } } @@ -190,7 +190,7 @@ func TestSubmitAggregateAndProof_Ok(t *testing.T) { func TestSubmitAggregateAndProof_Distributed(t *testing.T) { validatorIdx := primitives.ValidatorIndex(123) slot := primitives.Slot(456) - ctx := context.Background() + ctx := t.Context() for _, isSlashingProtectionMinimal := range 
[...]bool{false, true} { t.Run(fmt.Sprintf("SlashingProtectionMinimal:%v", isSlashingProtectionMinimal), func(t *testing.T) { validator, m, validatorKey, finish := setup(t, isSlashingProtectionMinimal) @@ -261,7 +261,7 @@ func TestWaitForSlotTwoThird_WaitCorrectly(t *testing.T) { timeToSleep := oneThird + oneThird twoThirdTime := currentTime.Add(timeToSleep) - validator.waitToSlotTwoThirds(context.Background(), numOfSlots) + validator.waitToSlotTwoThirds(t.Context(), numOfSlots) currentTime = time.Now() assert.Equal(t, twoThirdTime.Unix(), currentTime.Unix()) }) @@ -278,7 +278,7 @@ func TestWaitForSlotTwoThird_DoneContext_ReturnsImmediately(t *testing.T) { validator.genesisTime = uint64(currentTime.Unix()) - uint64(numOfSlots.Mul(params.BeaconConfig().SecondsPerSlot)) expectedTime := time.Now() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) cancel() validator.waitToSlotTwoThirds(ctx, numOfSlots) currentTime = time.Now() @@ -307,7 +307,7 @@ func TestAggregateAndProofSignature_CanSignValidSignature(t *testing.T) { }), SelectionProof: make([]byte, 96), } - sig, err := validator.aggregateAndProofSig(context.Background(), pubKey, agg, 0 /* slot */) + sig, err := validator.aggregateAndProofSig(t.Context(), pubKey, agg, 0 /* slot */) require.NoError(t, err) _, err = bls.SignatureFromBytes(sig) require.NoError(t, err) @@ -338,7 +338,7 @@ func TestAggregateAndProofSignature_CanSignValidSignature(t *testing.T) { }), SelectionProof: make([]byte, 96), } - sig, err := validator.aggregateAndProofSig(context.Background(), pubKey, agg, params.BeaconConfig().SlotsPerEpoch.Mul(electraForkEpoch) /* slot */) + sig, err := validator.aggregateAndProofSig(t.Context(), pubKey, agg, params.BeaconConfig().SlotsPerEpoch.Mul(electraForkEpoch) /* slot */) require.NoError(t, err) _, err = bls.SignatureFromBytes(sig) require.NoError(t, err) diff --git a/validator/client/attest_test.go b/validator/client/attest_test.go index 
5b4808d324..d9f45c45e9 100644 --- a/validator/client/attest_test.go +++ b/validator/client/attest_test.go @@ -39,7 +39,7 @@ func TestRequestAttestation_ValidatorDutiesRequestFailure(t *testing.T) { var pubKey [fieldparams.BLSPubkeyLength]byte copy(pubKey[:], validatorKey.PublicKey().Marshal()) - validator.SubmitAttestation(context.Background(), 30, pubKey) + validator.SubmitAttestation(t.Context(), 30, pubKey) require.LogsContain(t, hook, "Could not fetch validator assignment") }) } @@ -60,7 +60,7 @@ func TestAttestToBlockHead_SubmitAttestation_EmptyCommittee(t *testing.T) { CommitteeIndex: 0, ValidatorIndex: 0, }}} - validator.SubmitAttestation(context.Background(), 0, pubKey) + validator.SubmitAttestation(t.Context(), 0, pubKey) require.LogsContain(t, hook, "Empty committee") }) } @@ -99,7 +99,7 @@ func TestAttestToBlockHead_SubmitAttestation_RequestFailure(t *testing.T) { var pubKey [fieldparams.BLSPubkeyLength]byte copy(pubKey[:], validatorKey.PublicKey().Marshal()) - validator.SubmitAttestation(context.Background(), 30, pubKey) + validator.SubmitAttestation(t.Context(), 30, pubKey) require.LogsContain(t, hook, "Could not submit attestation to beacon node") }) } @@ -150,7 +150,7 @@ func TestAttestToBlockHead_AttestsCorrectly(t *testing.T) { generatedAttestation = att }).Return(ðpb.AttestResponse{}, nil /* error */) - validator.SubmitAttestation(context.Background(), 30, pubKey) + validator.SubmitAttestation(t.Context(), 30, pubKey) aggregationBitfield := bitfield.NewBitlist(uint64(len(committee))) aggregationBitfield.SetBitAt(4, true) @@ -167,7 +167,7 @@ func TestAttestToBlockHead_AttestsCorrectly(t *testing.T) { root, err := signing.ComputeSigningRoot(expectedAttestation.Data, make([]byte, 32)) require.NoError(t, err) - sig, err := validator.km.Sign(context.Background(), &validatorpb.SignRequest{ + sig, err := validator.km.Sign(t.Context(), &validatorpb.SignRequest{ PublicKey: validatorKey.PublicKey().Marshal(), SigningRoot: root[:], }) @@ -230,7 +230,7 @@ 
func TestAttestToBlockHead_AttestsCorrectly(t *testing.T) { generatedAttestation = att }).Return(ðpb.AttestResponse{}, nil /* error */) - validator.SubmitAttestation(context.Background(), params.BeaconConfig().SlotsPerEpoch.Mul(electraForkEpoch), pubKey) + validator.SubmitAttestation(t.Context(), params.BeaconConfig().SlotsPerEpoch.Mul(electraForkEpoch), pubKey) aggregationBitfield := bitfield.NewBitlist(uint64(len(committee))) aggregationBitfield.SetBitAt(4, true) @@ -250,7 +250,7 @@ func TestAttestToBlockHead_AttestsCorrectly(t *testing.T) { root, err := signing.ComputeSigningRoot(expectedAttestation.Data, make([]byte, 32)) require.NoError(t, err) - sig, err := validator.km.Sign(context.Background(), &validatorpb.SignRequest{ + sig, err := validator.km.Sign(t.Context(), &validatorpb.SignRequest{ PublicKey: validatorKey.PublicKey().Marshal(), SigningRoot: root[:], }) @@ -315,8 +315,8 @@ func TestAttestToBlockHead_BlocksDoubleAtt(t *testing.T) { gomock.AssignableToTypeOf(ðpb.Attestation{}), ).Return(ðpb.AttestResponse{AttestationDataRoot: make([]byte, 32)}, nil /* error */) - validator.SubmitAttestation(context.Background(), 30, pubKey) - validator.SubmitAttestation(context.Background(), 30, pubKey) + validator.SubmitAttestation(t.Context(), 30, pubKey) + validator.SubmitAttestation(t.Context(), 30, pubKey) require.LogsContain(t, hook, "Failed attestation slashing protection") }) } @@ -371,8 +371,8 @@ func TestAttestToBlockHead_BlocksSurroundAtt(t *testing.T) { gomock.AssignableToTypeOf(ðpb.Attestation{}), ).Return(ðpb.AttestResponse{}, nil /* error */) - validator.SubmitAttestation(context.Background(), 30, pubKey) - validator.SubmitAttestation(context.Background(), 30, pubKey) + validator.SubmitAttestation(t.Context(), 30, pubKey) + validator.SubmitAttestation(t.Context(), 30, pubKey) require.LogsContain(t, hook, "Failed attestation slashing protection") }) } @@ -419,7 +419,7 @@ func TestAttestToBlockHead_BlocksSurroundedAtt(t *testing.T) { 
gomock.AssignableToTypeOf(ðpb.Attestation{}), ).Return(ðpb.AttestResponse{}, nil /* error */) - validator.SubmitAttestation(context.Background(), 30, pubKey) + validator.SubmitAttestation(t.Context(), 30, pubKey) require.LogsDoNotContain(t, hook, failedAttLocalProtectionErr) m.validatorClient.EXPECT().AttestationData( @@ -431,7 +431,7 @@ func TestAttestToBlockHead_BlocksSurroundedAtt(t *testing.T) { Source: ðpb.Checkpoint{Root: bytesutil.PadTo([]byte("C"), 32), Epoch: 1}, }, nil) - validator.SubmitAttestation(context.Background(), 30, pubKey) + validator.SubmitAttestation(t.Context(), 30, pubKey) require.LogsContain(t, hook, "Failed attestation slashing protection") }) } @@ -462,7 +462,7 @@ func TestAttestToBlockHead_DoesNotAttestBeforeDelay(t *testing.T) { ).Return(ðpb.AttestResponse{}, nil /* error */).Times(0) timer := time.NewTimer(1 * time.Second) - go validator.SubmitAttestation(context.Background(), 0, pubKey) + go validator.SubmitAttestation(t.Context(), 0, pubKey) <-timer.C }) } @@ -512,7 +512,7 @@ func TestAttestToBlockHead_DoesAttestAfterDelay(t *testing.T) { gomock.Any(), ).Return(ðpb.AttestResponse{}, nil).Times(1) - validator.SubmitAttestation(context.Background(), 0, pubKey) + validator.SubmitAttestation(t.Context(), 0, pubKey) }) } } @@ -555,7 +555,7 @@ func TestAttestToBlockHead_CorrectBitfieldLength(t *testing.T) { generatedAttestation = att }).Return(ðpb.AttestResponse{}, nil /* error */) - validator.SubmitAttestation(context.Background(), 30, pubKey) + validator.SubmitAttestation(t.Context(), 30, pubKey) assert.Equal(t, 2, len(generatedAttestation.AggregationBits)) }) @@ -579,7 +579,7 @@ func TestSignAttestation(t *testing.T) { m.validatorClient.EXPECT(). DomainData(gomock.Any(), gomock.Any()). 
Return(ðpb.DomainResponse{SignatureDomain: attesterDomain}, nil) - ctx := context.Background() + ctx := t.Context() att := util.NewAttestation() att.Data.Source.Epoch = 100 att.Data.Target.Epoch = 200 @@ -614,7 +614,7 @@ func TestServer_WaitToSlotOneThird_CanWait(t *testing.T) { timeToSleep := params.BeaconConfig().SecondsPerSlot / 3 oneThird := currentTime + timeToSleep - v.waitOneThirdOrValidBlock(context.Background(), currentSlot) + v.waitOneThirdOrValidBlock(t.Context(), currentSlot) if oneThird != uint64(time.Now().Unix()) { t.Errorf("Wanted %d time for slot one third but got %d", oneThird, currentTime) @@ -632,7 +632,7 @@ func TestServer_WaitToSlotOneThird_SameReqSlot(t *testing.T) { highestValidSlot: currentSlot, } - v.waitOneThirdOrValidBlock(context.Background(), currentSlot) + v.waitOneThirdOrValidBlock(t.Context(), currentSlot) if currentTime != uint64(time.Now().Unix()) { t.Errorf("Wanted %d time for slot one third but got %d", uint64(time.Now().Unix()), currentTime) @@ -660,7 +660,7 @@ func TestServer_WaitToSlotOneThird_ReceiveBlockSlot(t *testing.T) { wg.Done() }() - v.waitOneThirdOrValidBlock(context.Background(), currentSlot) + v.waitOneThirdOrValidBlock(t.Context(), currentSlot) if currentTime != uint64(time.Now().Unix()) { t.Errorf("Wanted %d time for slot one third but got %d", uint64(time.Now().Unix()), currentTime) @@ -691,7 +691,7 @@ func Test_slashableAttestationCheck(t *testing.T) { }, } - err := validator.db.SlashableAttestationCheck(context.Background(), att, pubKey, [32]byte{1}, false, nil) + err := validator.db.SlashableAttestationCheck(t.Context(), att, pubKey, [32]byte{1}, false, nil) require.NoError(t, err, "Expected allowed attestation not to throw error") }) } @@ -702,7 +702,7 @@ func Test_slashableAttestationCheck_UpdatesLowestSignedEpochs(t *testing.T) { t.Run(fmt.Sprintf("SlashingProtectionMinimal:%v", isSlashingProtectionMinimal), func(t *testing.T) { validator, m, validatorKey, finish := setup(t, isSlashingProtectionMinimal) 
defer finish() - ctx := context.Background() + ctx := t.Context() var pubKey [fieldparams.BLSPubkeyLength]byte copy(pubKey[:], validatorKey.PublicKey().Marshal()) att := ðpb.IndexedAttestation{ @@ -729,18 +729,18 @@ func Test_slashableAttestationCheck_UpdatesLowestSignedEpochs(t *testing.T) { _, sr, err := validator.domainAndSigningRoot(ctx, att.Data) require.NoError(t, err) - err = validator.db.SlashableAttestationCheck(context.Background(), att, pubKey, sr, false, nil) + err = validator.db.SlashableAttestationCheck(t.Context(), att, pubKey, sr, false, nil) require.NoError(t, err) differentSigningRoot := [32]byte{2} - err = validator.db.SlashableAttestationCheck(context.Background(), att, pubKey, differentSigningRoot, false, nil) + err = validator.db.SlashableAttestationCheck(t.Context(), att, pubKey, differentSigningRoot, false, nil) require.ErrorContains(t, "could not sign attestation", err) - e, exists, err := validator.db.LowestSignedSourceEpoch(context.Background(), pubKey) + e, exists, err := validator.db.LowestSignedSourceEpoch(t.Context(), pubKey) require.NoError(t, err) require.Equal(t, true, exists) require.Equal(t, primitives.Epoch(4), e) - e, exists, err = validator.db.LowestSignedTargetEpoch(context.Background(), pubKey) + e, exists, err = validator.db.LowestSignedTargetEpoch(t.Context(), pubKey) require.NoError(t, err) require.Equal(t, true, exists) require.Equal(t, primitives.Epoch(10), e) @@ -751,7 +751,7 @@ func Test_slashableAttestationCheck_UpdatesLowestSignedEpochs(t *testing.T) { func Test_slashableAttestationCheck_OK(t *testing.T) { for _, isSlashingProtectionMinimal := range [...]bool{false, true} { t.Run(fmt.Sprintf("SlashingProtectionMinimal:%v", isSlashingProtectionMinimal), func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() validator, _, _, finish := setup(t, isSlashingProtectionMinimal) defer finish() att := ðpb.IndexedAttestation{ @@ -782,7 +782,7 @@ func Test_slashableAttestationCheck_OK(t *testing.T) { func 
Test_slashableAttestationCheck_GenesisEpoch(t *testing.T) { for _, isSlashingProtectionMinimal := range [...]bool{false, true} { t.Run(fmt.Sprintf("SlashingProtectionMinimal:%v", isSlashingProtectionMinimal), func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() validator, _, _, finish := setup(t, isSlashingProtectionMinimal) defer finish() att := ðpb.IndexedAttestation{ @@ -805,11 +805,11 @@ func Test_slashableAttestationCheck_GenesisEpoch(t *testing.T) { fakePubkey := bytesutil.ToBytes48([]byte("test")) err := validator.db.SlashableAttestationCheck(ctx, att, fakePubkey, [32]byte{}, false, nil) require.NoError(t, err, "Expected allowed attestation not to throw error") - e, exists, err := validator.db.LowestSignedSourceEpoch(context.Background(), fakePubkey) + e, exists, err := validator.db.LowestSignedSourceEpoch(t.Context(), fakePubkey) require.NoError(t, err) require.Equal(t, true, exists) require.Equal(t, primitives.Epoch(0), e) - e, exists, err = validator.db.LowestSignedTargetEpoch(context.Background(), fakePubkey) + e, exists, err = validator.db.LowestSignedTargetEpoch(t.Context(), fakePubkey) require.NoError(t, err) require.Equal(t, true, exists) require.Equal(t, primitives.Epoch(0), e) diff --git a/validator/client/beacon-api/attestation_data_test.go b/validator/client/beacon-api/attestation_data_test.go index 0a77295a37..7e424dbac6 100644 --- a/validator/client/beacon-api/attestation_data_test.go +++ b/validator/client/beacon-api/attestation_data_test.go @@ -1,7 +1,6 @@ package beacon_api import ( - "context" "errors" "fmt" "strconv" @@ -17,7 +16,7 @@ import ( ) func TestGetAttestationData_ValidAttestation(t *testing.T) { - ctx := context.Background() + ctx := t.Context() expectedSlot := uint64(5) expectedCommitteeIndex := uint64(6) expectedBeaconBlockRoot := "0x0636045df9bdda3ab96592cf5389032c8ec3977f911e2b53509b348dfe164d4d" @@ -76,7 +75,7 @@ func TestGetAttestationData_ValidAttestation(t *testing.T) { } func 
TestGetAttestationData_InvalidData(t *testing.T) { - ctx := context.Background() + ctx := t.Context() testCases := []struct { name string @@ -204,7 +203,7 @@ func TestGetAttestationData_JsonResponseError(t *testing.T) { const slot = primitives.Slot(1) const committeeIndex = primitives.CommitteeIndex(2) - ctx := context.Background() + ctx := t.Context() ctrl := gomock.NewController(t) defer ctrl.Finish() diff --git a/validator/client/beacon-api/beacon_api_beacon_chain_client_test.go b/validator/client/beacon-api/beacon_api_beacon_chain_client_test.go index b08ebc0382..98d60e659a 100644 --- a/validator/client/beacon-api/beacon_api_beacon_chain_client_test.go +++ b/validator/client/beacon-api/beacon_api_beacon_chain_client_test.go @@ -1,7 +1,6 @@ package beacon_api import ( - "context" "errors" "fmt" "math" @@ -26,7 +25,7 @@ func TestListValidators(t *testing.T) { t.Run("invalid token", func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() beaconChainClient := beaconApiChainClient{} _, err := beaconChainClient.Validators(ctx, ðpb.ListValidatorsRequest{ @@ -38,7 +37,7 @@ func TestListValidators(t *testing.T) { t.Run("query filter epoch overflow", func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() beaconChainClient := beaconApiChainClient{} _, err := beaconChainClient.Validators(ctx, ðpb.ListValidatorsRequest{ @@ -52,7 +51,7 @@ func TestListValidators(t *testing.T) { t.Run("fails to get validators for epoch filter", func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl) stateValidatorsProvider.EXPECT().StateValidatorsForSlot(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return( @@ -72,7 +71,7 @@ func TestListValidators(t *testing.T) { t.Run("fails to get validators for 
genesis filter", func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl) stateValidatorsProvider.EXPECT().StateValidatorsForSlot(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return( @@ -90,7 +89,7 @@ func TestListValidators(t *testing.T) { t.Run("fails to get validators for nil filter", func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl) stateValidatorsProvider.EXPECT().StateValidatorsForHead(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return( @@ -108,7 +107,7 @@ func TestListValidators(t *testing.T) { t.Run("fails to get latest block header for nil filter", func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl) stateValidatorsProvider.EXPECT().StateValidatorsForHead(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return( @@ -181,7 +180,7 @@ func TestListValidators(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl) stateValidatorsProvider.EXPECT().StateValidatorsForHead(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return( @@ -322,7 +321,7 @@ func TestListValidators(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl) stateValidatorsProvider.EXPECT().StateValidatorsForSlot(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return( @@ 
-550,7 +549,7 @@ func TestListValidators(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl) stateValidatorsProvider.EXPECT().StateValidatorsForSlot(gomock.Any(), primitives.Slot(0), make([]string, 0), []primitives.ValidatorIndex{}, nil).Return( @@ -738,7 +737,7 @@ func TestGetChainHead(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() finalityCheckpointsResponse := structs.GetFinalityCheckpointsResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) @@ -836,7 +835,7 @@ func TestGetChainHead(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) @@ -866,7 +865,7 @@ func TestGetChainHead(t *testing.T) { t.Run("returns a valid chain head", func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) diff --git a/validator/client/beacon-api/beacon_api_helpers_test.go b/validator/client/beacon-api/beacon_api_helpers_test.go index 83d4c3bb07..25b6f94b33 100644 --- a/validator/client/beacon-api/beacon_api_helpers_test.go +++ b/validator/client/beacon-api/beacon_api_helpers_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "errors" "testing" @@ -31,7 +30,7 @@ func TestGetFork_Nominal(t *testing.T) { }, } - ctx := context.Background() + ctx := t.Context() jsonRestHandler.EXPECT().Get( gomock.Any(), @@ -59,7 +58,7 @@ func TestGetFork_Invalid(t *testing.T) { jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) - ctx := context.Background() + ctx := t.Context() 
jsonRestHandler.EXPECT().Get( gomock.Any(), @@ -98,7 +97,7 @@ func TestGetHeaders_Nominal(t *testing.T) { }, } - ctx := context.Background() + ctx := t.Context() jsonRestHandler.EXPECT().Get( gomock.Any(), @@ -126,7 +125,7 @@ func TestGetHeaders_Invalid(t *testing.T) { jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) - ctx := context.Background() + ctx := t.Context() jsonRestHandler.EXPECT().Get( gomock.Any(), @@ -169,7 +168,7 @@ func TestGetLiveness_Nominal(t *testing.T) { }, } - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( @@ -196,7 +195,7 @@ func TestGetLiveness_Invalid(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( @@ -246,7 +245,7 @@ func TestGetIsSyncing_Nominal(t *testing.T) { }, } - ctx := context.Background() + ctx := t.Context() jsonRestHandler.EXPECT().Get( gomock.Any(), @@ -277,7 +276,7 @@ func TestGetIsSyncing_Invalid(t *testing.T) { syncingResponseJson := structs.SyncStatusResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) - ctx := context.Background() + ctx := t.Context() jsonRestHandler.EXPECT().Get( gomock.Any(), diff --git a/validator/client/beacon-api/beacon_api_node_client_test.go b/validator/client/beacon-api/beacon_api_node_client_test.go index 1263ede634..f998016bf5 100644 --- a/validator/client/beacon-api/beacon_api_node_client_test.go +++ b/validator/client/beacon-api/beacon_api_node_client_test.go @@ -1,7 +1,6 @@ package beacon_api import ( - "context" "errors" "testing" @@ -109,7 +108,7 @@ func TestGetGenesis(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() genesisProvider := mock.NewMockGenesisProvider(ctrl) genesisProvider.EXPECT().Genesis( @@ -198,7 +197,7 @@ func 
TestGetSyncStatus(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() syncingResponse := structs.SyncStatusResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) @@ -262,7 +261,7 @@ func TestGetVersion(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() var versionResponse structs.GetVersionResponse jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) diff --git a/validator/client/beacon-api/beacon_api_validator_client_test.go b/validator/client/beacon-api/beacon_api_validator_client_test.go index 76a27b2d5a..77d83fa7ad 100644 --- a/validator/client/beacon-api/beacon_api_validator_client_test.go +++ b/validator/client/beacon-api/beacon_api_validator_client_test.go @@ -1,7 +1,6 @@ package beacon_api import ( - "context" "errors" "fmt" "testing" @@ -26,7 +25,7 @@ func TestBeaconApiValidatorClient_GetAttestationDataValid(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) produceAttestationDataResponseJson := structs.GetAttestationDataResponse{} @@ -45,7 +44,7 @@ func TestBeaconApiValidatorClient_GetAttestationDataValid(t *testing.T) { expectedResp, expectedErr := validatorClient.attestationData(ctx, slot, committeeIndex) resp, err := validatorClient.AttestationData( - context.Background(), + t.Context(), ðpb.AttestationDataRequest{Slot: slot, CommitteeIndex: committeeIndex}, ) @@ -60,7 +59,7 @@ func TestBeaconApiValidatorClient_GetAttestationDataError(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) produceAttestationDataResponseJson := structs.GetAttestationDataResponse{} @@ -79,7 +78,7 @@ func 
TestBeaconApiValidatorClient_GetAttestationDataError(t *testing.T) { expectedResp, expectedErr := validatorClient.attestationData(ctx, slot, committeeIndex) resp, err := validatorClient.AttestationData( - context.Background(), + t.Context(), ðpb.AttestationDataRequest{Slot: slot, CommitteeIndex: committeeIndex}, ) @@ -88,7 +87,7 @@ func TestBeaconApiValidatorClient_GetAttestationDataError(t *testing.T) { } func TestBeaconApiValidatorClient_GetFeeRecipientByPubKey(t *testing.T) { - ctx := context.Background() + ctx := t.Context() validatorClient := beaconApiValidatorClient{} var expected *ethpb.FeeRecipientByPubKeyResponse = nil @@ -105,7 +104,7 @@ func TestBeaconApiValidatorClient_DomainDataValid(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() genesisProvider := mock.NewMockGenesisProvider(ctrl) genesisProvider.EXPECT().Genesis(gomock.Any()).Return( @@ -114,7 +113,7 @@ func TestBeaconApiValidatorClient_DomainDataValid(t *testing.T) { ).Times(2) validatorClient := beaconApiValidatorClient{genesisProvider: genesisProvider} - resp, err := validatorClient.DomainData(context.Background(), ðpb.DomainRequest{Epoch: epoch, Domain: domainType}) + resp, err := validatorClient.DomainData(t.Context(), ðpb.DomainRequest{Epoch: epoch, Domain: domainType}) domainTypeArray := bytesutil.ToBytes4(domainType) expectedResp, expectedErr := validatorClient.domainData(ctx, epoch, domainTypeArray) @@ -126,7 +125,7 @@ func TestBeaconApiValidatorClient_DomainDataError(t *testing.T) { epoch := params.BeaconConfig().AltairForkEpoch domainType := make([]byte, 3) validatorClient := beaconApiValidatorClient{} - _, err := validatorClient.DomainData(context.Background(), ðpb.DomainRequest{Epoch: epoch, Domain: domainType}) + _, err := validatorClient.DomainData(t.Context(), ðpb.DomainRequest{Epoch: epoch, Domain: domainType}) assert.ErrorContains(t, fmt.Sprintf("invalid domain type: %s", hexutil.Encode(domainType)), err) } @@ 
-134,7 +133,7 @@ func TestBeaconApiValidatorClient_ProposeBeaconBlockValid(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( @@ -170,7 +169,7 @@ func TestBeaconApiValidatorClient_ProposeBeaconBlockError(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( diff --git a/validator/client/beacon-api/beacon_committee_selections_test.go b/validator/client/beacon-api/beacon_committee_selections_test.go index a5a244299f..91c96cac87 100644 --- a/validator/client/beacon-api/beacon_committee_selections_test.go +++ b/validator/client/beacon-api/beacon_committee_selections_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -95,7 +94,7 @@ func TestGetAggregatedSelections(t *testing.T) { reqBody, err := json.Marshal(test.req) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() jsonRestHandler.EXPECT().Post( gomock.Any(), "/eth/v1/validator/beacon_committee_selections", diff --git a/validator/client/beacon-api/domain_data_test.go b/validator/client/beacon-api/domain_data_test.go index 5b4718d903..ee65efe24f 100644 --- a/validator/client/beacon-api/domain_data_test.go +++ b/validator/client/beacon-api/domain_data_test.go @@ -1,7 +1,6 @@ package beacon_api import ( - "context" "errors" "testing" @@ -33,7 +32,7 @@ func TestGetDomainData_ValidDomainData(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() // Make sure that Genesis() is called exactly once genesisProvider := mock.NewMockGenesisProvider(ctrl) @@ -62,7 +61,7 @@ func TestGetDomainData_GenesisError(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := 
context.Background() + ctx := t.Context() // Make sure that Genesis() is called exactly once genesisProvider := mock.NewMockGenesisProvider(ctrl) @@ -81,7 +80,7 @@ func TestGetDomainData_InvalidGenesisRoot(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() // Make sure that Genesis() is called exactly once genesisProvider := mock.NewMockGenesisProvider(ctrl) diff --git a/validator/client/beacon-api/doppelganger_test.go b/validator/client/beacon-api/doppelganger_test.go index 2288951240..09c286ae90 100644 --- a/validator/client/beacon-api/doppelganger_test.go +++ b/validator/client/beacon-api/doppelganger_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -378,7 +377,7 @@ func TestCheckDoppelGanger_Nominal(t *testing.T) { } doppelGangerActualOutput, err := validatorClient.CheckDoppelGanger( - context.Background(), + t.Context(), testCase.doppelGangerInput, ) @@ -812,7 +811,7 @@ func TestCheckDoppelGanger_Errors(t *testing.T) { } _, err := validatorClient.CheckDoppelGanger( - context.Background(), + t.Context(), ðpb.DoppelGangerRequest{ ValidatorRequests: testCase.inputValidatorRequests, }, diff --git a/validator/client/beacon-api/duties_test.go b/validator/client/beacon-api/duties_test.go index 37657a0ea2..0e07168dca 100644 --- a/validator/client/beacon-api/duties_test.go +++ b/validator/client/beacon-api/duties_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "errors" "fmt" @@ -58,7 +57,7 @@ func TestGetAttesterDuties_Valid(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() validatorIndices := []primitives.ValidatorIndex{2, 9} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) @@ -87,7 +86,7 @@ func TestGetAttesterDuties_HttpError(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := 
t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( @@ -111,7 +110,7 @@ func TestGetAttesterDuties_NilAttesterDuty(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( @@ -155,7 +154,7 @@ func TestGetProposerDuties_Valid(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -181,7 +180,7 @@ func TestGetProposerDuties_HttpError(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -203,7 +202,7 @@ func TestGetProposerDuties_NilData(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -230,7 +229,7 @@ func TestGetProposerDuties_NilProposerDuty(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -282,7 +281,7 @@ func TestGetSyncDuties_Valid(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() validatorIndices := []primitives.ValidatorIndex{2, 6} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) @@ -311,7 +310,7 @@ func TestGetSyncDuties_HttpError(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( @@ -335,7 +334,7 @@ func TestGetSyncDuties_NilData(t *testing.T) { 
ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( @@ -364,7 +363,7 @@ func TestGetSyncDuties_NilSyncDuty(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( @@ -414,7 +413,7 @@ func TestGetCommittees_Valid(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -440,7 +439,7 @@ func TestGetCommittees_HttpError(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -462,7 +461,7 @@ func TestGetCommittees_NilData(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -489,7 +488,7 @@ func TestGetCommittees_NilCommittee(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -604,7 +603,7 @@ func TestGetDutiesForEpoch_Error(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() var attesterDuties []*structs.AttesterDuty if testCase.generateAttesterDuties == nil { @@ -705,7 +704,7 @@ func TestGetDutiesForEpoch_Valid(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() dutiesProvider := mock.NewMockdutiesProvider(ctrl) @@ -951,7 +950,7 @@ func TestGetDuties_Valid(t 
*testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() dutiesProvider := mock.NewMockdutiesProvider(ctrl) @@ -1183,7 +1182,7 @@ func TestGetDuties_GetStateValidatorsFailed(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl) stateValidatorsProvider.EXPECT().StateValidators( @@ -1212,7 +1211,7 @@ func TestGetDuties_GetDutiesForEpochFailed(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() pubkey := []byte{1, 2, 3} stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl) diff --git a/validator/client/beacon-api/genesis_test.go b/validator/client/beacon-api/genesis_test.go index 1c7d1b1af4..c93a432212 100644 --- a/validator/client/beacon-api/genesis_test.go +++ b/validator/client/beacon-api/genesis_test.go @@ -1,7 +1,6 @@ package beacon_api import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/api/server/structs" @@ -16,7 +15,7 @@ func TestGetGenesis_ValidGenesis(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() genesisResponseJson := structs.GetGenesisResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) @@ -48,7 +47,7 @@ func TestGetGenesis_NilData(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() genesisResponseJson := structs.GetGenesisResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) @@ -72,7 +71,7 @@ func TestGetGenesis_EndpointCalledOnlyOnce(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() genesisResponseJson := structs.GetGenesisResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) @@ -106,7 +105,7 @@ func 
TestGetGenesis_EndpointCanBeCalledAgainAfterError(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() genesisResponseJson := structs.GetGenesisResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) diff --git a/validator/client/beacon-api/get_beacon_block_test.go b/validator/client/beacon-api/get_beacon_block_test.go index 0c3c8514cf..e038235dbb 100644 --- a/validator/client/beacon-api/get_beacon_block_test.go +++ b/validator/client/beacon-api/get_beacon_block_test.go @@ -1,7 +1,6 @@ package beacon_api import ( - "context" "encoding/json" "errors" "fmt" @@ -22,7 +21,7 @@ func TestGetBeaconBlock_RequestFailed(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -119,7 +118,7 @@ func TestGetBeaconBlock_Error(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -155,7 +154,7 @@ func TestGetBeaconBlock_Phase0Valid(t *testing.T) { const slot = primitives.Slot(1) randaoReveal := []byte{2} graffiti := []byte{3} - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -198,7 +197,7 @@ func TestGetBeaconBlock_AltairValid(t *testing.T) { randaoReveal := []byte{2} graffiti := []byte{3} - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -241,7 +240,7 @@ func TestGetBeaconBlock_BellatrixValid(t *testing.T) { randaoReveal := []byte{2} graffiti := []byte{3} - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -286,7 +285,7 @@ func 
TestGetBeaconBlock_BlindedBellatrixValid(t *testing.T) { randaoReveal := []byte{2} graffiti := []byte{3} - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -331,7 +330,7 @@ func TestGetBeaconBlock_CapellaValid(t *testing.T) { randaoReveal := []byte{2} graffiti := []byte{3} - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -376,7 +375,7 @@ func TestGetBeaconBlock_BlindedCapellaValid(t *testing.T) { randaoReveal := []byte{2} graffiti := []byte{3} - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -421,7 +420,7 @@ func TestGetBeaconBlock_DenebValid(t *testing.T) { randaoReveal := []byte{2} graffiti := []byte{3} - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -466,7 +465,7 @@ func TestGetBeaconBlock_BlindedDenebValid(t *testing.T) { randaoReveal := []byte{2} graffiti := []byte{3} - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -511,7 +510,7 @@ func TestGetBeaconBlock_ElectraValid(t *testing.T) { randaoReveal := []byte{2} graffiti := []byte{3} - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -556,7 +555,7 @@ func TestGetBeaconBlock_BlindedElectraValid(t *testing.T) { randaoReveal := []byte{2} graffiti := []byte{3} - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( diff --git a/validator/client/beacon-api/index_test.go b/validator/client/beacon-api/index_test.go index b3756d455b..4f6e905bbf 100644 --- a/validator/client/beacon-api/index_test.go +++ 
b/validator/client/beacon-api/index_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "net/url" "testing" @@ -39,7 +38,7 @@ func TestIndex_Nominal(t *testing.T) { defer ctrl.Finish() pubKey, reqBuffer := getPubKeyAndReqBuffer(t) - ctx := context.Background() + ctx := t.Context() stateValidatorsResponseJson := structs.GetValidatorsResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) @@ -89,7 +88,7 @@ func TestIndex_UnexistingValidator(t *testing.T) { defer ctrl.Finish() pubKey, reqBuffer := getPubKeyAndReqBuffer(t) - ctx := context.Background() + ctx := t.Context() stateValidatorsResponseJson := structs.GetValidatorsResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) @@ -131,7 +130,7 @@ func TestIndex_BadIndexError(t *testing.T) { defer ctrl.Finish() pubKey, reqBuffer := getPubKeyAndReqBuffer(t) - ctx := context.Background() + ctx := t.Context() stateValidatorsResponseJson := structs.GetValidatorsResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) @@ -180,7 +179,7 @@ func TestIndex_JsonResponseError(t *testing.T) { defer ctrl.Finish() pubKey, reqBuffer := getPubKeyAndReqBuffer(t) - ctx := context.Background() + ctx := t.Context() stateValidatorsResponseJson := structs.GetValidatorsResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) diff --git a/validator/client/beacon-api/json_rest_handler_test.go b/validator/client/beacon-api/json_rest_handler_test.go index e0be24023a..1a8666d028 100644 --- a/validator/client/beacon-api/json_rest_handler_test.go +++ b/validator/client/beacon-api/json_rest_handler_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "io" "net/http" @@ -19,7 +18,7 @@ import ( ) func TestGet(t *testing.T) { - ctx := context.Background() + ctx := t.Context() const endpoint = "/example/rest/api/endpoint" genesisJson := &structs.GetGenesisResponse{ Data: &structs.Genesis{ @@ -50,7 +49,7 @@ func TestGet(t *testing.T) { } func TestPost(t 
*testing.T) { - ctx := context.Background() + ctx := t.Context() const endpoint = "/example/rest/api/endpoint" dataBytes := []byte{1, 2, 3, 4, 5} headers := map[string]string{"foo": "bar"} diff --git a/validator/client/beacon-api/prepare_beacon_proposer_test.go b/validator/client/beacon-api/prepare_beacon_proposer_test.go index 36380f4169..03f576f34b 100644 --- a/validator/client/beacon-api/prepare_beacon_proposer_test.go +++ b/validator/client/beacon-api/prepare_beacon_proposer_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -26,7 +25,7 @@ func TestPrepareBeaconProposer_Valid(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRecipients := []*structs.FeeRecipient{ { @@ -88,7 +87,7 @@ func TestPrepareBeaconProposer_BadRequest(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( diff --git a/validator/client/beacon-api/propose_attestation_test.go b/validator/client/beacon-api/propose_attestation_test.go index 640a35a147..4b2a5c86e5 100644 --- a/validator/client/beacon-api/propose_attestation_test.go +++ b/validator/client/beacon-api/propose_attestation_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "errors" "net/http" @@ -116,7 +115,7 @@ func TestProposeAttestation(t *testing.T) { marshalledAttestations = b } - ctx := context.Background() + ctx := t.Context() headers := map[string]string{"Eth-Consensus-Version": version.String(test.attestation.Version())} jsonRestHandler.EXPECT().Post( @@ -177,7 +176,7 @@ func TestProposeAttestationFallBack(t *testing.T) { marshalledAttestations = b } - ctx := context.Background() + ctx := t.Context() headers := map[string]string{"Eth-Consensus-Version": version.String(attestation.Version())} jsonRestHandler.EXPECT().Post( 
gomock.Any(), @@ -304,7 +303,7 @@ func TestProposeAttestationElectra(t *testing.T) { marshalledAttestations = b } - ctx := context.Background() + ctx := t.Context() headers := map[string]string{"Eth-Consensus-Version": version.String(test.attestation.Version())} jsonRestHandler.EXPECT().Post( gomock.Any(), diff --git a/validator/client/beacon-api/propose_beacon_block_altair_test.go b/validator/client/beacon-api/propose_beacon_block_altair_test.go index 6e3eee6402..fe7b354525 100644 --- a/validator/client/beacon-api/propose_beacon_block_altair_test.go +++ b/validator/client/beacon-api/propose_beacon_block_altair_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -30,7 +29,7 @@ func TestProposeBeaconBlock_Altair(t *testing.T) { marshalledBlock, err := json.Marshal(jsonAltairBlock) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() // Make sure that what we send in the POST body is the marshalled version of the protobuf block headers := map[string]string{"Eth-Consensus-Version": "altair"} diff --git a/validator/client/beacon-api/propose_beacon_block_bellatrix_test.go b/validator/client/beacon-api/propose_beacon_block_bellatrix_test.go index c764947cee..bdef5767cd 100644 --- a/validator/client/beacon-api/propose_beacon_block_bellatrix_test.go +++ b/validator/client/beacon-api/propose_beacon_block_bellatrix_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -31,7 +30,7 @@ func TestProposeBeaconBlock_Bellatrix(t *testing.T) { marshalledBlock, err := json.Marshal(jsonBellatrixBlock) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() // Make sure that what we send in the POST body is the marshalled version of the protobuf block headers := map[string]string{"Eth-Consensus-Version": "bellatrix"} diff --git a/validator/client/beacon-api/propose_beacon_block_blinded_bellatrix_test.go 
b/validator/client/beacon-api/propose_beacon_block_blinded_bellatrix_test.go index a23384cf0b..40a7d24bf4 100644 --- a/validator/client/beacon-api/propose_beacon_block_blinded_bellatrix_test.go +++ b/validator/client/beacon-api/propose_beacon_block_blinded_bellatrix_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -32,7 +31,7 @@ func TestProposeBeaconBlock_BlindedBellatrix(t *testing.T) { marshalledBlock, err := json.Marshal(jsonBlindedBellatrixBlock) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() // Make sure that what we send in the POST body is the marshalled version of the protobuf block headers := map[string]string{"Eth-Consensus-Version": "bellatrix"} diff --git a/validator/client/beacon-api/propose_beacon_block_blinded_capella_test.go b/validator/client/beacon-api/propose_beacon_block_blinded_capella_test.go index 59f8a32058..f092f96784 100644 --- a/validator/client/beacon-api/propose_beacon_block_blinded_capella_test.go +++ b/validator/client/beacon-api/propose_beacon_block_blinded_capella_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -32,7 +31,7 @@ func TestProposeBeaconBlock_BlindedCapella(t *testing.T) { marshalledBlock, err := json.Marshal(jsonBlindedCapellaBlock) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() // Make sure that what we send in the POST body is the marshalled version of the protobuf block headers := map[string]string{"Eth-Consensus-Version": "capella"} diff --git a/validator/client/beacon-api/propose_beacon_block_blinded_deneb_test.go b/validator/client/beacon-api/propose_beacon_block_blinded_deneb_test.go index 18fefcc728..8bb44a0bec 100644 --- a/validator/client/beacon-api/propose_beacon_block_blinded_deneb_test.go +++ b/validator/client/beacon-api/propose_beacon_block_blinded_deneb_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" 
"testing" @@ -38,7 +37,7 @@ func TestProposeBeaconBlock_BlindedDeneb(t *testing.T) { ) validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler} - proposeResponse, err := validatorClient.proposeBeaconBlock(context.Background(), genericSignedBlock) + proposeResponse, err := validatorClient.proposeBeaconBlock(t.Context(), genericSignedBlock) assert.NoError(t, err) require.NotNil(t, proposeResponse) diff --git a/validator/client/beacon-api/propose_beacon_block_blinded_electra_test.go b/validator/client/beacon-api/propose_beacon_block_blinded_electra_test.go index 621ef17b61..a77abac346 100644 --- a/validator/client/beacon-api/propose_beacon_block_blinded_electra_test.go +++ b/validator/client/beacon-api/propose_beacon_block_blinded_electra_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -38,7 +37,7 @@ func TestProposeBeaconBlock_BlindedElectra(t *testing.T) { ) validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler} - proposeResponse, err := validatorClient.proposeBeaconBlock(context.Background(), genericSignedBlock) + proposeResponse, err := validatorClient.proposeBeaconBlock(t.Context(), genericSignedBlock) assert.NoError(t, err) require.NotNil(t, proposeResponse) diff --git a/validator/client/beacon-api/propose_beacon_block_blinded_fulu_test.go b/validator/client/beacon-api/propose_beacon_block_blinded_fulu_test.go index 0a442522ac..18fad855a7 100644 --- a/validator/client/beacon-api/propose_beacon_block_blinded_fulu_test.go +++ b/validator/client/beacon-api/propose_beacon_block_blinded_fulu_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -38,7 +37,7 @@ func TestProposeBeaconBlock_BlindedFulu(t *testing.T) { ) validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler} - proposeResponse, err := validatorClient.proposeBeaconBlock(context.Background(), genericSignedBlock) + proposeResponse, err := 
validatorClient.proposeBeaconBlock(t.Context(), genericSignedBlock) assert.NoError(t, err) require.NotNil(t, proposeResponse) diff --git a/validator/client/beacon-api/propose_beacon_block_capella_test.go b/validator/client/beacon-api/propose_beacon_block_capella_test.go index c81354b126..076055219d 100644 --- a/validator/client/beacon-api/propose_beacon_block_capella_test.go +++ b/validator/client/beacon-api/propose_beacon_block_capella_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -42,7 +41,7 @@ func TestProposeBeaconBlock_Capella(t *testing.T) { ) validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler} - proposeResponse, err := validatorClient.proposeBeaconBlock(context.Background(), genericSignedBlock) + proposeResponse, err := validatorClient.proposeBeaconBlock(t.Context(), genericSignedBlock) assert.NoError(t, err) require.NotNil(t, proposeResponse) diff --git a/validator/client/beacon-api/propose_beacon_block_deneb_test.go b/validator/client/beacon-api/propose_beacon_block_deneb_test.go index 722337ef10..b8267b52db 100644 --- a/validator/client/beacon-api/propose_beacon_block_deneb_test.go +++ b/validator/client/beacon-api/propose_beacon_block_deneb_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -38,7 +37,7 @@ func TestProposeBeaconBlock_Deneb(t *testing.T) { ) validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler} - proposeResponse, err := validatorClient.proposeBeaconBlock(context.Background(), genericSignedBlock) + proposeResponse, err := validatorClient.proposeBeaconBlock(t.Context(), genericSignedBlock) assert.NoError(t, err) require.NotNil(t, proposeResponse) diff --git a/validator/client/beacon-api/propose_beacon_block_electra_test.go b/validator/client/beacon-api/propose_beacon_block_electra_test.go index 5ec27d9113..ccd3ff5884 100644 --- 
a/validator/client/beacon-api/propose_beacon_block_electra_test.go +++ b/validator/client/beacon-api/propose_beacon_block_electra_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -38,7 +37,7 @@ func TestProposeBeaconBlock_Electra(t *testing.T) { ) validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler} - proposeResponse, err := validatorClient.proposeBeaconBlock(context.Background(), genericSignedBlock) + proposeResponse, err := validatorClient.proposeBeaconBlock(t.Context(), genericSignedBlock) assert.NoError(t, err) require.NotNil(t, proposeResponse) diff --git a/validator/client/beacon-api/propose_beacon_block_fulu_test.go b/validator/client/beacon-api/propose_beacon_block_fulu_test.go index 849e3f2af4..49cbf55a8e 100644 --- a/validator/client/beacon-api/propose_beacon_block_fulu_test.go +++ b/validator/client/beacon-api/propose_beacon_block_fulu_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -38,7 +37,7 @@ func TestProposeBeaconBlock_Fulu(t *testing.T) { ) validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler} - proposeResponse, err := validatorClient.proposeBeaconBlock(context.Background(), genericSignedBlock) + proposeResponse, err := validatorClient.proposeBeaconBlock(t.Context(), genericSignedBlock) assert.NoError(t, err) require.NotNil(t, proposeResponse) diff --git a/validator/client/beacon-api/propose_beacon_block_phase0_test.go b/validator/client/beacon-api/propose_beacon_block_phase0_test.go index 7f441e336c..187a06f336 100644 --- a/validator/client/beacon-api/propose_beacon_block_phase0_test.go +++ b/validator/client/beacon-api/propose_beacon_block_phase0_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -30,7 +29,7 @@ func TestProposeBeaconBlock_Phase0(t *testing.T) { marshalledBlock, err := json.Marshal(jsonPhase0Block) require.NoError(t, err) - ctx 
:= context.Background() + ctx := t.Context() // Make sure that what we send in the POST body is the marshalled version of the protobuf block headers := map[string]string{"Eth-Consensus-Version": "phase0"} diff --git a/validator/client/beacon-api/propose_beacon_block_test.go b/validator/client/beacon-api/propose_beacon_block_test.go index 9e3c2dab25..72528631a6 100644 --- a/validator/client/beacon-api/propose_beacon_block_test.go +++ b/validator/client/beacon-api/propose_beacon_block_test.go @@ -1,7 +1,6 @@ package beacon_api import ( - "context" "errors" "net/http" "testing" @@ -104,7 +103,7 @@ func TestProposeBeaconBlock_Error(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) headers := map[string]string{"Eth-Consensus-Version": testCase.consensusVersion} @@ -128,6 +127,6 @@ func TestProposeBeaconBlock_Error(t *testing.T) { func TestProposeBeaconBlock_UnsupportedBlockType(t *testing.T) { validatorClient := &beaconApiValidatorClient{} - _, err := validatorClient.proposeBeaconBlock(context.Background(), &ethpb.GenericSignedBeaconBlock{}) + _, err := validatorClient.proposeBeaconBlock(t.Context(), &ethpb.GenericSignedBeaconBlock{}) assert.ErrorContains(t, "unsupported block type", err) } diff --git a/validator/client/beacon-api/propose_exit_test.go b/validator/client/beacon-api/propose_exit_test.go index d88fd852c4..1ef545711a 100644 --- a/validator/client/beacon-api/propose_exit_test.go +++ b/validator/client/beacon-api/propose_exit_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -35,7 +34,7 @@ func TestProposeExit_Valid(t *testing.T) { marshalledVoluntaryExit, err := json.Marshal(jsonSignedVoluntaryExit) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( @@ -70,13 +69,13 @@ func 
TestProposeExit_Valid(t *testing.T) { func TestProposeExit_NilSignedVoluntaryExit(t *testing.T) { validatorClient := &beaconApiValidatorClient{} - _, err := validatorClient.proposeExit(context.Background(), nil) + _, err := validatorClient.proposeExit(t.Context(), nil) assert.ErrorContains(t, "signed voluntary exit is nil", err) } func TestProposeExit_NilExit(t *testing.T) { validatorClient := &beaconApiValidatorClient{} - _, err := validatorClient.proposeExit(context.Background(), &ethpb.SignedVoluntaryExit{}) + _, err := validatorClient.proposeExit(t.Context(), &ethpb.SignedVoluntaryExit{}) assert.ErrorContains(t, "exit is nil", err) } @@ -84,7 +83,7 @@ func TestProposeExit_BadRequest(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( diff --git a/validator/client/beacon-api/prysm_beacon_chain_client_test.go b/validator/client/beacon-api/prysm_beacon_chain_client_test.go index 547377e284..37bd17aff5 100644 --- a/validator/client/beacon-api/prysm_beacon_chain_client_test.go +++ b/validator/client/beacon-api/prysm_beacon_chain_client_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "errors" "testing" @@ -116,7 +115,7 @@ func TestGetValidatorCount(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) // Expect node version endpoint call. 
@@ -170,7 +169,7 @@ func Test_beaconApiBeaconChainClient_GetValidatorPerformance(t *testing.T) { bytesutil.ToBytes48([]byte{3}), } - ctx := context.Background() + ctx := t.Context() ctrl := gomock.NewController(t) defer ctrl.Finish() diff --git a/validator/client/beacon-api/registration_test.go b/validator/client/beacon-api/registration_test.go index 9fe53ac4aa..88eb4f25bc 100644 --- a/validator/client/beacon-api/registration_test.go +++ b/validator/client/beacon-api/registration_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -131,7 +130,7 @@ func TestRegistration_Valid(t *testing.T) { } validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler} - res, err := validatorClient.SubmitValidatorRegistrations(context.Background(), &protoRegistrations) + res, err := validatorClient.SubmitValidatorRegistrations(t.Context(), &protoRegistrations) assert.DeepEqual(t, new(empty.Empty), res) require.NoError(t, err) @@ -153,6 +152,6 @@ func TestRegistration_BadRequest(t *testing.T) { ).Times(1) validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler} - _, err := validatorClient.SubmitValidatorRegistrations(context.Background(), &ethpb.SignedValidatorRegistrationsV1{}) + _, err := validatorClient.SubmitValidatorRegistrations(t.Context(), &ethpb.SignedValidatorRegistrationsV1{}) assert.ErrorContains(t, "foo error", err) } diff --git a/validator/client/beacon-api/state_validators_test.go b/validator/client/beacon-api/state_validators_test.go index 88013839e6..e05a9fdff8 100644 --- a/validator/client/beacon-api/state_validators_test.go +++ b/validator/client/beacon-api/state_validators_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "net/url" "testing" @@ -68,7 +67,7 @@ func TestGetStateValidators_Nominal_POST(t *testing.T) { }, } - ctx := context.Background() + ctx := t.Context() jsonRestHandler.EXPECT().Post( gomock.Any(), @@ -154,7 +153,7 @@ func 
TestGetStateValidators_Nominal_GET(t *testing.T) { }, } - ctx := context.Background() + ctx := t.Context() // First return an error from POST call. jsonRestHandler.EXPECT().Post( @@ -223,7 +222,7 @@ func TestGetStateValidators_GetRestJsonResponseOnError(t *testing.T) { stateValidatorsResponseJson := structs.GetValidatorsResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) - ctx := context.Background() + ctx := t.Context() // First call POST. jsonRestHandler.EXPECT().Post( @@ -276,7 +275,7 @@ func TestGetStateValidators_DataIsNil_POST(t *testing.T) { reqBytes, err := json.Marshal(req) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() stateValidatorsResponseJson := structs.GetValidatorsResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) @@ -315,7 +314,7 @@ func TestGetStateValidators_DataIsNil_GET(t *testing.T) { reqBytes, err := json.Marshal(req) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() stateValidatorsResponseJson := structs.GetValidatorsResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) diff --git a/validator/client/beacon-api/status_test.go b/validator/client/beacon-api/status_test.go index 6d75d33799..64315cd17f 100644 --- a/validator/client/beacon-api/status_test.go +++ b/validator/client/beacon-api/status_test.go @@ -1,7 +1,6 @@ package beacon_api import ( - "context" "errors" "fmt" "testing" @@ -26,7 +25,7 @@ func TestValidatorStatus_Nominal(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl) @@ -91,7 +90,7 @@ func TestValidatorStatus_Error(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl) @@ -123,7 +122,7 @@ func TestMultipleValidatorStatus_Nominal(t *testing.T) { 
"0x8000a6c975761b488bdb0dfba4ed37c0d97d6e6b968562ef5c84aa9a5dfb92d8e309195004e97709077723739bf04463", // existing } - ctx := context.Background() + ctx := t.Context() validatorsPubKey := make([][]byte, len(stringValidatorsPubKey)) for i, stringValidatorPubKey := range stringValidatorsPubKey { @@ -219,7 +218,7 @@ func TestMultipleValidatorStatus_No_Keys(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl) validatorClient := beaconApiValidatorClient{stateValidatorsProvider: stateValidatorsProvider} @@ -238,7 +237,7 @@ func TestGetValidatorsStatusResponse_Nominal_SomeActiveValidators(t *testing.T) ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() stringValidatorsPubKey := []string{ "0x8000091c2ae64ee414a54c1cc1fc67dec663408bc636cb86756e0200e41a75c8f86603f104f02c856983d2783116be13", // existing "0x8000a6c975761b488bdb0dfba4ed37c0d97d6e6b968562ef5c84aa9a5dfb92d8e309195004e97709077723739bf04463", // existing @@ -442,7 +441,7 @@ func TestGetValidatorsStatusResponse_Nominal_NoActiveValidators(t *testing.T) { validatorPubKey, err := hexutil.Decode(stringValidatorPubKey) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl) stateValidatorsProvider.EXPECT().StateValidators( @@ -693,7 +692,7 @@ func TestValidatorStatusResponse_InvalidData(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl) stateValidatorsProvider.EXPECT().StateValidators( gomock.Any(), diff --git a/validator/client/beacon-api/stream_blocks_test.go b/validator/client/beacon-api/stream_blocks_test.go index 0c022c2bcf..eb10cfb644 100644 --- a/validator/client/beacon-api/stream_blocks_test.go +++ 
b/validator/client/beacon-api/stream_blocks_test.go @@ -1,7 +1,6 @@ package beacon_api import ( - "context" "encoding/json" "fmt" "testing" @@ -23,7 +22,7 @@ func TestStreamBlocks_UnsupportedConsensusVersion(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -145,7 +144,7 @@ func TestStreamBlocks_Error(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -195,7 +194,7 @@ func TestStreamBlocks_Phase0Valid(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() signedBlockResponseJson := abstractSignedBlockResponseJson{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) @@ -356,7 +355,7 @@ func TestStreamBlocks_AltairValid(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() signedBlockResponseJson := abstractSignedBlockResponseJson{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) @@ -517,7 +516,7 @@ func TestStreamBlocks_BellatrixValid(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() signedBlockResponseJson := abstractSignedBlockResponseJson{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) @@ -678,7 +677,7 @@ func TestStreamBlocks_CapellaValid(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() signedBlockResponseJson := abstractSignedBlockResponseJson{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) @@ -839,7 +838,7 @@ func TestStreamBlocks_DenebValid(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() 
signedBlockResponseJson := abstractSignedBlockResponseJson{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) diff --git a/validator/client/beacon-api/submit_aggregate_selection_proof_test.go b/validator/client/beacon-api/submit_aggregate_selection_proof_test.go index 11e90c2df0..fbd6cd2ee9 100644 --- a/validator/client/beacon-api/submit_aggregate_selection_proof_test.go +++ b/validator/client/beacon-api/submit_aggregate_selection_proof_test.go @@ -1,7 +1,6 @@ package beacon_api import ( - "context" "encoding/json" "errors" "fmt" @@ -96,7 +95,7 @@ func TestSubmitAggregateSelectionProof(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) // Call node syncing endpoint to check if head is optimistic. @@ -216,7 +215,7 @@ func TestSubmitAggregateSelectionProofFallBack(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) // Call node syncing endpoint to check if head is optimistic. @@ -388,7 +387,7 @@ func TestSubmitAggregateSelectionProofElectra(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) // Call node syncing endpoint to check if head is optimistic. 
diff --git a/validator/client/beacon-api/submit_signed_aggregate_proof_test.go b/validator/client/beacon-api/submit_signed_aggregate_proof_test.go index 760327bd9e..d41b9a89ab 100644 --- a/validator/client/beacon-api/submit_signed_aggregate_proof_test.go +++ b/validator/client/beacon-api/submit_signed_aggregate_proof_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "net/http" "testing" @@ -27,7 +26,7 @@ func TestSubmitSignedAggregateSelectionProof_Valid(t *testing.T) { marshalledSignedAggregateSignedAndProof, err := json.Marshal([]*structs.SignedAggregateAttestationAndProof{jsonifySignedAggregateAndProof(signedAggregateAndProof)}) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() headers := map[string]string{"Eth-Consensus-Version": version.String(signedAggregateAndProof.Message.Version())} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( @@ -59,7 +58,7 @@ func TestSubmitSignedAggregateSelectionProof_BadRequest(t *testing.T) { marshalledSignedAggregateSignedAndProof, err := json.Marshal([]*structs.SignedAggregateAttestationAndProof{jsonifySignedAggregateAndProof(signedAggregateAndProof)}) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() headers := map[string]string{"Eth-Consensus-Version": version.String(signedAggregateAndProof.Message.Version())} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( @@ -87,7 +86,7 @@ func TestSubmitSignedAggregateSelectionProof_Fallback(t *testing.T) { marshalledSignedAggregateSignedAndProof, err := json.Marshal([]*structs.SignedAggregateAttestationAndProof{jsonifySignedAggregateAndProof(signedAggregateAndProof)}) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) headers := map[string]string{"Eth-Consensus-Version": version.String(signedAggregateAndProof.Message.Version())} @@ -131,7 +130,7 @@ func 
TestSubmitSignedAggregateSelectionProofElectra_Valid(t *testing.T) { marshalledSignedAggregateSignedAndProofElectra, err := json.Marshal([]*structs.SignedAggregateAttestationAndProofElectra{jsonifySignedAggregateAndProofElectra(signedAggregateAndProofElectra)}) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() headers := map[string]string{"Eth-Consensus-Version": version.String(signedAggregateAndProofElectra.Message.Version())} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( @@ -163,7 +162,7 @@ func TestSubmitSignedAggregateSelectionProofElectra_BadRequest(t *testing.T) { marshalledSignedAggregateSignedAndProofElectra, err := json.Marshal([]*structs.SignedAggregateAttestationAndProofElectra{jsonifySignedAggregateAndProofElectra(signedAggregateAndProofElectra)}) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() headers := map[string]string{"Eth-Consensus-Version": version.String(signedAggregateAndProofElectra.Message.Version())} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( diff --git a/validator/client/beacon-api/submit_signed_contribution_and_proof_test.go b/validator/client/beacon-api/submit_signed_contribution_and_proof_test.go index c4b6e2868f..4bc7c1e93d 100644 --- a/validator/client/beacon-api/submit_signed_contribution_and_proof_test.go +++ b/validator/client/beacon-api/submit_signed_contribution_and_proof_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "errors" "testing" @@ -42,7 +41,7 @@ func TestSubmitSignedContributionAndProof_Valid(t *testing.T) { marshalledContributionAndProofs, err := json.Marshal(jsonContributionAndProofs) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( @@ -116,7 +115,7 @@ func TestSubmitSignedContributionAndProof_Error(t *testing.T) { ctrl := 
gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) if testCase.httpRequestExpected { diff --git a/validator/client/beacon-api/subscribe_committee_subnets_test.go b/validator/client/beacon-api/subscribe_committee_subnets_test.go index 2a4c7f8979..838df84637 100644 --- a/validator/client/beacon-api/subscribe_committee_subnets_test.go +++ b/validator/client/beacon-api/subscribe_committee_subnets_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "errors" "strconv" @@ -43,7 +42,7 @@ func TestSubscribeCommitteeSubnets_Valid(t *testing.T) { committeeSubscriptionsBytes, err := json.Marshal(jsonCommitteeSubscriptions) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Post( @@ -204,7 +203,7 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) if testCase.expectSubscribeRestCall { diff --git a/validator/client/beacon-api/sync_committee_selections_test.go b/validator/client/beacon-api/sync_committee_selections_test.go index d73c8713be..3f1e1feca8 100644 --- a/validator/client/beacon-api/sync_committee_selections_test.go +++ b/validator/client/beacon-api/sync_committee_selections_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "testing" @@ -102,7 +101,7 @@ func TestGetAggregatedSyncSelections(t *testing.T) { reqBody, err := json.Marshal(test.req) require.NoError(t, err) - ctx := context.Background() + ctx := t.Context() jsonRestHandler.EXPECT().Post( gomock.Any(), "/eth/v1/validator/sync_committee_selections", diff --git a/validator/client/beacon-api/sync_committee_test.go b/validator/client/beacon-api/sync_committee_test.go index 
b11f78c440..73384a140f 100644 --- a/validator/client/beacon-api/sync_committee_test.go +++ b/validator/client/beacon-api/sync_committee_test.go @@ -2,7 +2,6 @@ package beacon_api import ( "bytes" - "context" "encoding/json" "fmt" "net/url" @@ -64,7 +63,7 @@ func TestSubmitSyncMessage_Valid(t *testing.T) { } validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler} - res, err := validatorClient.SubmitSyncMessage(context.Background(), &protoSyncCommitteeMessage) + res, err := validatorClient.SubmitSyncMessage(t.Context(), &protoSyncCommitteeMessage) assert.DeepEqual(t, new(empty.Empty), res) require.NoError(t, err) @@ -86,7 +85,7 @@ func TestSubmitSyncMessage_BadRequest(t *testing.T) { ).Times(1) validatorClient := &beaconApiValidatorClient{jsonRestHandler: jsonRestHandler} - _, err := validatorClient.SubmitSyncMessage(context.Background(), ðpb.SyncCommitteeMessage{}) + _, err := validatorClient.SubmitSyncMessage(t.Context(), ðpb.SyncCommitteeMessage{}) assert.ErrorContains(t, "foo error", err) } @@ -137,7 +136,7 @@ func TestGetSyncMessageBlockRoot(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( gomock.Any(), @@ -207,7 +206,7 @@ func TestGetSyncCommitteeContribution(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( gomock.Any(), @@ -308,7 +307,7 @@ func TestGetSyncSubCommitteeIndex(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() valsReq := &structs.GetValidatorsRequest{ Ids: []string{pubkeyStr}, Statuses: []string{}, diff --git a/validator/client/beacon-api/wait_for_chain_start_test.go 
b/validator/client/beacon-api/wait_for_chain_start_test.go index f4c921b579..5e91158b58 100644 --- a/validator/client/beacon-api/wait_for_chain_start_test.go +++ b/validator/client/beacon-api/wait_for_chain_start_test.go @@ -1,7 +1,6 @@ package beacon_api import ( - "context" "errors" "net/http" "testing" @@ -20,7 +19,7 @@ func TestWaitForChainStart_ValidGenesis(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() genesisResponseJson := structs.GetGenesisResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -87,7 +86,7 @@ func TestWaitForChainStart_BadGenesis(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() genesisResponseJson := structs.GetGenesisResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -115,7 +114,7 @@ func TestWaitForChainStart_JsonResponseError(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() genesisResponseJson := structs.GetGenesisResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) jsonRestHandler.EXPECT().Get( @@ -138,7 +137,7 @@ func TestWaitForChainStart_JsonResponseError404(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() genesisResponseJson := structs.GetGenesisResponse{} jsonRestHandler := mock.NewMockJsonRestHandler(ctrl) diff --git a/validator/client/grpc-api/grpc_prysm_beacon_chain_client_test.go b/validator/client/grpc-api/grpc_prysm_beacon_chain_client_test.go index 7df0944f36..6df8da8e04 100644 --- a/validator/client/grpc-api/grpc_prysm_beacon_chain_client_test.go +++ b/validator/client/grpc-api/grpc_prysm_beacon_chain_client_test.go @@ -1,7 +1,6 @@ package grpc_api import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/config/params" @@ -318,7 +317,7 @@ 
func TestGetValidatorCount(t *testing.T) { require.Equal(t, true, ok) statuses = append(statuses, valStatus) } - vcCountResp, err := prysmBeaconChainClient.ValidatorCount(context.Background(), "", statuses) + vcCountResp, err := prysmBeaconChainClient.ValidatorCount(t.Context(), "", statuses) require.NoError(t, err) require.DeepEqual(t, test.expectedResponse, vcCountResp) }) diff --git a/validator/client/grpc-api/grpc_validator_client_test.go b/validator/client/grpc-api/grpc_validator_client_test.go index 18d569dacd..57a5e675d3 100644 --- a/validator/client/grpc-api/grpc_validator_client_test.go +++ b/validator/client/grpc-api/grpc_validator_client_test.go @@ -134,14 +134,14 @@ func TestWaitForChainStart_StreamSetupFails(t *testing.T) { ).Return(nil, errors.New("failed stream")) validatorClient := &grpcValidatorClient{beaconNodeValidatorClient, true} - _, err := validatorClient.WaitForChainStart(context.Background(), &emptypb.Empty{}) + _, err := validatorClient.WaitForChainStart(t.Context(), &emptypb.Empty{}) want := "could not setup beacon chain ChainStart streaming client" assert.ErrorContains(t, want, err) } func TestStartEventStream(t *testing.T) { hook := logTest.NewGlobal() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() ctrl := gomock.NewController(t) defer ctrl.Finish() diff --git a/validator/client/key_reload_test.go b/validator/client/key_reload_test.go index c0fa5f984b..a7bb13ec57 100644 --- a/validator/client/key_reload_test.go +++ b/validator/client/key_reload_test.go @@ -1,7 +1,6 @@ package client import ( - "context" "testing" fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams" @@ -47,7 +46,7 @@ func TestValidator_HandleKeyReload(t *testing.T) { }, ).Return(resp, nil) - anyActive, err := v.HandleKeyReload(context.Background(), [][fieldparams.BLSPubkeyLength]byte{inactive.pub, active.pub}) + anyActive, err := v.HandleKeyReload(t.Context(), 
[][fieldparams.BLSPubkeyLength]byte{inactive.pub, active.pub}) require.NoError(t, err) assert.Equal(t, true, anyActive) assert.LogsContain(t, hook, "Waiting for deposit to be observed by beacon node") @@ -79,7 +78,7 @@ func TestValidator_HandleKeyReload(t *testing.T) { }, ).Return(resp, nil) - anyActive, err := v.HandleKeyReload(context.Background(), [][fieldparams.BLSPubkeyLength]byte{kp.pub}) + anyActive, err := v.HandleKeyReload(t.Context(), [][fieldparams.BLSPubkeyLength]byte{kp.pub}) require.NoError(t, err) assert.Equal(t, false, anyActive) assert.LogsContain(t, hook, "Waiting for deposit to be observed by beacon node") @@ -103,7 +102,7 @@ func TestValidator_HandleKeyReload(t *testing.T) { }, ).Return(nil, errors.New("error")) - _, err := v.HandleKeyReload(context.Background(), [][fieldparams.BLSPubkeyLength]byte{kp.pub}) + _, err := v.HandleKeyReload(t.Context(), [][fieldparams.BLSPubkeyLength]byte{kp.pub}) assert.ErrorContains(t, "error", err) }) } diff --git a/validator/client/propose_test.go b/validator/client/propose_test.go index ff6a83dcd3..9fcd3cce95 100644 --- a/validator/client/propose_test.go +++ b/validator/client/propose_test.go @@ -113,7 +113,7 @@ func TestProposeBlock_DoesNotProposeGenesisBlock(t *testing.T) { defer finish() var pubKey [fieldparams.BLSPubkeyLength]byte copy(pubKey[:], validatorKey.PublicKey().Marshal()) - validator.ProposeBlock(context.Background(), 0, pubKey) + validator.ProposeBlock(t.Context(), 0, pubKey) require.LogsContain(t, hook, "Assigned to genesis slot, skipping proposal") }) @@ -134,7 +134,7 @@ func TestProposeBlock_DomainDataFailed(t *testing.T) { gomock.Any(), // epoch ).Return(nil /*response*/, errors.New("uh oh")) - validator.ProposeBlock(context.Background(), 1, pubKey) + validator.ProposeBlock(t.Context(), 1, pubKey) require.LogsContain(t, hook, "Failed to sign randao reveal") }) } @@ -154,7 +154,7 @@ func TestProposeBlock_DomainDataIsNil(t *testing.T) { gomock.Any(), // epoch ).Return(nil /*response*/, nil) - 
validator.ProposeBlock(context.Background(), 1, pubKey) + validator.ProposeBlock(t.Context(), 1, pubKey) require.LogsContain(t, hook, domainDataErr) }) } @@ -204,7 +204,7 @@ func TestProposeBlock_RequestBlockFailed(t *testing.T) { gomock.AssignableToTypeOf(ðpb.BlockRequest{}), ).Return(nil /*response*/, errors.New("uh oh")) - validator.ProposeBlock(context.Background(), tt.slot, pubKey) + validator.ProposeBlock(t.Context(), tt.slot, pubKey) require.LogsContain(t, hook, "Failed to request block from beacon node") }) } @@ -270,7 +270,7 @@ func TestProposeBlock_ProposeBlockFailed(t *testing.T) { gomock.AssignableToTypeOf(ðpb.GenericSignedBeaconBlock{}), ).Return(nil /*response*/, errors.New("uh oh")) - validator.ProposeBlock(context.Background(), 1, pubKey) + validator.ProposeBlock(t.Context(), 1, pubKey) require.LogsContain(t, hook, "Failed to propose block") @@ -354,7 +354,7 @@ func TestProposeBlock_BlocksDoubleProposal(t *testing.T) { var dummyRoot [32]byte // Save a dummy proposal history at slot 1. - err := validator.db.SaveProposalHistoryForSlot(context.Background(), pubKey, 1, dummyRoot[:]) + err := validator.db.SaveProposalHistoryForSlot(t.Context(), pubKey, 1, dummyRoot[:]) require.NoError(t, err) m.validatorClient.EXPECT().DomainData( @@ -382,10 +382,10 @@ func TestProposeBlock_BlocksDoubleProposal(t *testing.T) { gomock.AssignableToTypeOf(ðpb.GenericSignedBeaconBlock{}), ).Return(ðpb.ProposeResponse{BlockRoot: make([]byte, 32)}, nil /*error*/) - validator.ProposeBlock(context.Background(), slot, pubKey) + validator.ProposeBlock(t.Context(), slot, pubKey) require.LogsDoNotContain(t, hook, failedBlockSignLocalErr) - validator.ProposeBlock(context.Background(), slot, pubKey) + validator.ProposeBlock(t.Context(), slot, pubKey) require.LogsContain(t, hook, failedBlockSignLocalErr) }) } @@ -403,7 +403,7 @@ func TestProposeBlock_BlocksDoubleProposal_After54KEpochs(t *testing.T) { var dummyRoot [32]byte // Save a dummy proposal history at slot 1. 
- err := validator.db.SaveProposalHistoryForSlot(context.Background(), pubKey, 1, dummyRoot[:]) + err := validator.db.SaveProposalHistoryForSlot(t.Context(), pubKey, 1, dummyRoot[:]) require.NoError(t, err) m.validatorClient.EXPECT().DomainData( @@ -446,10 +446,10 @@ func TestProposeBlock_BlocksDoubleProposal_After54KEpochs(t *testing.T) { gomock.AssignableToTypeOf(ðpb.GenericSignedBeaconBlock{}), ).Return(ðpb.ProposeResponse{BlockRoot: make([]byte, 32)}, nil /*error*/) - validator.ProposeBlock(context.Background(), farFuture, pubKey) + validator.ProposeBlock(t.Context(), farFuture, pubKey) require.LogsDoNotContain(t, hook, failedBlockSignLocalErr) - validator.ProposeBlock(context.Background(), farFuture, pubKey) + validator.ProposeBlock(t.Context(), farFuture, pubKey) require.LogsContain(t, hook, failedBlockSignLocalErr) }) } @@ -481,7 +481,7 @@ func TestProposeBlock_AllowsOrNotPastProposals(t *testing.T) { copy(pubKey[:], validatorKey.PublicKey().Marshal()) // Save a dummy proposal history at slot 0. 
- err := validator.db.SaveProposalHistoryForSlot(context.Background(), pubKey, 0, []byte{}) + err := validator.db.SaveProposalHistoryForSlot(t.Context(), pubKey, 0, []byte{}) require.NoError(t, err) m.validatorClient.EXPECT().DomainData( @@ -515,7 +515,7 @@ func TestProposeBlock_AllowsOrNotPastProposals(t *testing.T) { gomock.AssignableToTypeOf(ðpb.GenericSignedBeaconBlock{}), ).Times(proposeBeaconBlockCount).Return(ðpb.ProposeResponse{BlockRoot: make([]byte, 32)}, nil /*error*/) - validator.ProposeBlock(context.Background(), slot, pubKey) + validator.ProposeBlock(t.Context(), slot, pubKey) require.LogsDoNotContain(t, hook, failedBlockSignLocalErr) blk2 := util.NewBeaconBlock() @@ -528,7 +528,7 @@ func TestProposeBlock_AllowsOrNotPastProposals(t *testing.T) { Phase0: blk2.Block, }, }, nil /*err*/) - validator.ProposeBlock(context.Background(), tt.pastSlot, pubKey) + validator.ProposeBlock(t.Context(), tt.pastSlot, pubKey) if isSlashingProtectionMinimal { require.LogsContain(t, hook, failedBlockSignLocalErr) } else { @@ -722,7 +722,7 @@ func testProposeBlock(t *testing.T, graffiti []byte) { return ðpb.ProposeResponse{BlockRoot: make([]byte, 32)}, nil }) - validator.ProposeBlock(context.Background(), 1, pubKey) + validator.ProposeBlock(t.Context(), 1, pubKey) g := sentBlock.Block().Body().Graffiti() assert.Equal(t, string(validator.graffiti), string(g[:])) require.LogsContain(t, hook, "Submitted new block") @@ -744,7 +744,7 @@ func TestProposeExit_ValidatorIndexFailed(t *testing.T) { ).Return(nil, errors.New("uh oh")) err := ProposeExit( - context.Background(), + t.Context(), m.validatorClient, m.signfunc, validatorKey.PublicKey().Marshal(), @@ -772,7 +772,7 @@ func TestProposeExit_DomainDataFailed(t *testing.T) { Return(nil, errors.New("uh oh")) err := ProposeExit( - context.Background(), + t.Context(), m.validatorClient, m.signfunc, validatorKey.PublicKey().Marshal(), @@ -801,7 +801,7 @@ func TestProposeExit_DomainDataIsNil(t *testing.T) { Return(nil, nil) err := 
ProposeExit( - context.Background(), + t.Context(), m.validatorClient, m.signfunc, validatorKey.PublicKey().Marshal(), @@ -833,7 +833,7 @@ func TestProposeBlock_ProposeExitFailed(t *testing.T) { Return(nil, errors.New("uh oh")) err := ProposeExit( - context.Background(), + t.Context(), m.validatorClient, m.signfunc, validatorKey.PublicKey().Marshal(), @@ -865,7 +865,7 @@ func TestProposeExit_BroadcastsBlock(t *testing.T) { Return(ðpb.ProposeExitResponse{}, nil) assert.NoError(t, ProposeExit( - context.Background(), + t.Context(), m.validatorClient, m.signfunc, validatorKey.PublicKey().Marshal(), @@ -885,7 +885,7 @@ func TestSignBlock(t *testing.T) { m.validatorClient.EXPECT(). DomainData(gomock.Any(), gomock.Any()). Return(ðpb.DomainResponse{SignatureDomain: proposerDomain}, nil) - ctx := context.Background() + ctx := t.Context() blk := util.NewBeaconBlock() blk.Block.Slot = 1 blk.Block.ProposerIndex = 100 @@ -924,7 +924,7 @@ func TestSignAltairBlock(t *testing.T) { m.validatorClient.EXPECT(). DomainData(gomock.Any(), gomock.Any()). Return(ðpb.DomainResponse{SignatureDomain: proposerDomain}, nil) - ctx := context.Background() + ctx := t.Context() blk := util.NewBeaconBlockAltair() blk.Block.Slot = 1 blk.Block.ProposerIndex = 100 @@ -955,7 +955,7 @@ func TestSignBellatrixBlock(t *testing.T) { DomainData(gomock.Any(), gomock.Any()). Return(ðpb.DomainResponse{SignatureDomain: proposerDomain}, nil) - ctx := context.Background() + ctx := t.Context() blk := util.NewBeaconBlockBellatrix() blk.Block.Slot = 1 blk.Block.ProposerIndex = 100 @@ -1094,7 +1094,7 @@ func TestGetGraffiti_Ok(t *testing.T) { ValidatorIndex(gomock.Any(), ðpb.ValidatorIndexRequest{PublicKey: pubKey[:]}). 
Return(ðpb.ValidatorIndexResponse{Index: 2}, nil) } - got, err := tt.v.Graffiti(context.Background(), pubKey) + got, err := tt.v.Graffiti(t.Context(), pubKey) require.NoError(t, err) require.DeepEqual(t, tt.want, got) }) @@ -1124,7 +1124,7 @@ func TestGetGraffitiOrdered_Ok(t *testing.T) { }, } for _, want := range [][]byte{bytesutil.PadTo([]byte{'a'}, 32), bytesutil.PadTo([]byte{'b'}, 32), bytesutil.PadTo([]byte{'c'}, 32), bytesutil.PadTo([]byte{'d'}, 32), bytesutil.PadTo([]byte{'d'}, 32)} { - got, err := v.Graffiti(context.Background(), pubKey) + got, err := v.Graffiti(t.Context(), pubKey) require.NoError(t, err) require.DeepEqual(t, want, got) } @@ -1191,7 +1191,7 @@ func Test_validator_DeleteGraffiti(t *testing.T) { db: testing2.SetupDB(t, t.TempDir(), [][fieldparams.BLSPubkeyLength]byte{pubKey}, false), proposerSettings: tt.proposerSettings, } - err := v.DeleteGraffiti(context.Background(), pubKey) + err := v.DeleteGraffiti(t.Context(), pubKey) if tt.wantErr != "" { require.ErrorContains(t, tt.wantErr, err) } else { @@ -1271,7 +1271,7 @@ func Test_validator_SetGraffiti(t *testing.T) { db: testing2.SetupDB(t, t.TempDir(), [][fieldparams.BLSPubkeyLength]byte{pubKey}, false), proposerSettings: tt.proposerSettings, } - err := v.SetGraffiti(context.Background(), pubKey, []byte(tt.graffiti)) + err := v.SetGraffiti(t.Context(), pubKey, []byte(tt.graffiti)) if tt.wantErr != "" { require.ErrorContains(t, tt.wantErr, err) } else { diff --git a/validator/client/registration_test.go b/validator/client/registration_test.go index 30f012f3e3..c741bcb218 100644 --- a/validator/client/registration_test.go +++ b/validator/client/registration_test.go @@ -1,7 +1,6 @@ package client import ( - "context" "fmt" "testing" "time" @@ -22,7 +21,7 @@ func TestSubmitValidatorRegistrations(t *testing.T) { _, m, validatorKey, finish := setup(t, isSlashingProtectionMinimal) defer finish() - ctx := context.Background() + ctx := t.Context() validatorRegsBatchSize := 2 require.NoError(t, nil, 
SubmitValidatorRegistrations(ctx, m.validatorClient, []*ethpb.SignedValidatorRegistrationV1{}, validatorRegsBatchSize)) @@ -103,7 +102,7 @@ func TestSubmitValidatorRegistration_CantSign(t *testing.T) { _, m, validatorKey, finish := setup(t, isSlashingProtectionMinimal) defer finish() - ctx := context.Background() + ctx := t.Context() validatorRegsBatchSize := 500 reg := ðpb.ValidatorRegistrationV1{ FeeRecipient: bytesutil.PadTo([]byte("fee"), 20), @@ -134,7 +133,7 @@ func Test_signValidatorRegistration(t *testing.T) { _, m, validatorKey, finish := setup(t, isSlashingProtectionMinimal) defer finish() - ctx := context.Background() + ctx := t.Context() reg := ðpb.ValidatorRegistrationV1{ FeeRecipient: bytesutil.PadTo([]byte("fee"), 20), GasLimit: 123456, @@ -151,7 +150,7 @@ func TestValidator_SignValidatorRegistrationRequest(t *testing.T) { for _, isSlashingProtectionMinimal := range [...]bool{false, true} { _, m, validatorKey, finish := setup(t, isSlashingProtectionMinimal) defer finish() - ctx := context.Background() + ctx := t.Context() byteval, err := hexutil.Decode("0x878705ba3f8bc32fcf7f4caa1a35e72af65cf766") require.NoError(t, err) tests := []struct { diff --git a/validator/client/runner_test.go b/validator/client/runner_test.go index bfba0206fc..dd3b408aa5 100644 --- a/validator/client/runner_test.go +++ b/validator/client/runner_test.go @@ -81,7 +81,7 @@ func TestRetry_On_ConnectionError(t *testing.T) { RetryTillSuccess: retry, } backOffPeriod = 10 * time.Millisecond - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) go run(ctx, v) // each step will fail (retry times)=10 this sleep times will wait more then // the time it takes for all steps to succeed before main loop. 
@@ -114,9 +114,9 @@ func TestUpdateDuties_NextSlot(t *testing.T) { tracker := health.NewTracker(node) node.EXPECT().IsHealthy(gomock.Any()).Return(true).AnyTimes() // avoid race condition between the cancellation of the context in the go stream from slot and the setting of IsHealthy - _ = tracker.CheckHealth(context.Background()) + _ = tracker.CheckHealth(t.Context()) v := &testutil.FakeValidator{Km: &mockKeymanager{accountsChangedFeed: &event.Feed{}}, Tracker: tracker} - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) slot := primitives.Slot(55) ticker := make(chan primitives.Slot) @@ -140,9 +140,9 @@ func TestUpdateDuties_HandlesError(t *testing.T) { tracker := health.NewTracker(node) node.EXPECT().IsHealthy(gomock.Any()).Return(true).AnyTimes() // avoid race condition between the cancellation of the context in the go stream from slot and the setting of IsHealthy - _ = tracker.CheckHealth(context.Background()) + _ = tracker.CheckHealth(t.Context()) v := &testutil.FakeValidator{Km: &mockKeymanager{accountsChangedFeed: &event.Feed{}}, Tracker: tracker} - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) slot := primitives.Slot(55) ticker := make(chan primitives.Slot) @@ -166,9 +166,9 @@ func TestRoleAt_NextSlot(t *testing.T) { tracker := health.NewTracker(node) node.EXPECT().IsHealthy(gomock.Any()).Return(true).AnyTimes() // avoid race condition between the cancellation of the context in the go stream from slot and the setting of IsHealthy - _ = tracker.CheckHealth(context.Background()) + _ = tracker.CheckHealth(t.Context()) v := &testutil.FakeValidator{Km: &mockKeymanager{accountsChangedFeed: &event.Feed{}}, Tracker: tracker} - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) slot := primitives.Slot(55) ticker := make(chan primitives.Slot) @@ -192,10 +192,10 @@ func TestAttests_NextSlot(t 
*testing.T) { tracker := health.NewTracker(node) node.EXPECT().IsHealthy(gomock.Any()).Return(true).AnyTimes() // avoid race condition between the cancellation of the context in the go stream from slot and the setting of IsHealthy - _ = tracker.CheckHealth(context.Background()) + _ = tracker.CheckHealth(t.Context()) attSubmitted := make(chan interface{}) v := &testutil.FakeValidator{Km: &mockKeymanager{accountsChangedFeed: &event.Feed{}}, Tracker: tracker, AttSubmitted: attSubmitted} - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) slot := primitives.Slot(55) ticker := make(chan primitives.Slot) @@ -219,10 +219,10 @@ func TestProposes_NextSlot(t *testing.T) { tracker := health.NewTracker(node) node.EXPECT().IsHealthy(gomock.Any()).Return(true).AnyTimes() // avoid race condition between the cancellation of the context in the go stream from slot and the setting of IsHealthy - _ = tracker.CheckHealth(context.Background()) + _ = tracker.CheckHealth(t.Context()) blockProposed := make(chan interface{}) v := &testutil.FakeValidator{Km: &mockKeymanager{accountsChangedFeed: &event.Feed{}}, Tracker: tracker, BlockProposed: blockProposed} - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) slot := primitives.Slot(55) ticker := make(chan primitives.Slot) @@ -247,11 +247,11 @@ func TestBothProposesAndAttests_NextSlot(t *testing.T) { tracker := health.NewTracker(node) node.EXPECT().IsHealthy(gomock.Any()).Return(true).AnyTimes() // avoid race condition between the cancellation of the context in the go stream from slot and the setting of IsHealthy - _ = tracker.CheckHealth(context.Background()) + _ = tracker.CheckHealth(t.Context()) blockProposed := make(chan interface{}) attSubmitted := make(chan interface{}) v := &testutil.FakeValidator{Km: &mockKeymanager{accountsChangedFeed: &event.Feed{}}, Tracker: tracker, BlockProposed: blockProposed, AttSubmitted: attSubmitted} 
- ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) slot := primitives.Slot(55) ticker := make(chan primitives.Slot) @@ -272,7 +272,7 @@ func TestBothProposesAndAttests_NextSlot(t *testing.T) { } func TestKeyReload_ActiveKey(t *testing.T) { - ctx := context.Background() + ctx := t.Context() km := &mockKeymanager{} ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -290,7 +290,7 @@ func TestKeyReload_ActiveKey(t *testing.T) { func TestKeyReload_NoActiveKey(t *testing.T) { na := notActive(t) - ctx := context.Background() + ctx := t.Context() km := &mockKeymanager{} ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -324,7 +324,7 @@ func TestUpdateProposerSettingsAt_EpochStart(t *testing.T) { tracker := health.NewTracker(node) node.EXPECT().IsHealthy(gomock.Any()).Return(true).AnyTimes() v := &testutil.FakeValidator{Km: &mockKeymanager{accountsChangedFeed: &event.Feed{}}, Tracker: tracker} - err := v.SetProposerSettings(context.Background(), &proposer.Settings{ + err := v.SetProposerSettings(t.Context(), &proposer.Settings{ DefaultConfig: &proposer.Option{ FeeRecipientConfig: &proposer.FeeRecipientConfig{ FeeRecipient: common.HexToAddress("0x046Fb65722E7b2455012BFEBf6177F1D2e9738D9"), @@ -332,7 +332,7 @@ func TestUpdateProposerSettingsAt_EpochStart(t *testing.T) { }, }) require.NoError(t, err) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) hook := logTest.NewGlobal() slot := params.BeaconConfig().SlotsPerEpoch ticker := make(chan primitives.Slot) @@ -358,7 +358,7 @@ func TestUpdateProposerSettingsAt_EpochEndOk(t *testing.T) { ProposerSettingWait: time.Duration(params.BeaconConfig().SecondsPerSlot-1) * time.Second, Tracker: tracker, } - err := v.SetProposerSettings(context.Background(), &proposer.Settings{ + err := v.SetProposerSettings(t.Context(), &proposer.Settings{ DefaultConfig: &proposer.Option{ FeeRecipientConfig: 
&proposer.FeeRecipientConfig{ FeeRecipient: common.HexToAddress("0x046Fb65722E7b2455012BFEBf6177F1D2e9738D9"), @@ -366,7 +366,7 @@ func TestUpdateProposerSettingsAt_EpochEndOk(t *testing.T) { }, }) require.NoError(t, err) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) hook := logTest.NewGlobal() slot := params.BeaconConfig().SlotsPerEpoch - 1 //have it set close to the end of epoch ticker := make(chan primitives.Slot) diff --git a/validator/client/service_test.go b/validator/client/service_test.go index 493f954019..0cfaac6a19 100644 --- a/validator/client/service_test.go +++ b/validator/client/service_test.go @@ -16,7 +16,7 @@ import ( var _ runtime.Service = (*ValidatorService)(nil) func TestStop_CancelsContext(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) vs := &ValidatorService{ ctx: ctx, cancel: cancel, @@ -33,7 +33,7 @@ func TestStop_CancelsContext(t *testing.T) { func TestNew_Insecure(t *testing.T) { hook := logTest.NewGlobal() - _, err := NewValidatorService(context.Background(), &Config{}) + _, err := NewValidatorService(t.Context(), &Config{}) require.NoError(t, err) require.LogsContain(t, hook, "You are using an insecure gRPC connection") } @@ -45,7 +45,7 @@ func TestStatus_NoConnectionError(t *testing.T) { func TestStart_GrpcHeaders(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() for input, output := range map[string][]string{ "should-break": {}, "key=value": {"key", "value"}, diff --git a/validator/client/slashing_protection_interchange_test.go b/validator/client/slashing_protection_interchange_test.go index 90443e4dd5..9e0bc5dd8c 100644 --- a/validator/client/slashing_protection_interchange_test.go +++ b/validator/client/slashing_protection_interchange_test.go @@ -2,7 +2,6 @@ package client import ( "bytes" - "context" "encoding/hex" "encoding/json" "fmt" @@ -98,7 +97,7 
@@ func TestEIP3076SpecTests(t *testing.T) { for _, step := range tt.Steps { if tt.GenesisValidatorsRoot != "" { r, err := helpers.RootFromHex(tt.GenesisValidatorsRoot) - require.NoError(t, validator.db.SaveGenesisValidatorsRoot(context.Background(), r[:])) + require.NoError(t, validator.db.SaveGenesisValidatorsRoot(t.Context(), r[:])) require.NoError(t, err) } @@ -109,7 +108,7 @@ func TestEIP3076SpecTests(t *testing.T) { t.Fatal(err) } b := bytes.NewBuffer(interchangeBytes) - if err := validator.db.ImportStandardProtectionJSON(context.Background(), b); err != nil { + if err := validator.db.ImportStandardProtectionJSON(t.Context(), b); err != nil { if step.ShouldSucceed { t.Fatal(err) } @@ -140,7 +139,7 @@ func TestEIP3076SpecTests(t *testing.T) { wsb, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - err = validator.db.SlashableProposalCheck(context.Background(), pk, wsb, signingRoot, validator.emitAccountMetrics, ValidatorProposeFailVec) + err = validator.db.SlashableProposalCheck(t.Context(), pk, wsb, signingRoot, validator.emitAccountMetrics, ValidatorProposeFailVec) if shouldSucceed { require.NoError(t, err) } else { @@ -177,7 +176,7 @@ func TestEIP3076SpecTests(t *testing.T) { copy(signingRoot[:], signingRootBytes) } - err = validator.db.SlashableAttestationCheck(context.Background(), ia, pk, signingRoot, false, nil) + err = validator.db.SlashableAttestationCheck(t.Context(), ia, pk, signingRoot, false, nil) if shouldSucceed { require.NoError(t, err) } else { diff --git a/validator/client/sync_committee_test.go b/validator/client/sync_committee_test.go index 625210e4c9..d0550d64a0 100644 --- a/validator/client/sync_committee_test.go +++ b/validator/client/sync_committee_test.go @@ -37,7 +37,7 @@ func TestSubmitSyncCommitteeMessage_ValidatorDutiesRequestFailure(t *testing.T) var pubKey [fieldparams.BLSPubkeyLength]byte copy(pubKey[:], validatorKey.PublicKey().Marshal()) - validator.SubmitSyncCommitteeMessage(context.Background(), 1, pubKey) + 
validator.SubmitSyncCommitteeMessage(t.Context(), 1, pubKey) require.LogsContain(t, hook, "Could not fetch validator assignment") }) } @@ -73,7 +73,7 @@ func TestSubmitSyncCommitteeMessage_BadDomainData(t *testing.T) { var pubKey [fieldparams.BLSPubkeyLength]byte copy(pubKey[:], validatorKey.PublicKey().Marshal()) - validator.SubmitSyncCommitteeMessage(context.Background(), 1, pubKey) + validator.SubmitSyncCommitteeMessage(t.Context(), 1, pubKey) require.LogsContain(t, hook, "Could not get sync committee domain data") }) } @@ -117,7 +117,7 @@ func TestSubmitSyncCommitteeMessage_CouldNotSubmit(t *testing.T) { var pubKey [fieldparams.BLSPubkeyLength]byte copy(pubKey[:], validatorKey.PublicKey().Marshal()) - validator.SubmitSyncCommitteeMessage(context.Background(), 1, pubKey) + validator.SubmitSyncCommitteeMessage(t.Context(), 1, pubKey) require.LogsContain(t, hook, "Could not submit sync committee message") }) @@ -165,7 +165,7 @@ func TestSubmitSyncCommitteeMessage_OK(t *testing.T) { var pubKey [fieldparams.BLSPubkeyLength]byte copy(pubKey[:], validatorKey.PublicKey().Marshal()) - validator.SubmitSyncCommitteeMessage(context.Background(), 1, pubKey) + validator.SubmitSyncCommitteeMessage(t.Context(), 1, pubKey) require.LogsDoNotContain(t, hook, "Could not") require.Equal(t, primitives.Slot(1), generatedMsg.Slot) @@ -185,7 +185,7 @@ func TestSubmitSignedContributionAndProof_ValidatorDutiesRequestFailure(t *testi var pubKey [fieldparams.BLSPubkeyLength]byte copy(pubKey[:], validatorKey.PublicKey().Marshal()) - validator.SubmitSignedContributionAndProof(context.Background(), 1, pubKey) + validator.SubmitSignedContributionAndProof(t.Context(), 1, pubKey) require.LogsContain(t, hook, "Could not fetch validator assignment") }) } @@ -217,7 +217,7 @@ func TestSubmitSignedContributionAndProof_SyncSubcommitteeIndexFailure(t *testin }, ).Return(ðpb.SyncSubcommitteeIndexResponse{}, errors.New("Bad index")) - validator.SubmitSignedContributionAndProof(context.Background(), 1, 
pubKey) + validator.SubmitSignedContributionAndProof(t.Context(), 1, pubKey) require.LogsContain(t, hook, "Could not get sync subcommittee index") }) } @@ -249,7 +249,7 @@ func TestSubmitSignedContributionAndProof_NothingToDo(t *testing.T) { }, ).Return(ðpb.SyncSubcommitteeIndexResponse{Indices: []primitives.CommitteeIndex{}}, nil) - validator.SubmitSignedContributionAndProof(context.Background(), 1, pubKey) + validator.SubmitSignedContributionAndProof(t.Context(), 1, pubKey) require.LogsContain(t, hook, "Empty subcommittee index list, do nothing") }) } @@ -288,7 +288,7 @@ func TestSubmitSignedContributionAndProof_BadDomain(t *testing.T) { SignatureDomain: make([]byte, 32), }, errors.New("bad domain response")) - validator.SubmitSignedContributionAndProof(context.Background(), 1, pubKey) + validator.SubmitSignedContributionAndProof(t.Context(), 1, pubKey) require.LogsContain(t, hook, "Could not get selection proofs") require.LogsContain(t, hook, "bad domain response") }) @@ -343,7 +343,7 @@ func TestSubmitSignedContributionAndProof_CouldNotGetContribution(t *testing.T) }, ).Return(nil, errors.New("Bad contribution")) - validator.SubmitSignedContributionAndProof(context.Background(), 1, pubKey) + validator.SubmitSignedContributionAndProof(t.Context(), 1, pubKey) require.LogsContain(t, hook, "Could not get sync committee contribution") }) } @@ -426,7 +426,7 @@ func TestSubmitSignedContributionAndProof_CouldNotSubmitContribution(t *testing. 
}), ).Return(&emptypb.Empty{}, errors.New("Could not submit contribution")) - validator.SubmitSignedContributionAndProof(context.Background(), 1, pubKey) + validator.SubmitSignedContributionAndProof(t.Context(), 1, pubKey) require.LogsContain(t, hook, "Could not submit signed contribution and proof") }) } @@ -508,7 +508,7 @@ func TestSubmitSignedContributionAndProof_Ok(t *testing.T) { }), ).Return(&emptypb.Empty{}, nil) - validator.SubmitSignedContributionAndProof(context.Background(), 1, pubKey) + validator.SubmitSignedContributionAndProof(t.Context(), 1, pubKey) }) } } diff --git a/validator/client/validator_test.go b/validator/client/validator_test.go index 0d899f7032..d55db4389a 100644 --- a/validator/client/validator_test.go +++ b/validator/client/validator_test.go @@ -172,7 +172,7 @@ func TestWaitForChainStart_SetsGenesisInfo(t *testing.T) { } // Make sure its clean at the start. - savedGenValRoot, err := db.GenesisValidatorsRoot(context.Background()) + savedGenValRoot, err := db.GenesisValidatorsRoot(t.Context()) require.NoError(t, err) assert.DeepEqual(t, []byte(nil), savedGenValRoot, "Unexpected saved genesis validators root") @@ -186,8 +186,8 @@ func TestWaitForChainStart_SetsGenesisInfo(t *testing.T) { GenesisTime: genesis, GenesisValidatorsRoot: genesisValidatorsRoot[:], }, nil) - require.NoError(t, v.WaitForChainStart(context.Background())) - savedGenValRoot, err = db.GenesisValidatorsRoot(context.Background()) + require.NoError(t, v.WaitForChainStart(t.Context())) + savedGenValRoot, err = db.GenesisValidatorsRoot(t.Context()) require.NoError(t, err) assert.DeepEqual(t, genesisValidatorsRoot[:], savedGenValRoot, "Unexpected saved genesis validators root") @@ -203,7 +203,7 @@ func TestWaitForChainStart_SetsGenesisInfo(t *testing.T) { GenesisTime: genesis, GenesisValidatorsRoot: genesisValidatorsRoot[:], }, nil) - require.NoError(t, v.WaitForChainStart(context.Background())) + require.NoError(t, v.WaitForChainStart(t.Context())) }) } } @@ -230,8 +230,8 
@@ func TestWaitForChainStart_SetsGenesisInfo_IncorrectSecondTry(t *testing.T) { GenesisTime: genesis, GenesisValidatorsRoot: genesisValidatorsRoot[:], }, nil) - require.NoError(t, v.WaitForChainStart(context.Background())) - savedGenValRoot, err := db.GenesisValidatorsRoot(context.Background()) + require.NoError(t, v.WaitForChainStart(t.Context())) + savedGenValRoot, err := db.GenesisValidatorsRoot(t.Context()) require.NoError(t, err) assert.DeepEqual(t, genesisValidatorsRoot[:], savedGenValRoot, "Unexpected saved genesis validators root") @@ -249,7 +249,7 @@ func TestWaitForChainStart_SetsGenesisInfo_IncorrectSecondTry(t *testing.T) { GenesisTime: genesis, GenesisValidatorsRoot: genesisValidatorsRoot[:], }, nil) - err = v.WaitForChainStart(context.Background()) + err = v.WaitForChainStart(t.Context()) require.ErrorContains(t, "does not match root saved", err) }) } @@ -274,7 +274,7 @@ func TestWaitForChainStart_ContextCanceled(t *testing.T) { GenesisTime: genesis, GenesisValidatorsRoot: genesisValidatorsRoot, }, nil) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) cancel() assert.ErrorContains(t, cancelledCtx, v.WaitForChainStart(ctx)) } @@ -291,7 +291,7 @@ func TestWaitForChainStart_ReceiveErrorFromStream(t *testing.T) { gomock.Any(), &emptypb.Empty{}, ).Return(nil, errors.New("fails")) - err := v.WaitForChainStart(context.Background()) + err := v.WaitForChainStart(t.Context()) want := "could not receive ChainStart from stream" assert.ErrorContains(t, want, err) } @@ -308,7 +308,7 @@ func TestCanonicalHeadSlot_FailedRPC(t *testing.T) { gomock.Any(), gomock.Any(), ).Return(nil, errors.New("failed")) - _, err := v.CanonicalHeadSlot(context.Background()) + _, err := v.CanonicalHeadSlot(t.Context()) assert.ErrorContains(t, "failed", err) } @@ -323,7 +323,7 @@ func TestCanonicalHeadSlot_OK(t *testing.T) { gomock.Any(), gomock.Any(), ).Return(ðpb.ChainHead{HeadSlot: 0}, nil) - headSlot, err := 
v.CanonicalHeadSlot(context.Background()) + headSlot, err := v.CanonicalHeadSlot(t.Context()) require.NoError(t, err) assert.Equal(t, primitives.Slot(0), headSlot, "Mismatch slots") } @@ -337,7 +337,7 @@ func TestWaitSync_ContextCanceled(t *testing.T) { nodeClient: n, } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) cancel() n.EXPECT().SyncStatus( @@ -362,7 +362,7 @@ func TestWaitSync_NotSyncing(t *testing.T) { gomock.Any(), ).Return(ðpb.SyncStatus{Syncing: false}, nil) - require.NoError(t, v.WaitForSync(context.Background())) + require.NoError(t, v.WaitForSync(t.Context())) } func TestWaitSync_Syncing(t *testing.T) { @@ -384,7 +384,7 @@ func TestWaitSync_Syncing(t *testing.T) { gomock.Any(), ).Return(ðpb.SyncStatus{Syncing: false}, nil) - require.NoError(t, v.WaitForSync(context.Background())) + require.NoError(t, v.WaitForSync(t.Context())) } func TestUpdateDuties_DoesNothingWhenNotEpochStart_AlreadyExistingAssignments(t *testing.T) { @@ -415,7 +415,7 @@ func TestUpdateDuties_DoesNothingWhenNotEpochStart_AlreadyExistingAssignments(t gomock.Any(), ).Times(1) - assert.NoError(t, v.UpdateDuties(context.Background()), "Could not update assignments") + assert.NoError(t, v.UpdateDuties(t.Context()), "Could not update assignments") } func TestUpdateDuties_ReturnsError(t *testing.T) { @@ -442,7 +442,7 @@ func TestUpdateDuties_ReturnsError(t *testing.T) { gomock.Any(), ).Return(nil, expected) - assert.ErrorContains(t, expected.Error(), v.UpdateDuties(context.Background())) + assert.ErrorContains(t, expected.Error(), v.UpdateDuties(t.Context())) assert.Equal(t, (*ethpb.ValidatorDutiesContainer)(nil), v.duties, "Assignments should have been cleared on failure") } @@ -484,7 +484,7 @@ func TestUpdateDuties_OK(t *testing.T) { return nil, nil }) - require.NoError(t, v.UpdateDuties(context.Background()), "Could not update assignments") + require.NoError(t, v.UpdateDuties(t.Context()), "Could not update assignments") 
util.WaitTimeout(&wg, 2*time.Second) @@ -531,7 +531,7 @@ func TestUpdateDuties_OK_FilterBlacklistedPublicKeys(t *testing.T) { return nil, nil }) - require.NoError(t, v.UpdateDuties(context.Background()), "Could not update assignments") + require.NoError(t, v.UpdateDuties(t.Context()), "Could not update assignments") util.WaitTimeout(&wg, 2*time.Second) @@ -576,7 +576,7 @@ func TestUpdateDuties_AllValidatorsExited(t *testing.T) { gomock.Any(), ).Return(resp, nil) - err := v.UpdateDuties(context.Background()) + err := v.UpdateDuties(t.Context()) require.ErrorContains(t, ErrValidatorsAllExited.Error(), err) } @@ -662,7 +662,7 @@ func TestUpdateDuties_Distributed(t *testing.T) { return nil, nil }) - require.NoError(t, v.UpdateDuties(context.Background()), "Could not update assignments") + require.NoError(t, v.UpdateDuties(t.Context()), "Could not update assignments") util.WaitTimeout(&wg, 2*time.Second) require.Equal(t, 2, len(v.attSelections)) } @@ -705,7 +705,7 @@ func TestRolesAt_OK(t *testing.T) { }, ).Return(ðpb.SyncSubcommitteeIndexResponse{}, nil /*err*/) - roleMap, err := v.RolesAt(context.Background(), 1) + roleMap, err := v.RolesAt(t.Context(), 1) require.NoError(t, err) assert.Equal(t, iface.RoleAttester, roleMap[bytesutil.ToBytes48(validatorKey.PublicKey().Marshal())][0]) @@ -740,7 +740,7 @@ func TestRolesAt_OK(t *testing.T) { }, ).Return(ðpb.SyncSubcommitteeIndexResponse{}, nil /*err*/) - roleMap, err = v.RolesAt(context.Background(), params.BeaconConfig().SlotsPerEpoch-1) + roleMap, err = v.RolesAt(t.Context(), params.BeaconConfig().SlotsPerEpoch-1) require.NoError(t, err) assert.Equal(t, iface.RoleSyncCommittee, roleMap[bytesutil.ToBytes48(validatorKey.PublicKey().Marshal())][0]) }) @@ -769,7 +769,7 @@ func TestRolesAt_DoesNotAssignProposer_Slot0(t *testing.T) { gomock.Any(), // epoch ).Return(ðpb.DomainResponse{SignatureDomain: make([]byte, 32)}, nil /*err*/) - roleMap, err := v.RolesAt(context.Background(), 0) + roleMap, err := v.RolesAt(t.Context(), 
0) require.NoError(t, err) assert.Equal(t, iface.RoleAttester, roleMap[bytesutil.ToBytes48(validatorKey.PublicKey().Marshal())][0]) @@ -923,7 +923,7 @@ func TestValidator_CheckDoppelGanger(t *testing.T) { validatorSetter: func(t *testing.T) *validator { client := validatormock.NewMockValidatorClient(ctrl) km := genMockKeymanager(t, 10) - keys, err := km.FetchValidatingPublicKeys(context.Background()) + keys, err := km.FetchValidatingPublicKeys(t.Context()) assert.NoError(t, err) db := dbTest.SetupDB(t, t.TempDir(), keys, isSlashingProtectionMinimal) req := ðpb.DoppelGangerRequest{ValidatorRequests: []*ethpb.DoppelGangerRequest_ValidatorRequest{}} @@ -932,7 +932,7 @@ func TestValidator_CheckDoppelGanger(t *testing.T) { att := createAttestation(10, 12) rt, err := att.Data.HashTreeRoot() assert.NoError(t, err) - assert.NoError(t, db.SaveAttestationForPubKey(context.Background(), pkey, rt, att)) + assert.NoError(t, db.SaveAttestationForPubKey(t.Context(), pkey, rt, att)) signedRoot := rt[:] if isSlashingProtectionMinimal { signedRoot = nil @@ -957,7 +957,7 @@ func TestValidator_CheckDoppelGanger(t *testing.T) { validatorSetter: func(t *testing.T) *validator { client := validatormock.NewMockValidatorClient(ctrl) km := genMockKeymanager(t, 10) - keys, err := km.FetchValidatingPublicKeys(context.Background()) + keys, err := km.FetchValidatingPublicKeys(t.Context()) assert.NoError(t, err) db := dbTest.SetupDB(t, t.TempDir(), keys, isSlashingProtectionMinimal) req := ðpb.DoppelGangerRequest{ValidatorRequests: []*ethpb.DoppelGangerRequest_ValidatorRequest{}} @@ -967,7 +967,7 @@ func TestValidator_CheckDoppelGanger(t *testing.T) { att := createAttestation(10, 12) rt, err := att.Data.HashTreeRoot() assert.NoError(t, err) - assert.NoError(t, db.SaveAttestationForPubKey(context.Background(), pkey, rt, att)) + assert.NoError(t, db.SaveAttestationForPubKey(t.Context(), pkey, rt, att)) if i%3 == 0 { resp.Responses = append(resp.Responses, 
ðpb.DoppelGangerResponse_ValidatorResponse{PublicKey: pkey[:], DuplicateExists: true}) } @@ -998,7 +998,7 @@ func TestValidator_CheckDoppelGanger(t *testing.T) { validatorSetter: func(t *testing.T) *validator { client := validatormock.NewMockValidatorClient(ctrl) km := genMockKeymanager(t, 10) - keys, err := km.FetchValidatingPublicKeys(context.Background()) + keys, err := km.FetchValidatingPublicKeys(t.Context()) assert.NoError(t, err) db := dbTest.SetupDB(t, t.TempDir(), keys, isSlashingProtectionMinimal) req := ðpb.DoppelGangerRequest{ValidatorRequests: []*ethpb.DoppelGangerRequest_ValidatorRequest{}} @@ -1008,7 +1008,7 @@ func TestValidator_CheckDoppelGanger(t *testing.T) { att := createAttestation(10, 12) rt, err := att.Data.HashTreeRoot() assert.NoError(t, err) - assert.NoError(t, db.SaveAttestationForPubKey(context.Background(), pkey, rt, att)) + assert.NoError(t, db.SaveAttestationForPubKey(t.Context(), pkey, rt, att)) if i%9 == 0 { resp.Responses = append(resp.Responses, ðpb.DoppelGangerResponse_ValidatorResponse{PublicKey: pkey[:], DuplicateExists: true}) } @@ -1037,7 +1037,7 @@ func TestValidator_CheckDoppelGanger(t *testing.T) { validatorSetter: func(t *testing.T) *validator { client := validatormock.NewMockValidatorClient(ctrl) km := genMockKeymanager(t, 10) - keys, err := km.FetchValidatingPublicKeys(context.Background()) + keys, err := km.FetchValidatingPublicKeys(t.Context()) assert.NoError(t, err) db := dbTest.SetupDB(t, t.TempDir(), keys, isSlashingProtectionMinimal) req := ðpb.DoppelGangerRequest{ValidatorRequests: []*ethpb.DoppelGangerRequest_ValidatorRequest{}} @@ -1049,7 +1049,7 @@ func TestValidator_CheckDoppelGanger(t *testing.T) { att := createAttestation(10+primitives.Epoch(j), 12+primitives.Epoch(j)) rt, err := att.Data.HashTreeRoot() assert.NoError(t, err) - assert.NoError(t, db.SaveAttestationForPubKey(context.Background(), pkey, rt, att)) + assert.NoError(t, db.SaveAttestationForPubKey(t.Context(), pkey, rt, att)) signedRoot := rt[:] 
if isSlashingProtectionMinimal { @@ -1083,7 +1083,7 @@ func TestValidator_CheckDoppelGanger(t *testing.T) { client := validatormock.NewMockValidatorClient(ctrl) // Use only 1 key for deterministic order. km := genMockKeymanager(t, 1) - keys, err := km.FetchValidatingPublicKeys(context.Background()) + keys, err := km.FetchValidatingPublicKeys(t.Context()) assert.NoError(t, err) db := dbTest.SetupDB(t, t.TempDir(), keys, isSlashingProtectionMinimal) resp := ðpb.DoppelGangerResponse{Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{}} @@ -1109,7 +1109,7 @@ func TestValidator_CheckDoppelGanger(t *testing.T) { for _, tt := range tests { t.Run(fmt.Sprintf("%s/isSlashingProtectionMinimal:%v", tt.name, isSlashingProtectionMinimal), func(t *testing.T) { v := tt.validatorSetter(t) - if err := v.CheckDoppelGanger(context.Background()); tt.err != "" { + if err := v.CheckDoppelGanger(t.Context()); tt.err != "" { assert.ErrorContains(t, tt.err, err) } }) @@ -1121,7 +1121,7 @@ func TestValidatorAttestationsAreOrdered(t *testing.T) { for _, isSlashingProtectionMinimal := range [...]bool{false, true} { t.Run(fmt.Sprintf("SlashingProtectionMinimal:%v", isSlashingProtectionMinimal), func(t *testing.T) { km := genMockKeymanager(t, 10) - keys, err := km.FetchValidatingPublicKeys(context.Background()) + keys, err := km.FetchValidatingPublicKeys(t.Context()) assert.NoError(t, err) db := dbTest.SetupDB(t, t.TempDir(), keys, isSlashingProtectionMinimal) @@ -1129,13 +1129,13 @@ func TestValidatorAttestationsAreOrdered(t *testing.T) { att := createAttestation(10, 14) rt, err := att.Data.HashTreeRoot() assert.NoError(t, err) - assert.NoError(t, db.SaveAttestationForPubKey(context.Background(), k, rt, att)) + assert.NoError(t, db.SaveAttestationForPubKey(t.Context(), k, rt, att)) att = createAttestation(6, 8) rt, err = att.Data.HashTreeRoot() assert.NoError(t, err) - err = db.SaveAttestationForPubKey(context.Background(), k, rt, att) + err = db.SaveAttestationForPubKey(t.Context(), k, 
rt, att) if isSlashingProtectionMinimal { assert.ErrorContains(t, "could not sign attestation with source lower than recorded source epoch", err) } else { @@ -1146,7 +1146,7 @@ func TestValidatorAttestationsAreOrdered(t *testing.T) { rt, err = att.Data.HashTreeRoot() assert.NoError(t, err) - err = db.SaveAttestationForPubKey(context.Background(), k, rt, att) + err = db.SaveAttestationForPubKey(t.Context(), k, rt, att) if isSlashingProtectionMinimal { assert.ErrorContains(t, "could not sign attestation with target lower than or equal to recorded target epoch", err) } else { @@ -1157,7 +1157,7 @@ func TestValidatorAttestationsAreOrdered(t *testing.T) { rt, err = att.Data.HashTreeRoot() assert.NoError(t, err) - err = db.SaveAttestationForPubKey(context.Background(), k, rt, att) + err = db.SaveAttestationForPubKey(t.Context(), k, rt, att) if isSlashingProtectionMinimal { assert.ErrorContains(t, "could not sign attestation with source lower than recorded source epoch", err) } else { @@ -1202,7 +1202,7 @@ func TestIsSyncCommitteeAggregator_OK(t *testing.T) { }, ).Return(ðpb.SyncSubcommitteeIndexResponse{}, nil /*err*/) - aggregator, err := v.isSyncCommitteeAggregator(context.Background(), slot, map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte{ + aggregator, err := v.isSyncCommitteeAggregator(t.Context(), slot, map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte{ 0: bytesutil.ToBytes48(pubKey), }) require.NoError(t, err) @@ -1225,7 +1225,7 @@ func TestIsSyncCommitteeAggregator_OK(t *testing.T) { }, ).Return(ðpb.SyncSubcommitteeIndexResponse{Indices: []primitives.CommitteeIndex{0}}, nil /*err*/) - aggregator, err = v.isSyncCommitteeAggregator(context.Background(), slot, map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte{ + aggregator, err = v.isSyncCommitteeAggregator(t.Context(), slot, map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte{ 0: bytesutil.ToBytes48(pubKey), }) require.NoError(t, err) @@ -1253,7 +1253,7 @@ 
func TestIsSyncCommitteeAggregator_Distributed_OK(t *testing.T) { }, ).Return(ðpb.SyncSubcommitteeIndexResponse{}, nil /*err*/) - aggregator, err := v.isSyncCommitteeAggregator(context.Background(), slot, map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte{ + aggregator, err := v.isSyncCommitteeAggregator(t.Context(), slot, map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte{ 0: bytesutil.ToBytes48(pubKey), }) require.NoError(t, err) @@ -1276,7 +1276,7 @@ func TestIsSyncCommitteeAggregator_Distributed_OK(t *testing.T) { }, ).Return(ðpb.SyncSubcommitteeIndexResponse{Indices: []primitives.CommitteeIndex{0}}, nil /*err*/) - sig, err := v.signSyncSelectionData(context.Background(), bytesutil.ToBytes48(pubKey), 0, slot) + sig, err := v.signSyncSelectionData(t.Context(), bytesutil.ToBytes48(pubKey), 0, slot) require.NoError(t, err) selection := iface.SyncCommitteeSelection{ @@ -1290,7 +1290,7 @@ func TestIsSyncCommitteeAggregator_Distributed_OK(t *testing.T) { []iface.SyncCommitteeSelection{selection}, ).Return([]iface.SyncCommitteeSelection{selection}, nil) - aggregator, err = v.isSyncCommitteeAggregator(context.Background(), slot, map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte{ + aggregator, err = v.isSyncCommitteeAggregator(t.Context(), slot, map[primitives.ValidatorIndex][fieldparams.BLSPubkeyLength]byte{ 123: bytesutil.ToBytes48(pubKey), }) require.NoError(t, err) @@ -1302,7 +1302,7 @@ func TestIsSyncCommitteeAggregator_Distributed_OK(t *testing.T) { func TestValidator_WaitForKeymanagerInitialization_web3Signer(t *testing.T) { for _, isSlashingProtectionMinimal := range [...]bool{false, true} { t.Run(fmt.Sprintf("SlashingProtectionMinimal:%v", isSlashingProtectionMinimal), func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := dbTest.SetupDB(t, t.TempDir(), [][fieldparams.BLSPubkeyLength]byte{}, isSlashingProtectionMinimal) root := make([]byte, 32) copy(root[2:], "a") @@ -1323,7 +1323,7 @@ func 
TestValidator_WaitForKeymanagerInitialization_web3Signer(t *testing.T) { ProvidedPublicKeys: []string{"0xa2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820"}, }, } - err = v.WaitForKeymanagerInitialization(context.Background()) + err = v.WaitForKeymanagerInitialization(t.Context()) require.NoError(t, err) km, err := v.Keymanager() require.NoError(t, err) @@ -1335,7 +1335,7 @@ func TestValidator_WaitForKeymanagerInitialization_web3Signer(t *testing.T) { func TestValidator_WaitForKeymanagerInitialization_Web(t *testing.T) { for _, isSlashingProtectionMinimal := range [...]bool{false, true} { t.Run(fmt.Sprintf("SlashingProtectionMinimal:%v", isSlashingProtectionMinimal), func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := dbTest.SetupDB(t, t.TempDir(), [][fieldparams.BLSPubkeyLength]byte{}, isSlashingProtectionMinimal) root := make([]byte, 32) copy(root[2:], "a") @@ -1369,7 +1369,7 @@ func TestValidator_WaitForKeymanagerInitialization_Web(t *testing.T) { func TestValidator_WaitForKeymanagerInitialization_Interop(t *testing.T) { for _, isSlashingProtectionMinimal := range [...]bool{false, true} { t.Run(fmt.Sprintf("SlashingProtectionMinimal:%v", isSlashingProtectionMinimal), func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := dbTest.SetupDB(t, t.TempDir(), [][fieldparams.BLSPubkeyLength]byte{}, isSlashingProtectionMinimal) root := make([]byte, 32) copy(root[2:], "a") @@ -1429,7 +1429,7 @@ func (m *PrepareBeaconProposerRequestMatcher) String() string { func TestValidator_PushSettings(t *testing.T) { for _, isSlashingProtectionMinimal := range [...]bool{false, true} { ctrl := gomock.NewController(t) - ctx := context.Background() + ctx := t.Context() db := dbTest.SetupDB(t, t.TempDir(), [][fieldparams.BLSPubkeyLength]byte{}, isSlashingProtectionMinimal) client := validatormock.NewMockValidatorClient(ctrl) nodeClient := validatormock.NewMockNodeClient(ctrl) @@ -1510,7 +1510,7 
@@ func TestValidator_PushSettings(t *testing.T) { GasLimit: 40000000, }, } - err = v.SetProposerSettings(context.Background(), &proposer.Settings{ + err = v.SetProposerSettings(t.Context(), &proposer.Settings{ ProposeConfig: config, DefaultConfig: &proposer.Option{ FeeRecipientConfig: &proposer.FeeRecipientConfig{ @@ -1601,7 +1601,7 @@ func TestValidator_PushSettings(t *testing.T) { GasLimit: 40000000, }, } - err = v.SetProposerSettings(context.Background(), &proposer.Settings{ + err = v.SetProposerSettings(t.Context(), &proposer.Settings{ ProposeConfig: config, DefaultConfig: &proposer.Option{ FeeRecipientConfig: &proposer.FeeRecipientConfig{ @@ -1683,7 +1683,7 @@ func TestValidator_PushSettings(t *testing.T) { FeeRecipient: common.HexToAddress("0x055Fb65722E7b2455043BFEBf6177F1D2e9738D9"), }, } - err = v.SetProposerSettings(context.Background(), &proposer.Settings{ + err = v.SetProposerSettings(t.Context(), &proposer.Settings{ ProposeConfig: config, DefaultConfig: &proposer.Option{ FeeRecipientConfig: &proposer.FeeRecipientConfig{ @@ -1726,7 +1726,7 @@ func TestValidator_PushSettings(t *testing.T) { require.NoError(t, err) keys, err := km.FetchValidatingPublicKeys(ctx) require.NoError(t, err) - err = v.SetProposerSettings(context.Background(), &proposer.Settings{ + err = v.SetProposerSettings(t.Context(), &proposer.Settings{ ProposeConfig: nil, DefaultConfig: &proposer.Option{ FeeRecipientConfig: &proposer.FeeRecipientConfig{ @@ -1792,7 +1792,7 @@ func TestValidator_PushSettings(t *testing.T) { } err := v.WaitForKeymanagerInitialization(ctx) require.NoError(t, err) - err = v.SetProposerSettings(context.Background(), &proposer.Settings{ + err = v.SetProposerSettings(t.Context(), &proposer.Settings{ ProposeConfig: nil, DefaultConfig: &proposer.Option{ FeeRecipientConfig: &proposer.FeeRecipientConfig{ @@ -1889,7 +1889,7 @@ func TestValidator_PushSettings(t *testing.T) { FeeRecipient: common.Address{}, }, } - err = v.SetProposerSettings(context.Background(), 
&proposer.Settings{ + err = v.SetProposerSettings(t.Context(), &proposer.Settings{ ProposeConfig: config, DefaultConfig: &proposer.Option{ FeeRecipientConfig: &proposer.FeeRecipientConfig{ @@ -1936,7 +1936,7 @@ func TestValidator_PushSettings(t *testing.T) { PublicKeys: [][]byte{keys[0][:]}, Indices: []primitives.ValidatorIndex{unknownIndex}, }, nil) - err = v.SetProposerSettings(context.Background(), &proposer.Settings{ + err = v.SetProposerSettings(t.Context(), &proposer.Settings{ ProposeConfig: config, DefaultConfig: &proposer.Option{ FeeRecipientConfig: &proposer.FeeRecipientConfig{ @@ -1993,7 +1993,7 @@ func TestValidator_PushSettings(t *testing.T) { GasLimit: 40000000, }, } - err = v.SetProposerSettings(context.Background(), &proposer.Settings{ + err = v.SetProposerSettings(t.Context(), &proposer.Settings{ ProposeConfig: config, DefaultConfig: &proposer.Option{ FeeRecipientConfig: &proposer.FeeRecipientConfig{ @@ -2112,7 +2112,7 @@ func TestValidator_buildPrepProposerReqs_WithoutDefaultConfig(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() client := validatormock.NewMockValidatorClient(ctrl) client.EXPECT().MultipleValidatorStatus( @@ -2201,7 +2201,7 @@ func TestValidator_filterAndCacheActiveKeys(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() client := validatormock.NewMockValidatorClient(ctrl) client.EXPECT().MultipleValidatorStatus( @@ -2225,7 +2225,7 @@ func TestValidator_filterAndCacheActiveKeys(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() client := validatormock.NewMockValidatorClient(ctrl) client.EXPECT().MultipleValidatorStatus( @@ -2267,7 +2267,7 @@ func TestValidator_filterAndCacheActiveKeys(t *testing.T) { require.Equal(t, 3, len(keys)) }) t.Run("cache used mid epoch, no new keys added", func(t *testing.T) { - ctx := 
context.Background() + ctx := t.Context() v := validator{ pubkeyToStatus: map[[48]byte]*validatorStatus{ pubkey1: { @@ -2354,7 +2354,7 @@ func TestValidator_buildPrepProposerReqs_WithDefaultConfig(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() client := validatormock.NewMockValidatorClient(ctrl) client.EXPECT().MultipleValidatorStatus( @@ -2511,7 +2511,7 @@ func TestValidator_buildSignedRegReqs_DefaultConfigDisabled(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() client := validatormock.NewMockValidatorClient(ctrl) signature := blsmock.NewMockSignature(ctrl) @@ -2611,7 +2611,7 @@ func TestValidator_buildSignedRegReqs_DefaultConfigEnabled(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() client := validatormock.NewMockValidatorClient(ctrl) signature := blsmock.NewMockSignature(ctrl) @@ -2723,7 +2723,7 @@ func TestValidator_buildSignedRegReqs_SignerOnError(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() client := validatormock.NewMockValidatorClient(ctrl) v := validator{ @@ -2764,7 +2764,7 @@ func TestValidator_buildSignedRegReqs_TimestampBeforeGenesis(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() client := validatormock.NewMockValidatorClient(ctrl) signature := blsmock.NewMockSignature(ctrl) @@ -2845,7 +2845,7 @@ func TestValidator_ChangeHost(t *testing.T) { } func TestUpdateValidatorStatusCache(t *testing.T) { - ctx := context.Background() + ctx := t.Context() ctrl := gomock.NewController(t) defer ctrl.Finish() pubkeys := [][fieldparams.BLSPubkeyLength]byte{ @@ -2913,7 +2913,7 @@ func TestValidator_CheckDependentRoots(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + 
ctx := t.Context() client := validatormock.NewMockValidatorClient(ctrl) v := &validator{ diff --git a/validator/client/wait_for_activation_test.go b/validator/client/wait_for_activation_test.go index f9ddc2515f..a95ea993a5 100644 --- a/validator/client/wait_for_activation_test.go +++ b/validator/client/wait_for_activation_test.go @@ -1,7 +1,6 @@ package client import ( - "context" "fmt" "testing" "time" @@ -37,7 +36,7 @@ func TestWaitActivation_Exiting_OK(t *testing.T) { prysmChainClient: prysmChainClient, accountsChangedChannel: make(chan [][fieldparams.BLSPubkeyLength]byte, 1), } - ctx := context.Background() + ctx := t.Context() resp := testutil.GenerateMultipleValidatorStatusResponse([][]byte{kp.pub[:]}) resp.Statuses[0].Status = ethpb.ValidatorStatus_EXITING validatorClient.EXPECT().MultipleValidatorStatus( @@ -97,7 +96,7 @@ func TestWaitForActivation_RefetchKeys(t *testing.T) { require.NoError(t, km.add(kp)) km.SimulateAccountChanges([][48]byte{kp.pub}) }() - assert.NoError(t, v.WaitForActivation(context.Background()), "Could not wait for activation") + assert.NoError(t, v.WaitForActivation(t.Context()), "Could not wait for activation") assert.LogsContain(t, hook, msgNoKeysFetched) assert.LogsContain(t, hook, "Validator activated") } @@ -159,7 +158,7 @@ func TestWaitForActivation_AccountsChanged(t *testing.T) { ðpb.ChainHead{HeadEpoch: 0}, nil, ).AnyTimes() - assert.NoError(t, v.WaitForActivation(context.Background())) + assert.NoError(t, v.WaitForActivation(t.Context())) assert.LogsContain(t, hook, "Waiting for deposit to be observed by beacon node") assert.LogsContain(t, hook, "Validator activated") }) @@ -181,7 +180,7 @@ func TestWaitForActivation_AccountsChanged(t *testing.T) { AccountPasswords: make(map[string]string), WalletPassword: "secretPassw0rd$1999", } - ctx := context.Background() + ctx := t.Context() km, err := derived.NewKeymanager(ctx, &derived.SetupConfig{ Wallet: wallet, ListenForChanges: true, @@ -238,7 +237,7 @@ func 
TestWaitForActivation_AccountsChanged(t *testing.T) { ðpb.ChainHead{HeadEpoch: 0}, nil, ).AnyTimes() - assert.NoError(t, v.WaitForActivation(context.Background())) + assert.NoError(t, v.WaitForActivation(t.Context())) assert.LogsContain(t, hook, "Waiting for deposit to be observed by beacon node") assert.LogsContain(t, hook, "Validator activated") }) @@ -283,5 +282,5 @@ func TestWaitForActivation_AttemptsReconnectionOnFailure(t *testing.T) { ðpb.ChainHead{HeadEpoch: 0}, nil, ).AnyTimes() - assert.NoError(t, v.WaitForActivation(context.Background())) + assert.NoError(t, v.WaitForActivation(t.Context())) } diff --git a/validator/db/convert_test.go b/validator/db/convert_test.go index 7d376a78e3..5627730dc9 100644 --- a/validator/db/convert_test.go +++ b/validator/db/convert_test.go @@ -1,7 +1,6 @@ package db import ( - "context" "fmt" "path/filepath" "testing" @@ -36,7 +35,7 @@ func getFeeRecipientFromString(t *testing.T, feeRecipientString string) [fieldpa } func TestDB_ConvertDatabase(t *testing.T) { - ctx := context.Background() + ctx := t.Context() pubKeyString1 := "0x80000060606fa05c7339dd7bcd0d3e4d8b573fa30dea2fdb4997031a703e3300326e3c054be682f92d9c367cd647bbea" pubKeyString2 := "0x81000060606fa05c7339dd7bcd0d3e4d8b573fa30dea2fdb4997031a703e3300326e3c054be682f92d9c367cd647bbea" diff --git a/validator/db/filesystem/attester_protection_test.go b/validator/db/filesystem/attester_protection_test.go index ae48e69b65..e5bbf208a2 100644 --- a/validator/db/filesystem/attester_protection_test.go +++ b/validator/db/filesystem/attester_protection_test.go @@ -1,7 +1,6 @@ package filesystem import ( - "context" "sync" "testing" @@ -19,7 +18,7 @@ func TestStore_EIPImportBlacklistedPublicKeys(t *testing.T) { require.NoError(t, err, "could not create store") var expected = [][fieldparams.BLSPubkeyLength]byte{} - actual, err := store.EIPImportBlacklistedPublicKeys(context.Background()) + actual, err := store.EIPImportBlacklistedPublicKeys(t.Context()) require.NoError(t, err, 
"could not get blacklisted public keys") require.DeepSSZEqual(t, expected, actual, "blacklisted public keys do not match") } @@ -30,7 +29,7 @@ func TestStore_SaveEIPImportBlacklistedPublicKeys(t *testing.T) { require.NoError(t, err, "could not create store") // Save blacklisted public keys. - err = store.SaveEIPImportBlacklistedPublicKeys(context.Background(), [][fieldparams.BLSPubkeyLength]byte{}) + err = store.SaveEIPImportBlacklistedPublicKeys(t.Context(), [][fieldparams.BLSPubkeyLength]byte{}) require.NoError(t, err, "could not save blacklisted public keys") } @@ -46,7 +45,7 @@ func TestStore_LowestSignedTargetEpoch(t *testing.T) { require.NoError(t, err, "could not create store") // Get the lowest signed target epoch. - _, exists, err := store.LowestSignedTargetEpoch(context.Background(), [fieldparams.BLSPubkeyLength]byte{}) + _, exists, err := store.LowestSignedTargetEpoch(t.Context(), [fieldparams.BLSPubkeyLength]byte{}) require.NoError(t, err, "could not get lowest signed target epoch") require.Equal(t, false, exists, "lowest signed target epoch should not exist") @@ -59,12 +58,12 @@ func TestStore_LowestSignedTargetEpoch(t *testing.T) { } // Save the attestation. - err = store.SaveAttestationForPubKey(context.Background(), pubkey, [32]byte{}, attestation) + err = store.SaveAttestationForPubKey(t.Context(), pubkey, [32]byte{}, attestation) require.NoError(t, err, "SaveAttestationForPubKey should not return an error") // Get the lowest signed target epoch. 
expected := primitives.Epoch(savedTargetEpoch) - actual, exists, err := store.LowestSignedTargetEpoch(context.Background(), pubkey) + actual, exists, err := store.LowestSignedTargetEpoch(t.Context(), pubkey) require.NoError(t, err, "could not get lowest signed target epoch") require.Equal(t, true, exists, "lowest signed target epoch should not exist") require.Equal(t, expected, actual, "lowest signed target epoch should match") @@ -79,7 +78,7 @@ func TestStore_LowestSignedSourceEpoch(t *testing.T) { require.NoError(t, err, "could not create store") // Get the lowest signed target epoch. - _, exists, err := store.LowestSignedSourceEpoch(context.Background(), [fieldparams.BLSPubkeyLength]byte{}) + _, exists, err := store.LowestSignedSourceEpoch(t.Context(), [fieldparams.BLSPubkeyLength]byte{}) require.NoError(t, err, "could not get lowest signed source epoch") require.Equal(t, false, exists, "lowest signed source epoch should not exist") @@ -93,12 +92,12 @@ func TestStore_LowestSignedSourceEpoch(t *testing.T) { } // Save the attestation. - err = store.SaveAttestationForPubKey(context.Background(), pubkey, [32]byte{}, attestation) + err = store.SaveAttestationForPubKey(t.Context(), pubkey, [32]byte{}, attestation) require.NoError(t, err, "SaveAttestationForPubKey should not return an error") // Get the lowest signed target epoch. expected := primitives.Epoch(savedSourceEpoch) - actual, exists, err := store.LowestSignedSourceEpoch(context.Background(), pubkey) + actual, exists, err := store.LowestSignedSourceEpoch(t.Context(), pubkey) require.NoError(t, err, "could not get lowest signed target epoch") require.Equal(t, true, exists, "lowest signed target epoch should exist") require.Equal(t, expected, actual, "lowest signed target epoch should match") @@ -118,7 +117,7 @@ func TestStore_AttestedPublicKeys(t *testing.T) { // Attest for some pubkeys. 
attestedPubkeys := pubkeys[1:3] for _, pubkey := range attestedPubkeys { - err = s.SaveAttestationForPubKey(context.Background(), pubkey, [32]byte{}, ðpb.IndexedAttestation{ + err = s.SaveAttestationForPubKey(t.Context(), pubkey, [32]byte{}, ðpb.IndexedAttestation{ Data: ðpb.AttestationData{ Source: ðpb.Checkpoint{Epoch: 42}, Target: ðpb.Checkpoint{Epoch: 43}, @@ -128,7 +127,7 @@ func TestStore_AttestedPublicKeys(t *testing.T) { } // Check the public keys. - actual, err := s.AttestedPublicKeys(context.Background()) + actual, err := s.AttestedPublicKeys(t.Context()) require.NoError(t, err, "publicKeys should not return an error") // We cannot compare the slices directly because the order is not guaranteed, @@ -276,13 +275,13 @@ func TestStore_SaveAttestationForPubKey(t *testing.T) { if tt.existingAttInDB != nil { // Simulate an already existing slashing protection. - err = store.SaveAttestationForPubKey(context.Background(), pubkey, [32]byte{}, tt.existingAttInDB) + err = store.SaveAttestationForPubKey(t.Context(), pubkey, [32]byte{}, tt.existingAttInDB) require.NoError(t, err, "failed to save attestation when simulating an already existing slashing protection") } if tt.incomingAtt != nil { // Attempt to save a new attestation. - err = store.SaveAttestationForPubKey(context.Background(), pubkey, [32]byte{}, tt.incomingAtt) + err = store.SaveAttestationForPubKey(t.Context(), pubkey, [32]byte{}, tt.incomingAtt) if len(tt.expectedErr) > 0 { require.ErrorContains(t, tt.expectedErr, err) } else { @@ -299,7 +298,7 @@ func pointerFromInt(i uint64) *uint64 { func TestStore_SaveAttestationsForPubKey2(t *testing.T) { // Get the context. - ctx := context.Background() + ctx := t.Context() // Create a public key. pubkey := getPubKeys(t, 1)[0] @@ -430,7 +429,7 @@ func TestStore_AttestationHistoryForPubKey(t *testing.T) { require.NoError(t, err, "NewStore should not return an error") // Get the attestation history. 
- actual, err := store.AttestationHistoryForPubKey(context.Background(), pubkey) + actual, err := store.AttestationHistoryForPubKey(t.Context(), pubkey) require.NoError(t, err, "AttestationHistoryForPubKey should not return an error") require.DeepEqual(t, []*common.AttestationRecord{}, actual) @@ -444,7 +443,7 @@ func TestStore_AttestationHistoryForPubKey(t *testing.T) { } // Save the attestation. - err = store.SaveAttestationForPubKey(context.Background(), pubkey, [32]byte{}, attestation) + err = store.SaveAttestationForPubKey(t.Context(), pubkey, [32]byte{}, attestation) require.NoError(t, err, "SaveAttestationForPubKey should not return an error") // Get the attestation history. @@ -456,14 +455,14 @@ func TestStore_AttestationHistoryForPubKey(t *testing.T) { }, } - actual, err = store.AttestationHistoryForPubKey(context.Background(), pubkey) + actual, err = store.AttestationHistoryForPubKey(t.Context(), pubkey) require.NoError(t, err, "AttestationHistoryForPubKey should not return an error") require.DeepEqual(t, expected, actual) } func BenchmarkStore_SaveAttestationForPubKey(b *testing.B) { var wg sync.WaitGroup - ctx := context.Background() + ctx := b.Context() // Create pubkeys pubkeys := make([][fieldparams.BLSPubkeyLength]byte, 2000) diff --git a/validator/db/filesystem/db_test.go b/validator/db/filesystem/db_test.go index bf76710f27..3118e5836e 100644 --- a/validator/db/filesystem/db_test.go +++ b/validator/db/filesystem/db_test.go @@ -1,7 +1,6 @@ package filesystem import ( - "context" "fmt" "os" "path" @@ -106,7 +105,7 @@ func TestStore_Backup(t *testing.T) { require.NoError(t, err, "NewStore should not return an error") // Update the proposer settings. 
- err = s.SaveProposerSettings(context.Background(), &proposer.Settings{ + err = s.SaveProposerSettings(t.Context(), &proposer.Settings{ DefaultConfig: &proposer.Option{ FeeRecipientConfig: &proposer.FeeRecipientConfig{ FeeRecipient: common.Address{}, @@ -116,7 +115,7 @@ func TestStore_Backup(t *testing.T) { require.NoError(t, err, "SaveProposerSettings should not return an error") // Backup the DB. - require.NoError(t, s.Backup(context.Background(), backupsPath, true), "Backup should not return an error") + require.NoError(t, s.Backup(t.Context(), backupsPath, true), "Backup should not return an error") // Get the directory path of the backup. files, err := os.ReadDir(path.Join(backupsPath, backupsDirectoryName)) diff --git a/validator/db/filesystem/genesis_test.go b/validator/db/filesystem/genesis_test.go index 738a511ab2..1e9ed81488 100644 --- a/validator/db/filesystem/genesis_test.go +++ b/validator/db/filesystem/genesis_test.go @@ -1,14 +1,13 @@ package filesystem import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/testing/require" ) func TestStore_GenesisValidatorsRoot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() genesisValidatorRootString := "0x0100" genesisValidatorRootBytes := []byte{1, 0} @@ -52,7 +51,7 @@ func TestStore_GenesisValidatorsRoot(t *testing.T) { } func TestStore_SaveGenesisValidatorsRoot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() genesisValidatorRootString := "0x0100" for _, tt := range []struct { diff --git a/validator/db/filesystem/graffiti_test.go b/validator/db/filesystem/graffiti_test.go index 3a43037076..c8aad1a916 100644 --- a/validator/db/filesystem/graffiti_test.go +++ b/validator/db/filesystem/graffiti_test.go @@ -1,7 +1,6 @@ package filesystem import ( - "context" "testing" fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams" @@ -29,7 +28,7 @@ func TestStore_SaveGraffitiOrderedIndex(t *testing.T) { require.NoError(t, err) // Save graffiti ordered index. 
- err = store.SaveGraffitiOrderedIndex(context.Background(), graffitiOrderedIndex) + err = store.SaveGraffitiOrderedIndex(t.Context(), graffitiOrderedIndex) require.NoError(t, err) }) } @@ -87,7 +86,7 @@ func TestStore_GraffitiOrderedIndex(t *testing.T) { require.NoError(t, err) // Get graffiti ordered index. - actualGraffitiOrderedIndex, err := store.GraffitiOrderedIndex(context.Background(), tt.fileHash) + actualGraffitiOrderedIndex, err := store.GraffitiOrderedIndex(t.Context(), tt.fileHash) require.NoError(t, err) require.Equal(t, tt.expectedGraffitiOrderedIndex, actualGraffitiOrderedIndex) }) diff --git a/validator/db/filesystem/import_test.go b/validator/db/filesystem/import_test.go index 23dacc11ae..d3eff6ad4b 100644 --- a/validator/db/filesystem/import_test.go +++ b/validator/db/filesystem/import_test.go @@ -2,7 +2,6 @@ package filesystem import ( "bytes" - "context" "encoding/json" "testing" @@ -24,7 +23,7 @@ func TestStore_ImportInterchangeData_BadJSON(t *testing.T) { require.NoError(t, err, "NewStore should not return an error") buf := bytes.NewBuffer([]byte("helloworld")) - err = s.ImportStandardProtectionJSON(context.Background(), buf) + err = s.ImportStandardProtectionJSON(t.Context(), buf) require.ErrorContains(t, "could not unmarshal slashing protection JSON file", err) } @@ -41,12 +40,12 @@ func TestStore_ImportInterchangeData_NilData_FailsSilently(t *testing.T) { require.NoError(t, err) buf := bytes.NewBuffer(encoded) - err = s.ImportStandardProtectionJSON(context.Background(), buf) + err = s.ImportStandardProtectionJSON(t.Context(), buf) require.NoError(t, err) } func TestStore_ImportInterchangeData_BadFormat_PreventsDBWrites(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numValidators := 10 publicKeys, err := valtest.CreateRandomPubKeys(numValidators) require.NoError(t, err) @@ -94,7 +93,7 @@ func TestStore_ImportInterchangeData_BadFormat_PreventsDBWrites(t *testing.T) { } func TestStore_ImportInterchangeData_OK(t *testing.T) 
{ - ctx := context.Background() + ctx := t.Context() numValidators := 10 publicKeys, err := valtest.CreateRandomPubKeys(numValidators) require.NoError(t, err) diff --git a/validator/db/filesystem/migration_test.go b/validator/db/filesystem/migration_test.go index d928af8ce2..ac8f54db37 100644 --- a/validator/db/filesystem/migration_test.go +++ b/validator/db/filesystem/migration_test.go @@ -1,7 +1,6 @@ package filesystem import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/testing/require" @@ -13,7 +12,7 @@ func TestStore_RunUpMigrations(t *testing.T) { require.NoError(t, err, "NewStore should not return an error") // Just check `RunUpMigrations` does not return an error. - err = store.RunUpMigrations(context.Background()) + err = store.RunUpMigrations(t.Context()) require.NoError(t, err, "RunUpMigrations should not return an error") } @@ -23,6 +22,6 @@ func TestStore_RunDownMigrations(t *testing.T) { require.NoError(t, err, "NewStore should not return an error") // Just check `RunDownMigrations` does not return an error. 
- err = store.RunDownMigrations(context.Background()) + err = store.RunDownMigrations(t.Context()) require.NoError(t, err, "RunUpMigrations should not return an error") } diff --git a/validator/db/filesystem/proposer_protection_test.go b/validator/db/filesystem/proposer_protection_test.go index 87acbc5b81..46cbb4b716 100644 --- a/validator/db/filesystem/proposer_protection_test.go +++ b/validator/db/filesystem/proposer_protection_test.go @@ -1,7 +1,6 @@ package filesystem import ( - "context" "testing" fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams" @@ -16,7 +15,7 @@ import ( func TestStore_ProposalHistoryForPubKey(t *testing.T) { var slot uint64 = 42 - ctx := context.Background() + ctx := t.Context() for _, tt := range []struct { name string @@ -73,7 +72,7 @@ func TestStore_SaveProposalHistoryForSlot(t *testing.T) { slot43 uint64 = 43 ) - ctx := context.Background() + ctx := t.Context() for _, tt := range []struct { name string @@ -164,7 +163,7 @@ func TestStore_ProposedPublicKeys(t *testing.T) { // We check the public keys expected := pubkeys - actual, err := s.ProposedPublicKeys(context.Background()) + actual, err := s.ProposedPublicKeys(t.Context()) require.NoError(t, err, "publicKeys should not return an error") // We cannot compare the slices directly because the order is not guaranteed, @@ -183,7 +182,7 @@ func TestStore_ProposedPublicKeys(t *testing.T) { } func Test_slashableProposalCheck_PreventsLowerThanMinProposal(t *testing.T) { - ctx := context.Background() + ctx := t.Context() // We get a database path databasePath := t.TempDir() @@ -214,7 +213,7 @@ func Test_slashableProposalCheck_PreventsLowerThanMinProposal(t *testing.T) { } wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - err = s.SlashableProposalCheck(context.Background(), pubkey, wsb, [32]byte{4}, false, nil) + err = s.SlashableProposalCheck(t.Context(), pubkey, wsb, [32]byte{4}, false, nil) require.ErrorContains(t, common.FailedBlockSignLocalErr, err) // 
We expect the same block with a slot equal to the lowest @@ -229,14 +228,14 @@ func Test_slashableProposalCheck_PreventsLowerThanMinProposal(t *testing.T) { } wsb, err = blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - err = s.SlashableProposalCheck(context.Background(), pubkey, wsb, [32]byte{1}, false, nil) + err = s.SlashableProposalCheck(t.Context(), pubkey, wsb, [32]byte{1}, false, nil) require.ErrorContains(t, common.FailedBlockSignLocalErr, err) // We expect the same block with a slot equal to the lowest // signed slot to fail validation if signing roots are different. wsb, err = blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - err = s.SlashableProposalCheck(context.Background(), pubkey, wsb, [32]byte{4}, false, nil) + err = s.SlashableProposalCheck(t.Context(), pubkey, wsb, [32]byte{4}, false, nil) require.ErrorContains(t, common.FailedBlockSignLocalErr, err) // We expect the same block with a slot > than the lowest @@ -252,12 +251,12 @@ func Test_slashableProposalCheck_PreventsLowerThanMinProposal(t *testing.T) { wsb, err = blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - err = s.SlashableProposalCheck(context.Background(), pubkey, wsb, [32]byte{3}, false, nil) + err = s.SlashableProposalCheck(t.Context(), pubkey, wsb, [32]byte{3}, false, nil) require.NoError(t, err) } func Test_slashableProposalCheck(t *testing.T) { - ctx := context.Background() + ctx := t.Context() // We get a database path databasePath := t.TempDir() @@ -291,11 +290,11 @@ func Test_slashableProposalCheck(t *testing.T) { require.NoError(t, err) // We expect the same block sent out should be slasahble. - err = s.SlashableProposalCheck(context.Background(), pubkey, sBlock, dummySigningRoot, false, nil) + err = s.SlashableProposalCheck(t.Context(), pubkey, sBlock, dummySigningRoot, false, nil) require.ErrorContains(t, common.FailedBlockSignLocalErr, err) // We expect the same block sent out with a different signing root should be slashable. 
- err = s.SlashableProposalCheck(context.Background(), pubkey, sBlock, [32]byte{2}, false, nil) + err = s.SlashableProposalCheck(t.Context(), pubkey, sBlock, [32]byte{2}, false, nil) require.ErrorContains(t, common.FailedBlockSignLocalErr, err) // We save a proposal at slot 11 with a nil signing root. @@ -307,7 +306,7 @@ func Test_slashableProposalCheck(t *testing.T) { // We expect the same block sent out should return slashable error even // if we had a nil signing root stored in the database. - err = s.SlashableProposalCheck(context.Background(), pubkey, sBlock, [32]byte{2}, false, nil) + err = s.SlashableProposalCheck(t.Context(), pubkey, sBlock, [32]byte{2}, false, nil) require.ErrorContains(t, common.FailedBlockSignLocalErr, err) // A block with a different slot for which we do not have a proposing history @@ -315,7 +314,7 @@ func Test_slashableProposalCheck(t *testing.T) { blk.Block.Slot = 9 sBlock, err = blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - err = s.SlashableProposalCheck(context.Background(), pubkey, sBlock, [32]byte{3}, false, nil) + err = s.SlashableProposalCheck(t.Context(), pubkey, sBlock, [32]byte{3}, false, nil) require.ErrorContains(t, common.FailedBlockSignLocalErr, err) } @@ -336,6 +335,6 @@ func Test_slashableProposalCheck_RemoteProtection(t *testing.T) { sBlock, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - err = s.SlashableProposalCheck(context.Background(), pubkey, sBlock, [32]byte{2}, false, nil) + err = s.SlashableProposalCheck(t.Context(), pubkey, sBlock, [32]byte{2}, false, nil) require.NoError(t, err, "Expected allowed block not to throw error") } diff --git a/validator/db/filesystem/proposer_settings_test.go b/validator/db/filesystem/proposer_settings_test.go index 565695cafc..a9b370b527 100644 --- a/validator/db/filesystem/proposer_settings_test.go +++ b/validator/db/filesystem/proposer_settings_test.go @@ -1,7 +1,6 @@ package filesystem import ( - "context" "testing" fieldparams 
"github.com/OffchainLabs/prysm/v6/config/fieldparams" @@ -28,7 +27,7 @@ func getFeeRecipientFromString(t *testing.T, feeRecipientString string) [fieldpa } func TestStore_ProposerSettings(t *testing.T) { - ctx := context.Background() + ctx := t.Context() pubkeyString := "0xb3533c600c6c22aa5177f295667deacffde243980d3c04da4057ab0941dcca1dff83ae8e6534bedd2d23d83446e604d6" customFeeRecipientString := "0xd4E96eF8eee8678dBFf4d535E033Ed1a4F7605b7" @@ -110,7 +109,7 @@ func TestStore_ProposerSettings(t *testing.T) { } func TestStore_ProposerSettingsExists(t *testing.T) { - ctx := context.Background() + ctx := t.Context() for _, tt := range []struct { name string @@ -151,7 +150,7 @@ func TestStore_ProposerSettingsExists(t *testing.T) { } func TestStore_SaveProposerSettings(t *testing.T) { - ctx := context.Background() + ctx := t.Context() preExistingFeeRecipientString := "0xD871172AE08B5FC37B3AC3D445225928DE883876" incomingFeeRecipientString := "0xC771172AE08B5FC37B3AC3D445225928DE883876" diff --git a/validator/db/kv/attester_protection_test.go b/validator/db/kv/attester_protection_test.go index f966d53471..a44fa3fc6e 100644 --- a/validator/db/kv/attester_protection_test.go +++ b/validator/db/kv/attester_protection_test.go @@ -45,7 +45,7 @@ func TestPendingAttestationRecords_Len(t *testing.T) { } func TestStore_CheckSlashableAttestation_DoubleVote(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numValidators := 1 pubKeys := make([][fieldparams.BLSPubkeyLength]byte, numValidators) validatorDB := setupDB(t, pubKeys) @@ -116,7 +116,7 @@ func TestStore_CheckSlashableAttestation_DoubleVote(t *testing.T) { } func TestStore_CheckSlashableAttestation_SurroundVote_MultipleTargetsPerSource(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numValidators := 1 pubKeys := make([][fieldparams.BLSPubkeyLength]byte, numValidators) validatorDB := setupDB(t, pubKeys) @@ -141,7 +141,7 @@ func 
TestStore_CheckSlashableAttestation_SurroundVote_MultipleTargetsPerSource(t } func TestStore_CheckSlashableAttestation_SurroundVote_54kEpochs(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numValidators := 1 numEpochs := primitives.Epoch(54000) pubKeys := make([][fieldparams.BLSPubkeyLength]byte, numValidators) @@ -214,7 +214,7 @@ func TestStore_CheckSlashableAttestation_SurroundVote_54kEpochs(t *testing.T) { } func TestLowestSignedSourceEpoch_SaveRetrieve(t *testing.T) { - ctx := context.Background() + ctx := t.Context() validatorDB, err := NewKVStore(ctx, t.TempDir(), &Config{}) require.NoError(t, err, "Failed to instantiate DB") t.Cleanup(func() { @@ -273,7 +273,7 @@ func TestLowestSignedSourceEpoch_SaveRetrieve(t *testing.T) { } func TestLowestSignedTargetEpoch_SaveRetrieveReplace(t *testing.T) { - ctx := context.Background() + ctx := t.Context() validatorDB, err := NewKVStore(ctx, t.TempDir(), &Config{}) require.NoError(t, err, "Failed to instantiate DB") t.Cleanup(func() { @@ -332,7 +332,7 @@ func TestLowestSignedTargetEpoch_SaveRetrieveReplace(t *testing.T) { } func TestStore_SaveAttestationsForPubKey(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numValidators := 1 pubKeys := make([][fieldparams.BLSPubkeyLength]byte, numValidators) validatorDB := setupDB(t, pubKeys) @@ -373,7 +373,7 @@ func TestStore_SaveAttestationsForPubKey(t *testing.T) { func TestSaveAttestationForPubKey_BatchWrites_FullCapacity(t *testing.T) { hook := logTest.NewGlobal() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() numValidators := attestationBatchCapacity pubKeys := make([][fieldparams.BLSPubkeyLength]byte, numValidators) @@ -426,7 +426,7 @@ func TestSaveAttestationForPubKey_BatchWrites_FullCapacity(t *testing.T) { func TestSaveAttestationForPubKey_BatchWrites_LowCapacity_TimerReached(t *testing.T) { hook := logTest.NewGlobal() - ctx, cancel := 
context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() // Number of validators equal to half the total capacity // of batch attestation processing. This will allow us to @@ -501,7 +501,7 @@ func benchCheckSurroundVote( numEpochs primitives.Epoch, shouldSurround bool, ) { - ctx := context.Background() + ctx := b.Context() validatorDB, err := NewKVStore(ctx, filepath.Join(b.TempDir(), "benchsurroundvote"), &Config{ PubKeys: pubKeys, }) @@ -562,13 +562,13 @@ func TestStore_flushAttestationRecords_InProgress(t *testing.T) { s.batchedAttestationsFlushInProgress.Set() hook := logTest.NewGlobal() - s.flushAttestationRecords(context.Background(), nil) + s.flushAttestationRecords(t.Context(), nil) assert.LogsContain(t, hook, "Attempted to flush attestation records when already in progress") } func BenchmarkStore_SaveAttestationForPubKey(b *testing.B) { var wg sync.WaitGroup - ctx := context.Background() + ctx := b.Context() // Create pubkeys pubkeys := make([][fieldparams.BLSPubkeyLength]byte, 10) diff --git a/validator/db/kv/backup_test.go b/validator/db/kv/backup_test.go index 9a3f06f661..8266133394 100644 --- a/validator/db/kv/backup_test.go +++ b/validator/db/kv/backup_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "os" "path/filepath" "testing" @@ -14,7 +13,7 @@ import ( func TestStore_Backup(t *testing.T) { db := setupDB(t, nil) - ctx := context.Background() + ctx := t.Context() root := [32]byte{1} require.NoError(t, db.SaveGenesisValidatorsRoot(ctx, root[:])) require.NoError(t, db.Backup(ctx, "", true)) @@ -44,7 +43,7 @@ func TestStore_Backup(t *testing.T) { func TestStore_NestedBackup(t *testing.T) { keys := [][fieldparams.BLSPubkeyLength]byte{{'A'}, {'B'}} db := setupDB(t, keys) - ctx := context.Background() + ctx := t.Context() root := [32]byte{1} idxAtt := ðpb.IndexedAttestation{ AttestingIndices: nil, @@ -64,8 +63,8 @@ func TestStore_NestedBackup(t *testing.T) { Signature: make([]byte, 96), } 
require.NoError(t, db.SaveGenesisValidatorsRoot(ctx, root[:])) - require.NoError(t, db.SaveAttestationForPubKey(context.Background(), keys[0], [32]byte{'C'}, idxAtt)) - require.NoError(t, db.SaveAttestationForPubKey(context.Background(), keys[1], [32]byte{'C'}, idxAtt)) + require.NoError(t, db.SaveAttestationForPubKey(t.Context(), keys[0], [32]byte{'C'}, idxAtt)) + require.NoError(t, db.SaveAttestationForPubKey(t.Context(), keys[1], [32]byte{'C'}, idxAtt)) require.NoError(t, db.Backup(ctx, "", true)) backupsPath := filepath.Join(db.databasePath, backupsDirectoryName) @@ -91,7 +90,7 @@ func TestStore_NestedBackup(t *testing.T) { signingRoot32 := [32]byte{'C'} - hist, err := backedDB.AttestationHistoryForPubKey(context.Background(), keys[0]) + hist, err := backedDB.AttestationHistoryForPubKey(t.Context(), keys[0]) require.NoError(t, err) require.DeepEqual(t, &common.AttestationRecord{ PubKey: keys[0], @@ -100,7 +99,7 @@ func TestStore_NestedBackup(t *testing.T) { SigningRoot: signingRoot32[:], }, hist[0]) - hist, err = backedDB.AttestationHistoryForPubKey(context.Background(), keys[1]) + hist, err = backedDB.AttestationHistoryForPubKey(t.Context(), keys[1]) require.NoError(t, err) require.DeepEqual(t, &common.AttestationRecord{ PubKey: keys[1], @@ -109,12 +108,12 @@ func TestStore_NestedBackup(t *testing.T) { SigningRoot: signingRoot32[:], }, hist[0]) - ep, exists, err := backedDB.LowestSignedSourceEpoch(context.Background(), keys[0]) + ep, exists, err := backedDB.LowestSignedSourceEpoch(t.Context(), keys[0]) require.NoError(t, err) require.Equal(t, true, exists) require.Equal(t, 10, int(ep)) - ep, exists, err = backedDB.LowestSignedSourceEpoch(context.Background(), keys[1]) + ep, exists, err = backedDB.LowestSignedSourceEpoch(t.Context(), keys[1]) require.NoError(t, err) require.Equal(t, true, exists) require.Equal(t, 10, int(ep)) diff --git a/validator/db/kv/eip_blacklisted_keys_test.go b/validator/db/kv/eip_blacklisted_keys_test.go index 7cccb0f9f1..634f4f296a 
100644 --- a/validator/db/kv/eip_blacklisted_keys_test.go +++ b/validator/db/kv/eip_blacklisted_keys_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "fmt" "testing" @@ -11,7 +10,7 @@ import ( ) func TestStore_EIPBlacklistedPublicKeys(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numValidators := 100 publicKeys := make([][fieldparams.BLSPubkeyLength]byte, numValidators) for i := 0; i < numValidators; i++ { diff --git a/validator/db/kv/genesis_test.go b/validator/db/kv/genesis_test.go index bb2adf51ca..7e46e2cc61 100644 --- a/validator/db/kv/genesis_test.go +++ b/validator/db/kv/genesis_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "testing" fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams" @@ -10,7 +9,7 @@ import ( ) func TestStore_GenesisValidatorsRoot_ReadAndWrite(t *testing.T) { - ctx := context.Background() + ctx := t.Context() tests := []struct { name string diff --git a/validator/db/kv/graffiti_test.go b/validator/db/kv/graffiti_test.go index 8177c0fbef..53c0a05428 100644 --- a/validator/db/kv/graffiti_test.go +++ b/validator/db/kv/graffiti_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "testing" fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams" @@ -10,7 +9,7 @@ import ( ) func TestStore_GraffitiOrderedIndex_ReadAndWrite(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := setupDB(t, [][fieldparams.BLSPubkeyLength]byte{}) tests := []struct { name string @@ -60,7 +59,7 @@ func TestStore_GraffitiOrderedIndex_ReadAndWrite(t *testing.T) { } func TestStore_GraffitiFileHash(t *testing.T) { - ctx := context.Background() + ctx := t.Context() // Creates database db := setupDB(t, [][fieldparams.BLSPubkeyLength]byte{}) diff --git a/validator/db/kv/import_test.go b/validator/db/kv/import_test.go index 2e3502c550..12c26beb18 100644 --- a/validator/db/kv/import_test.go +++ b/validator/db/kv/import_test.go @@ -2,7 +2,6 @@ package kv import ( "bytes" - "context" "encoding/json" 
"fmt" "reflect" @@ -20,7 +19,7 @@ import ( ) func TestStore_ImportInterchangeData_BadJSON(t *testing.T) { - ctx := context.Background() + ctx := t.Context() validatorDB := setupDB(t, nil) buf := bytes.NewBuffer([]byte("helloworld")) @@ -30,7 +29,7 @@ func TestStore_ImportInterchangeData_BadJSON(t *testing.T) { func TestStore_ImportInterchangeData_NilData_FailsSilently(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() validatorDB := setupDB(t, nil) interchangeJSON := &format.EIPSlashingProtectionFormat{} @@ -44,7 +43,7 @@ func TestStore_ImportInterchangeData_NilData_FailsSilently(t *testing.T) { } func TestStore_ImportInterchangeData_BadFormat_PreventsDBWrites(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numValidators := 10 publicKeys, err := valtest.CreateRandomPubKeys(numValidators) require.NoError(t, err) @@ -104,7 +103,7 @@ func TestStore_ImportInterchangeData_BadFormat_PreventsDBWrites(t *testing.T) { } func TestStore_ImportInterchangeData_OK(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numValidators := 10 publicKeys, err := valtest.CreateRandomPubKeys(numValidators) require.NoError(t, err) @@ -775,14 +774,14 @@ func Test_filterSlashablePubKeysFromBlocks(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() historyByPubKey := make(map[[fieldparams.BLSPubkeyLength]byte]common.ProposalHistoryForPubkey) for pubKey, signedBlocks := range tt.given { proposalHistory, err := transformSignedBlocks(ctx, signedBlocks) require.NoError(t, err) historyByPubKey[pubKey] = *proposalHistory } - slashablePubKeys := filterSlashablePubKeysFromBlocks(context.Background(), historyByPubKey) + slashablePubKeys := filterSlashablePubKeysFromBlocks(t.Context(), historyByPubKey) wantedPubKeys := make(map[[fieldparams.BLSPubkeyLength]byte]bool) for _, pk := range tt.expected { wantedPubKeys[pk] = true @@ -798,7 +797,7 @@ func 
Test_filterSlashablePubKeysFromBlocks(t *testing.T) { func Test_filterSlashablePubKeysFromAttestations(t *testing.T) { // filterSlashablePubKeysFromAttestations is used only for complete slashing protection. - ctx := context.Background() + ctx := t.Context() tests := []struct { name string previousAttsByPubKey map[[fieldparams.BLSPubkeyLength]byte][]*format.SignedAttestation diff --git a/validator/db/kv/kv_test.go b/validator/db/kv/kv_test.go index c01ae32562..79b8e9010d 100644 --- a/validator/db/kv/kv_test.go +++ b/validator/db/kv/kv_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "io" "os" "testing" @@ -20,7 +19,7 @@ func TestMain(m *testing.M) { // setupDB instantiates and returns a DB instance for the validator client. func setupDB(t testing.TB, pubkeys [][fieldparams.BLSPubkeyLength]byte) *Store { - db, err := NewKVStore(context.Background(), t.TempDir(), &Config{ + db, err := NewKVStore(t.Context(), t.TempDir(), &Config{ PubKeys: pubkeys, }) require.NoError(t, err, "Failed to instantiate DB") diff --git a/validator/db/kv/migration_optimal_attester_protection_test.go b/validator/db/kv/migration_optimal_attester_protection_test.go index fabb76b744..3791f8ef50 100644 --- a/validator/db/kv/migration_optimal_attester_protection_test.go +++ b/validator/db/kv/migration_optimal_attester_protection_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "fmt" "testing" @@ -96,7 +95,7 @@ func Test_migrateOptimalAttesterProtectionUp(t *testing.T) { { name: "partial data saved for both types still completes the migration successfully", setup: func(t *testing.T, validatorDB *Store) { - ctx := context.Background() + ctx := t.Context() pubKey := [fieldparams.BLSPubkeyLength]byte{1} history := newDeprecatedAttestingHistory(0) // Attest all epochs from genesis to 50. 
@@ -184,7 +183,7 @@ func Test_migrateOptimalAttesterProtectionUp(t *testing.T) { t.Run(tt.name, func(t *testing.T) { validatorDB := setupDB(t, nil) tt.setup(t, validatorDB) - require.NoError(t, validatorDB.migrateOptimalAttesterProtectionUp(context.Background())) + require.NoError(t, validatorDB.migrateOptimalAttesterProtectionUp(t.Context())) tt.eval(t, validatorDB) }) } @@ -293,7 +292,7 @@ func Test_migrateOptimalAttesterProtectionDown(t *testing.T) { t.Run(tt.name, func(t *testing.T) { validatorDB := setupDB(t, nil) tt.setup(t, validatorDB) - require.NoError(t, validatorDB.migrateOptimalAttesterProtectionDown(context.Background())) + require.NoError(t, validatorDB.migrateOptimalAttesterProtectionDown(t.Context())) tt.eval(t, validatorDB) }) } diff --git a/validator/db/kv/migration_source_target_epochs_bucket_test.go b/validator/db/kv/migration_source_target_epochs_bucket_test.go index 119aada601..d738c71507 100644 --- a/validator/db/kv/migration_source_target_epochs_bucket_test.go +++ b/validator/db/kv/migration_source_target_epochs_bucket_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "errors" "fmt" "reflect" @@ -109,7 +108,7 @@ func TestStore_migrateSourceTargetEpochsBucketUp(t *testing.T) { t.Run(tt.name, func(t *testing.T) { validatorDB := setupDB(t, pubKeys) tt.setup(t, validatorDB) - require.NoError(t, validatorDB.migrateSourceTargetEpochsBucketUp(context.Background())) + require.NoError(t, validatorDB.migrateSourceTargetEpochsBucketUp(t.Context())) tt.eval(t, validatorDB) }) } @@ -204,7 +203,7 @@ func TestStore_migrateSourceTargetEpochsBucketDown(t *testing.T) { t.Run(tt.name, func(t *testing.T) { validatorDB := setupDB(t, nil) tt.setup(t, validatorDB) - require.NoError(t, validatorDB.migrateSourceTargetEpochsBucketDown(context.Background())) + require.NoError(t, validatorDB.migrateSourceTargetEpochsBucketDown(t.Context())) tt.eval(t, validatorDB) }) } diff --git a/validator/db/kv/proposer_protection_test.go 
b/validator/db/kv/proposer_protection_test.go index 2d588d865c..48a9bc3084 100644 --- a/validator/db/kv/proposer_protection_test.go +++ b/validator/db/kv/proposer_protection_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "testing" fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams" @@ -21,7 +20,7 @@ func TestNewProposalHistoryForSlot_ReturnsNilIfNoHistory(t *testing.T) { valPubkey := [fieldparams.BLSPubkeyLength]byte{1, 2, 3} db := setupDB(t, [][fieldparams.BLSPubkeyLength]byte{}) - _, proposalExists, signingRootExists, err := db.ProposalHistoryForSlot(context.Background(), valPubkey, 0) + _, proposalExists, signingRootExists, err := db.ProposalHistoryForSlot(t.Context(), valPubkey, 0) require.NoError(t, err) assert.Equal(t, false, proposalExists) assert.Equal(t, false, signingRootExists) @@ -32,7 +31,7 @@ func TestProposalHistoryForSlot_InitializesNewPubKeys(t *testing.T) { db := setupDB(t, pubkeys) for _, pub := range pubkeys { - _, proposalExists, signingRootExists, err := db.ProposalHistoryForSlot(context.Background(), pub, 0) + _, proposalExists, signingRootExists, err := db.ProposalHistoryForSlot(t.Context(), pub, 0) require.NoError(t, err) assert.Equal(t, false, proposalExists) assert.Equal(t, false, signingRootExists) @@ -45,10 +44,10 @@ func TestNewProposalHistoryForSlot_SigningRootNil(t *testing.T) { db := setupDB(t, [][fieldparams.BLSPubkeyLength]byte{}) - err := db.SaveProposalHistoryForSlot(context.Background(), pubkey, slot, nil) + err := db.SaveProposalHistoryForSlot(t.Context(), pubkey, slot, nil) require.NoError(t, err, "Saving proposal history failed: %v") - _, proposalExists, signingRootExists, err := db.ProposalHistoryForSlot(context.Background(), pubkey, slot) + _, proposalExists, signingRootExists, err := db.ProposalHistoryForSlot(t.Context(), pubkey, slot) require.NoError(t, err) assert.Equal(t, true, proposalExists) assert.Equal(t, false, signingRootExists) @@ -60,9 +59,9 @@ func TestSaveProposalHistoryForSlot_OK(t 
*testing.T) { slot := primitives.Slot(2) - err := db.SaveProposalHistoryForSlot(context.Background(), pubkey, slot, []byte{1}) + err := db.SaveProposalHistoryForSlot(t.Context(), pubkey, slot, []byte{1}) require.NoError(t, err, "Saving proposal history failed: %v") - signingRoot, proposalExists, signingRootExists, err := db.ProposalHistoryForSlot(context.Background(), pubkey, slot) + signingRoot, proposalExists, signingRootExists, err := db.ProposalHistoryForSlot(t.Context(), pubkey, slot) require.NoError(t, err, "Failed to get proposal history") assert.Equal(t, true, proposalExists) assert.Equal(t, true, signingRootExists) @@ -75,7 +74,7 @@ func TestNewProposalHistoryForPubKey_ReturnsEmptyIfNoHistory(t *testing.T) { valPubkey := [fieldparams.BLSPubkeyLength]byte{1, 2, 3} db := setupDB(t, [][fieldparams.BLSPubkeyLength]byte{}) - proposalHistory, err := db.ProposalHistoryForPubKey(context.Background(), valPubkey) + proposalHistory, err := db.ProposalHistoryForPubKey(t.Context(), valPubkey) require.NoError(t, err) assert.DeepEqual(t, make([]*common.Proposal, 0), proposalHistory) } @@ -87,9 +86,9 @@ func TestSaveProposalHistoryForPubKey_OK(t *testing.T) { slot := primitives.Slot(2) root := [32]byte{1} - err := db.SaveProposalHistoryForSlot(context.Background(), pubkey, slot, root[:]) + err := db.SaveProposalHistoryForSlot(t.Context(), pubkey, slot, root[:]) require.NoError(t, err, "Saving proposal history failed: %v") - proposalHistory, err := db.ProposalHistoryForPubKey(context.Background(), pubkey) + proposalHistory, err := db.ProposalHistoryForPubKey(t.Context(), pubkey) require.NoError(t, err, "Failed to get proposal history") require.NotNil(t, proposalHistory) @@ -120,9 +119,9 @@ func TestSaveProposalHistoryForSlot_Overwrites(t *testing.T) { for _, tt := range tests { db := setupDB(t, [][fieldparams.BLSPubkeyLength]byte{pubkey}) - err := db.SaveProposalHistoryForSlot(context.Background(), pubkey, 0, tt.signingRoot) + err := 
db.SaveProposalHistoryForSlot(t.Context(), pubkey, 0, tt.signingRoot) require.NoError(t, err, "Saving proposal history failed") - proposalHistory, err := db.ProposalHistoryForPubKey(context.Background(), pubkey) + proposalHistory, err := db.ProposalHistoryForPubKey(t.Context(), pubkey) require.NoError(t, err, "Failed to get proposal history") require.NotNil(t, proposalHistory) @@ -176,12 +175,12 @@ func TestPruneProposalHistoryBySlot_OK(t *testing.T) { for _, tt := range tests { db := setupDB(t, [][fieldparams.BLSPubkeyLength]byte{pubKey}) for _, slot := range tt.slots { - err := db.SaveProposalHistoryForSlot(context.Background(), pubKey, slot, signedRoot) + err := db.SaveProposalHistoryForSlot(t.Context(), pubKey, slot, signedRoot) require.NoError(t, err, "Saving proposal history failed") } signingRootsBySlot := make(map[primitives.Slot][]byte) - proposalHistory, err := db.ProposalHistoryForPubKey(context.Background(), pubKey) + proposalHistory, err := db.ProposalHistoryForPubKey(t.Context(), pubKey) require.NoError(t, err) for _, hist := range proposalHistory { @@ -202,7 +201,7 @@ func TestPruneProposalHistoryBySlot_OK(t *testing.T) { } func TestStore_ProposedPublicKeys(t *testing.T) { - ctx := context.Background() + ctx := t.Context() validatorDB, err := NewKVStore(ctx, t.TempDir(), &Config{}) require.NoError(t, err, "Failed to instantiate DB") t.Cleanup(func() { @@ -225,7 +224,7 @@ func TestStore_ProposedPublicKeys(t *testing.T) { } func TestStore_LowestSignedProposal(t *testing.T) { - ctx := context.Background() + ctx := t.Context() pubkey := [fieldparams.BLSPubkeyLength]byte{3} var dummySigningRoot [32]byte validatorDB := setupDB(t, [][fieldparams.BLSPubkeyLength]byte{pubkey}) @@ -266,7 +265,7 @@ func TestStore_LowestSignedProposal(t *testing.T) { } func TestStore_HighestSignedProposal(t *testing.T) { - ctx := context.Background() + ctx := t.Context() pubkey := [fieldparams.BLSPubkeyLength]byte{3} var dummySigningRoot [32]byte validatorDB := setupDB(t, 
[][fieldparams.BLSPubkeyLength]byte{pubkey}) @@ -307,7 +306,7 @@ func TestStore_HighestSignedProposal(t *testing.T) { } func Test_slashableProposalCheck_PreventsLowerThanMinProposal(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lowestSignedSlot := primitives.Slot(10) var pubkey [fieldparams.BLSPubkeyLength]byte @@ -334,7 +333,7 @@ func Test_slashableProposalCheck_PreventsLowerThanMinProposal(t *testing.T) { } wsb, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - err = db.SlashableProposalCheck(context.Background(), pubkey, wsb, [32]byte{4}, false, nil) + err = db.SlashableProposalCheck(t.Context(), pubkey, wsb, [32]byte{4}, false, nil) require.ErrorContains(t, "could not sign block with slot < lowest signed", err) // We expect the same block with a slot equal to the lowest @@ -377,7 +376,7 @@ func Test_slashableProposalCheck_PreventsLowerThanMinProposal(t *testing.T) { } func Test_slashableProposalCheck(t *testing.T) { - ctx := context.Background() + ctx := t.Context() var pubkey [fieldparams.BLSPubkeyLength]byte pubkeyBytes, err := hexutil.Decode("0xa057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7a") @@ -450,6 +449,6 @@ func Test_slashableProposalCheck_RemoteProtection(t *testing.T) { sBlock, err := blocks.NewSignedBeaconBlock(blk) require.NoError(t, err) - err = db.SlashableProposalCheck(context.Background(), pubkey, sBlock, [32]byte{2}, false, nil) + err = db.SlashableProposalCheck(t.Context(), pubkey, sBlock, [32]byte{2}, false, nil) require.NoError(t, err, "Expected allowed block not to throw error") } diff --git a/validator/db/kv/proposer_settings_test.go b/validator/db/kv/proposer_settings_test.go index ca7ddc1669..118ee61e20 100644 --- a/validator/db/kv/proposer_settings_test.go +++ b/validator/db/kv/proposer_settings_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "testing" fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams" @@ -16,7 +15,7 @@ import ( 
func TestStore_ProposerSettings_ReadAndWrite(t *testing.T) { t.Run("save to db in full", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := setupDB(t, [][fieldparams.BLSPubkeyLength]byte{}) key1, err := hexutil.Decode("0xa057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7a") require.NoError(t, err) @@ -50,7 +49,7 @@ func TestStore_ProposerSettings_ReadAndWrite(t *testing.T) { require.DeepEqual(t, settings, dbSettings) }) t.Run("update default settings then update at specific key", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() db := setupDB(t, [][fieldparams.BLSPubkeyLength]byte{}) key1, err := hexutil.Decode("0xa057816155ad77931185101128655c0191bd0214c201ca48ed887f6c4c6adf334070efcd75140eada5ac83a92506dd7a") require.NoError(t, err) diff --git a/validator/db/kv/prune_attester_protection_test.go b/validator/db/kv/prune_attester_protection_test.go index caeff1a388..e5b79e96bf 100644 --- a/validator/db/kv/prune_attester_protection_test.go +++ b/validator/db/kv/prune_attester_protection_test.go @@ -1,7 +1,6 @@ package kv import ( - "context" "fmt" "testing" @@ -24,7 +23,7 @@ func TestPruneAttestations_NoPruning(t *testing.T) { require.NoError(t, err) // Next, attempt to prune and realize that we still have all epochs intact - err = validatorDB.PruneAttestations(context.Background()) + err = validatorDB.PruneAttestations(t.Context()) require.NoError(t, err) startEpoch := primitives.Epoch(0) @@ -54,7 +53,7 @@ func TestPruneAttestations_OK(t *testing.T) { require.NoError(t, setupAttestationsForEveryEpoch(validatorDB, pk, numEpochs)) } - require.NoError(t, validatorDB.PruneAttestations(context.Background())) + require.NoError(t, validatorDB.PruneAttestations(t.Context())) // Next, verify that we pruned every epoch // from genesis to SLASHING_PROTECTION_PRUNING_EPOCHS - 1. 
@@ -108,7 +107,7 @@ func BenchmarkPruneAttestations(b *testing.B) { } b.StartTimer() - require.NoError(b, validatorDB.PruneAttestations(context.Background())) + require.NoError(b, validatorDB.PruneAttestations(b.Context())) } } diff --git a/validator/db/restore_test.go b/validator/db/restore_test.go index d6d9b32d06..799056f984 100644 --- a/validator/db/restore_test.go +++ b/validator/db/restore_test.go @@ -1,7 +1,6 @@ package db import ( - "context" "flag" "os" "path" @@ -18,7 +17,7 @@ import ( func TestRestore(t *testing.T) { logHook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() backupDb, err := kv.NewKVStore(ctx, t.TempDir(), &kv.Config{}) defer func() { diff --git a/validator/db/testing/setup_db_test.go b/validator/db/testing/setup_db_test.go index 4d3b250cfb..b4de0530bd 100644 --- a/validator/db/testing/setup_db_test.go +++ b/validator/db/testing/setup_db_test.go @@ -1,7 +1,6 @@ package testing import ( - "context" "fmt" "path/filepath" "testing" @@ -27,7 +26,7 @@ func TestClearDB(t *testing.T) { PubKeys: nil, }) } else { - testDB, err = kv.NewKVStore(context.Background(), t.TempDir(), &kv.Config{ + testDB, err = kv.NewKVStore(t.Context(), t.TempDir(), &kv.Config{ PubKeys: nil, }) } diff --git a/validator/helpers/metadata_test.go b/validator/helpers/metadata_test.go index 1f32d7abe1..0cb1169675 100644 --- a/validator/helpers/metadata_test.go +++ b/validator/helpers/metadata_test.go @@ -215,7 +215,7 @@ func Test_validateMetadata(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if err := ValidateMetadata(context.Background(), NewValidatorDBMock(), tt.interchangeJSON); (err != nil) != tt.wantErr { + if err := ValidateMetadata(t.Context(), NewValidatorDBMock(), tt.interchangeJSON); (err != nil) != tt.wantErr { t.Errorf("validateMetadata() error = %v, wantErr %v", err, tt.wantErr) } }) @@ -267,7 +267,7 @@ func Test_validateMetadataGenesisValidatorsRoot(t *testing.T) { } for _, tt := range tests { 
t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() validatorDB := NewValidatorDBMock() require.NoError(t, validatorDB.SaveGenesisValidatorsRoot(ctx, tt.dbGenesisValidatorsRoot)) err := ValidateMetadata(ctx, validatorDB, tt.interchangeJSON) diff --git a/validator/keymanager/derived/keymanager_test.go b/validator/keymanager/derived/keymanager_test.go index 78be767284..a6d6c88533 100644 --- a/validator/keymanager/derived/keymanager_test.go +++ b/validator/keymanager/derived/keymanager_test.go @@ -1,7 +1,6 @@ package derived import ( - "context" "fmt" "testing" @@ -24,7 +23,7 @@ const ( // We test that using a '25th word' mnemonic passphrase leads to different // public keys derived than not specifying the passphrase. func TestDerivedKeymanager_MnemonicPassphrase_DifferentResults(t *testing.T) { - ctx := context.Background() + ctx := t.Context() wallet := &mock.Wallet{ Files: make(map[string]map[string][]byte), AccountPasswords: make(map[string]string), @@ -84,7 +83,7 @@ func TestDerivedKeymanager_FetchValidatingPublicKeys(t *testing.T) { AccountPasswords: make(map[string]string), WalletPassword: password, } - ctx := context.Background() + ctx := t.Context() dr, err := NewKeymanager(ctx, &SetupConfig{ Wallet: wallet, ListenForChanges: false, @@ -123,7 +122,7 @@ func TestDerivedKeymanager_FetchValidatingPrivateKeys(t *testing.T) { AccountPasswords: make(map[string]string), WalletPassword: password, } - ctx := context.Background() + ctx := t.Context() dr, err := NewKeymanager(ctx, &SetupConfig{ Wallet: wallet, ListenForChanges: false, @@ -160,7 +159,7 @@ func TestDerivedKeymanager_Sign(t *testing.T) { AccountPasswords: make(map[string]string), WalletPassword: password, } - ctx := context.Background() + ctx := t.Context() dr, err := NewKeymanager(ctx, &SetupConfig{ Wallet: wallet, ListenForChanges: false, @@ -196,7 +195,7 @@ func TestDerivedKeymanager_Sign_NoPublicKeySpecified(t *testing.T) { PublicKey: nil, } dr := &Keymanager{} - _, 
err := dr.Sign(context.Background(), req) + _, err := dr.Sign(t.Context(), req) assert.ErrorContains(t, "nil public key", err) } @@ -205,6 +204,6 @@ func TestDerivedKeymanager_Sign_NoPublicKeyInCache(t *testing.T) { PublicKey: []byte("hello world"), } dr := &Keymanager{} - _, err := dr.Sign(context.Background(), req) + _, err := dr.Sign(t.Context(), req) assert.ErrorContains(t, "no signing key found", err) } diff --git a/validator/keymanager/local/backup_test.go b/validator/keymanager/local/backup_test.go index 1a834ccbf2..3561111cd7 100644 --- a/validator/keymanager/local/backup_test.go +++ b/validator/keymanager/local/backup_test.go @@ -1,7 +1,6 @@ package local import ( - "context" "encoding/hex" "testing" @@ -22,7 +21,7 @@ func TestLocalKeymanager_ExtractKeystores(t *testing.T) { validatingKeys[i] = secretKey secretKeysCache[bytesutil.ToBytes48(secretKey.PublicKey().Marshal())] = secretKey } - ctx := context.Background() + ctx := t.Context() password := "password" // Extracting 0 public keys should return 0 keystores. 
diff --git a/validator/keymanager/local/delete_test.go b/validator/keymanager/local/delete_test.go index 298dbf1c78..5e84e22102 100644 --- a/validator/keymanager/local/delete_test.go +++ b/validator/keymanager/local/delete_test.go @@ -1,7 +1,6 @@ package local import ( - "context" "encoding/json" "fmt" "strings" @@ -27,7 +26,7 @@ func TestLocalKeymanager_DeleteKeystores(t *testing.T) { accountsStore: &accountStore{}, } numAccounts := 5 - ctx := context.Background() + ctx := t.Context() keystores := make([]*keymanager.Keystore, numAccounts) passwords := make([]string, numAccounts) for i := 0; i < numAccounts; i++ { diff --git a/validator/keymanager/local/import_test.go b/validator/keymanager/local/import_test.go index 389f61a3df..ef35318817 100644 --- a/validator/keymanager/local/import_test.go +++ b/validator/keymanager/local/import_test.go @@ -1,7 +1,6 @@ package local import ( - "context" "fmt" "strconv" "testing" @@ -54,7 +53,7 @@ func TestLocalKeymanager_NoDuplicates(t *testing.T) { dr := &Keymanager{ wallet: wallet, } - ctx := context.Background() + ctx := t.Context() _, err := dr.CreateAccountsKeystore(ctx, privKeys, pubKeys) require.NoError(t, err) @@ -97,7 +96,7 @@ func TestLocalKeymanager_NoDuplicates(t *testing.T) { func TestLocalKeymanager_ImportKeystores(t *testing.T) { hook := logTest.NewGlobal() - ctx := context.Background() + ctx := t.Context() // Setup the keymanager. wallet := &mock.Wallet{ Files: make(map[string]map[string][]byte), diff --git a/validator/keymanager/local/keymanager_test.go b/validator/keymanager/local/keymanager_test.go index 1dda7ba78a..186ea4dd52 100644 --- a/validator/keymanager/local/keymanager_test.go +++ b/validator/keymanager/local/keymanager_test.go @@ -1,7 +1,6 @@ package local import ( - "context" "encoding/json" "strings" "testing" @@ -27,7 +26,7 @@ func TestLocalKeymanager_FetchValidatingPublicKeys(t *testing.T) { accountsStore: &accountStore{}, } // First, generate accounts and their keystore.json files. 
- ctx := context.Background() + ctx := t.Context() numAccounts := 10 wantedPubKeys := make([][fieldparams.BLSPubkeyLength]byte, 0) for i := 0; i < numAccounts; i++ { @@ -59,7 +58,7 @@ func TestLocalKeymanager_FetchValidatingPrivateKeys(t *testing.T) { accountsStore: &accountStore{}, } // First, generate accounts and their keystore.json files. - ctx := context.Background() + ctx := t.Context() numAccounts := 10 wantedPrivateKeys := make([][32]byte, numAccounts) for i := 0; i < numAccounts; i++ { @@ -94,7 +93,7 @@ func TestLocalKeymanager_Sign(t *testing.T) { } // First, generate accounts and their keystore.json files. - ctx := context.Background() + ctx := t.Context() numAccounts := 10 keystores := make([]*keymanager.Keystore, numAccounts) passwords := make([]string, numAccounts) @@ -155,7 +154,7 @@ func TestLocalKeymanager_Sign_NoPublicKeySpecified(t *testing.T) { PublicKey: nil, } dr := &Keymanager{} - _, err := dr.Sign(context.Background(), req) + _, err := dr.Sign(t.Context(), req) assert.ErrorContains(t, "nil public key", err) } @@ -165,6 +164,6 @@ func TestLocalKeymanager_Sign_NoPublicKeyInCache(t *testing.T) { } secretKeysCache = make(map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey) dr := &Keymanager{} - _, err := dr.Sign(context.Background(), req) + _, err := dr.Sign(t.Context(), req) assert.ErrorContains(t, "no signing key found in keys cache", err) } diff --git a/validator/keymanager/local/refresh_test.go b/validator/keymanager/local/refresh_test.go index c544a3c3fe..5e2a99b588 100644 --- a/validator/keymanager/local/refresh_test.go +++ b/validator/keymanager/local/refresh_test.go @@ -1,7 +1,6 @@ package local import ( - "context" "encoding/json" "testing" @@ -68,7 +67,7 @@ func TestLocalKeymanager_reloadAccountsFromKeystore(t *testing.T) { pubKeys[i] = privKey.PublicKey().Marshal() } - accountsStore, err := dr.CreateAccountsKeystore(context.Background(), privKeys, pubKeys) + accountsStore, err := dr.CreateAccountsKeystore(t.Context(), privKeys, 
pubKeys) require.NoError(t, err) require.NoError(t, dr.reloadAccountsFromKeystore(accountsStore)) diff --git a/validator/keymanager/remote-web3signer/internal/client_test.go b/validator/keymanager/remote-web3signer/internal/client_test.go index 297f4f558b..475b382084 100644 --- a/validator/keymanager/remote-web3signer/internal/client_test.go +++ b/validator/keymanager/remote-web3signer/internal/client_test.go @@ -2,7 +2,6 @@ package internal_test import ( "bytes" - "context" "encoding/json" "fmt" "io" @@ -45,7 +44,7 @@ func TestClient_Sign_HappyPath(t *testing.T) { cl := internal.ApiClient{BaseURL: u, RestClient: &http.Client{Transport: mock}} jsonRequest, err := json.Marshal(`{message: "hello"}`) assert.NoError(t, err) - resp, err := cl.Sign(context.Background(), "a2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820", jsonRequest) + resp, err := cl.Sign(t.Context(), "a2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820", jsonRequest) assert.NotNil(t, resp) assert.Nil(t, err) assert.EqualValues(t, "0xb3baa751d0a9132cfe93e4e3d5ff9075111100e3789dca219ade5a24d27e19d16b3353149da1833e9b691bb38634e8dc04469be7032132906c927d7e1a49b414730612877bc6b2810c8f202daf793d1ab0d6b5cb21d52f9e52e883859887a5d9", fmt.Sprintf("%#x", resp.Marshal())) @@ -74,7 +73,7 @@ func TestClient_Sign_HappyPath_Jsontype(t *testing.T) { cl := internal.ApiClient{BaseURL: u, RestClient: &http.Client{Transport: mock}} jsonRequest, err := json.Marshal(`{message: "hello"}`) assert.NoError(t, err) - resp, err := cl.Sign(context.Background(), "a2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820", jsonRequest) + resp, err := cl.Sign(t.Context(), "a2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820", jsonRequest) assert.NotNil(t, resp) assert.Nil(t, err) assert.EqualValues(t, 
"0xb3baa751d0a9132cfe93e4e3d5ff9075111100e3789dca219ade5a24d27e19d16b3353149da1833e9b691bb38634e8dc04469be7032132906c927d7e1a49b414730612877bc6b2810c8f202daf793d1ab0d6b5cb21d52f9e52e883859887a5d9", fmt.Sprintf("%#x", resp.Marshal())) @@ -93,7 +92,7 @@ func TestClient_Sign_500(t *testing.T) { cl := internal.ApiClient{BaseURL: u, RestClient: &http.Client{Transport: mock}} jsonRequest, err := json.Marshal(`{message: "hello"}`) assert.NoError(t, err) - resp, err := cl.Sign(context.Background(), "a2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820", jsonRequest) + resp, err := cl.Sign(t.Context(), "a2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820", jsonRequest) assert.NotNil(t, err) assert.Nil(t, resp) @@ -112,7 +111,7 @@ func TestClient_Sign_412(t *testing.T) { cl := internal.ApiClient{BaseURL: u, RestClient: &http.Client{Transport: mock}} jsonRequest, err := json.Marshal(`{message: "hello"}`) assert.NoError(t, err) - resp, err := cl.Sign(context.Background(), "a2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820", jsonRequest) + resp, err := cl.Sign(t.Context(), "a2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820", jsonRequest) assert.NotNil(t, err) assert.Nil(t, resp) @@ -131,7 +130,7 @@ func TestClient_Sign_400(t *testing.T) { cl := internal.ApiClient{BaseURL: u, RestClient: &http.Client{Transport: mock}} jsonRequest, err := json.Marshal(`{message: "hello"}`) assert.NoError(t, err) - resp, err := cl.Sign(context.Background(), "a2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820", jsonRequest) + resp, err := cl.Sign(t.Context(), "a2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820", jsonRequest) assert.NotNil(t, err) assert.Nil(t, resp) @@ -149,7 +148,7 @@ func TestClient_GetPublicKeys_HappyPath(t 
*testing.T) { u, err := url.Parse("example.com") assert.NoError(t, err) cl := internal.ApiClient{BaseURL: u, RestClient: &http.Client{Transport: mock}} - resp, err := cl.GetPublicKeys(context.Background(), "example.com/api/publickeys") + resp, err := cl.GetPublicKeys(t.Context(), "example.com/api/publickeys") assert.NotNil(t, resp) assert.Nil(t, err) // we would like them as 48byte base64 without 0x @@ -165,7 +164,7 @@ func TestClient_ReloadSignerKeys_HappyPath(t *testing.T) { u, err := url.Parse("example.com") assert.NoError(t, err) cl := internal.ApiClient{BaseURL: u, RestClient: &http.Client{Transport: mock}} - err = cl.ReloadSignerKeys(context.Background()) + err = cl.ReloadSignerKeys(t.Context()) assert.Nil(t, err) } @@ -180,7 +179,7 @@ func TestClient_GetServerStatus_HappyPath(t *testing.T) { u, err := url.Parse("example.com") assert.NoError(t, err) cl := internal.ApiClient{BaseURL: u, RestClient: &http.Client{Transport: mock}} - resp, err := cl.GetServerStatus(context.Background()) + resp, err := cl.GetServerStatus(t.Context()) assert.NotNil(t, resp) assert.Nil(t, err) } diff --git a/validator/keymanager/remote-web3signer/keymanager_test.go b/validator/keymanager/remote-web3signer/keymanager_test.go index 778d9bfa06..4eeef835ec 100644 --- a/validator/keymanager/remote-web3signer/keymanager_test.go +++ b/validator/keymanager/remote-web3signer/keymanager_test.go @@ -142,7 +142,7 @@ func TestNewKeymanager(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { logHook := logTest.NewGlobal() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() if tt.args.KeyFilePath != "" && len(tt.fileContents) != 0 { bytesBuf := new(bytes.Buffer) @@ -187,7 +187,7 @@ func TestNewKeyManager_fileMissing(t *testing.T) { } func TestNewKeyManager_ChangingFileCreated(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer 
cancel() keyFilePath := filepath.Join(t.TempDir(), "keyfile.txt") @@ -242,7 +242,7 @@ func TestNewKeyManager_ChangingFileCreated(t *testing.T) { } func TestNewKeyManager_FileAndFlagsWithDifferentKeys(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() logHook := logTest.NewGlobal() keyFilePath := filepath.Join(t.TempDir(), "keyfile.txt") @@ -288,7 +288,7 @@ func TestNewKeyManager_FileAndFlagsWithDifferentKeys(t *testing.T) { } func TestRefreshRemoteKeysFromFileChangesWithRetry(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) logHook := logTest.NewGlobal() root, err := hexutil.Decode("0x270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69") require.NoError(t, err) @@ -338,7 +338,7 @@ func TestReadKeyFile_PathMissing(t *testing.T) { } func TestRefreshRemoteKeysFromFileChangesWithRetry_maxRetryReached(t *testing.T) { - ctx := context.Background() + ctx := t.Context() root, err := hexutil.Decode("0x270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69") require.NoError(t, err) keyFilePath := filepath.Join(t.TempDir(), "keyfile.txt") @@ -359,7 +359,7 @@ func TestKeymanager_Sign(t *testing.T) { client := &MockClient{ Signature: "0xb3baa751d0a9132cfe93e4e3d5ff9075111100e3789dca219ade5a24d27e19d16b3353149da1833e9b691bb38634e8dc04469be7032132906c927d7e1a49b414730612877bc6b2810c8f202daf793d1ab0d6b5cb21d52f9e52e883859887a5d9", } - ctx := context.Background() + ctx := t.Context() root, err := hexutil.Decode("0x270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69") if err != nil { fmt.Printf("error: %v", err) @@ -502,7 +502,7 @@ func TestKeymanager_Sign(t *testing.T) { } func TestKeymanager_FetchValidatingPublicKeys_HappyPath_WithKeyList(t *testing.T) { - ctx := context.Background() + ctx := t.Context() decodedKey, err := 
hexutil.Decode("0xa2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820") require.NoError(t, err) keys := [][48]byte{ @@ -529,7 +529,7 @@ func TestKeymanager_FetchValidatingPublicKeys_HappyPath_WithKeyList(t *testing.T } func TestKeymanager_FetchValidatingPublicKeys_HappyPath_WithExternalURL(t *testing.T) { - ctx := context.Background() + ctx := t.Context() decodedKey, err := hexutil.Decode("0xa2b5aaad9c6efefe7bb9b1243a043404f3362937cfb6b31833929833173f476630ea2cfeb0d9ddf15f97ca8685948820") if err != nil { fmt.Printf("error: %v", err) @@ -564,7 +564,7 @@ func TestKeymanager_FetchValidatingPublicKeys_HappyPath_WithExternalURL(t *testi } func TestKeymanager_FetchValidatingPublicKeys_WithExternalURL_ThrowsError(t *testing.T) { - ctx := context.Background() + ctx := t.Context() srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") @@ -587,7 +587,7 @@ func TestKeymanager_FetchValidatingPublicKeys_WithExternalURL_ThrowsError(t *tes } func TestKeymanager_AddPublicKeys(t *testing.T) { - ctx := context.Background() + ctx := t.Context() root, err := hexutil.Decode("0x270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69") if err != nil { fmt.Printf("error: %v", err) @@ -614,7 +614,7 @@ func TestKeymanager_AddPublicKeys(t *testing.T) { } func TestKeymanager_AddPublicKeys_WithFile(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() dir := t.TempDir() stdOutFile, err := os.Create(filepath.Clean(path.Join(dir, "keyfile.txt"))) @@ -652,7 +652,7 @@ func TestKeymanager_AddPublicKeys_WithFile(t *testing.T) { } func TestKeymanager_DeletePublicKeys(t *testing.T) { - ctx := context.Background() + ctx := t.Context() root, err := hexutil.Decode("0x270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69") if err != nil { fmt.Printf("error: %v", err) @@ 
-686,7 +686,7 @@ func TestKeymanager_DeletePublicKeys(t *testing.T) { } func TestKeymanager_DeletePublicKeys_WithFile(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() dir := t.TempDir() stdOutFile, err := os.Create(filepath.Clean(path.Join(dir, "keyfile.txt"))) diff --git a/validator/node/node_test.go b/validator/node/node_test.go index 867b187ab3..02f2657793 100644 --- a/validator/node/node_test.go +++ b/validator/node/node_test.go @@ -1,7 +1,6 @@ package node import ( - "context" "flag" "fmt" "os" @@ -201,7 +200,7 @@ func TestClearDB(t *testing.T) { t.Run(fmt.Sprintf("isMinimalDatabase=%v", isMinimalDatabase), func(t *testing.T) { hook := logtest.NewGlobal() tmp := filepath.Join(t.TempDir(), "datadirtest") - require.NoError(t, clearDB(context.Background(), tmp, true, isMinimalDatabase)) + require.NoError(t, clearDB(t.Context(), tmp, true, isMinimalDatabase)) require.LogsContain(t, hook, "Removing database") }) } diff --git a/validator/rpc/auth_token_test.go b/validator/rpc/auth_token_test.go index 709db827ba..f8f023ceee 100644 --- a/validator/rpc/auth_token_test.go +++ b/validator/rpc/auth_token_test.go @@ -48,7 +48,7 @@ func TestServer_AuthenticateUsingExistingToken(t *testing.T) { ctxMD := map[string][]string{ "authorization": {"Bearer " + srv.authToken}, } - ctx := context.Background() + ctx := t.Context() ctx = metadata.NewIncomingContext(ctx, ctxMD) _, err = srv.AuthTokenInterceptor()(ctx, "xyz", unaryInfo, unaryHandler) require.NoError(t, err) @@ -77,7 +77,7 @@ func TestServer_RefreshAuthTokenOnFileChange(t *testing.T) { require.NoError(t, err) currentToken := srv.authToken - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() go srv.refreshAuthTokenFromFileChanges(ctx, srv.authTokenPath) diff --git a/validator/rpc/beacon_test.go b/validator/rpc/beacon_test.go index d53cdedb48..dc436f02f0 100644 --- 
a/validator/rpc/beacon_test.go +++ b/validator/rpc/beacon_test.go @@ -1,7 +1,6 @@ package rpc import ( - "context" "testing" "github.com/OffchainLabs/prysm/v6/testing/assert" @@ -11,7 +10,7 @@ import ( func TestGrpcHeaders(t *testing.T) { s := &Server{ - ctx: context.Background(), + ctx: t.Context(), grpcHeaders: []string{"first=value1", "second=value2"}, } err := s.registerBeaconClient() diff --git a/validator/rpc/handler_wallet_test.go b/validator/rpc/handler_wallet_test.go index 3d6721a296..471b17ac93 100644 --- a/validator/rpc/handler_wallet_test.go +++ b/validator/rpc/handler_wallet_test.go @@ -2,7 +2,6 @@ package rpc import ( "bytes" - "context" "encoding/json" "fmt" "net/http" @@ -31,7 +30,7 @@ import ( const strongPass = "29384283xasjasd32%%&*@*#*" func TestServer_CreateWallet_Local(t *testing.T) { - ctx := context.Background() + ctx := t.Context() localWalletDir := setupWalletDir(t) defaultWalletPath = localWalletDir opts := []accounts.Option{ @@ -424,7 +423,7 @@ func TestServer_WalletConfig_NoWalletFound(t *testing.T) { func TestServer_WalletConfig(t *testing.T) { localWalletDir := setupWalletDir(t) defaultWalletPath = localWalletDir - ctx := context.Background() + ctx := t.Context() s := &Server{ walletInitializedFeed: new(event.Feed), walletDir: defaultWalletPath, diff --git a/validator/rpc/handlers_accounts_test.go b/validator/rpc/handlers_accounts_test.go index e6e68a65d5..f19fb84922 100644 --- a/validator/rpc/handlers_accounts_test.go +++ b/validator/rpc/handlers_accounts_test.go @@ -3,7 +3,6 @@ package rpc import ( "archive/zip" "bytes" - "context" "encoding/base64" "encoding/json" "fmt" @@ -37,7 +36,7 @@ var ( ) func TestServer_ListAccounts(t *testing.T) { - ctx := context.Background() + ctx := t.Context() localWalletDir := setupWalletDir(t) defaultWalletPath = localWalletDir // We attempt to create the wallet. 
@@ -142,7 +141,7 @@ func TestServer_ListAccounts(t *testing.T) { } func TestServer_BackupAccounts(t *testing.T) { - ctx := context.Background() + ctx := t.Context() localWalletDir := setupWalletDir(t) defaultWalletPath = localWalletDir // We attempt to create the wallet. @@ -235,7 +234,7 @@ func TestServer_BackupAccounts(t *testing.T) { func TestServer_VoluntaryExit(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() mockValidatorClient := validatormock.NewMockValidatorClient(ctrl) mockNodeClient := validatormock.NewMockNodeClient(ctrl) diff --git a/validator/rpc/handlers_auth_test.go b/validator/rpc/handlers_auth_test.go index e43ac7123d..b4c7d033cb 100644 --- a/validator/rpc/handlers_auth_test.go +++ b/validator/rpc/handlers_auth_test.go @@ -1,7 +1,6 @@ package rpc import ( - "context" "encoding/json" "net/http" "net/http/httptest" @@ -33,7 +32,7 @@ func TestInitialize(t *testing.T) { } acc, err := accounts.NewCLIManager(opts...) 
require.NoError(t, err) - _, err = acc.WalletCreate(context.Background()) + _, err = acc.WalletCreate(t.Context()) require.NoError(t, err) server := &Server{walletDir: localWalletDir, authTokenPath: authTokenPath} diff --git a/validator/rpc/handlers_health_test.go b/validator/rpc/handlers_health_test.go index 37cb2d91e5..47e499b337 100644 --- a/validator/rpc/handlers_health_test.go +++ b/validator/rpc/handlers_health_test.go @@ -72,7 +72,7 @@ func TestStreamBeaconLogs(t *testing.T) { // Setting up the mock in the server struct s := Server{ - ctx: context.Background(), + ctx: t.Context(), healthClient: mockClient, } @@ -107,7 +107,7 @@ func TestStreamBeaconLogs(t *testing.T) { } func TestStreamValidatorLogs(t *testing.T) { - ctx := context.Background() + ctx := t.Context() mockLogs := [][]byte{ []byte("[2023-10-31 10:00:00] INFO: Starting server..."), []byte("[2023-10-31 10:01:23] DEBUG: Database connection established."), @@ -166,7 +166,7 @@ func TestStreamValidatorLogs(t *testing.T) { func TestServer_GetVersion(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() mockNodeClient := validatormock.NewMockNodeClient(ctrl) s := Server{ ctx: ctx, diff --git a/validator/rpc/handlers_keymanager_test.go b/validator/rpc/handlers_keymanager_test.go index 13119efafc..31238cc2f7 100644 --- a/validator/rpc/handlers_keymanager_test.go +++ b/validator/rpc/handlers_keymanager_test.go @@ -2,7 +2,6 @@ package rpc import ( "bytes" - "context" "encoding/json" "flag" "fmt" @@ -49,7 +48,7 @@ import ( ) func TestServer_ListKeystores(t *testing.T) { - ctx := context.Background() + ctx := t.Context() t.Run("wallet not ready", func(t *testing.T) { m := &testutil.FakeValidator{} vs, err := client.NewValidatorService(ctx, &client.Config{ @@ -132,7 +131,7 @@ func TestServer_ListKeystores(t *testing.T) { } func TestServer_ImportKeystores(t *testing.T) { - ctx := context.Background() + ctx := t.Context() localWalletDir := 
setupWalletDir(t) defaultWalletPath = localWalletDir opts := []accounts.Option{ @@ -353,7 +352,7 @@ func TestServer_ImportKeystores(t *testing.T) { } func TestServer_ImportKeystores_WrongKeymanagerKind(t *testing.T) { - ctx := context.Background() + ctx := t.Context() app := cli.App{} set := flag.NewFlagSet("test", 0) newDir := filepath.Join(t.TempDir(), "new") @@ -404,7 +403,7 @@ func TestServer_ImportKeystores_WrongKeymanagerKind(t *testing.T) { func TestServer_DeleteKeystores(t *testing.T) { for _, isSlashingProtectionMinimal := range []bool{false, true} { - ctx := context.Background() + ctx := t.Context() srv := setupServerWithWallet(t) // We recover 3 accounts from a test mnemonic. @@ -577,7 +576,7 @@ func TestServer_DeleteKeystores(t *testing.T) { func TestServer_DeleteKeystores_FailedSlashingProtectionExport(t *testing.T) { for _, isSlashingProtectionMinimal := range []bool{false, true} { t.Run(fmt.Sprintf("minimalSlashingProtection:%v", isSlashingProtectionMinimal), func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() srv := setupServerWithWallet(t) // We recover 3 accounts from a test mnemonic. 
@@ -636,7 +635,7 @@ func TestServer_DeleteKeystores_FailedSlashingProtectionExport(t *testing.T) { } func TestServer_DeleteKeystores_WrongKeymanagerKind(t *testing.T) { - ctx := context.Background() + ctx := t.Context() app := cli.App{} set := flag.NewFlagSet("test", 0) newDir := filepath.Join(t.TempDir(), "new") @@ -680,7 +679,7 @@ func TestServer_DeleteKeystores_WrongKeymanagerKind(t *testing.T) { } func setupServerWithWallet(t testing.TB) *Server { - ctx := context.Background() + ctx := t.Context() localWalletDir := setupWalletDir(t) defaultWalletPath = localWalletDir opts := []accounts.Option{ @@ -714,7 +713,7 @@ func TestServer_SetVoluntaryExit(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - ctx := context.Background() + ctx := t.Context() defaultWalletPath = setupWalletDir(t) opts := []accounts.Option{ accounts.WithWalletDir(defaultWalletPath), @@ -898,7 +897,7 @@ func TestServer_SetVoluntaryExit(t *testing.T) { } func TestServer_GetGasLimit(t *testing.T) { - ctx := context.Background() + ctx := t.Context() byteval, err := hexutil.Decode("0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591493") byteval2, err2 := hexutil.Decode("0x1234567878903438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591493") require.NoError(t, err) @@ -977,7 +976,7 @@ func TestServer_SetGasLimit(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() beaconClient := validatormock.NewMockValidatorClient(ctrl) - ctx := context.Background() + ctx := t.Context() pubkey1, err := hexutil.Decode("0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591493") pubkey2, err2 := hexutil.Decode("0xbedefeaa94e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2cdddddddddddddddddddddddd") @@ -1185,7 +1184,7 @@ func TestServer_SetGasLimit_InvalidPubKey(t *testing.T) { } func TestServer_DeleteGasLimit(t *testing.T) { - ctx := context.Background() + 
ctx := t.Context() pubkey1, err := hexutil.Decode("0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591493") pubkey2, err2 := hexutil.Decode("0xbedefeaa94e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2cdddddddddddddddddddddddd") require.NoError(t, err) @@ -1333,7 +1332,7 @@ func TestServer_DeleteGasLimit(t *testing.T) { } func TestServer_ListRemoteKeys(t *testing.T) { - ctx := context.Background() + ctx := t.Context() app := cli.App{} set := flag.NewFlagSet("test", 0) newDir := filepath.Join(t.TempDir(), "new") @@ -1389,7 +1388,7 @@ func TestServer_ListRemoteKeys(t *testing.T) { } func TestServer_ImportRemoteKeys(t *testing.T) { - ctx := context.Background() + ctx := t.Context() app := cli.App{} set := flag.NewFlagSet("test", 0) newDir := filepath.Join(t.TempDir(), "new") @@ -1450,7 +1449,7 @@ func TestServer_ImportRemoteKeys(t *testing.T) { } func TestServer_DeleteRemoteKeys(t *testing.T) { - ctx := context.Background() + ctx := t.Context() app := cli.App{} set := flag.NewFlagSet("test", 0) newDir := filepath.Join(t.TempDir(), "new") @@ -1511,7 +1510,7 @@ func TestServer_DeleteRemoteKeys(t *testing.T) { } func TestServer_ListFeeRecipientByPubkey(t *testing.T) { - ctx := context.Background() + ctx := t.Context() pubkey := "0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591493" byteval, err := hexutil.Decode(pubkey) require.NoError(t, err) @@ -1589,7 +1588,7 @@ func TestServer_ListFeeRecipientByPubkey(t *testing.T) { } func TestServer_ListFeeRecipientByPubKey_NoFeeRecipientSet(t *testing.T) { - ctx := context.Background() + ctx := t.Context() vs, err := client.NewValidatorService(ctx, &client.Config{ Validator: &testutil.FakeValidator{}, @@ -1638,7 +1637,7 @@ func TestServer_FeeRecipientByPubkey(t *testing.T) { defer ctrl.Finish() beaconClient := validatormock.NewMockValidatorClient(ctrl) - ctx := context.Background() + ctx := t.Context() pubkey := 
"0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591493" byteval, err := hexutil.Decode(pubkey) require.NoError(t, err) @@ -1848,7 +1847,7 @@ func TestServer_SetFeeRecipientByPubkey_InvalidFeeRecipient(t *testing.T) { } func TestServer_DeleteFeeRecipientByPubkey(t *testing.T) { - ctx := context.Background() + ctx := t.Context() pubkey := "0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591493" byteval, err := hexutil.Decode(pubkey) require.NoError(t, err) @@ -1940,7 +1939,7 @@ func TestServer_DeleteFeeRecipientByPubkey_InvalidPubKey(t *testing.T) { func TestServer_Graffiti(t *testing.T) { graffiti := "graffiti" m := &testutil.FakeValidator{} - vs, err := client.NewValidatorService(context.Background(), &client.Config{ + vs, err := client.NewValidatorService(t.Context(), &client.Config{ Validator: m, }) require.NoError(t, err) diff --git a/validator/rpc/handlers_slashing_test.go b/validator/rpc/handlers_slashing_test.go index 9747c2dde5..9ace45a7ca 100644 --- a/validator/rpc/handlers_slashing_test.go +++ b/validator/rpc/handlers_slashing_test.go @@ -2,7 +2,6 @@ package rpc import ( "bytes" - "context" "encoding/json" "fmt" "net/http" @@ -23,7 +22,7 @@ import ( func TestImportSlashingProtection_Preconditions(t *testing.T) { for _, isSlashingProtectionMinimal := range []bool{false, true} { t.Run(fmt.Sprintf("slashing protection minimal: %v", isSlashingProtectionMinimal), func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() localWalletDir := setupWalletDir(t) defaultWalletPath = localWalletDir @@ -119,7 +118,7 @@ func TestImportSlashingProtection_Preconditions(t *testing.T) { func TestExportSlashingProtection_Preconditions(t *testing.T) { for _, isSlashingProtectionMinimal := range []bool{false, true} { t.Run(fmt.Sprintf("slashing protection minimal: %v", isSlashingProtectionMinimal), func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() 
localWalletDir := setupWalletDir(t) defaultWalletPath = localWalletDir @@ -146,7 +145,7 @@ func TestExportSlashingProtection_Preconditions(t *testing.T) { PubKeys: pubKeys, }) } else { - validatorDB, err = kv.NewKVStore(context.Background(), t.TempDir(), &kv.Config{ + validatorDB, err = kv.NewKVStore(t.Context(), t.TempDir(), &kv.Config{ PubKeys: pubKeys, }) } @@ -171,7 +170,7 @@ func TestExportSlashingProtection_Preconditions(t *testing.T) { func TestImportExportSlashingProtection_RoundTrip(t *testing.T) { // Round trip is only suitable with complete slashing protection, since // minimal slashing protections only keep latest attestation and proposal. - ctx := context.Background() + ctx := t.Context() localWalletDir := setupWalletDir(t) defaultWalletPath = localWalletDir diff --git a/validator/rpc/intercepter_test.go b/validator/rpc/intercepter_test.go index bfbf5e3759..5fca66058c 100644 --- a/validator/rpc/intercepter_test.go +++ b/validator/rpc/intercepter_test.go @@ -30,7 +30,7 @@ func TestServer_AuthTokenInterceptor_Verify(t *testing.T) { ctxMD := map[string][]string{ "authorization": {"Bearer " + token}, } - ctx := context.Background() + ctx := t.Context() ctx = metadata.NewIncomingContext(ctx, ctxMD) _, err := interceptor(ctx, "xyz", unaryInfo, unaryHandler) require.NoError(t, err) @@ -52,7 +52,7 @@ func TestServer_AuthTokenInterceptor_BadToken(t *testing.T) { ctxMD := map[string][]string{ "authorization": {"Bearer bad-token"}, } - ctx := context.Background() + ctx := t.Context() ctx = metadata.NewIncomingContext(ctx, ctxMD) _, err := interceptor(ctx, "xyz", unaryInfo, unaryHandler) require.ErrorContains(t, "token value is invalid", err) diff --git a/validator/slashing-protection-history/export_test.go b/validator/slashing-protection-history/export_test.go index 8132522884..81219d56c8 100644 --- a/validator/slashing-protection-history/export_test.go +++ b/validator/slashing-protection-history/export_test.go @@ -1,7 +1,6 @@ package history import ( - "context" 
"fmt" "testing" @@ -17,7 +16,7 @@ import ( func TestExportStandardProtectionJSON_EmptyGenesisRoot(t *testing.T) { for _, isSlashingProtectionMinimal := range [...]bool{false, true} { t.Run(fmt.Sprintf("isSlashingProtectionMinimal=%v", isSlashingProtectionMinimal), func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() pubKeys := [][fieldparams.BLSPubkeyLength]byte{ {1}, } @@ -39,7 +38,7 @@ func Test_getSignedAttestationsByPubKey(t *testing.T) { pubKeys := [][fieldparams.BLSPubkeyLength]byte{ {1}, } - ctx := context.Background() + ctx := t.Context() validatorDB := dbtest.SetupDB(t, t.TempDir(), pubKeys, isSlashingProtectionMinimal) // No attestation history stored should return empty. @@ -95,7 +94,7 @@ func Test_getSignedAttestationsByPubKey(t *testing.T) { pubKeys := [][fieldparams.BLSPubkeyLength]byte{ {1}, } - ctx := context.Background() + ctx := t.Context() isSlashingProtectionMinimal := false validatorDB := dbtest.SetupDB(t, t.TempDir(), pubKeys, isSlashingProtectionMinimal) @@ -142,7 +141,7 @@ func Test_getSignedAttestationsByPubKey(t *testing.T) { pubKeys := [][fieldparams.BLSPubkeyLength]byte{ {1}, } - ctx := context.Background() + ctx := t.Context() isSlashingProtectionMinimal := false validatorDB := dbtest.SetupDB(t, t.TempDir(), pubKeys, isSlashingProtectionMinimal) @@ -195,7 +194,7 @@ func Test_getSignedBlocksByPubKey(t *testing.T) { pubKeys := [][fieldparams.BLSPubkeyLength]byte{ {1}, } - ctx := context.Background() + ctx := t.Context() validatorDB := dbtest.SetupDB(t, t.TempDir(), pubKeys, isSlashingProtectionMinimal) // No highest and/or lowest signed blocks will return empty. 
diff --git a/validator/slashing-protection-history/round_trip_test.go b/validator/slashing-protection-history/round_trip_test.go index 66f962549d..c096f00b16 100644 --- a/validator/slashing-protection-history/round_trip_test.go +++ b/validator/slashing-protection-history/round_trip_test.go @@ -2,7 +2,6 @@ package history_test import ( "bytes" - "context" "encoding/json" "fmt" "testing" @@ -23,7 +22,7 @@ import ( // it does not keep track of attestation and proposal histories, and thus cannot // export the same data it imported. func TestImportExport_RoundTrip(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numValidators := 10 publicKeys, err := slashtest.CreateRandomPubKeys(numValidators) require.NoError(t, err) @@ -92,7 +91,7 @@ func TestImportExport_RoundTrip(t *testing.T) { // it does not keep track of attestation and proposal histories, and thus cannot // export the same data it imported. func TestImportExport_RoundTrip_SkippedAttestationEpochs(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numValidators := 1 pubKeys, err := slashtest.CreateRandomPubKeys(numValidators) require.NoError(t, err) @@ -158,7 +157,7 @@ func TestImportExport_RoundTrip_SkippedAttestationEpochs(t *testing.T) { // it does not keep track of attestation and proposal histories, and thus cannot // export the same data it imported. func TestImportExport_FilterKeys(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numValidators := 10 publicKeys, err := slashtest.CreateRandomPubKeys(numValidators) require.NoError(t, err) @@ -203,7 +202,7 @@ func TestImportExport_FilterKeys(t *testing.T) { // it does not keep track of attestation and proposal histories, and thus cannot // export the same data it imported. 
func TestImportInterchangeData_OK(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numValidators := 10 publicKeys, err := slashtest.CreateRandomPubKeys(numValidators) require.NoError(t, err) @@ -273,7 +272,7 @@ func TestImportInterchangeData_OK(t *testing.T) { // it does not keep track of attestation and proposal histories, and thus cannot // export the same data it imported. func TestImportInterchangeData_OK_SavesBlacklistedPublicKeys(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numValidators := 3 publicKeys, err := slashtest.CreateRandomPubKeys(numValidators) require.NoError(t, err) @@ -368,7 +367,7 @@ func TestImportInterchangeData_OK_SavesBlacklistedPublicKeys(t *testing.T) { // it does not keep track of attestation and proposal histories, and thus cannot // export the same data it imported. func TestStore_ImportInterchangeData_BadFormat_PreventsDBWrites(t *testing.T) { - ctx := context.Background() + ctx := t.Context() numValidators := 5 publicKeys, err := slashtest.CreateRandomPubKeys(numValidators) require.NoError(t, err)