Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-09 15:37:56 -05:00.
* Update Earliest available slot when pruning * bazel run //:gazelle -- fix * custodyUpdater interface to avoid import cycle * bazel run //:gazelle -- fix * simplify test * separation of concerns * debug log for updating eas * UpdateEarliestAvailableSlot function in CustodyManager * fix test * UpdateEarliestAvailableSlot function for FakeP2P * lint * UpdateEarliestAvailableSlot instead of UpdateCustodyInfo + check for Fulu * fix test and lint * bugfix: enforce minimum retention period in pruner * remove MinEpochsForBlockRequests function and use from config * remove modifying earliest_available_slot after data column pruning * correct earliestAvailableSlot validation: allow backfill decrease but prevent increase within MIN_EPOCHS_FOR_BLOCK_REQUESTS * lint * bazel run //:gazelle -- fix * lint and remove unwanted debug logs * Return a wrapped error, and let the caller decide what to do * fix tests because updateEarliestSlot returns error now * avoid re-doing computation in the test function * lint and correct changelog * custody updater should be a mandatory part of the pruner service * ensure never increase eas if we are in the block requests window * slot level granularity edge case * update the value stored in the DB * log tidy up * use errNoCustodyInfo * allow earliestAvailableSlot edit when custodyGroupCount doesnt change * undo the minimal config change * add context to CustodyGroupCount after merging from develop * cosmetic change * shift responsibility from caller to callee, protection for updateEarliestSlot. 
UpdateEarliestAvailableSlot returns cgc * allow increase in earliestAvailableSlot only when custodyGroupCount also increases * remove CustodyGroupCount as it is no longer needed as UpdateEarliestAvailableSlot returns cgc now * proper place for log and name refactor * test for Nil custody info * allow decreasing earliest slot in DB (just like in memory) * invert if statement to make more readable * UpdateEarliestAvailableSlot for DB (equivalent of p2p's UpdateEarliestAvailableSlot) & undo changes made to UpdateCustodyInfo * in UpdateEarliestAvailableSlot, no need to return unused values * no need to log stored group count * log.WithField instead of log.WithFields
134 lines
4.4 KiB
Go
134 lines
4.4 KiB
Go
package backfill
|
|
|
|
import (
	"context"
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/proto/dbval"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
)
|
|
|
|
type mockMinimumSlotter struct {
|
|
min primitives.Slot
|
|
}
|
|
|
|
func (m mockMinimumSlotter) minimumSlot(_ primitives.Slot) primitives.Slot {
|
|
return m.min
|
|
}
|
|
|
|
type mockInitalizerWaiter struct {
|
|
}
|
|
|
|
func (*mockInitalizerWaiter) WaitForInitializer(_ context.Context) (*verification.Initializer, error) {
|
|
return &verification.Initializer{}, nil
|
|
}
|
|
|
|
// TestServiceInit drives the backfill service end to end with mocked
// dependencies: it starts the service, feeds completed batches back through a
// mock pool, and asserts that the service sequences the expected number of
// batches before emitting end-of-sequence markers.
func TestServiceInit(t *testing.T) {
	// Generous timeout; testReadN fails the test if it is ever hit.
	ctx, cancel := context.WithTimeout(t.Context(), time.Second*300)
	defer cancel()
	db := &mockBackfillDB{}
	su, err := NewUpdater(ctx, db)
	require.NoError(t, err)
	nWorkers := 5
	var batchSize uint64 = 100
	nBatches := nWorkers * 2
	var high uint64 = 11235
	originRoot := [32]byte{}
	origin, err := util.NewBeaconState()
	require.NoError(t, err)
	db.states = map[[32]byte]state.BeaconState{originRoot: origin}
	// Seed the updater with a backfill status so the service has a slot range
	// to work downward from.
	su.bs = &dbval.BackfillStatus{
		LowSlot:    high,
		OriginRoot: originRoot[:],
	}
	remaining := nBatches
	cw := startup.NewClockSynchronizer()
	require.NoError(t, cw.SetClock(startup.NewClock(time.Now(), [32]byte{})))
	// Channels buffered to the worker count so the service can enqueue a full
	// complement of batches without blocking.
	pool := &mockPool{todoChan: make(chan batch, nWorkers), finishedChan: make(chan batch, nWorkers)}
	p2pt := p2ptest.NewTestP2P(t)
	bfs := filesystem.NewEphemeralBlobStorage(t)
	srv, err := NewService(ctx, su, bfs, cw, p2pt, &mockAssigner{},
		WithBatchSize(batchSize), WithWorkerCount(nWorkers), WithEnableBackfill(true), WithVerifierWaiter(&mockInitalizerWaiter{}))
	require.NoError(t, err)
	// Pin the minimum slot so exactly nBatches batches of batchSize cover the
	// range below `high`.
	srv.ms = mockMinimumSlotter{min: primitives.Slot(high - batchSize*uint64(nBatches))}.minimumSlot
	srv.pool = pool
	// Stub the importer so every finished batch "succeeds" without a real DB write.
	srv.batchImporter = func(context.Context, primitives.Slot, batch, *Store) (*dbval.BackfillStatus, error) {
		return &dbval.BackfillStatus{}, nil
	}
	go srv.Start()
	// The service should immediately hand one batch to each worker.
	todo := make([]batch, 0)
	todo = testReadN(ctx, t, pool.todoChan, nWorkers, todo)
	require.Equal(t, nWorkers, len(todo))
	// Complete batches one at a time; each completion should cause the service
	// to sequence exactly one more batch.
	for i := 0; i < remaining; i++ {
		b := todo[i]
		if b.state == batchSequenced {
			b.state = batchImportable
		}
		pool.finishedChan <- b
		todo = testReadN(ctx, t, pool.todoChan, 1, todo)
	}
	// After all batches complete we expect the initial worker batches plus one
	// follow-up per completion, with the trailing nWorkers batches marking the
	// end of the sequence.
	require.Equal(t, remaining+nWorkers, len(todo))
	for i := remaining; i < remaining+nWorkers; i++ {
		require.Equal(t, batchEndSequence, todo[i].state)
	}
}
|
|
|
|
func TestMinimumBackfillSlot(t *testing.T) {
|
|
oe := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
|
|
|
|
currSlot := (oe + 100).Mul(uint64(params.BeaconConfig().SlotsPerEpoch))
|
|
minSlot := minimumBackfillSlot(primitives.Slot(currSlot))
|
|
require.Equal(t, 100*params.BeaconConfig().SlotsPerEpoch, minSlot)
|
|
|
|
currSlot = oe.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))
|
|
minSlot = minimumBackfillSlot(primitives.Slot(currSlot))
|
|
require.Equal(t, primitives.Slot(1), minSlot)
|
|
}
|
|
|
|
func testReadN(ctx context.Context, t *testing.T, c chan batch, n int, into []batch) []batch {
|
|
for i := 0; i < n; i++ {
|
|
select {
|
|
case b := <-c:
|
|
into = append(into, b)
|
|
case <-ctx.Done():
|
|
// this means we hit the timeout, so something went wrong.
|
|
require.Equal(t, true, false)
|
|
}
|
|
}
|
|
return into
|
|
}
|
|
|
|
func TestBackfillMinSlotDefault(t *testing.T) {
|
|
oe := primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
|
|
current := primitives.Slot((oe + 100).Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
|
|
s := &Service{}
|
|
specMin := minimumBackfillSlot(current)
|
|
|
|
t.Run("equal to specMin", func(t *testing.T) {
|
|
opt := WithMinimumSlot(specMin)
|
|
require.NoError(t, opt(s))
|
|
require.Equal(t, specMin, s.ms(current))
|
|
})
|
|
t.Run("older than specMin", func(t *testing.T) {
|
|
opt := WithMinimumSlot(specMin - 1)
|
|
require.NoError(t, opt(s))
|
|
// if WithMinimumSlot is older than the spec minimum, we should use it.
|
|
require.Equal(t, specMin-1, s.ms(current))
|
|
})
|
|
t.Run("newer than specMin", func(t *testing.T) {
|
|
opt := WithMinimumSlot(specMin + 1)
|
|
require.NoError(t, opt(s))
|
|
// if WithMinimumSlot is newer than the spec minimum, we should use the spec minimum
|
|
require.Equal(t, specMin, s.ms(current))
|
|
})
|
|
}
|