Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-08 21:08:10 -05:00
* overhaul fork schedule management for bpos
* Unify log
* Radek's comments
* Use arg config to determine previous epoch, with regression test
* Remove unnecessary NewClock. @potuz feedback
* Continuation of previous commit: Remove unnecessary NewClock. @potuz feedback
* Remove VerifyBlockHeaderSignatureUsingCurrentFork
* cosmetic changes
* Remove unnecessary copy. entryWithForkDigest passes by value, not by pointer, so it should be fine
* Reuse ErrInvalidTopic from p2p package
* Unskip TestServer_GetBeaconConfig
* Resolve TODO about forkwatcher in local mode
* remove Copy()

---------

Co-authored-by: Kasey <kasey@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: rkapka <radoslaw.kapka@gmail.com>
Co-authored-by: Preston Van Loon <preston@pvl.dev>
269 lines · 8.2 KiB · Go

package params_test

import (
	"bytes"
	"fmt"
	"math"
	"sync"
	"testing"

	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/genesis"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

// Test cases can be executed in an arbitrary order. TestConfig_OverrideBeaconConfigTestTeardown
// checks that no state mutation leaks from the previous test, so we need a sentinel flag to make
// sure that the previous test case has already completed before the check runs.
var testOverrideBeaconConfigExecuted bool

func TestConfig_OverrideBeaconConfig(t *testing.T) {
	// Ensure that param modifications are safe.
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig()
	cfg.SlotsPerEpoch = 5
	params.OverrideBeaconConfig(cfg)
	if c := params.BeaconConfig(); c.SlotsPerEpoch != 5 {
		t.Errorf("SlotsPerEpoch in BeaconConfig incorrect. Wanted %d, got %d", 5, c.SlotsPerEpoch)
	}
	testOverrideBeaconConfigExecuted = true
}

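// TestConfig_OverrideBeaconConfigTestTeardown verifies that the cleanup registered by
// SetupTestConfigCleanup restored the global config after TestConfig_OverrideBeaconConfig
// mutated SlotsPerEpoch. It is only meaningful after that test has run, hence the sentinel flag.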
func TestConfig_OverrideBeaconConfigTestTeardown(t *testing.T) {
	if !testOverrideBeaconConfigExecuted {
		t.Skip("State leak can occur only if the state-mutating test has already completed")
	}
	cfg := params.BeaconConfig()
	if cfg.SlotsPerEpoch == 5 {
		t.Fatal("Parameter update has been leaked out of previous test")
	}
}

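// TestConfig_DataRace concurrently reads and overrides the global config so that the race
// detector (go test -race) can flag any unsynchronized access to the package-level config state.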
func TestConfig_DataRace(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	wg := new(sync.WaitGroup)
	for i := 0; i < 10; i++ {
		wg.Add(2)
		go func() {
			defer wg.Done()
			cfg := params.BeaconConfig()
			params.OverrideBeaconConfig(cfg)
		}()
		go func() {
			defer wg.Done()
			_ = params.BeaconConfig().MaxDeposits
		}()
	}
	wg.Wait()
}

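// TestConfig_WithinDAPeriod checks the data availability window: a block at epoch B is within
// the DA period for current epoch C as long as C - B does not exceed
// MinEpochsForBlobsSidecarsRequest, with the boundary itself counting as within.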
func TestConfig_WithinDAPeriod(t *testing.T) {
	cases := []struct {
		name    string
		block   primitives.Epoch
		current primitives.Epoch
		within  bool
	}{
		{
			name:    "before",
			block:   0,
			current: params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1,
			within:  false,
		},
		{
			name:    "same",
			block:   0,
			current: 0,
			within:  true,
		},
		{
			name:    "boundary",
			block:   0,
			current: params.BeaconConfig().MinEpochsForBlobsSidecarsRequest,
			within:  true,
		},
		{
			name:    "one less",
			block:   params.BeaconConfig().MinEpochsForBlobsSidecarsRequest - 1,
			current: params.BeaconConfig().MinEpochsForBlobsSidecarsRequest,
			within:  true,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			require.Equal(t, c.within, params.WithinDAPeriod(c.block, c.current))
		})
	}
}

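// TestConfigGenesisValidatorRoot checks that the GenesisValidatorsRoot hardcoded in the
// mainnet params matches the value computed from the embedded mainnet genesis state.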
func TestConfigGenesisValidatorRoot(t *testing.T) {
	params.SetActiveTestCleanup(t, params.MainnetBeaconConfig)
	genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
	g, err := genesis.State()
	require.NoError(t, err, "failed to load genesis state")
	if !bytes.Equal(g.GenesisValidatorsRoot(), params.BeaconConfig().GenesisValidatorsRoot[:]) {
		t.Fatal("mainnet params genesis validators root does not match the mainnet genesis state value")
	}
	require.Equal(t, params.BeaconConfig().GenesisValidatorsRoot, genesis.ValidatorsRoot())
}

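// TestMaxBlobsPerBlock covers the blob-limit lookup: the limit comes from the BlobSchedule
// entry with the highest epoch at or below the slot's epoch, regardless of entry order, and
// falls back to fork-based logic (here DeprecatedMaxBlobsPerBlock, since the queried slots
// are pre-Electra) when no entry matches.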
func TestMaxBlobsPerBlock(t *testing.T) {
	t.Run("Before all forks and no BlobSchedule", func(t *testing.T) {
		cfg := params.MainnetConfig()
		cfg.BlobSchedule = nil
		cfg.ElectraForkEpoch = 100
		cfg.FuluForkEpoch = 200
		require.Equal(t, cfg.MaxBlobsPerBlock(0), cfg.DeprecatedMaxBlobsPerBlock)
	})

	t.Run("Uses latest matching BlobSchedule entry", func(t *testing.T) {
		cfg := params.MainnetConfig()
		cfg.BlobSchedule = []params.BlobScheduleEntry{
			{Epoch: 5, MaxBlobsPerBlock: 7},
			{Epoch: 10, MaxBlobsPerBlock: 11},
		}
		slot := 11 * cfg.SlotsPerEpoch
		require.Equal(t, cfg.MaxBlobsPerBlock(slot), 11)
	})

	t.Run("Uses earlier matching BlobSchedule entry", func(t *testing.T) {
		cfg := params.MainnetConfig()
		cfg.BlobSchedule = []params.BlobScheduleEntry{
			{Epoch: 5, MaxBlobsPerBlock: 7},
			{Epoch: 10, MaxBlobsPerBlock: 11},
		}
		slot := 6 * cfg.SlotsPerEpoch
		require.Equal(t, cfg.MaxBlobsPerBlock(slot), 7)
	})

	t.Run("Before first BlobSchedule entry falls back to fork logic", func(t *testing.T) {
		cfg := params.MainnetConfig()
		cfg.FuluForkEpoch = 1
		cfg.BlobSchedule = []params.BlobScheduleEntry{
			{Epoch: 5, MaxBlobsPerBlock: 7},
		}
		slot := primitives.Slot(2) // Epoch 0
		require.Equal(t, cfg.MaxBlobsPerBlock(slot), cfg.DeprecatedMaxBlobsPerBlock)
	})

	t.Run("Unsorted BlobSchedule still picks latest matching entry", func(t *testing.T) {
		cfg := params.MainnetConfig()
		cfg.BlobSchedule = []params.BlobScheduleEntry{
			{Epoch: 10, MaxBlobsPerBlock: 11},
			{Epoch: 5, MaxBlobsPerBlock: 7},
		}
		slot := 11 * cfg.SlotsPerEpoch
		require.Equal(t, cfg.MaxBlobsPerBlock(slot), 11)
	})

	t.Run("Unsorted BlobSchedule picks earlier matching entry correctly", func(t *testing.T) {
		cfg := params.MainnetConfig()
		cfg.BlobSchedule = []params.BlobScheduleEntry{
			{Epoch: 10, MaxBlobsPerBlock: 11},
			{Epoch: 5, MaxBlobsPerBlock: 7},
		}
		slot := 6 * cfg.SlotsPerEpoch
		require.Equal(t, cfg.MaxBlobsPerBlock(slot), 7)
	})

	t.Run("Unsorted BlobSchedule falls back to fork logic when epoch is before all entries", func(t *testing.T) {
		cfg := params.MainnetConfig()
		cfg.ElectraForkEpoch = 2
		cfg.BlobSchedule = []params.BlobScheduleEntry{
			{Epoch: 10, MaxBlobsPerBlock: 11},
			{Epoch: 5, MaxBlobsPerBlock: 7},
		}
		slot := primitives.Slot(1) // Epoch 0
		require.Equal(t, cfg.MaxBlobsPerBlock(slot), cfg.DeprecatedMaxBlobsPerBlock)
	})
}

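// Test_TargetBlobCount checks that the target blob count switches from 3 to 6 exactly at the
// first slot of the Electra fork epoch. The trailing assignment pushes ElectraForkEpoch back
// to a far-future value so the temporary fork epoch does not leak into other tests.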
func Test_TargetBlobCount(t *testing.T) {
	cfg := params.MainnetConfig()
	cfg.ElectraForkEpoch = 10
	require.Equal(t, cfg.TargetBlobsPerBlock(primitives.Slot(cfg.ElectraForkEpoch)*cfg.SlotsPerEpoch-1), 3)
	require.Equal(t, cfg.TargetBlobsPerBlock(primitives.Slot(cfg.ElectraForkEpoch)*cfg.SlotsPerEpoch), 6)
	cfg.ElectraForkEpoch = math.MaxUint64
}

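// fillGVR returns a 32-byte array with every byte set to value, used below to fabricate
// distinct genesis validators roots.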
func fillGVR(value byte) [32]byte {
	var gvr [32]byte
	for i := 0; i < len(gvr); i++ {
		gvr[i] = value
	}
	return gvr
}

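// TestEntryWithForkDigest checks that the fork digest changes at the forks configured by
// testConfigForSchedule (Electra at epoch 9, Fulu at epoch 100) and at each subsequent
// BlobSchedule entry (150, 200, 250, 300), and that configs differing only in their genesis
// validators root yield different digests at the same epoch.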
func TestEntryWithForkDigest(t *testing.T) {
	var zero [32]byte
	one := fillGVR(byte(1))
	two := fillGVR(byte(2))
	three := fillGVR(byte(3))
	configs := map[[32]byte]*params.BeaconChainConfig{
		zero:  testConfigForSchedule(zero),
		one:   testConfigForSchedule(one),
		two:   testConfigForSchedule(two),
		three: testConfigForSchedule(three),
	}
	for _, cfg := range configs {
		cfg.InitializeForkSchedule()
	}
	cases := []struct {
		epoch    primitives.Epoch
		gvr      [32]byte
		expected string
	}{
		{epoch: 9, expected: "0x97b2c268"},
		{epoch: 10, expected: "0x97b2c268"},
		{epoch: 11, expected: "0x97b2c268"},
		{epoch: 99, expected: "0x97b2c268"},
		{epoch: 100, expected: "0x44a571e8"},
		{epoch: 101, expected: "0x44a571e8"},
		{epoch: 150, expected: "0x1171afca"},
		{epoch: 199, expected: "0x1171afca"},
		{epoch: 200, expected: "0x427a30ab"},
		{epoch: 201, expected: "0x427a30ab"},
		{epoch: 250, expected: "0xd5310ef1"},
		{epoch: 299, expected: "0xd5310ef1"},
		{epoch: 300, expected: "0x51d229f7"},
		{epoch: 301, expected: "0x51d229f7"},
		{epoch: 9, gvr: fillGVR(byte(1)), expected: "0x4a5c3011"},
		{epoch: 9, gvr: fillGVR(byte(2)), expected: "0xe8332b52"},
		{epoch: 9, gvr: fillGVR(byte(3)), expected: "0x0e38e75e"},
		{epoch: 100, gvr: fillGVR(byte(1)), expected: "0xbfe98545"},
		{epoch: 100, gvr: fillGVR(byte(2)), expected: "0x9b7e4788"},
		{epoch: 100, gvr: fillGVR(byte(3)), expected: "0x8b5ce4af"},
	}
	for _, c := range cases {
		t.Run(fmt.Sprintf("%d_%s", c.epoch, c.expected), func(t *testing.T) {
			var expected [4]byte
			err := hexutil.UnmarshalFixedText("ForkDigest", []byte(c.expected), expected[:])
			require.NoError(t, err)
			cfg := configs[c.gvr]
			digest := params.ForkDigestUsingConfig(c.epoch, cfg)
			require.Equal(t, expected, digest)
		})
	}
}

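// testConfigForSchedule builds a minimal-spec config with all pre-Electra forks at genesis,
// Electra at epoch 9, Fulu at epoch 100, the given genesis validators root, and a BlobSchedule
// whose entries drive the digest changes asserted above.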
func testConfigForSchedule(gvr [32]byte) *params.BeaconChainConfig {
	cfg := params.MinimalSpecConfig().Copy()
	cfg.AltairForkEpoch = 0
	cfg.BellatrixForkEpoch = 0
	cfg.CapellaForkEpoch = 0
	cfg.DenebForkEpoch = 0
	cfg.ElectraForkEpoch = 9
	cfg.FuluForkEpoch = 100
	cfg.GenesisValidatorsRoot = gvr
	cfg.BlobSchedule = []params.BlobScheduleEntry{
		{Epoch: 100, MaxBlobsPerBlock: 100},
		{Epoch: 150, MaxBlobsPerBlock: 175},
		{Epoch: 200, MaxBlobsPerBlock: 200},
		{Epoch: 250, MaxBlobsPerBlock: 275},
		{Epoch: 300, MaxBlobsPerBlock: 300},
	}
	return cfg
}