mirror of
https://github.com/OffchainLabs/prysm.git
synced 2026-01-08 04:54:05 -05:00
Refactor fork schedules (#15490)
* overhaul fork schedule management for bpos * Unify log * Radek's comments * Use arg config to determine previous epoch, with regression test * Remove unnecessary NewClock. @potuz feedback * Continuation of previous commit: Remove unnecessary NewClock. @potuz feedback * Remove VerifyBlockHeaderSignatureUsingCurrentFork * cosmetic changes * Remove unnecessary copy. entryWithForkDigest passes by value, not by pointer so it should be fine * Reuse ErrInvalidTopic from p2p package * Unskip TestServer_GetBeaconConfig * Resolve TODO about forkwatcher in local mode * remove Copy() --------- Co-authored-by: Kasey <kasey@users.noreply.github.com> Co-authored-by: terence tsao <terence@prysmaticlabs.com> Co-authored-by: rkapka <radoslaw.kapka@gmail.com> Co-authored-by: Preston Van Loon <preston@pvl.dev>
This commit is contained in:
@@ -7,6 +7,8 @@ go_library(
|
||||
"config_utils_develop.go", # keep
|
||||
"config_utils_prod.go",
|
||||
"configset.go",
|
||||
"errors.go",
|
||||
"fork.go",
|
||||
"init.go",
|
||||
"interop.go",
|
||||
"io_config.go",
|
||||
@@ -29,9 +31,11 @@ go_library(
|
||||
deps = [
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//params:go_default_library",
|
||||
@@ -49,6 +53,8 @@ go_test(
|
||||
"checktags_test.go",
|
||||
"config_test.go",
|
||||
"configset_test.go",
|
||||
"export_test.go",
|
||||
"fork_test.go",
|
||||
"loader_test.go",
|
||||
"mainnet_config_export_test.go",
|
||||
"mainnet_config_test.go",
|
||||
@@ -75,8 +81,10 @@ go_test(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//genesis:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@in_gopkg_yaml_v2//:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
|
||||
@@ -2,16 +2,24 @@
|
||||
package params
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// BeaconChainConfig contains constant configs for node to participate in beacon chain.
|
||||
@@ -314,6 +322,10 @@ type BeaconChainConfig struct {
|
||||
// DeprecatedMaxBlobsPerBlockFulu defines the max blobs that could exist in a block post Fulu hard fork.
|
||||
// Deprecated: This field is no longer supported. Avoid using it.
|
||||
DeprecatedMaxBlobsPerBlockFulu int `yaml:"MAX_BLOBS_PER_BLOCK_FULU" spec:"true"`
|
||||
|
||||
forkSchedule *NetworkSchedule
|
||||
bpoSchedule *NetworkSchedule
|
||||
networkSchedule *NetworkSchedule
|
||||
}
|
||||
|
||||
func (b *BeaconChainConfig) VersionToForkEpochMap() map[int]primitives.Epoch {
|
||||
@@ -335,22 +347,262 @@ func (b *BeaconChainConfig) ExecutionRequestLimits() enginev1.ExecutionRequestLi
|
||||
}
|
||||
}
|
||||
|
||||
type BlobScheduleEntry struct {
|
||||
Epoch primitives.Epoch `yaml:"EPOCH" json:"EPOCH"`
|
||||
// NetworkScheduleEntry describes one point on the network schedule: either a
// hard fork or a blob-parameter-only (BPO) change, distinguished by isFork.
type NetworkScheduleEntry struct {
	// ForkVersion is the fork version in effect from Epoch onward.
	ForkVersion [fieldparams.VersionLength]byte
	// ForkDigest is the 4-byte digest for this entry. It is computed by
	// entryWithForkDigest during prepare, not set directly.
	ForkDigest [4]byte
	// MaxBlobsPerBlock is the blob limit active from Epoch onward.
	MaxBlobsPerBlock uint64 `yaml:"MAX_BLOBS_PER_BLOCK" json:"MAX_BLOBS_PER_BLOCK"`
	// Epoch is the activation epoch of this entry.
	Epoch primitives.Epoch `yaml:"EPOCH" json:"EPOCH"`
	// BPOEpoch is the epoch of the most recent blob-parameter change at or
	// before Epoch; filled in by prepare/initBPOSchedule.
	BPOEpoch primitives.Epoch
	// VersionEnum is the runtime/version enum for the fork in effect.
	VersionEnum int
	// isFork is true for hard-fork entries, false for BPO-only entries.
	isFork bool
}
|
||||
|
||||
// LogFields returns structured log fields describing this entry, including a
// recomputed fork data root ("sanity") derived from the entry's fork version
// and the global config's genesis validators root.
func (e NetworkScheduleEntry) LogFields() log.Fields {
	gvr := BeaconConfig().GenesisValidatorsRoot
	root, err := computeForkDataRoot(e.ForkVersion, gvr)
	if err != nil {
		// NOTE(review): on failure a zero-valued root is still reported in
		// the "sanity" field below — presumably acceptable for logging only.
		log.WithField("version", fmt.Sprintf("%#x", e.ForkVersion)).
			WithField("genesisValidatorsRoot", fmt.Sprintf("%#x", gvr)).
			WithError(err).Error("Failed to compute fork data root")
	}
	fields := log.Fields{
		"forkVersion":      fmt.Sprintf("%#x", e.ForkVersion),
		"forkDigest":       fmt.Sprintf("%#x", e.ForkDigest),
		"maxBlobsPerBlock": e.MaxBlobsPerBlock,
		"epoch":            e.Epoch,
		"bpoEpoch":         e.BPOEpoch,
		"isFork":           e.isFork,
		"forkEnum":         version.String(e.VersionEnum),
		"sanity":           fmt.Sprintf("%#x", root),
		"gvr":              fmt.Sprintf("%#x", gvr),
	}
	return fields
}
|
||||
|
||||
type BlobScheduleEntry NetworkScheduleEntry
|
||||
|
||||
func (b *BeaconChainConfig) ApplyOptions(opts ...Option) {
|
||||
for _, opt := range opts {
|
||||
opt(b)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: this needs to be able to return an error
// InitializeForkSchedule initializes the schedules forks baked into the config.
func (b *BeaconChainConfig) InitializeForkSchedule() {
	// Reset Fork Version Schedule.
	b.ForkVersionSchedule = configForkSchedule(b)
	b.ForkVersionNames = configForkNames(b)
	// Build the fork and BPO schedules independently, then merge them into a
	// single epoch-ordered network schedule.
	b.forkSchedule = initForkSchedule(b)
	b.bpoSchedule = initBPOSchedule(b)
	combined := b.forkSchedule.merge(b.bpoSchedule)
	// prepare computes digests and lookup maps. Per the TODO above, failure
	// is only logged here, leaving the schedule partially initialized.
	if err := combined.prepare(b); err != nil {
		log.WithError(err).Error("Failed to prepare network schedule")
	}
	b.networkSchedule = combined
}
|
||||
|
||||
func LogDigests(b *BeaconChainConfig) {
|
||||
schedule := b.networkSchedule
|
||||
for _, e := range schedule.entries {
|
||||
log.WithFields(e.LogFields()).Debug("Network schedule entry initialized")
|
||||
digests := make([]string, 0, len(schedule.byDigest))
|
||||
for k := range schedule.byDigest {
|
||||
digests = append(digests, fmt.Sprintf("%#x", k))
|
||||
}
|
||||
log.WithField("digest_keys", strings.Join(digests, ", ")).Debug("Digests seen")
|
||||
}
|
||||
}
|
||||
|
||||
// NetworkSchedule is an epoch-ordered list of fork and BPO entries plus
// lookup indexes keyed by activation epoch, fork version, and fork digest.
type NetworkSchedule struct {
	// entries are kept sorted by epoch (see merge).
	entries []NetworkScheduleEntry
	// byEpoch maps an activation epoch to the first entry indexed at it.
	byEpoch map[primitives.Epoch]*NetworkScheduleEntry
	// byVersion maps a fork version to the first entry indexed with it.
	byVersion map[[fieldparams.VersionLength]byte]*NetworkScheduleEntry
	// byDigest maps a fork digest to the first entry indexed with it.
	byDigest map[[4]byte]*NetworkScheduleEntry
}
|
||||
|
||||
func newNetworkSchedule(entries []NetworkScheduleEntry) *NetworkSchedule {
|
||||
return &NetworkSchedule{
|
||||
entries: entries,
|
||||
byEpoch: make(map[primitives.Epoch]*NetworkScheduleEntry),
|
||||
byVersion: make(map[[fieldparams.VersionLength]byte]*NetworkScheduleEntry),
|
||||
byDigest: make(map[[4]byte]*NetworkScheduleEntry),
|
||||
}
|
||||
}
|
||||
|
||||
func (ns *NetworkSchedule) epochIdx(epoch primitives.Epoch) int {
|
||||
for i := len(ns.entries) - 1; i >= 0; i-- {
|
||||
if ns.entries[i].Epoch <= epoch {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func (ns *NetworkSchedule) Next(epoch primitives.Epoch) NetworkScheduleEntry {
|
||||
lastIdx := len(ns.entries) - 1
|
||||
idx := ns.epochIdx(epoch)
|
||||
if idx < 0 {
|
||||
return ns.entries[0]
|
||||
}
|
||||
if idx == lastIdx {
|
||||
return ns.entries[lastIdx]
|
||||
}
|
||||
return ns.entries[idx+1]
|
||||
}
|
||||
|
||||
func (ns *NetworkSchedule) LastEntry() NetworkScheduleEntry {
|
||||
for i := len(ns.entries) - 1; i >= 0; i-- {
|
||||
if ns.entries[i].Epoch != BeaconConfig().FarFutureEpoch {
|
||||
return ns.entries[i]
|
||||
}
|
||||
}
|
||||
return ns.entries[0]
|
||||
}
|
||||
|
||||
// LastFork is the last full fork (this is used by e2e testing)
|
||||
func (ns *NetworkSchedule) LastFork() NetworkScheduleEntry {
|
||||
for i := len(ns.entries) - 1; i >= 0; i-- {
|
||||
if ns.entries[i].isFork && ns.entries[i].Epoch != BeaconConfig().FarFutureEpoch {
|
||||
return ns.entries[i]
|
||||
}
|
||||
}
|
||||
return ns.entries[0]
|
||||
}
|
||||
|
||||
func (ns *NetworkSchedule) ForEpoch(epoch primitives.Epoch) NetworkScheduleEntry {
|
||||
idx := ns.epochIdx(epoch)
|
||||
if idx < 0 {
|
||||
return ns.entries[0]
|
||||
}
|
||||
if idx >= len(ns.entries)-1 {
|
||||
return ns.entries[len(ns.entries)-1]
|
||||
}
|
||||
return ns.entries[idx]
|
||||
}
|
||||
|
||||
// activatedAt returns the schedule entry that activates exactly at the given
// epoch, if one is indexed there.
func (ns *NetworkSchedule) activatedAt(epoch primitives.Epoch) (*NetworkScheduleEntry, bool) {
	entry, ok := ns.byEpoch[epoch]
	return entry, ok
}
|
||||
|
||||
func (ns *NetworkSchedule) merge(other *NetworkSchedule) *NetworkSchedule {
|
||||
merged := make([]NetworkScheduleEntry, 0, len(ns.entries)+len(other.entries))
|
||||
merged = append(merged, ns.entries...)
|
||||
merged = append(merged, other.entries...)
|
||||
sort.Slice(merged, func(i, j int) bool {
|
||||
if merged[i].Epoch == merged[j].Epoch {
|
||||
if merged[i].VersionEnum == merged[j].VersionEnum {
|
||||
return merged[i].isFork
|
||||
}
|
||||
return merged[i].VersionEnum < merged[j].VersionEnum
|
||||
}
|
||||
return merged[i].Epoch < merged[j].Epoch
|
||||
})
|
||||
return newNetworkSchedule(merged)
|
||||
}
|
||||
|
||||
// index registers e under its digest, version, and epoch, keeping the first
// entry seen for each key (later duplicates never overwrite).
// Note: e is a by-value copy, so all three maps point at the same per-call
// copy, detached from ns.entries.
func (ns *NetworkSchedule) index(e NetworkScheduleEntry) {
	if _, ok := ns.byDigest[e.ForkDigest]; !ok {
		ns.byDigest[e.ForkDigest] = &e
	}
	if _, ok := ns.byVersion[e.ForkVersion]; !ok {
		ns.byVersion[e.ForkVersion] = &e
	}
	if _, ok := ns.byEpoch[e.Epoch]; !ok {
		ns.byEpoch[e.Epoch] = &e
	}
}
|
||||
|
||||
// prepare finalizes a merged schedule: it threads fork versions and blob
// limits forward through the epoch-sorted entries, computes each entry's
// fork digest, and populates the lookup maps. The first entry must be a
// fork, since it seeds the running fork version.
func (ns *NetworkSchedule) prepare(b *BeaconChainConfig) error {
	if len(ns.entries) == 0 {
		return errors.New("cannot compute digests for an empty network schedule")
	}
	if !ns.entries[0].isFork {
		return errors.New("cannot compute digests for a network schedule without a fork entry")
	}
	lastFork, err := entryWithForkDigest(ns.entries[0], b)
	if err != nil {
		return err
	}
	ns.entries[0] = lastFork
	ns.index(ns.entries[0])
	// lastBlobs tracks the most recent entry that changed blob parameters.
	var lastBlobs *NetworkScheduleEntry
	for i := 1; i < len(ns.entries); i++ {
		entry := ns.entries[i]

		if entry.isFork {
			lastFork = entry
		} else {
			// BPO entries inherit the version of the preceding fork.
			entry.ForkVersion = lastFork.ForkVersion
			entry.VersionEnum = lastFork.VersionEnum
		}

		if entry.MaxBlobsPerBlock > 0 || !entry.isFork {
			// This entry changes blob parameters: it becomes the BPO anchor.
			entry.BPOEpoch = entry.Epoch
			lastBlobs = &entry
		} else if lastBlobs != nil {
			// Fork with no blob limit of its own: carry the prior BPO values.
			entry.MaxBlobsPerBlock = lastBlobs.MaxBlobsPerBlock
			entry.BPOEpoch = lastBlobs.BPOEpoch
		}

		entry, err = entryWithForkDigest(entry, b)
		if err != nil {
			return err
		}
		ns.entries[i] = entry
		ns.index(entry)
	}
	return nil
}
|
||||
|
||||
// entryWithForkDigest returns entry with its ForkDigest populated. The base
// digest is the first 4 bytes of the fork data root (fork version + genesis
// validators root). From the Fulu fork epoch onward, the digest is further
// XORed with the first 4 bytes of hash(BPOEpoch || MaxBlobsPerBlock), both
// little-endian uint64s, so blob-parameter-only changes yield distinct
// digests. NOTE(review): this looks like the BPO fork-digest rule from the
// Fulu consensus specs — confirm against the targeted spec version.
func entryWithForkDigest(entry NetworkScheduleEntry, b *BeaconChainConfig) (NetworkScheduleEntry, error) {
	root, err := computeForkDataRoot(entry.ForkVersion, b.GenesisValidatorsRoot)
	if err != nil {
		return entry, err
	}
	entry.ForkDigest = to4(root[:])
	// Pre-Fulu entries use the plain digest.
	if entry.Epoch < b.FuluForkEpoch {
		return entry, nil
	}
	if entry.MaxBlobsPerBlock > math.MaxUint32 {
		return entry, fmt.Errorf("max blobs per block exceeds maximum uint32 value")
	}
	// 16-byte preimage: 8 bytes BPO epoch, 8 bytes blob limit.
	hb := make([]byte, 16)
	binary.LittleEndian.PutUint64(hb[0:8], uint64(entry.BPOEpoch))
	binary.LittleEndian.PutUint64(hb[8:], entry.MaxBlobsPerBlock)
	bpoHash := hash.Hash(hb)
	entry.ForkDigest[0] = entry.ForkDigest[0] ^ bpoHash[0]
	entry.ForkDigest[1] = entry.ForkDigest[1] ^ bpoHash[1]
	entry.ForkDigest[2] = entry.ForkDigest[2] ^ bpoHash[2]
	entry.ForkDigest[3] = entry.ForkDigest[3] ^ bpoHash[3]
	return entry, nil
}

// to4 aliases bytesutil.ToBytes4 for brevity.
var to4 = bytesutil.ToBytes4
|
||||
|
||||
// initForkSchedule builds the hard-fork portion of the network schedule from
// the config's per-fork epoch and version fields. Deneb and Electra carry
// their (deprecated) blob limits; Fulu's limits come from the BPO schedule.
func initForkSchedule(b *BeaconChainConfig) *NetworkSchedule {
	return newNetworkSchedule([]NetworkScheduleEntry{
		{Epoch: b.GenesisEpoch, isFork: true, ForkVersion: to4(b.GenesisForkVersion), VersionEnum: version.Phase0},
		{Epoch: b.AltairForkEpoch, isFork: true, ForkVersion: to4(b.AltairForkVersion), VersionEnum: version.Altair},
		{Epoch: b.BellatrixForkEpoch, isFork: true, ForkVersion: to4(b.BellatrixForkVersion), VersionEnum: version.Bellatrix},
		{Epoch: b.CapellaForkEpoch, isFork: true, ForkVersion: to4(b.CapellaForkVersion), VersionEnum: version.Capella},
		{Epoch: b.DenebForkEpoch, isFork: true, ForkVersion: to4(b.DenebForkVersion), MaxBlobsPerBlock: uint64(b.DeprecatedMaxBlobsPerBlock), VersionEnum: version.Deneb},
		{Epoch: b.ElectraForkEpoch, isFork: true, ForkVersion: to4(b.ElectraForkVersion), MaxBlobsPerBlock: uint64(b.DeprecatedMaxBlobsPerBlockElectra), VersionEnum: version.Electra},
		{Epoch: b.FuluForkEpoch, isFork: true, ForkVersion: to4(b.FuluForkVersion), VersionEnum: version.Fulu},
	})
}
|
||||
|
||||
func initBPOSchedule(b *BeaconChainConfig) *NetworkSchedule {
|
||||
sort.Slice(b.BlobSchedule, func(i, j int) bool {
|
||||
return b.BlobSchedule[i].Epoch < b.BlobSchedule[j].Epoch
|
||||
})
|
||||
entries := make([]NetworkScheduleEntry, len(b.BlobSchedule))
|
||||
for i := range b.BlobSchedule {
|
||||
entries[i] = NetworkScheduleEntry(b.BlobSchedule[i])
|
||||
entries[i].BPOEpoch = entries[i].Epoch
|
||||
}
|
||||
return newNetworkSchedule(entries)
|
||||
}
|
||||
|
||||
func configForkSchedule(b *BeaconChainConfig) map[[fieldparams.VersionLength]byte]primitives.Epoch {
|
||||
|
||||
@@ -2,6 +2,7 @@ package params_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"testing"
|
||||
@@ -10,6 +11,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/genesis"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
)
|
||||
|
||||
// Test cases can be executed in an arbitrary order. TestOverrideBeaconConfigTestTeardown checks
|
||||
@@ -185,3 +187,82 @@ func Test_TargetBlobCount(t *testing.T) {
|
||||
require.Equal(t, cfg.TargetBlobsPerBlock(primitives.Slot(cfg.ElectraForkEpoch)*cfg.SlotsPerEpoch), 6)
|
||||
cfg.ElectraForkEpoch = math.MaxUint64
|
||||
}
|
||||
|
||||
// fillGVR returns a 32-byte genesis validators root with every byte set to
// value.
func fillGVR(value byte) [32]byte {
	var gvr [32]byte
	for i := range gvr {
		gvr[i] = value
	}
	return gvr
}
|
||||
|
||||
// TestEntryWithForkDigest pins the fork digest produced for a fixed fork/BPO
// schedule (see testConfigForSchedule) across several genesis validators
// roots. The expected hex digests are precomputed golden values; a zero-value
// gvr in a case selects the all-zero root config.
func TestEntryWithForkDigest(t *testing.T) {
	var zero [32]byte
	one := fillGVR(byte(1))
	two := fillGVR(byte(2))
	three := fillGVR(byte(3))
	configs := map[[32]byte]*params.BeaconChainConfig{
		zero:  testConfigForSchedule(zero),
		one:   testConfigForSchedule(one),
		two:   testConfigForSchedule(two),
		three: testConfigForSchedule(three),
	}
	for _, cfg := range configs {
		cfg.InitializeForkSchedule()
	}
	cases := []struct {
		epoch    primitives.Epoch
		gvr      [32]byte
		expected string
	}{
		// Epochs 9-99 sit between Electra (9) and Fulu (100): stable digest.
		{epoch: 9, expected: "0x97b2c268"},
		{epoch: 10, expected: "0x97b2c268"},
		{epoch: 11, expected: "0x97b2c268"},
		{epoch: 99, expected: "0x97b2c268"},
		// Each BPO boundary (100/150/200/250/300) changes the digest.
		{epoch: 100, expected: "0x44a571e8"},
		{epoch: 101, expected: "0x44a571e8"},
		{epoch: 150, expected: "0x1171afca"},
		{epoch: 199, expected: "0x1171afca"},
		{epoch: 200, expected: "0x427a30ab"},
		{epoch: 201, expected: "0x427a30ab"},
		{epoch: 250, expected: "0xd5310ef1"},
		{epoch: 299, expected: "0xd5310ef1"},
		{epoch: 300, expected: "0x51d229f7"},
		{epoch: 301, expected: "0x51d229f7"},
		// Different genesis validators roots produce different digests.
		{epoch: 9, gvr: fillGVR(byte(1)), expected: "0x4a5c3011"},
		{epoch: 9, gvr: fillGVR(byte(2)), expected: "0xe8332b52"},
		{epoch: 9, gvr: fillGVR(byte(3)), expected: "0x0e38e75e"},
		{epoch: 100, gvr: fillGVR(byte(1)), expected: "0xbfe98545"},
		{epoch: 100, gvr: fillGVR(byte(2)), expected: "0x9b7e4788"},
		{epoch: 100, gvr: fillGVR(byte(3)), expected: "0x8b5ce4af"},
	}
	for _, c := range cases {
		t.Run(fmt.Sprintf("%d_%s", c.epoch, c.expected), func(t *testing.T) {
			var expected [4]byte
			err := hexutil.UnmarshalFixedText("ForkDigest", []byte(c.expected), expected[:])
			require.NoError(t, err)
			cfg := configs[c.gvr]
			digest := params.ForkDigestUsingConfig(c.epoch, cfg)
			require.Equal(t, expected, digest)
		})
	}
}
|
||||
|
||||
// testConfigForSchedule returns a minimal-spec config with a fixed fork
// layout (Altair..Deneb at genesis, Electra at 9, Fulu at 100), five BPO
// entries, and the given genesis validators root.
func testConfigForSchedule(gvr [32]byte) *params.BeaconChainConfig {
	cfg := params.MinimalSpecConfig().Copy()
	cfg.AltairForkEpoch = 0
	cfg.BellatrixForkEpoch = 0
	cfg.CapellaForkEpoch = 0
	cfg.DenebForkEpoch = 0
	cfg.ElectraForkEpoch = 9
	cfg.FuluForkEpoch = 100
	cfg.GenesisValidatorsRoot = gvr
	cfg.BlobSchedule = []params.BlobScheduleEntry{
		{Epoch: 100, MaxBlobsPerBlock: 100},
		{Epoch: 150, MaxBlobsPerBlock: 175},
		{Epoch: 200, MaxBlobsPerBlock: 200},
		{Epoch: 250, MaxBlobsPerBlock: 275},
		{Epoch: 300, MaxBlobsPerBlock: 300},
	}
	return cfg
}
|
||||
|
||||
@@ -22,6 +22,7 @@ func BeaconConfig() *BeaconChainConfig {
|
||||
// OverrideBeaconConfig(c). Any subsequent calls to params.BeaconConfig() will
|
||||
// return this new configuration.
|
||||
func OverrideBeaconConfig(c *BeaconChainConfig) {
|
||||
c.InitializeForkSchedule()
|
||||
cfgrw.Lock()
|
||||
defer cfgrw.Unlock()
|
||||
configs.active = c
|
||||
|
||||
@@ -16,6 +16,7 @@ func BeaconConfig() *BeaconChainConfig {
|
||||
// OverrideBeaconConfig(c). Any subsequent calls to params.BeaconConfig() will
|
||||
// return this new configuration.
|
||||
func OverrideBeaconConfig(c *BeaconChainConfig) {
|
||||
c.InitializeForkSchedule()
|
||||
configs.active = c
|
||||
}
|
||||
|
||||
|
||||
10
config/params/errors.go
Normal file
10
config/params/errors.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package params
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
// ErrVersionNotFound indicates the config package couldn't determine the version for an epoch using the fork schedule.
var ErrVersionNotFound = errors.New("could not find an entry in the fork schedule")

// ErrNoPreviousVersion indicates that a version prior to the given version could not be found, because the given
// version is the first one in the list.
var ErrNoPreviousVersion = errors.New("no previous version")
|
||||
3
config/params/export_test.go
Normal file
3
config/params/export_test.go
Normal file
@@ -0,0 +1,3 @@
|
||||
package params
|
||||
|
||||
// ComputeForkDataRoot re-exports computeForkDataRoot for white-box tests.
var ComputeForkDataRoot = computeForkDataRoot
|
||||
104
config/params/fork.go
Normal file
104
config/params/fork.go
Normal file
@@ -0,0 +1,104 @@
|
||||
package params
|
||||
|
||||
import (
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// DigestChangesAfter checks if an allotted fork is in the following epoch.
// It reports whether any schedule entry activates exactly at e+1.
// NOTE(review): e+1 wraps for the maximum epoch value — presumably callers
// never pass FarFutureEpoch; confirm.
func DigestChangesAfter(e primitives.Epoch) bool {
	_, ok := BeaconConfig().networkSchedule.activatedAt(e + 1)
	return ok
}
||||
|
||||
// ForkDigestUsingConfig retrieves the fork digest from the current schedule determined
// by the provided epoch. The digest comes from cfg's network schedule, so
// cfg.InitializeForkSchedule must have run first.
func ForkDigestUsingConfig(epoch primitives.Epoch, cfg *BeaconChainConfig) [4]byte {
	entry := cfg.networkSchedule.ForEpoch(epoch)
	return entry.ForkDigest
}
|
||||
|
||||
// ForkDigest returns the fork digest for the given epoch using the active
// global beacon config.
func ForkDigest(epoch primitives.Epoch) [4]byte {
	return ForkDigestUsingConfig(epoch, BeaconConfig())
}
|
||||
|
||||
func computeForkDataRoot(version [fieldparams.VersionLength]byte, root [32]byte) ([32]byte, error) {
|
||||
r, err := (ðpb.ForkData{
|
||||
CurrentVersion: version[:],
|
||||
GenesisValidatorsRoot: root[:],
|
||||
}).HashTreeRoot()
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// Fork returns the fork version for the given epoch.
// The error is always nil; the error return is kept for caller compatibility.
func Fork(epoch primitives.Epoch) (*ethpb.Fork, error) {
	cfg := BeaconConfig()
	return ForkFromConfig(cfg, epoch), nil
}
|
||||
|
||||
func ForkFromConfig(cfg *BeaconChainConfig, epoch primitives.Epoch) *ethpb.Fork {
|
||||
current := cfg.networkSchedule.ForEpoch(epoch)
|
||||
previous := current
|
||||
if current.Epoch > 0 {
|
||||
previous = cfg.networkSchedule.ForEpoch(current.Epoch - 1)
|
||||
}
|
||||
return ðpb.Fork{
|
||||
PreviousVersion: previous.ForkVersion[:],
|
||||
CurrentVersion: current.ForkVersion[:],
|
||||
Epoch: current.Epoch,
|
||||
}
|
||||
}
|
||||
|
||||
// ForkDataFromDigest performs the inverse, where it tries to determine the fork version
|
||||
// and epoch from a provided digest by looping through our current fork schedule.
|
||||
func ForkDataFromDigest(digest [4]byte) ([fieldparams.VersionLength]byte, primitives.Epoch, error) {
|
||||
cfg := BeaconConfig()
|
||||
entry, ok := cfg.networkSchedule.byDigest[digest]
|
||||
if !ok {
|
||||
return [fieldparams.VersionLength]byte{}, 0, errors.Errorf("no fork exists for a digest of %#x", digest)
|
||||
}
|
||||
return entry.ForkVersion, entry.Epoch, nil
|
||||
}
|
||||
|
||||
// NextForkData retrieves the next fork data according to the
// provided current epoch. It returns the fork version and activation epoch
// of the schedule entry that follows the one active at epoch.
func NextForkData(epoch primitives.Epoch) ([fieldparams.VersionLength]byte, primitives.Epoch) {
	entry := BeaconConfig().networkSchedule.Next(epoch)
	return entry.ForkVersion, entry.Epoch
}
|
||||
|
||||
func NextNetworkScheduleEntry(epoch primitives.Epoch) NetworkScheduleEntry {
|
||||
entry := BeaconConfig().networkSchedule.Next(epoch)
|
||||
return entry
|
||||
}
|
||||
|
||||
// SortedNetworkScheduleEntries returns all schedule entries (forks and BPO
// changes) in ascending epoch order. The internal slice is returned
// directly; callers must not mutate it.
func SortedNetworkScheduleEntries() []NetworkScheduleEntry {
	return BeaconConfig().networkSchedule.entries
}
|
||||
|
||||
func SortedForkSchedule() []NetworkScheduleEntry {
|
||||
entries := BeaconConfig().networkSchedule.entries
|
||||
schedule := make([]NetworkScheduleEntry, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
if entry.isFork {
|
||||
schedule = append(schedule, entry)
|
||||
}
|
||||
}
|
||||
return schedule
|
||||
}
|
||||
|
||||
// LastForkEpoch returns the last valid fork epoch that exists in our
// fork schedule (the latest fork not scheduled at FarFutureEpoch).
func LastForkEpoch() primitives.Epoch {
	return BeaconConfig().networkSchedule.LastFork().Epoch
}
|
||||
|
||||
func GetNetworkScheduleEntry(epoch primitives.Epoch) NetworkScheduleEntry {
|
||||
entry := BeaconConfig().networkSchedule.ForEpoch(epoch)
|
||||
return entry
|
||||
}
|
||||
188
config/params/fork_test.go
Normal file
188
config/params/fork_test.go
Normal file
@@ -0,0 +1,188 @@
|
||||
package params_test
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
// TestFork exercises params.Fork across genesis, on-fork, and post-fork
// epochs, checking the current/previous version pairing for each.
func TestFork(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()

	tests := []struct {
		name        string
		targetEpoch primitives.Epoch
		want        *ethpb.Fork
		wantErr     bool
		// setConfg is declared but never set by any case below — presumably
		// leftover scaffolding; confirm before removing.
		setConfg func()
	}{
		{
			name:        "genesis fork",
			targetEpoch: 0,
			want: &ethpb.Fork{
				Epoch:           cfg.GenesisEpoch,
				CurrentVersion:  cfg.GenesisForkVersion,
				PreviousVersion: cfg.GenesisForkVersion,
			},
		},
		{
			name:        "altair on fork",
			targetEpoch: cfg.AltairForkEpoch,
			want: &ethpb.Fork{
				Epoch:           cfg.AltairForkEpoch,
				CurrentVersion:  cfg.AltairForkVersion,
				PreviousVersion: cfg.GenesisForkVersion,
			},
		},
		{
			name:        "altair post fork",
			targetEpoch: cfg.CapellaForkEpoch + 1,
			want: &ethpb.Fork{
				Epoch:           cfg.CapellaForkEpoch,
				CurrentVersion:  cfg.CapellaForkVersion,
				PreviousVersion: cfg.BellatrixForkVersion,
			},
		},
		{
			name:        "3 forks, pre-fork",
			targetEpoch: cfg.ElectraForkEpoch - 1,
			want: &ethpb.Fork{
				Epoch:           cfg.DenebForkEpoch,
				CurrentVersion:  cfg.DenebForkVersion,
				PreviousVersion: cfg.CapellaForkVersion,
			},
		},
		{
			name:        "3 forks, on fork",
			targetEpoch: cfg.ElectraForkEpoch,
			want: &ethpb.Fork{
				Epoch:           cfg.ElectraForkEpoch,
				CurrentVersion:  cfg.ElectraForkVersion,
				PreviousVersion: cfg.DenebForkVersion,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Re-apply a fresh copy so each subtest sees an untouched config.
			copied := cfg.Copy()
			params.OverrideBeaconConfig(copied)
			got, err := params.Fork(tt.targetEpoch)
			if (err != nil) != tt.wantErr {
				t.Errorf("Fork() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Fork() got = %v, want %v", got, tt.want)
			}
		})
	}
}
|
||||
|
||||
// TestRetrieveForkDataFromDigest round-trips the Altair fork: digest from
// epoch, then version/epoch recovered from that digest.
func TestRetrieveForkDataFromDigest(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	digest := params.ForkDigest(params.BeaconConfig().AltairForkEpoch)
	version, epoch, err := params.ForkDataFromDigest(digest)
	require.NoError(t, err)
	require.Equal(t, [4]byte(params.BeaconConfig().AltairForkVersion), version)
	require.Equal(t, params.BeaconConfig().AltairForkEpoch, epoch)
}
|
||||
|
||||
// TestIsForkNextEpoch checks DigestChangesAfter around the Electra fork
// boundary: true only when the fork activates in the very next epoch.
func TestIsForkNextEpoch(t *testing.T) {
	// at
	assert.Equal(t, false, params.DigestChangesAfter(params.BeaconConfig().ElectraForkEpoch))
	// just before
	assert.Equal(t, true, params.DigestChangesAfter(params.BeaconConfig().ElectraForkEpoch-1))
	// just after
	assert.Equal(t, false, params.DigestChangesAfter(params.BeaconConfig().ElectraForkEpoch+1))
}
|
||||
|
||||
// TestNextForkData verifies NextForkData around fork boundaries, including
// the tail case where no further fork is scheduled.
func TestNextForkData(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	params.BeaconConfig().InitializeForkSchedule()
	cfg := params.BeaconConfig()
	require.Equal(t, true, params.LastForkEpoch() < cfg.FarFutureEpoch)
	tests := []struct {
		name string
		// setConfg is declared but never set by any case below — presumably
		// leftover scaffolding; confirm before removing.
		setConfg          func()
		currEpoch         primitives.Epoch
		wantedForkVersion [4]byte
		wantedEpoch       primitives.Epoch
	}{
		{
			name:              "genesis",
			currEpoch:         0,
			wantedForkVersion: [4]byte(cfg.AltairForkVersion),
			wantedEpoch:       cfg.AltairForkEpoch,
		},
		{
			name:              "altair pre-fork",
			currEpoch:         cfg.AltairForkEpoch - 1,
			wantedForkVersion: [4]byte(cfg.AltairForkVersion),
			wantedEpoch:       cfg.AltairForkEpoch,
		},
		{
			name:              "altair on fork",
			currEpoch:         cfg.AltairForkEpoch,
			wantedForkVersion: [4]byte(cfg.BellatrixForkVersion),
			wantedEpoch:       cfg.BellatrixForkEpoch,
		},

		{
			name:              "altair post fork",
			currEpoch:         cfg.AltairForkEpoch + 1,
			wantedForkVersion: [4]byte(cfg.BellatrixForkVersion),
			wantedEpoch:       cfg.BellatrixForkEpoch,
		},
		{
			name:              "after last bpo - should be far future epoch and 0x00000000",
			currEpoch:         params.LastForkEpoch() + 1,
			wantedForkVersion: [4]byte(cfg.FuluForkVersion),
			wantedEpoch:       cfg.FarFutureEpoch,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			params.OverrideBeaconConfig(cfg.Copy())
			fVersion, fEpoch := params.NextForkData(tt.currEpoch)
			if fVersion != tt.wantedForkVersion {
				t.Errorf("NextForkData() fork version = %v, want %v", fVersion, tt.wantedForkVersion)
			}
			if fEpoch != tt.wantedEpoch {
				t.Errorf("NextForkData() fork epoch = %v, want %v", fEpoch, tt.wantedEpoch)
			}
		})
	}
}
|
||||
|
||||
// TestLastForkEpoch pins the last scheduled fork of the default config to
// Electra (Fulu is presumably unscheduled/far-future here — confirm).
func TestLastForkEpoch(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	require.Equal(t, cfg.ElectraForkEpoch, params.LastForkEpoch())
}
|
||||
|
||||
// TestForkFromConfig_UsesPassedConfig confirms ForkFromConfig reads fork
// versions from the config it is handed, not from the global config.
func TestForkFromConfig_UsesPassedConfig(t *testing.T) {
	testCfg := params.BeaconConfig().Copy()
	testCfg.AltairForkVersion = []byte{0x02, 0x00, 0x00, 0x00}
	testCfg.GenesisForkVersion = []byte{0x03, 0x00, 0x00, 0x00}
	testCfg.AltairForkEpoch = 100
	testCfg.InitializeForkSchedule()

	// Test at Altair fork epoch - should use the passed config's versions
	fork := params.ForkFromConfig(testCfg, testCfg.AltairForkEpoch)

	want := &ethpb.Fork{
		Epoch:           testCfg.AltairForkEpoch,
		CurrentVersion:  testCfg.AltairForkVersion,
		PreviousVersion: testCfg.GenesisForkVersion,
	}

	if !reflect.DeepEqual(fork, want) {
		t.Errorf("ForkFromConfig() got = %v, want %v", fork, want)
	}
}
|
||||
@@ -11,6 +11,6 @@ type Option func(*BeaconChainConfig)
|
||||
func WithGenesisValidatorsRoot(gvr [32]byte) Option {
|
||||
return func(cfg *BeaconChainConfig) {
|
||||
cfg.GenesisValidatorsRoot = gvr
|
||||
log.WithField("genesis_validators_root", fmt.Sprintf("%#x", gvr)).Info("Overriding genesis validators root")
|
||||
log.WithField("genesis_validators_root", fmt.Sprintf("%#x", gvr)).Info("Setting genesis validators root")
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user