Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 13:58:09 -05:00)

Comparing peerdas-ge...fusaka-dev (10 commits)
| Author | SHA1 | Date |
|---|---|---|
| | cdd44dd0b4 | |
| | edfa363c47 | |
| | 88b480fd45 | |
| | 448903a05e | |
| | b3a1d47f3d | |
| | fbcb686636 | |
| | 0f5509119b | |
| | 4086b9c6c3 | |
| | 49c607ea84 | |
| | fc78ad7c5b | |
@@ -16,7 +16,6 @@ go_library(
        "//api/server/structs:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
-       "//network/forks:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",

@@ -9,7 +9,6 @@ import (
    "net/url"
    "path"
    "regexp"
    "sort"
    "strconv"

    "github.com/OffchainLabs/prysm/v6/api/client"

@@ -17,7 +16,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/api/server/structs"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
-   "github.com/OffchainLabs/prysm/v6/network/forks"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
@@ -137,24 +135,6 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
    return fr.ToConsensus()
}

-// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
-func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
-   body, err := c.Get(ctx, getForkSchedulePath)
-   if err != nil {
-       return nil, errors.Wrap(err, "error requesting fork schedule")
-   }
-   fsr := &forkScheduleResponse{}
-   err = json.Unmarshal(body, fsr)
-   if err != nil {
-       return nil, err
-   }
-   ofs, err := fsr.OrderedForkSchedule()
-   if err != nil {
-       return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
-   }
-   return ofs, nil
-}
-
// GetConfigSpec retrieve the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*structs.GetSpecResponse, error) {
    body, err := c.Get(ctx, getConfigSpecPath)

@@ -334,31 +314,3 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToEx
    }
    return poolResponse, nil
}
-
-type forkScheduleResponse struct {
-   Data []structs.Fork
-}
-
-func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
-   ofs := make(forks.OrderedSchedule, 0)
-   for _, d := range fsr.Data {
-       epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
-       if err != nil {
-           return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
-       }
-       vSlice, err := hexutil.Decode(d.CurrentVersion)
-       if err != nil {
-           return nil, err
-       }
-       if len(vSlice) != 4 {
-           return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
-       }
-       version := bytesutil.ToBytes4(vSlice)
-       ofs = append(ofs, forks.ForkScheduleEntry{
-           Version: version,
-           Epoch:   primitives.Epoch(epoch),
-       })
-   }
-   sort.Sort(ofs)
-   return ofs, nil
-}
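For orientation, the deleted OrderedForkSchedule helper above converts the REST response's string-typed fields into consensus types. Below is a minimal, self-contained sketch of that conversion (epoch as a base-10 string, fork version as 0x-prefixed hex), using only the standard library; scheduleEntry is a stand-in for forks.ForkScheduleEntry, not Prysm's API:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strconv"
	"strings"
)

// scheduleEntry stands in for forks.ForkScheduleEntry (hypothetical local type).
type scheduleEntry struct {
	Version [4]byte
	Epoch   uint64
}

// parseEntry mirrors the conversion the removed helper performed for one fork:
// parse the epoch, decode the version hex, and enforce the 4-byte length.
func parseEntry(epochStr, versionHex string) (scheduleEntry, error) {
	epoch, err := strconv.ParseUint(epochStr, 10, 64)
	if err != nil {
		return scheduleEntry{}, fmt.Errorf("error parsing epoch %s: %w", epochStr, err)
	}
	vSlice, err := hex.DecodeString(strings.TrimPrefix(versionHex, "0x"))
	if err != nil {
		return scheduleEntry{}, err
	}
	if len(vSlice) != 4 {
		return scheduleEntry{}, fmt.Errorf("got %d byte version, expected 4 bytes", len(vSlice))
	}
	var v [4]byte
	copy(v[:], vSlice)
	return scheduleEntry{Version: v, Epoch: epoch}, nil
}

func main() {
	e, err := parseEntry("194048", "0x04000000")
	if err != nil {
		panic(err)
	}
	fmt.Printf("epoch=%d version=%#x\n", e.Epoch, e.Version)
}
```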
@@ -182,6 +182,7 @@ go_test(
        "//container/trie:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
+       "//genesis:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",

@@ -14,6 +14,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
+   "github.com/OffchainLabs/prysm/v6/genesis"
    enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/assert"

@@ -624,6 +625,7 @@ func Test_hashForGenesisRoot(t *testing.T) {
    ctx := t.Context()
    c := setupBeaconChain(t, beaconDB)
    st, _ := util.DeterministicGenesisStateElectra(t, 10)
+   genesis.StoreDuringTest(t, genesis.GenesisData{State: st})
    require.NoError(t, c.cfg.BeaconDB.SaveGenesisData(ctx, st))
    root, err := beaconDB.GenesisBlockRoot(ctx)
    require.NoError(t, err)

@@ -19,6 +19,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
+   "github.com/OffchainLabs/prysm/v6/genesis"
    v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/assert"

@@ -309,6 +310,7 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
        block: wba,
    }

+   genesis.StoreStateDuringTest(t, st)
    require.NoError(t, beaconDB.SaveState(ctx, st, bra))
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
    a := &fcuConfig{

@@ -403,6 +405,7 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
    require.NoError(t, err)

    bState, _ := util.DeterministicGenesisState(t, 10)
+   genesis.StoreStateDuringTest(t, bState)
    require.NoError(t, beaconDB.SaveState(ctx, bState, bra))
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)

@@ -35,6 +35,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/crypto/bls"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
+   "github.com/OffchainLabs/prysm/v6/genesis"
    enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -1980,14 +1981,15 @@ func TestNoViableHead_Reboot(t *testing.T) {
    genesisState, keys := util.DeterministicGenesisState(t, 64)
    stateRoot, err := genesisState.HashTreeRoot(ctx)
    require.NoError(t, err, "Could not hash genesis state")
-   genesis := blocks.NewGenesisBlock(stateRoot[:])
-   wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
+   gb := blocks.NewGenesisBlock(stateRoot[:])
+   wsb, err := consensusblocks.NewSignedBeaconBlock(gb)
    require.NoError(t, err)
-   genesisRoot, err := genesis.Block.HashTreeRoot()
+   genesisRoot, err := gb.Block.HashTreeRoot()
    require.NoError(t, err, "Could not get signing root")
    require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
    require.NoError(t, service.saveGenesisData(ctx, genesisState))

+   genesis.StoreStateDuringTest(t, genesisState)
    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, genesisRoot), "Could not save genesis state")
    require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, genesisRoot), "Could not save genesis state")
    require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
@@ -12,7 +12,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/async/event"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
    statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"

@@ -219,17 +218,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {

// Start a blockchain service's main event loop.
func (s *Service) Start() {
-   saved := s.cfg.FinalizedStateAtStartUp
    defer s.removeStartupState()

-   if saved != nil && !saved.IsNil() {
-       if err := s.StartFromSavedState(saved); err != nil {
-           log.Fatal(err)
-       }
-   } else {
-       if err := s.startFromExecutionChain(); err != nil {
-           log.Fatal(err)
-       }
+   if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
+       log.Fatal(err)
    }
    s.spawnProcessAttestationsRoutine()
    go s.runLateBlockTasks()
@@ -278,6 +269,9 @@ func (s *Service) Status() error {

// StartFromSavedState initializes the blockchain using a previously saved finalized checkpoint.
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
+   if state.IsNil(saved) {
+       return errors.New("Last finalized state at startup is nil")
+   }
    log.Info("Blockchain data already exists in DB, initializing...")
    s.genesisTime = saved.GenesisTime()
    s.cfg.AttService.SetGenesisTime(saved.GenesisTime())
@@ -379,62 +373,6 @@ func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) erro
    return nil
}

-func (s *Service) startFromExecutionChain() error {
-   log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
-   if s.cfg.ChainStartFetcher == nil {
-       return errors.New("not configured execution chain")
-   }
-   go func() {
-       stateChannel := make(chan *feed.Event, 1)
-       stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
-       defer stateSub.Unsubscribe()
-       for {
-           select {
-           case e := <-stateChannel:
-               if e.Type == statefeed.ChainStarted {
-                   data, ok := e.Data.(*statefeed.ChainStartedData)
-                   if !ok {
-                       log.Error("Event data is not type *statefeed.ChainStartedData")
-                       return
-                   }
-                   log.WithField("startTime", data.StartTime).Debug("Received chain start event")
-                   s.onExecutionChainStart(s.ctx, data.StartTime)
-                   return
-               }
-           case <-s.ctx.Done():
-               log.Debug("Context closed, exiting goroutine")
-               return
-           case err := <-stateSub.Err():
-               log.WithError(err).Error("Subscription to state forRoot failed")
-               return
-           }
-       }
-   }()
-
-   return nil
-}
-
-// onExecutionChainStart initializes a series of deposits from the ChainStart deposits in the eth1
-// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
-func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Time) {
-   preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
-   initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
-   if err != nil {
-       log.WithError(err).Fatal("Could not initialize beacon chain")
-   }
-   // We start a counter to genesis, if needed.
-   gRoot, err := initializedState.HashTreeRoot(s.ctx)
-   if err != nil {
-       log.WithError(err).Fatal("Could not hash tree root genesis state")
-   }
-   go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
-
-   vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
-   if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
-       log.WithError(err).Fatal("Failed to initialize blockchain service from execution start event")
-   }
-}
-
// initializes the state and genesis block of the beacon chain to persistent storage
// based on a genesis timestamp value obtained from the ChainStart event emitted
// by the ETH1.0 Deposit Contract and the POWChain service of the node.
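The deleted startFromExecutionChain used a common subscribe-and-wait shape: subscribe to an event feed, block in a select until the expected event (or cancellation, or a subscription error) arrives, act once, and return. A generic sketch of that pattern with plain channels; every name here is illustrative rather than a Prysm API:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// event is a stand-in for the feed events the removed goroutine consumed.
type event struct {
	kind string
	time time.Time
}

// waitForChainStart blocks until a "started" event arrives, the context is
// cancelled, or the subscription reports an error, mirroring the select
// loop in the removed code.
func waitForChainStart(ctx context.Context, events <-chan event, errs <-chan error) (time.Time, error) {
	for {
		select {
		case e := <-events:
			if e.kind == "started" {
				return e.time, nil
			}
		case <-ctx.Done():
			return time.Time{}, ctx.Err()
		case err := <-errs:
			return time.Time{}, err
		}
	}
}

func main() {
	events := make(chan event, 1)
	errs := make(chan error, 1)
	events <- event{kind: "started", time: time.Now()}
	start, err := waitForChainStart(context.Background(), events, errs)
	if err != nil {
		panic(err)
	}
	fmt.Println("chain start:", start)
}
```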
@@ -31,6 +31,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/container/trie"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
+   "github.com/OffchainLabs/prysm/v6/genesis"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
    "github.com/OffchainLabs/prysm/v6/testing/require"

@@ -51,6 +52,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
        srv.Stop()
    })
    bState, _ := util.DeterministicGenesisState(t, 10)
+   genesis.StoreStateDuringTest(t, bState)
    pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProtoUnsafe())
    require.NoError(t, err)
    mockTrie, err := trie.NewTrie(0)
@@ -71,20 +73,22 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
        DepositContainers: []*ethpb.DepositContainer{},
    })
    require.NoError(t, err)

+   depositCache, err := depositsnapshot.New()
+   require.NoError(t, err)
+
    web3Service, err = execution.NewService(
        ctx,
        execution.WithDatabase(beaconDB),
        execution.WithHttpEndpoint(endpoint),
        execution.WithDepositContractAddress(common.Address{}),
+       execution.WithDepositCache(depositCache),
    )
    require.NoError(t, err, "Unable to set up web3 service")

    attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
    require.NoError(t, err)

-   depositCache, err := depositsnapshot.New()
-   require.NoError(t, err)
-
    fc := doublylinkedtree.New()
    stateGen := stategen.New(beaconDB, fc)
    // Safe a state in stategen to purposes of testing a service stop / shutdown.

@@ -396,24 +400,6 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
    require.Equal(t, true, s.cfg.BeaconDB.HasBlock(s.ctx, r))
}

-func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
-   ctx := t.Context()
-   beaconDB := testDB.SetupDB(t)
-   service := setupBeaconChain(t, beaconDB)
-   mgs := &MockClockSetter{}
-   service.clockSetter = mgs
-   gt := time.Now()
-   service.onExecutionChainStart(t.Context(), gt)
-   gs, err := beaconDB.GenesisState(ctx)
-   require.NoError(t, err)
-   require.NotEqual(t, nil, gs)
-   require.Equal(t, 32, len(gs.GenesisValidatorsRoot()))
-   var zero [32]byte
-   require.DeepNotEqual(t, gs.GenesisValidatorsRoot(), zero[:])
-   require.Equal(t, gt, mgs.G.GenesisTime())
-   require.Equal(t, bytesutil.ToBytes32(gs.GenesisValidatorsRoot()), mgs.G.GenesisValidatorsRoot())
-}
-
func BenchmarkHasBlockDB(b *testing.B) {
    ctx := b.Context()
    s := testServiceWithDB(b)
@@ -41,7 +41,6 @@ go_library(
        "//encoding/ssz:go_default_library",
        "//math:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
-       "//network/forks:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/attestation:go_default_library",

@@ -11,7 +11,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/crypto/bls"
-   "github.com/OffchainLabs/prysm/v6/network/forks"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
    "github.com/OffchainLabs/prysm/v6/time/slots"
@@ -98,7 +97,7 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig

func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
    currentEpoch := slots.ToEpoch(header.Header.Slot)
-   fork, err := forks.Fork(currentEpoch)
+   fork, err := params.Fork(currentEpoch)
    if err != nil {
        return err
    }

@@ -119,7 +118,7 @@
// via the respective epoch.
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
    currentEpoch := slots.ToEpoch(blk.Block().Slot())
-   fork, err := forks.Fork(currentEpoch)
+   fork, err := params.Fork(currentEpoch)
    if err != nil {
        return err
    }
@@ -4,7 +4,6 @@ go_library(
    name = "go_default_library",
    srcs = [
        "domain.go",
-       "signature.go",
        "signing_root.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing",

@@ -25,7 +24,6 @@ go_test(
    name = "go_default_test",
    srcs = [
        "domain_test.go",
-       "signature_test.go",
        "signing_root_test.go",
    ],
    embed = [":go_default_library"],
@@ -1,34 +0,0 @@
package signing

import (
    "github.com/OffchainLabs/prysm/v6/config/params"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/pkg/errors"
)

var ErrNilRegistration = errors.New("nil signed registration")

// VerifyRegistrationSignature verifies the signature of a validator's registration.
func VerifyRegistrationSignature(
    sr *ethpb.SignedValidatorRegistrationV1,
) error {
    if sr == nil || sr.Message == nil {
        return ErrNilRegistration
    }

    d := params.BeaconConfig().DomainApplicationBuilder
    // Per spec, we want the fork version and genesis validator to be nil.
    // Which is genesis value and zero by default.
    sd, err := ComputeDomain(
        d,
        nil, /* fork version */
        nil /* genesis val root */)
    if err != nil {
        return err
    }

    if err := VerifySigningRoot(sr.Message, sr.Message.Pubkey, sr.Signature, sd); err != nil {
        return ErrSigFailedToVerify
    }
    return nil
}
@@ -1,42 +0,0 @@
package signing_test

import (
    "testing"
    "time"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/crypto/bls"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestVerifyRegistrationSignature(t *testing.T) {
    sk, err := bls.RandKey()
    require.NoError(t, err)
    reg := &ethpb.ValidatorRegistrationV1{
        FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
        GasLimit:     123456,
        Timestamp:    uint64(time.Now().Unix()),
        Pubkey:       sk.PublicKey().Marshal(),
    }
    d := params.BeaconConfig().DomainApplicationBuilder
    domain, err := signing.ComputeDomain(d, nil, nil)
    require.NoError(t, err)
    sr, err := signing.ComputeSigningRoot(reg, domain)
    require.NoError(t, err)
    sk.Sign(sr[:]).Marshal()

    sReg := &ethpb.SignedValidatorRegistrationV1{
        Message:   reg,
        Signature: sk.Sign(sr[:]).Marshal(),
    }
    require.NoError(t, signing.VerifyRegistrationSignature(sReg))

    sReg.Signature = []byte("bad")
    require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrSigFailedToVerify)

    sReg.Message = nil
    require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrNilRegistration)
}
@@ -13,6 +13,7 @@ go_library(
    visibility = [
        "//beacon-chain:__subpackages__",
        "//cmd/beacon-chain:__subpackages__",
+       "//genesis:__subpackages__",
        "//testing/slasher/simulator:__pkg__",
        "//tools:__subpackages__",
    ],

@@ -39,7 +39,6 @@ go_library(
        "//beacon-chain/db/filters:go_default_library",
        "//beacon-chain/db/iface:go_default_library",
        "//beacon-chain/state:go_default_library",
-       "//beacon-chain/state/genesis:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",

@@ -51,6 +50,7 @@ go_library(
        "//container/slice:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//encoding/ssz/detect:go_default_library",
+       "//genesis:go_default_library",
        "//io/file:go_default_library",
        "//monitoring/progress:go_default_library",
        "//monitoring/tracing:go_default_library",

@@ -107,7 +107,6 @@ go_test(
        "//beacon-chain/db/filters:go_default_library",
        "//beacon-chain/db/iface:go_default_library",
        "//beacon-chain/state:go_default_library",
-       "//beacon-chain/state/genesis:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",

@@ -117,6 +116,8 @@ go_test(
        "//consensus-types/light-client:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
+       "//genesis:go_default_library",
+       "//genesis/embedded:go_default_library",
        "//proto/dbval:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",

@@ -8,6 +8,7 @@ import (
    dbIface "github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v6/encoding/ssz/detect"
+   "github.com/OffchainLabs/prysm/v6/genesis"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/pkg/errors"
)
@@ -97,8 +98,22 @@ func (s *Store) EnsureEmbeddedGenesis(ctx context.Context) error {
    if err != nil {
        return err
    }
-   if gs != nil && !gs.IsNil() {
+   if !state.IsNil(gs) {
        return s.SaveGenesisData(ctx, gs)
    }
    return nil
}

+type LegacyGenesisProvider struct {
+   store *Store
+}
+
+func NewLegacyGenesisProvider(store *Store) *LegacyGenesisProvider {
+   return &LegacyGenesisProvider{store: store}
+}
+
+var _ genesis.Provider = &LegacyGenesisProvider{}
+
+func (p *LegacyGenesisProvider) Genesis(ctx context.Context) (state.BeaconState, error) {
+   return p.store.LegacyGenesisState(ctx)
+}
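LegacyGenesisProvider adapts the KV store to the genesis.Provider interface consumed by genesis.Initialize (wired up in the node changes further down). A sketch of how such a provider chain could be scanned until one source yields a state; the interface shape is inferred from this diff, and firstGenesis is a hypothetical helper, not Prysm code:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// beaconState stands in for state.BeaconState.
type beaconState struct{ slot uint64 }

// provider mirrors the shape of genesis.Provider assumed from the diff:
// a single Genesis(ctx) accessor that may return nil when it has nothing.
type provider interface {
	Genesis(ctx context.Context) (*beaconState, error)
}

type emptyProvider struct{}

func (emptyProvider) Genesis(context.Context) (*beaconState, error) { return nil, nil }

type fixedProvider struct{ st *beaconState }

func (p fixedProvider) Genesis(context.Context) (*beaconState, error) { return p.st, nil }

// firstGenesis (hypothetical) asks each provider in order and returns the
// first non-nil state, which is how a provider chain could back Initialize.
func firstGenesis(ctx context.Context, providers ...provider) (*beaconState, error) {
	for _, p := range providers {
		st, err := p.Genesis(ctx)
		if err != nil {
			return nil, err
		}
		if st != nil {
			return st, nil
		}
	}
	return nil, errors.New("no provider had a genesis state")
}

func main() {
	st, err := firstGenesis(context.Background(), emptyProvider{}, fixedProvider{st: &beaconState{}})
	if err != nil {
		panic(err)
	}
	fmt.Println("found genesis state at slot", st.slot)
}
```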
@@ -8,6 +8,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
+   "github.com/OffchainLabs/prysm/v6/genesis"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"

@@ -152,6 +153,7 @@ func TestEnsureEmbeddedGenesis(t *testing.T) {
        require.NoError(t, undo())
    }()

+   genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
    ctx := t.Context()
    db := setupDB(t)
@@ -6,14 +6,12 @@ import (
    "fmt"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
-   "github.com/OffchainLabs/prysm/v6/beacon-chain/state/genesis"
    statenative "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
    "github.com/OffchainLabs/prysm/v6/config/features"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
-   "github.com/OffchainLabs/prysm/v6/monitoring/tracing"
+   "github.com/OffchainLabs/prysm/v6/genesis"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/runtime/version"

@@ -65,21 +63,21 @@ func (s *Store) StateOrError(ctx context.Context, blockRoot [32]byte) (state.Bea
    return st, nil
}

// GenesisState returns the genesis state in beacon chain.
func (s *Store) GenesisState(ctx context.Context) (state.BeaconState, error) {
    ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisState")
    st, err := genesis.State()
    if errors.Is(err, genesis.ErrGenesisStateNotInitialized) {
        log.WithError(err).Error("genesis state not initialized, returning nil state. this should only happen in tests")
        return nil, nil
    }
    return st, err
}

// GenesisState returns the genesis state in beacon chain.
func (s *Store) LegacyGenesisState(ctx context.Context) (state.BeaconState, error) {
    ctx, span := trace.StartSpan(ctx, "BeaconDB.LegacyGenesisState")
    defer span.End()

-   cached, err := genesis.State(params.BeaconConfig().ConfigName)
-   if err != nil {
-       tracing.AnnotateError(span, err)
-       return nil, err
-   }
-   span.SetAttributes(trace.BoolAttribute("cache_hit", cached != nil))
-   if cached != nil {
-       return cached, nil
-   }
-
    var err error
    var st state.BeaconState
    err = s.db.View(func(tx *bolt.Tx) error {
        // Retrieve genesis block's signing root from blocks bucket,
@@ -15,6 +15,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
+   "github.com/OffchainLabs/prysm/v6/genesis"
    enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/assert"

@@ -488,7 +489,7 @@ func TestGenesisState_CanSaveRetrieve(t *testing.T) {
    require.NoError(t, err)
    require.NoError(t, st.SetSlot(1))
    require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), headRoot))
-   require.NoError(t, db.SaveState(t.Context(), st, headRoot))
+   genesis.StoreStateDuringTest(t, st)

    savedGenesisS, err := db.GenesisState(t.Context())
    require.NoError(t, err)

@@ -661,7 +662,7 @@ func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) {
    require.NoError(t, err)
    genesisRoot := [32]byte{'a'}
    require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))
-   require.NoError(t, db.SaveState(t.Context(), genesisState, genesisRoot))
+   genesis.StoreStateDuringTest(t, genesisState)

    b := util.NewBeaconBlock()
    b.Block.Slot = 1
@@ -3,9 +3,9 @@ package kv

import (
    "testing"

-   "github.com/OffchainLabs/prysm/v6/beacon-chain/state/genesis"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
+   "github.com/OffchainLabs/prysm/v6/genesis/embedded"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
)

@@ -18,7 +18,7 @@ func TestSaveOrigin(t *testing.T) {
    ctx := t.Context()
    db := setupDB(t)

-   st, err := genesis.State(params.MainnetName)
+   st, err := embedded.ByName(params.MainnetName)
    require.NoError(t, err)

    sb, err := st.MarshalSSZ()
@@ -125,6 +125,7 @@ go_test(
        "//contracts/deposit/mock:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
+       "//genesis:go_default_library",
        "//monitoring/clientstats:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",

@@ -22,6 +22,7 @@ import (
    contracts "github.com/OffchainLabs/prysm/v6/contracts/deposit"
    "github.com/OffchainLabs/prysm/v6/contracts/deposit/mock"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
+   "github.com/OffchainLabs/prysm/v6/genesis"
    "github.com/OffchainLabs/prysm/v6/monitoring/clientstats"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/assert"

@@ -381,6 +382,7 @@ func TestInitDepositCache_OK(t *testing.T) {
    require.NoError(t, err)
    require.NoError(t, s.cfg.beaconDB.SaveGenesisBlockRoot(t.Context(), blockRootA))
    require.NoError(t, s.cfg.beaconDB.SaveState(t.Context(), emptyState, blockRootA))
+   genesis.StoreStateDuringTest(t, emptyState)
    s.chainStartData.Chainstarted = true
    require.NoError(t, s.initDepositCaches(t.Context(), ctrs))
    require.Equal(t, 3, len(s.cfg.depositCache.PendingContainers(t.Context(), nil)))

@@ -446,6 +448,7 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {
    require.NoError(t, s.cfg.beaconDB.SaveGenesisBlockRoot(t.Context(), headRoot))
    require.NoError(t, s.cfg.beaconDB.SaveState(t.Context(), emptyState, headRoot))
    require.NoError(t, stateGen.SaveState(t.Context(), headRoot, emptyState))
+   genesis.StoreStateDuringTest(t, emptyState)
    s.cfg.stateGen = stateGen
    require.NoError(t, emptyState.SetEth1DepositIndex(3))

@@ -594,6 +597,7 @@ func TestService_EnsureConsistentPowchainData(t *testing.T) {
    require.NoError(t, err)
    assert.NoError(t, genState.SetSlot(1000))

+   genesis.StoreStateDuringTest(t, genState)
    require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(t.Context(), genState))
    _, err = s1.validPowchainData(t.Context())
    require.NoError(t, err)

@@ -655,6 +659,7 @@ func TestService_EnsureValidPowchainData(t *testing.T) {
    require.NoError(t, err)
    assert.NoError(t, genState.SetSlot(1000))

+   genesis.StoreStateDuringTest(t, genState)
    require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(t.Context(), genState))

    err = s1.cfg.beaconDB.SaveExecutionChainData(t.Context(), &ethpb.ETH1ChainData{
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
+       "clear_db.go",
        "config.go",
        "log.go",
        "node.go",

@@ -49,7 +50,6 @@ go_library(
        "//beacon-chain/sync/backfill:go_default_library",
        "//beacon-chain/sync/backfill/coverage:go_default_library",
        "//beacon-chain/sync/checkpoint:go_default_library",
-       "//beacon-chain/sync/genesis:go_default_library",
        "//beacon-chain/sync/initial-sync:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//cmd:go_default_library",

@@ -59,6 +59,7 @@ go_library(
        "//consensus-types/primitives:go_default_library",
        "//container/slice:go_default_library",
        "//encoding/bytesutil:go_default_library",
+       "//genesis:go_default_library",
        "//monitoring/prometheus:go_default_library",
        "//monitoring/tracing:go_default_library",
        "//runtime:go_default_library",
beacon-chain/node/clear_db.go (new file, 101 lines)
@@ -0,0 +1,101 @@
package node

import (
    "context"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/slasherkv"
    "github.com/OffchainLabs/prysm/v6/cmd"
    "github.com/pkg/errors"
    "github.com/urfave/cli/v2"
)

type dbClearer struct {
    shouldClear bool
    force       bool
    confirmed   bool
}

const (
    clearConfirmation = "This will delete your beacon chain database stored in your data directory. " +
        "Your database backups will not be removed - do you want to proceed? (Y/N)"

    clearDeclined = "Database will not be deleted. No changes have been made."
)

func (c *dbClearer) clearKV(ctx context.Context, db *kv.Store) (*kv.Store, error) {
    if !c.shouldProceed() {
        return db, nil
    }

    log.Warning("Removing database")
    if err := db.ClearDB(); err != nil {
        return nil, errors.Wrap(err, "could not clear database")
    }
    return kv.NewKVStore(ctx, db.DatabasePath())
}

func (c *dbClearer) clearBlobs(bs *filesystem.BlobStorage) error {
    if !c.shouldProceed() {
        return nil
    }

    log.Warning("Removing blob storage")
    if err := bs.Clear(); err != nil {
        return errors.Wrap(err, "could not clear blob storage")
    }

    return nil
}

func (c *dbClearer) clearColumns(cs *filesystem.DataColumnStorage) error {
    if !c.shouldProceed() {
        return nil
    }

    log.Warning("Removing data columns storage")
    if err := cs.Clear(); err != nil {
        return errors.Wrap(err, "could not clear data columns storage")
    }

    return nil
}

func (c *dbClearer) clearSlasher(ctx context.Context, db *slasherkv.Store) (*slasherkv.Store, error) {
    if !c.shouldProceed() {
        return db, nil
    }

    log.Warning("Removing slasher database")
    if err := db.ClearDB(); err != nil {
        return nil, errors.Wrap(err, "could not clear slasher database")
    }
    return slasherkv.NewKVStore(ctx, db.DatabasePath())
}

func (c *dbClearer) shouldProceed() bool {
    if !c.shouldClear {
        return false
    }
    if c.force {
        return true
    }
    if !c.confirmed {
        confirmed, err := cmd.ConfirmAction(clearConfirmation, clearDeclined)
        if err != nil {
            log.WithError(err).Error("Not clearing db due to confirmation error")
            return false
        }
        c.confirmed = confirmed
    }
    return c.confirmed
}

func newDbClearer(cliCtx *cli.Context) *dbClearer {
    force := cliCtx.Bool(cmd.ForceClearDB.Name)
    return &dbClearer{
        shouldClear: cliCtx.Bool(cmd.ClearDB.Name) || force,
        force:       force,
    }
}
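A design point worth noting: shouldProceed memoizes an affirmative interactive confirmation in the confirmed field, so with the clear-db flag the user is prompted at most once even though the clearer is consulted separately for the KV store, blob storage, data columns, and the slasher DB. A self-contained sketch of that decision logic with the prompt stubbed out:

```go
package main

import "fmt"

var prompts int // counts how often the (stubbed) confirmation prompt runs

// confirmAction is a stand-in for cmd.ConfirmAction; here it always says yes.
func confirmAction() bool { prompts++; return true }

// clearer reproduces dbClearer's decision logic.
type clearer struct{ shouldClear, force, confirmed bool }

func (c *clearer) shouldProceed() bool {
	if !c.shouldClear {
		return false
	}
	if c.force {
		return true
	}
	if !c.confirmed {
		c.confirmed = confirmAction()
	}
	return c.confirmed
}

func main() {
	c := &clearer{shouldClear: true}
	// Four storage backends consult the same clearer; the prompt fires once.
	for _, backend := range []string{"kv", "blobs", "columns", "slasher"} {
		fmt.Println(backend, "clear:", c.shouldProceed())
	}
	fmt.Println("prompt shown", prompts, "time(s)") // -> 1
}
```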
@@ -52,7 +52,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/backfill"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/backfill/coverage"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/checkpoint"
-   "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/genesis"
    initialsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
    "github.com/OffchainLabs/prysm/v6/cmd"

@@ -62,6 +61,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/container/slice"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
+   "github.com/OffchainLabs/prysm/v6/genesis"
    "github.com/OffchainLabs/prysm/v6/monitoring/prometheus"
    "github.com/OffchainLabs/prysm/v6/runtime"
    "github.com/OffchainLabs/prysm/v6/runtime/prereqs"

@@ -113,7 +113,7 @@ type BeaconNode struct {
    slasherAttestationsFeed *event.Feed
    finalizedStateAtStartUp state.BeaconState
    serviceFlagOpts         *serviceFlagOpts
-   GenesisInitializer      genesis.Initializer
+   GenesisProviders        []genesis.Provider
    CheckpointInitializer   checkpoint.Initializer
    forkChoicer             forkchoice.ForkChoicer
    clockWaiter             startup.ClockWaiter

@@ -127,6 +127,7 @@ type BeaconNode struct {
    syncChecker    *initialsync.SyncChecker
    slasherEnabled bool
    lcStore        *lightclient.Store
+   ConfigOptions  []params.Option
}

// New creates a new node instance, sets up configuration options, and registers

@@ -135,18 +136,13 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
    if err := configureBeacon(cliCtx); err != nil {
        return nil, errors.Wrap(err, "could not set beacon configuration options")
    }

-   // Initializes any forks here.
-   params.BeaconConfig().InitializeForkSchedule()
-
-   registry := runtime.NewServiceRegistry()
    ctx := cliCtx.Context

    beacon := &BeaconNode{
        cliCtx:    cliCtx,
        ctx:       ctx,
        cancel:    cancel,
-       services:  registry,
+       services:  runtime.NewServiceRegistry(),
        stop:      make(chan struct{}),
        stateFeed: new(event.Feed),
        blockFeed: new(event.Feed),
@@ -173,6 +169,25 @@
        }
    }

+   dbClearer := newDbClearer(cliCtx)
+   dataDir := cliCtx.String(cmd.DataDirFlag.Name)
+   boltFname := filepath.Join(dataDir, kv.BeaconNodeDbDirName)
+   kvdb, err := openDB(ctx, boltFname, dbClearer)
+   if err != nil {
+       return nil, errors.Wrap(err, "could not open database")
+   }
+   beacon.db = kvdb
+
+   providers := append(beacon.GenesisProviders, kv.NewLegacyGenesisProvider(kvdb))
+   if err := genesis.Initialize(ctx, dataDir, providers...); err != nil {
+       return nil, errors.Wrap(err, "could not initialize genesis state")
+   }
+
+   beacon.ConfigOptions = append([]params.Option{params.WithGenesisValidatorsRoot(genesis.ValidatorsRoot())}, beacon.ConfigOptions...)
+   params.BeaconConfig().ApplyOptions(beacon.ConfigOptions...)
+   params.BeaconConfig().InitializeForkSchedule()
+   params.LogDigests(params.BeaconConfig())
+
    synchronizer := startup.NewClockSynchronizer()
    beacon.clockWaiter = synchronizer
    beacon.forkChoicer = doublylinkedtree.New()

@@ -191,6 +206,9 @@
        }
        beacon.BlobStorage = blobs
    }
+   if err := dbClearer.clearBlobs(beacon.BlobStorage); err != nil {
+       return nil, errors.Wrap(err, "could not clear blob storage")
+   }

    if beacon.DataColumnStorage == nil {
        dataColumnStorage, err := filesystem.NewDataColumnStorage(cliCtx.Context, beacon.DataColumnStorageOptions...)

@@ -200,8 +218,11 @@

        beacon.DataColumnStorage = dataColumnStorage
    }
+   if err := dbClearer.clearColumns(beacon.DataColumnStorage); err != nil {
+       return nil, errors.Wrap(err, "could not clear data column storage")
+   }

-   bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
+   bfs, err := startBaseServices(cliCtx, beacon, depositAddress, dbClearer)
    if err != nil {
        return nil, errors.Wrap(err, "could not start modules")
    }
@@ -289,7 +310,7 @@ func configureBeacon(cliCtx *cli.Context) error {
    return nil
}

-func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string) (*backfill.Store, error) {
+func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string, clearer *dbClearer) (*backfill.Store, error) {
    ctx := cliCtx.Context
    log.Debugln("Starting DB")
    if err := beacon.startDB(cliCtx, depositAddress); err != nil {

@@ -300,7 +321,7 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
    beacon.DataColumnStorage.WarmCache()

    log.Debugln("Starting Slashing DB")
-   if err := beacon.startSlasherDB(cliCtx); err != nil {
+   if err := beacon.startSlasherDB(cliCtx, clearer); err != nil {
        return nil, errors.Wrap(err, "could not start slashing DB")
    }
@@ -480,47 +501,6 @@ func (b *BeaconNode) Close() {
    close(b.stop)
}

-func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath string) (*kv.Store, error) {
-   var err error
-   clearDBConfirmed := false
-
-   if clearDB && !forceClearDB {
-       const (
-           actionText = "This will delete your beacon chain database stored in your data directory. " +
-               "Your database backups will not be removed - do you want to proceed? (Y/N)"
-
-           deniedText = "Database will not be deleted. No changes have been made."
-       )
-
-       clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
-       if err != nil {
-           return nil, errors.Wrapf(err, "could not confirm action")
-       }
-   }
-
-   if clearDBConfirmed || forceClearDB {
-       log.Warning("Removing database")
-       if err := d.ClearDB(); err != nil {
-           return nil, errors.Wrap(err, "could not clear database")
-       }
-
-       if err := b.BlobStorage.Clear(); err != nil {
-           return nil, errors.Wrap(err, "could not clear blob storage")
-       }
-
-       if err := b.DataColumnStorage.Clear(); err != nil {
-           return nil, errors.Wrap(err, "could not clear data column storage")
-       }
-
-       d, err = kv.NewKVStore(b.ctx, dbPath)
-       if err != nil {
-           return nil, errors.Wrap(err, "could not create new database")
-       }
-   }
-
-   return d, nil
-}
-
func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
    knownContract, err := b.db.DepositContractAddress(b.ctx)
    if err != nil {
@@ -544,60 +524,36 @@ func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
    return nil
}

-func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
-   var depositCache cache.DepositCache
-
-   baseDir := cliCtx.String(cmd.DataDirFlag.Name)
-   dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
-   clearDBRequired := cliCtx.Bool(cmd.ClearDB.Name)
-   forceClearDBRequired := cliCtx.Bool(cmd.ForceClearDB.Name)
-
+func openDB(ctx context.Context, dbPath string, clearer *dbClearer) (*kv.Store, error) {
    log.WithField("databasePath", dbPath).Info("Checking DB")

-   d, err := kv.NewKVStore(b.ctx, dbPath)
+   d, err := kv.NewKVStore(ctx, dbPath)
    if err != nil {
-       return errors.Wrapf(err, "could not create database at %s", dbPath)
+       return nil, errors.Wrapf(err, "could not create database at %s", dbPath)
    }

-   if clearDBRequired || forceClearDBRequired {
-       d, err = b.clearDB(clearDBRequired, forceClearDBRequired, d, dbPath)
-       if err != nil {
-           return errors.Wrap(err, "could not clear database")
-       }
+   d, err = clearer.clearKV(ctx, d)
+   if err != nil {
+       return nil, errors.Wrap(err, "could not clear database")
    }

-   if err := d.RunMigrations(b.ctx); err != nil {
-       return err
-   }
+   return d, d.RunMigrations(ctx)
}

-   b.db = d
-
-   depositCache, err = depositsnapshot.New()
+func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
+   depositCache, err := depositsnapshot.New()
    if err != nil {
        return errors.Wrap(err, "could not create deposit cache")
    }

    b.depositCache = depositCache

-   if b.GenesisInitializer != nil {
-       if err := b.GenesisInitializer.Initialize(b.ctx, d); err != nil {
-           if errors.Is(err, db.ErrExistingGenesisState) {
-               return errors.Errorf("Genesis state flag specified but a genesis state "+
-                   "exists already. Run again with --%s and/or ensure you are using the "+
-                   "appropriate testnet flag to load the given genesis state.", cmd.ClearDB.Name)
-           }
-
-           return errors.Wrap(err, "could not load genesis from file")
-       }
-   }
-
-   if err := b.db.EnsureEmbeddedGenesis(b.ctx); err != nil {
-       return errors.Wrap(err, "could not ensure embedded genesis")
-   }
-
    if b.CheckpointInitializer != nil {
        log.Info("Checkpoint sync - Downloading origin state and block")
-       if err := b.CheckpointInitializer.Initialize(b.ctx, d); err != nil {
+       if err := b.CheckpointInitializer.Initialize(b.ctx, b.db); err != nil {
            return err
        }
    }
@@ -609,49 +565,25 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
    log.WithField("address", depositAddress).Info("Deposit contract")
    return nil
}

-func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
+func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context, clearer *dbClearer) error {
    if !b.slasherEnabled {
        return nil
    }
    baseDir := cliCtx.String(cmd.DataDirFlag.Name)

    if cliCtx.IsSet(flags.SlasherDirFlag.Name) {
        baseDir = cliCtx.String(flags.SlasherDirFlag.Name)
    }

    dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
-   clearDB := cliCtx.Bool(cmd.ClearDB.Name)
-   forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)
-
    log.WithField("databasePath", dbPath).Info("Checking DB")

    d, err := slasherkv.NewKVStore(b.ctx, dbPath)
    if err != nil {
        return err
    }
-   clearDBConfirmed := false
-   if clearDB && !forceClearDB {
-       actionText := "This will delete your beacon chain database stored in your data directory. " +
-           "Your database backups will not be removed - do you want to proceed? (Y/N)"
-       deniedText := "Database will not be deleted. No changes have been made."
-       clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
-       if err != nil {
-           return err
-       }
+   d, err = clearer.clearSlasher(b.ctx, d)
+   if err != nil {
+       return errors.Wrap(err, "could not clear slasher database")
    }
-   if clearDBConfirmed || forceClearDB {
-       log.Warning("Removing database")
-       if err := d.ClearDB(); err != nil {
-           return errors.Wrap(err, "could not clear database")
-       }
-
-       d, err = slasherkv.NewKVStore(b.ctx, dbPath)
-       if err != nil {
-           return errors.Wrap(err, "could not create new database")
-       }
-   }

    b.slasherDB = d
    return nil
}
@@ -5,6 +5,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/builder"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
+   "github.com/OffchainLabs/prysm/v6/config/params"
)

// Option for beacon node configuration.

@@ -51,6 +52,13 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
    }
}

+func WithConfigOptions(opt ...params.Option) Option {
+   return func(bn *BeaconNode) error {
+       bn.ConfigOptions = append(bn.ConfigOptions, opt...)
+       return nil
+   }
+}
+
// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode
func WithDataColumnStorage(bs *filesystem.DataColumnStorage) Option {
    return func(bn *BeaconNode) error {
@@ -71,7 +71,6 @@ go_library(
        "//monitoring/tracing:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//network:go_default_library",
-       "//network/forks:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/metadata:go_default_library",
        "//runtime:go_default_library",

@@ -169,7 +168,6 @@ go_test(
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//network:go_default_library",
-       "//network/forks:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/metadata:go_default_library",

@@ -180,6 +178,7 @@ go_test(
        "//testing/util:go_default_library",
        "//time:go_default_library",
        "//time/slots:go_default_library",
+       "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_ethereum_go_ethereum//crypto:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
@@ -15,7 +15,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/crypto/hash"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
-   "github.com/OffchainLabs/prysm/v6/network/forks"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/pkg/errors"

@@ -274,14 +273,8 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
        return errors.New("attempted to broadcast nil light client optimistic update")
    }

-   forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
-   if err != nil {
-       err := errors.Wrap(err, "could not retrieve fork digest")
-       tracing.AnnotateError(span, err)
-       return err
-   }
-
-   if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(forkDigest)); err != nil {
+   digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
+   if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
        log.WithError(err).Debug("Failed to broadcast light client optimistic update")
        err := errors.Wrap(err, "could not publish message")
        tracing.AnnotateError(span, err)

@@ -300,13 +293,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
        return errors.New("attempted to broadcast nil light client finality update")
    }

-   forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
-   if err != nil {
-       err := errors.Wrap(err, "could not retrieve fork digest")
-       tracing.AnnotateError(span, err)
-       return err
-   }
-
+   forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
    if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
        log.WithError(err).Debug("Failed to broadcast light client finality update")
        err := errors.Wrap(err, "could not publish message")
@@ -16,12 +16,13 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
    p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
+   "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    "github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
-   "github.com/OffchainLabs/prysm/v6/network/forks"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    testpb "github.com/OffchainLabs/prysm/v6/proto/testing"
    "github.com/OffchainLabs/prysm/v6/runtime/version"

@@ -61,6 +62,7 @@ func TestService_Broadcast(t *testing.T) {
    topic := "/eth2/%x/testing"
    // Set a test gossip mapping for testpb.TestSimpleMessage.
    GossipTypeMapping[reflect.TypeOf(msg)] = topic
+   p.clock = startup.NewClock(p.genesisTime, bytesutil.ToBytes32(p.genesisValidatorsRoot))
    digest, err := p.currentForkDigest()
    require.NoError(t, err)
    topic = fmt.Sprintf(topic, digest)

@@ -551,9 +553,7 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
    require.NoError(t, err)

    GossipTypeMapping[reflect.TypeOf(msg)] = LightClientOptimisticUpdateTopicFormat
-   digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
-   require.NoError(t, err)
-   topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, digest)
+   topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))

    // External peer subscribes to the topic.
    topic += p.Encoding().ProtocolSuffix()

@@ -618,9 +618,7 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
    require.NoError(t, err)

    GossipTypeMapping[reflect.TypeOf(msg)] = LightClientFinalityUpdateTopicFormat
-   digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
-   require.NoError(t, err)
-   topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, digest)
+   topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))

    // External peer subscribes to the topic.
    topic += p.Encoding().ProtocolSuffix()
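In both tests the gossip topic is produced by formatting the 4-byte fork digest into a topic template, just as TestService_Broadcast does earlier with its /eth2/%x/testing format. A tiny sketch of that formatting; the template here reuses the test's pattern and the digest value is made up:

```go
package main

import "fmt"

func main() {
	// The test above uses "/eth2/%x/testing"; real light-client topics follow
	// the same shape with a different suffix (assumed, not shown in this diff).
	const topicFormat = "/eth2/%x/testing"
	digest := [4]byte{0xcf, 0x1a, 0x3a, 0xcb}
	topic := fmt.Sprintf(topicFormat, digest) // %x hex-encodes the byte array
	fmt.Println(topic)                        // -> /eth2/cf1a3acb/testing
}
```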
@@ -11,6 +11,7 @@ import (

    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
+   "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    "github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
    "github.com/OffchainLabs/prysm/v6/config/features"
    "github.com/OffchainLabs/prysm/v6/config/params"

@@ -569,8 +570,10 @@ func (s *Service) createLocalNode(
    localNode.SetFallbackIP(ipAddr)
    localNode.SetFallbackUDP(udpPort)

-   localNode, err = addForkEntry(localNode, s.genesisTime, s.genesisValidatorsRoot)
-   if err != nil {
+   clock := startup.NewClock(s.genesisTime, [32]byte(s.genesisValidatorsRoot))
+   current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
+   next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
+   if err := updateENR(localNode, current, next); err != nil {
        return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
    }

@@ -685,7 +688,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {
    // Ignore nodes that don't match our fork digest.
    nodeENR := node.Record()
    if s.genesisValidatorsRoot != nil {
-       if err := s.compareForkENR(nodeENR); err != nil {
+       if err := compareForkENR(s.dv5Listener.LocalNode().Node().Record(), nodeENR); err != nil {
            log.WithError(err).Trace("Fork ENR mismatches between peer and local node")
            return false
        }
@@ -3,19 +3,18 @@ package p2p

import (
    "bytes"
    "fmt"
-   "time"

+   "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    "github.com/OffchainLabs/prysm/v6/config/params"
-   "github.com/OffchainLabs/prysm/v6/network/forks"
    pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
-   prysmTime "github.com/OffchainLabs/prysm/v6/time"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/enr"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

+var errEth2ENRDigestMismatch = errors.New("fork digest of peer does not match local value")
+
// ENR key used for Ethereum consensus-related fork data.
var eth2ENRKey = params.BeaconNetworkConfig().ETH2Key

@@ -25,29 +24,30 @@ func (s *Service) currentForkDigest() ([4]byte, error) {
    if !s.isInitialized() {
        return [4]byte{}, errors.New("state is not initialized")
    }
-   return forks.CreateForkDigest(s.genesisTime, s.genesisValidatorsRoot)
+
+   clock := startup.NewClock(s.genesisTime, [32]byte(s.genesisValidatorsRoot))
+   return params.ForkDigest(clock.CurrentEpoch()), nil
}

// Compares fork ENRs between an incoming peer's record and our node's
// local record values for current and next fork version/epoch.
-func (s *Service) compareForkENR(record *enr.Record) error {
-   currentRecord := s.dv5Listener.LocalNode().Node().Record()
-   peerForkENR, err := forkEntry(record)
+func compareForkENR(self, peer *enr.Record) error {
+   peerForkENR, err := forkEntry(peer)
    if err != nil {
        return err
    }
-   currentForkENR, err := forkEntry(currentRecord)
+   currentForkENR, err := forkEntry(self)
    if err != nil {
        return err
    }
-   enrString, err := SerializeENR(record)
+   enrString, err := SerializeENR(peer)
    if err != nil {
        return err
    }
    // Clients SHOULD connect to peers with current_fork_digest, next_fork_version,
    // and next_fork_epoch that match local values.
    if !bytes.Equal(peerForkENR.CurrentForkDigest, currentForkENR.CurrentForkDigest) {
-       return fmt.Errorf(
+       return errors.Wrapf(errEth2ENRDigestMismatch,
            "fork digest of peer with ENR %s: %v, does not match local value: %v",
            enrString,
            peerForkENR.CurrentForkDigest,
@@ -74,41 +74,36 @@ func (s *Service) compareForkENR(record *enr.Record) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Adds a fork entry as an ENR record under the Ethereum consensus EnrKey for
|
||||
// the local node. The fork entry is an ssz-encoded enrForkID type
|
||||
// which takes into account the current fork version from the current
|
||||
// epoch to create a fork digest, the next fork version,
|
||||
// and the next fork epoch.
|
||||
func addForkEntry(
|
||||
node *enode.LocalNode,
|
||||
genesisTime time.Time,
|
||||
genesisValidatorsRoot []byte,
|
||||
) (*enode.LocalNode, error) {
|
||||
digest, err := forks.CreateForkDigest(genesisTime, genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currentSlot := slots.CurrentSlot(genesisTime)
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
if prysmTime.Now().Before(genesisTime) {
|
||||
currentEpoch = 0
|
||||
}
|
||||
nextForkVersion, nextForkEpoch, err := forks.NextForkData(currentEpoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func updateENR(node *enode.LocalNode, entry, next params.NetworkScheduleEntry) error {
|
||||
enrForkID := &pb.ENRForkID{
|
||||
CurrentForkDigest: digest[:],
|
||||
NextForkVersion: nextForkVersion[:],
|
||||
NextForkEpoch: nextForkEpoch,
|
||||
CurrentForkDigest: entry.ForkDigest[:],
|
||||
NextForkVersion: next.ForkVersion[:],
|
||||
NextForkEpoch: next.Epoch,
|
||||
}
|
||||
if entry.Epoch == next.Epoch {
|
||||
enrForkID.NextForkEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
}
|
||||
logFields := logrus.Fields{
|
||||
"CurrentForkDigest": fmt.Sprintf("%#x", enrForkID.CurrentForkDigest),
|
||||
"NextForkVersion": fmt.Sprintf("%#x", enrForkID.NextForkVersion),
|
||||
"NextForkEpoch": fmt.Sprintf("%d", enrForkID.NextForkEpoch),
|
||||
}
|
||||
if params.BeaconConfig().FuluForkEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
if entry.ForkDigest == next.ForkDigest {
|
||||
node.Set(enr.WithEntry(nfdEnrKey, make([]byte, len(next.ForkDigest))))
|
||||
} else {
|
||||
node.Set(enr.WithEntry(nfdEnrKey, next.ForkDigest[:]))
|
||||
}
|
||||
logFields[nfdEnrKey] = fmt.Sprintf("%#x", next.ForkDigest)
|
||||
}
|
||||
log.WithFields(logFields).Info("Updating ENR Fork ID")
|
||||
enc, err := enrForkID.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
forkEntry := enr.WithEntry(eth2ENRKey, enc)
|
||||
node.Set(forkEntry)
|
||||
return node, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieves an enrForkID from an ENR record by key lookup
|
||||
|
||||
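The replacement of addForkEntry with updateENR above moves digest computation out of the p2p layer: callers now fetch the current and next entries from the params network schedule and hand them to updateENR, which also mirrors the next fork digest under the "nfd" key once Fulu is scheduled. A minimal caller sketch under those assumptions (refreshForkENR itself is hypothetical):

func refreshForkENR(node *enode.LocalNode, epoch primitives.Epoch) error {
	// Look up where we are in the schedule and what comes next,
	// then rewrite both ENR entries in one shot, as createLocalNode does.
	current := params.GetNetworkScheduleEntry(epoch)
	next := params.NextNetworkScheduleEntry(epoch)
	return updateENR(node, current, next)
}
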
@@ -8,242 +8,121 @@ import (
"testing"
"time"

mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
ma "github.com/multiformats/go-multiaddr"
"github.com/sirupsen/logrus"
logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
const port = 2000

ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, fieldparams.RootLength)
s := &Service{
cfg: &Config{
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()

// Allow bootnode's table to have its initial refresh. This allows
// inbound nodes to be added in.
time.Sleep(5 * time.Second)

bootNode := bootListener.Self()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
}

var listeners []*listenerWrapper
for i := 1; i <= 5; i++ {
port := 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)

// We give every peer a different genesis validators root, which
// will cause each peer to have a different ForkDigest, preventing
// them from connecting according to our discovery rules for Ethereum consensus.
root := make([]byte, 32)
copy(root, strconv.Itoa(port))
s = &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: root,
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")
listeners = append(listeners, listener)
}
defer func() {
// Close down all peers.
for _, listener := range listeners {
listener.Close()
}
}()

// Wait for the nodes to have their local routing tables to be populated with the other nodes
time.Sleep(discoveryWaitTime)

lastListener := listeners[len(listeners)-1]
nodes := lastListener.Lookup(bootNode.ID())
if len(nodes) < 4 {
t.Errorf("The node's local table doesn't have the expected number of nodes. "+
"Expected more than or equal to %d but got %d", 4, len(nodes))
}

// Now, we start a new p2p service. It should have no peers aside from the
// bootnode given all nodes provided by discv5 will have different fork digests.
cfg.UDPPort = 14000
cfg.TCPPort = 14001
cfg.MaxPeers = 30
s, err = NewService(t.Context(), cfg)
require.NoError(t, err)
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener

addrs := make([]ma.Multiaddr, 0)

for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
require.NoError(t, err)
addrs = append(addrs, nodeAddrs...)
}
}

// We should not have valid peers if the fork digest mismatched.
assert.Equal(t, 0, len(addrs), "Expected 0 valid peers")
require.NoError(t, s.Stop())
}

func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
const port = 2000

func TestCompareForkENR(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
params.BeaconConfig().InitializeForkSchedule()
logrus.SetLevel(logrus.TraceLevel)
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
s := &Service{
cfg: &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()

// Allow bootnode's table to have its initial refresh. This allows
// inbound nodes to be added in.
time.Sleep(5 * time.Second)
db, err := enode.OpenDB("")
assert.NoError(t, err)
_, k := createAddrAndPrivKey(t)
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
self := enode.NewLocalNode(db, k)
require.NoError(t, updateENR(self, current, next))

bootNode := bootListener.Self()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
PingInterval: testPingInterval,
DisableLivenessCheck: true,
cases := []struct {
name string
expectErr error
expectLog string
node func(t *testing.T) *enode.Node
}{
{
name: "match",
node: func(t *testing.T) *enode.Node {
// Create a peer with the same current fork digest and next fork version/epoch.
peer := enode.NewLocalNode(db, k)
require.NoError(t, updateENR(peer, current, next))
return peer.Node()
},
},
{
name: "current digest mismatch",
node: func(t *testing.T) *enode.Node {
// Create a peer with the same current fork digest and next fork version/epoch.
peer := enode.NewLocalNode(db, k)
testDigest := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
require.NotEqual(t, current.ForkDigest, testDigest, "ensure test fork digest is unique")
current := current.Copy()
current.ForkDigest = testDigest
require.NoError(t, updateENR(peer, current, next))
return peer.Node()
},
expectErr: errEth2ENRDigestMismatch,
},
{
name: "next fork version mismatch",
node: func(t *testing.T) *enode.Node {
// Create a peer with the same current fork digest and next fork version/epoch.
peer := enode.NewLocalNode(db, k)
testVersion := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
require.NotEqual(t, next.ForkVersion, testVersion, "ensure test fork version is unique")
next := next.Copy()
next.ForkVersion = testVersion
require.NoError(t, updateENR(peer, current, next))
return peer.Node()
},
expectLog: "Peer matches fork digest but has different next fork version",
},
{
name: "next fork epoch mismatch",
node: func(t *testing.T) *enode.Node {
// Create a peer with the same current fork digest and next fork version/epoch.
peer := enode.NewLocalNode(db, k)
next := next.Copy()
next.Epoch = next.Epoch + 1
require.NoError(t, updateENR(peer, current, next))
return peer.Node()
},
expectLog: "Peer matches fork digest but has different next fork epoch",
},
}

var listeners []*listenerWrapper
for i := 1; i <= 5; i++ {
port := 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)
c := params.BeaconConfig().Copy()
nextForkEpoch := primitives.Epoch(i)
c.ForkVersionSchedule[[4]byte{'A', 'B', 'C', 'D'}] = nextForkEpoch
params.OverrideBeaconConfig(c)

// We give every peer a different genesis validators root, which
// will cause each peer to have a different ForkDigest, preventing
// them from connecting according to our discovery rules for Ethereum consensus.
s = &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")
listeners = append(listeners, listener)
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
hook := logTest.NewGlobal()
peer := c.node(t)
err := compareForkENR(self.Node().Record(), peer.Record())
if c.expectErr != nil {
require.ErrorIs(t, err, c.expectErr, "Expected error to match")
} else {
require.NoError(t, err, "Expected no error comparing fork ENRs")
}
if c.expectLog != "" {
require.LogsContain(t, hook, c.expectLog, "Expected log message not found")
}
})
}
defer func() {
// Close down all peers.
for _, listener := range listeners {
listener.Close()
}
}()

// Wait for the nodes to have their local routing tables to be populated with the other nodes
time.Sleep(discoveryWaitTime)

lastListener := listeners[len(listeners)-1]
nodes := lastListener.Lookup(bootNode.ID())
if len(nodes) < 4 {
t.Errorf("The node's local table doesn't have the expected number of nodes. "+
"Expected more than or equal to %d but got %d", 4, len(nodes))
}

// Now, we start a new p2p service. It should have no peers aside from the
// bootnode given all nodes provided by discv5 will have different fork digests.
cfg.UDPPort = 14000
cfg.TCPPort = 14001
cfg.MaxPeers = 30
cfg.StateNotifier = &mock.MockStateNotifier{}
s, err = NewService(t.Context(), cfg)
require.NoError(t, err)

s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
addrs := make([]ma.Multiaddr, 0, len(nodes))

for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
require.NoError(t, err)
addrs = append(addrs, nodeAddrs...)
}
}
if len(addrs) == 0 {
t.Error("Expected to have valid peers, got 0")
}

require.LogsContain(t, hook, "Peer matches fork digest but has different next fork epoch")
require.NoError(t, s.Stop())
}

func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
c.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): 0,
{0, 0, 0, 1}: 1,
}
nextForkEpoch := primitives.Epoch(1)
nextForkVersion := []byte{0, 0, 0, 1}
params.OverrideBeaconConfig(c)
params.BeaconConfig().InitializeForkSchedule()

genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
digest, err := forks.CreateForkDigest(genesisTime, make([]byte, 32))
require.NoError(t, err)
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
enrForkID := &pb.ENRForkID{
CurrentForkDigest: digest[:],
NextForkVersion: nextForkVersion,
NextForkEpoch: nextForkEpoch,
CurrentForkDigest: current.ForkDigest[:],
NextForkVersion: next.ForkVersion[:],
NextForkEpoch: next.Epoch,
}
enc, err := enrForkID.MarshalSSZ()
require.NoError(t, err)
entry := enr.WithEntry(eth2ENRKey, enc)
// In epoch 1 of current time, the fork version should be
// {0, 0, 0, 1} according to the configuration override above.
temp := t.TempDir()
randNum := rand.Int()
tempPath := path.Join(temp, strconv.Itoa(randNum))
@@ -255,18 +134,16 @@ func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
localNode := enode.NewLocalNode(db, pkey)
localNode.Set(entry)

want, err := signing.ComputeForkDigest([]byte{0, 0, 0, 0}, genesisValidatorsRoot)
require.NoError(t, err)

resp, err := forkEntry(localNode.Node().Record())
require.NoError(t, err)
assert.DeepEqual(t, want[:], resp.CurrentForkDigest)
assert.DeepEqual(t, nextForkVersion, resp.NextForkVersion)
assert.Equal(t, nextForkEpoch, resp.NextForkEpoch, "Unexpected next fork epoch")
assert.Equal(t, hexutil.Encode(current.ForkDigest[:]), hexutil.Encode(resp.CurrentForkDigest))
assert.Equal(t, hexutil.Encode(next.ForkVersion[:]), hexutil.Encode(resp.NextForkVersion))
assert.Equal(t, next.Epoch, resp.NextForkEpoch, "Unexpected next fork epoch")
}

func TestAddForkEntry_Genesis(t *testing.T) {
func TestAddForkEntry_NextForkVersion(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
temp := t.TempDir()
randNum := rand.Int()
tempPath := path.Join(temp, strconv.Itoa(randNum))
@@ -276,17 +153,27 @@ func TestAddForkEntry_Genesis(t *testing.T) {
db, err := enode.OpenDB("")
require.NoError(t, err)

bCfg := params.MainnetConfig()
bCfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{}
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)] = bCfg.GenesisEpoch
params.OverrideBeaconConfig(bCfg)

localNode := enode.NewLocalNode(db, pkey)
localNode, err = addForkEntry(localNode, time.Now().Add(10*time.Second), bytesutil.PadTo([]byte{'A', 'B', 'C', 'D'}, 32))
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
// Add the fork entry to the local node's ENR.
require.NoError(t, updateENR(localNode, current, next))
fe, err := forkEntry(localNode.Node().Record())
require.NoError(t, err)
forkEntry, err := forkEntry(localNode.Node().Record())
require.NoError(t, err)
assert.DeepEqual(t,
params.BeaconConfig().GenesisForkVersion, forkEntry.NextForkVersion,
assert.Equal(t,
hexutil.Encode(params.BeaconConfig().AltairForkVersion), hexutil.Encode(fe.NextForkVersion),
"Wanted Next Fork Version to be equal to genesis fork version")

last := params.LastForkEpoch()
current = params.GetNetworkScheduleEntry(last)
next = params.NextNetworkScheduleEntry(last)
require.NoError(t, updateENR(localNode, current, next))
entry := params.NextNetworkScheduleEntry(last)
fe, err = forkEntry(localNode.Node().Record())
require.NoError(t, err)
assert.Equal(t,
hexutil.Encode(entry.ForkVersion[:]), hexutil.Encode(fe.NextForkVersion),
"Wanted Next Fork Version to be equal to last entry in schedule")

}

@@ -10,26 +10,22 @@ import (
// changes.
func (s *Service) forkWatcher() {
slotTicker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
var scheduleEntry params.NetworkScheduleEntry
for {
select {
case currSlot := <-slotTicker.C():
currEpoch := slots.ToEpoch(currSlot)
if currEpoch == params.BeaconConfig().AltairForkEpoch ||
currEpoch == params.BeaconConfig().BellatrixForkEpoch ||
currEpoch == params.BeaconConfig().CapellaForkEpoch ||
currEpoch == params.BeaconConfig().DenebForkEpoch ||
currEpoch == params.BeaconConfig().ElectraForkEpoch ||
currEpoch == params.BeaconConfig().FuluForkEpoch {
// If we are in the fork epoch, we update our enr with
// the updated fork digest. This repeats over the
// epoch, which might be slightly wasteful
// but is fine nonetheless.
if s.dv5Listener != nil { // make sure it's not a local network
_, err := addForkEntry(s.dv5Listener.LocalNode(), s.genesisTime, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not add fork entry")
}
if s.dv5Listener == nil {
continue // TODO: Should forkWatcher run at all if we're in "local" mode?
}
currentEpoch := slots.ToEpoch(currSlot)
newEntry := params.GetNetworkScheduleEntry(currentEpoch)
if newEntry.ForkDigest != scheduleEntry.ForkDigest {
nextEntry := params.NextNetworkScheduleEntry(currentEpoch)
if err := updateENR(s.dv5Listener.LocalNode(), newEntry, nextEntry); err != nil {
log.WithFields(newEntry.LogFields()).WithError(err).Error("Could not add fork entry")
continue // don't replace scheduleEntry until this succeeds
}
scheduleEntry = newEntry
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")

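The rewritten forkWatcher only touches the ENR when the fork digest for the current epoch differs from the last entry it wrote, rather than firing on a hard-coded list of fork epochs. A hedged distillation of that check (digestChanged is hypothetical):

// digestChanged reports whether the schedule entry for epoch carries a
// different digest than the one last written to the ENR.
func digestChanged(prev params.NetworkScheduleEntry, epoch primitives.Epoch) (params.NetworkScheduleEntry, bool) {
	entry := params.GetNetworkScheduleEntry(epoch)
	return entry, entry.ForkDigest != prev.ForkDigest
}
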
@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
"github.com/OffchainLabs/prysm/v6/network/forks"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
)

@@ -39,7 +38,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
copy(msg, "invalid")
return bytesutil.UnsafeCastToString(msg)
}
_, fEpoch, err := forks.RetrieveForkDataFromDigest(digest, genesisValidatorsRoot)
_, fEpoch, err := params.ForkDataFromDigest(digest)
if err != nil {
// Impossible condition that should
// never be hit.

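MsgID now maps a topic digest to its fork epoch through params.ForkDataFromDigest instead of forks.RetrieveForkDataFromDigest, so the genesis validators root no longer participates in the lookup. A sketch of the call, assuming the three-value signature implied by the call site above (epochForDigest is hypothetical):

func epochForDigest(digest [4]byte) (primitives.Epoch, error) {
	// The first return value appears to be the fork version; MsgID only
	// needs the epoch to pick the right message-ID scheme.
	_, fEpoch, err := params.ForkDataFromDigest(digest)
	return fEpoch, err
}
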
@@ -7,10 +7,10 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/golang/snappy"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
@@ -18,28 +18,27 @@ import (

func TestMsgID_HashesCorrectly(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
d, err := forks.CreateForkDigest(time.Now(), genesisValidatorsRoot)
assert.NoError(t, err)
clock := startup.NewClock(time.Now(), bytesutil.ToBytes32([]byte{'A'}))
valRoot := clock.GenesisValidatorsRoot()
d := params.ForkDigest(clock.CurrentEpoch())
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
invalidSnappy := [32]byte{'J', 'U', 'N', 'K'}
pMsg := &pubsubpb.Message{Data: invalidSnappy[:], Topic: &tpc}
hashedData := hash.Hash(append(params.BeaconConfig().MessageDomainInvalidSnappy[:], pMsg.Data...))
msgID := string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(valRoot[:], pMsg), "Got incorrect msg id")

validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
enc := snappy.Encode(nil, validObj[:])
nMsg := &pubsubpb.Message{Data: enc, Topic: &tpc}
hashedData = hash.Hash(append(params.BeaconConfig().MessageDomainValidSnappy[:], validObj[:]...))
msgID = string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(valRoot[:], nMsg), "Got incorrect msg id")
}

func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
d, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, genesisValidatorsRoot)
d, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
assert.NoError(t, err)
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
topicLen := uint64(len(tpc))
@@ -52,7 +51,7 @@ func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
combinedObj = append(combinedObj, pMsg.Data...)
hashedData := hash.Hash(combinedObj)
msgID := string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], pMsg), "Got incorrect msg id")

validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
enc := snappy.Encode(nil, validObj[:])
@@ -63,13 +62,12 @@ func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
combinedObj = append(combinedObj, validObj[:]...)
hashedData = hash.Hash(combinedObj)
msgID = string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], nMsg), "Got incorrect msg id")
}

func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
d, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, genesisValidatorsRoot)
d, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
assert.NoError(t, err)
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
topicLen := uint64(len(tpc))
@@ -82,7 +80,7 @@ func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
combinedObj = append(combinedObj, pMsg.Data...)
hashedData := hash.Hash(combinedObj)
msgID := string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], pMsg), "Got incorrect msg id")

validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
enc := snappy.Encode(nil, validObj[:])
@@ -93,7 +91,7 @@ func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
combinedObj = append(combinedObj, validObj[:]...)
hashedData = hash.Hash(combinedObj)
msgID = string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], nMsg), "Got incorrect msg id")
}

func TestMsgID_WithNilTopic(t *testing.T) {

@@ -40,7 +40,7 @@ const (
rSubD = 8 // random gossip target
)

var errInvalidTopic = errors.New("invalid topic format")
var ErrInvalidTopic = errors.New("invalid topic format")

// Specifies the fixed size context length.
const digestLength = 4
@@ -219,12 +219,12 @@ func convertTopicScores(topicMap map[string]*pubsub.TopicScoreSnapshot) map[stri
func ExtractGossipDigest(topic string) ([4]byte, error) {
// Ensure the topic prefix is correct.
if len(topic) < len(gossipTopicPrefix)+1 || topic[:len(gossipTopicPrefix)] != gossipTopicPrefix {
return [4]byte{}, errInvalidTopic
return [4]byte{}, ErrInvalidTopic
}
start := len(gossipTopicPrefix)
end := strings.Index(topic[start:], "/")
if end == -1 { // Ensure a topic suffix exists.
return [4]byte{}, errInvalidTopic
return [4]byte{}, ErrInvalidTopic
}
end += start
strDigest := topic[start:end]

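Promoting errInvalidTopic to the exported ErrInvalidTopic lets callers outside the package classify the failure with errors.Is. A usage sketch with an illustrative topic string (handleTopic is hypothetical):

func handleTopic(topic string) ([4]byte, error) {
	digest, err := ExtractGossipDigest(topic)
	if errors.Is(err, ErrInvalidTopic) {
		// A malformed topic is not retryable; drop it outright.
		return [4]byte{}, err
	}
	return digest, err
}
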
@@ -1,12 +1,12 @@
package p2p

import (
"encoding/hex"
"fmt"
"strings"

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/network/forks"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
@@ -32,6 +32,14 @@ var _ pubsub.SubscriptionFilter = (*Service)(nil)
// (Note: BlobSidecar is not included in this list since it is superseded by DataColumnSidecar)
const pubsubSubscriptionRequestLimit = 500

func (s *Service) setAllForkDigests() {
entries := params.SortedNetworkScheduleEntries()
s.allForkDigests = make(map[[4]byte]struct{}, len(entries))
for _, entry := range entries {
s.allForkDigests[entry.ForkDigest] = struct{}{}
}
}

// CanSubscribe returns true if the topic is of interest and we could subscribe to it.
func (s *Service) CanSubscribe(topic string) bool {
if !s.isInitialized() {
@@ -48,50 +56,18 @@ func (s *Service) CanSubscribe(topic string) bool {
if parts[1] != "eth2" {
return false
}
phase0ForkDigest, err := s.currentForkDigest()

var digest [4]byte
dl, err := hex.Decode(digest[:], []byte(parts[2]))
if err == nil && dl != 4 {
err = fmt.Errorf("expected 4 bytes, got %d", dl)
}
if err != nil {
log.WithError(err).Error("Could not determine fork digest")
log.WithError(err).WithField("topic", topic).WithField("digest", parts[2]).Error("CanSubscribe failed to parse message")
return false
}
altairForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine altair fork digest")
return false
}
bellatrixForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Bellatrix fork digest")
return false
}
capellaForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Capella fork digest")
return false
}
denebForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Deneb fork digest")
return false
}
electraForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Electra fork digest")
return false
}
fuluForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().FuluForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Fulu fork digest")
return false
}
switch parts[2] {
case fmt.Sprintf("%x", phase0ForkDigest):
case fmt.Sprintf("%x", altairForkDigest):
case fmt.Sprintf("%x", bellatrixForkDigest):
case fmt.Sprintf("%x", capellaForkDigest):
case fmt.Sprintf("%x", denebForkDigest):
case fmt.Sprintf("%x", electraForkDigest):
case fmt.Sprintf("%x", fuluForkDigest):
default:
if _, ok := s.allForkDigests[digest]; !ok {
log.WithField("topic", topic).WithField("digest", fmt.Sprintf("%#x", digest)).Error("CanSubscribe failed to find digest in allForkDigests")
return false
}

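The per-fork digest switch in CanSubscribe collapses into one set lookup against allForkDigests, which setAllForkDigests precomputes from the sorted network schedule. A hedged distillation of the new check (validDigest is hypothetical):

func (s *Service) validDigest(segment string) bool {
	// Decode the 8-hex-char digest segment of the topic, then test
	// membership in the precomputed digest set.
	var digest [4]byte
	n, err := hex.Decode(digest[:], []byte(segment))
	if err != nil || n != 4 {
		return false
	}
	_, ok := s.allForkDigests[digest]
	return ok
}
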
@@ -11,8 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
@@ -21,12 +19,11 @@ import (

func TestService_CanSubscribe(t *testing.T) {
params.SetupTestConfigCleanup(t)
currentFork := [4]byte{0x01, 0x02, 0x03, 0x04}
params.BeaconConfig().InitializeForkSchedule()
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
genesisTime := time.Now()
var valRoot [32]byte
digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
assert.NoError(t, err)
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
currentFork := params.GetNetworkScheduleEntry(clock.CurrentEpoch()).ForkDigest
digest := params.ForkDigest(clock.CurrentEpoch())
type test struct {
name string
topic string
@@ -108,12 +105,14 @@ func TestService_CanSubscribe(t *testing.T) {
}
tests = append(tests, tt)
}
valRoot := clock.GenesisValidatorsRoot()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
genesisValidatorsRoot: valRoot[:],
genesisTime: genesisTime,
genesisTime: clock.GenesisTime(),
}
s.setAllForkDigests()
if got := s.CanSubscribe(tt.topic); got != tt.want {
t.Errorf("CanSubscribe(%s) = %v, want %v", tt.topic, got, tt.want)
}
@@ -219,11 +218,10 @@ func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testi

func TestService_FilterIncomingSubscriptions(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
digest := params.ForkDigest(clock.CurrentEpoch())
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
genesisTime := time.Now()
var valRoot [32]byte
digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
assert.NoError(t, err)
type args struct {
id peer.ID
subs []*pubsubpb.RPC_SubOpts
@@ -320,12 +318,14 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
},
},
}
valRoot := clock.GenesisValidatorsRoot()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
genesisValidatorsRoot: valRoot[:],
genesisTime: genesisTime,
genesisTime: clock.GenesisTime(),
}
s.setAllForkDigests()
got, err := s.FilterIncomingSubscriptions(tt.args.id, tt.args.subs)
if (err != nil) != tt.wantErr {
t.Errorf("FilterIncomingSubscriptions() error = %v, wantErr %v", err, tt.wantErr)

@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -92,6 +93,8 @@ type Service struct {
custodyInfoMut sync.RWMutex // Protects custodyGroupCount and earliestAvailableSlot
custodyGroupCount uint64
earliestAvailableSlot primitives.Slot
clock *startup.Clock
allForkDigests map[[4]byte]struct{}
}

// NewService initializes a new p2p service compatible with shared.Service interface. No
@@ -196,6 +199,7 @@ func (s *Service) Start() {
// Waits until the state is initialized via an event feed.
// Used for fork-related data when connecting peers.
s.awaitStateInitialized()
s.setAllForkDigests()
s.isPreGenesis = false

var relayNodes []string
@@ -449,7 +453,7 @@ func (s *Service) awaitStateInitialized() {
s.genesisTime = clock.GenesisTime()
gvr := clock.GenesisValidatorsRoot()
s.genesisValidatorsRoot = gvr[:]
_, err = s.currentForkDigest() // initialize fork digest cache
_, err = s.currentForkDigest()
if err != nil {
log.WithError(err).Error("Could not initialize fork digest")
}

@@ -13,8 +13,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
@@ -334,14 +332,16 @@ func TestPeer_Disconnect(t *testing.T) {

func TestService_JoinLeaveTopic(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
defer cancel()
gs := startup.NewClockSynchronizer()
s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}, ClockWaiter: gs})
require.NoError(t, err)

go s.awaitStateInitialized()
fd := initializeStateWithForkDigest(ctx, t, gs)
s.setAllForkDigests()
s.awaitStateInitialized()

assert.Equal(t, 0, len(s.joinedTopics))

@@ -370,15 +370,13 @@ func TestService_JoinLeaveTopic(t *testing.T) {
// digest associated with that genesis event.
func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.ClockSetter) [4]byte {
gt := prysmTime.Now()
gvr := bytesutil.ToBytes32(bytesutil.PadTo([]byte("genesis validators root"), 32))
require.NoError(t, gs.SetClock(startup.NewClock(gt, gvr)))

fd, err := forks.CreateForkDigest(gt, gvr[:])
require.NoError(t, err)
gvr := params.BeaconConfig().GenesisValidatorsRoot
clock := startup.NewClock(gt, gvr)
require.NoError(t, gs.SetClock(clock))

time.Sleep(50 * time.Millisecond) // wait for pubsub filter to initialize.

return fd
return params.ForkDigest(clock.CurrentEpoch())
}

// TODO: Uncomment out of devnet.

@@ -27,6 +27,8 @@ import (
"github.com/prysmaticlabs/go-bitfield"
)

const nfdEnrKey = "nfd" // The ENR record key for "nfd" (Next Fork Digest).

var (
attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount

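updateENR stores the next fork digest as raw bytes under this "nfd" key (zeroed when the current and next digests match). A hedged sketch of reading it back from a peer record (loadNextForkDigest is hypothetical):

func loadNextForkDigest(record *enr.Record) ([4]byte, error) {
	// enr.WithEntry with a pointer target decodes the entry in place.
	var raw []byte
	if err := record.Load(enr.WithEntry(nfdEnrKey, &raw)); err != nil {
		return [4]byte{}, err
	}
	var digest [4]byte
	copy(digest[:], raw)
	return digest, nil
}
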
@@ -3,7 +3,6 @@ package p2p
import (
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"testing"
"time"
@@ -35,17 +34,8 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
// find and connect to a node already subscribed to a specific subnet.
// In our case: The node i is subscribed to subnet i, with i = 1, 2, 3

// Define the genesis validators root, to ensure everybody is on the same network.
const (
genesisValidatorRootStr = "0xdeadbeefcafecafedeadbeefcafecafedeadbeefcafecafedeadbeefcafecafe"
subnetCount = 3
minimumPeersPerSubnet = 1
)

genesisValidatorsRoot, err := hex.DecodeString(genesisValidatorRootStr[2:])
require.NoError(t, err)

// Create a context.
const subnetCount = 3
const minimumPeersPerSubnet = 1
ctx := t.Context()

// Use shorter period for testing.
@@ -57,6 +47,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {

// Create flags.
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
gFlags := new(flags.GlobalFlags)
gFlags.MinimumPeersPerSubnet = 1
flags.Init(gFlags)
@@ -73,7 +64,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
bootNodeService := &Service{
cfg: &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000, DisableLivenessCheck: true, PingInterval: testPingInterval},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
genesisValidatorsRoot: params.BeaconConfig().GenesisValidatorsRoot[:],
}

bootNodeForkDigest, err := bootNodeService.currentForkDigest()
@@ -107,7 +98,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
require.NoError(t, err)

service.genesisTime = genesisTime
service.genesisValidatorsRoot = genesisValidatorsRoot
service.genesisValidatorsRoot = params.BeaconConfig().GenesisValidatorsRoot[:]

nodeForkDigest, err := service.currentForkDigest()
require.NoError(t, err)
@@ -152,11 +143,11 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
QUICPort: 3010,
}

service, err := NewService(ctx, cfg)
service, err := NewService(t.Context(), cfg)
require.NoError(t, err)

service.genesisTime = genesisTime
service.genesisValidatorsRoot = genesisValidatorsRoot
service.genesisValidatorsRoot = params.BeaconConfig().GenesisValidatorsRoot[:]

service.Start()
defer func() {

@@ -9,7 +9,6 @@ go_library(
"//api/server/structs:go_default_library",
"//config/params:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//network/httputil:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
@@ -24,8 +23,6 @@ go_test(
"//api/server/structs:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",

@@ -11,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/network/httputil"
"github.com/ethereum/go-ethereum/common/hexutil"
log "github.com/sirupsen/logrus"
@@ -35,34 +34,25 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
_, span := trace.StartSpan(r.Context(), "config.GetForkSchedule")
defer span.End()

schedule := params.BeaconConfig().ForkVersionSchedule
schedule := params.SortedForkSchedule()
data := make([]*structs.Fork, 0, len(schedule))
if len(schedule) == 0 {
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
Data: make([]*structs.Fork, 0),
Data: data,
})
return
}

versions := forks.SortedForkVersions(schedule)
chainForks := make([]*structs.Fork, len(schedule))
var previous, current []byte
for i, v := range versions {
if i == 0 {
previous = params.BeaconConfig().GenesisForkVersion
} else {
previous = current
}
copyV := v
current = copyV[:]
chainForks[i] = &structs.Fork{
PreviousVersion: hexutil.Encode(previous),
CurrentVersion: hexutil.Encode(current),
Epoch: fmt.Sprintf("%d", schedule[v]),
}
previous := schedule[0]
for _, entry := range schedule {
data = append(data, &structs.Fork{
PreviousVersion: hexutil.Encode(previous.ForkVersion[:]),
CurrentVersion: hexutil.Encode(entry.ForkVersion[:]),
Epoch: fmt.Sprintf("%d", entry.Epoch),
})
previous = entry
}

httputil.WriteJson(w, &structs.GetForkScheduleResponse{
Data: chainForks,
Data: data,
})
}

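The rewritten handler walks the pre-sorted schedule once, pairing each fork version with its predecessor; the first (genesis) entry is its own predecessor. A toy distillation of the pairing (pairVersions is hypothetical; values illustrative):

func pairVersions(versions [][4]byte) [][2][4]byte {
	// Given sorted versions g, a, b this yields (g,g), (g,a), (a,b),
	// mirroring the PreviousVersion/CurrentVersion fields above.
	if len(versions) == 0 {
		return nil
	}
	pairs := make([][2][4]byte, 0, len(versions))
	prev := versions[0]
	for _, cur := range versions {
		pairs = append(pairs, [2][4]byte{prev, cur})
		prev = cur
	}
	return pairs
}
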
@@ -13,8 +13,6 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/ethereum/go-ethereum/common"
@@ -592,43 +590,34 @@ func TestGetSpec(t *testing.T) {

func TestForkSchedule_Ok(t *testing.T) {
t.Run("ok", func(t *testing.T) {
genesisForkVersion := []byte("Genesis")
firstForkVersion, firstForkEpoch := []byte("Firs"), primitives.Epoch(100)
secondForkVersion, secondForkEpoch := []byte("Seco"), primitives.Epoch(200)
thirdForkVersion, thirdForkEpoch := []byte("Thir"), primitives.Epoch(300)

params.SetupTestConfigCleanup(t)
config := params.BeaconConfig().Copy()
config.GenesisForkVersion = genesisForkVersion
// Create fork schedule adding keys in non-sorted order.
schedule := make(map[[4]byte]primitives.Epoch, 3)
schedule[bytesutil.ToBytes4(secondForkVersion)] = secondForkEpoch
schedule[bytesutil.ToBytes4(firstForkVersion)] = firstForkEpoch
schedule[bytesutil.ToBytes4(thirdForkVersion)] = thirdForkEpoch
config.ForkVersionSchedule = schedule
params.OverrideBeaconConfig(config)
config.InitializeForkSchedule()

request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/fork_schedule", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

genesisStr, firstStr, secondStr := hexutil.Encode(config.GenesisForkVersion), hexutil.Encode(config.AltairForkVersion), hexutil.Encode(config.BellatrixForkVersion)
GetForkSchedule(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetForkScheduleResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 3, len(resp.Data))
schedule := params.SortedForkSchedule()
require.Equal(t, len(schedule), len(resp.Data))
fork := resp.Data[0]
assert.DeepEqual(t, hexutil.Encode(genesisForkVersion), fork.PreviousVersion)
assert.DeepEqual(t, hexutil.Encode(firstForkVersion), fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", firstForkEpoch), fork.Epoch)
assert.Equal(t, genesisStr, fork.PreviousVersion)
assert.Equal(t, genesisStr, fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", config.GenesisEpoch), fork.Epoch)
fork = resp.Data[1]
assert.DeepEqual(t, hexutil.Encode(firstForkVersion), fork.PreviousVersion)
assert.DeepEqual(t, hexutil.Encode(secondForkVersion), fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", secondForkEpoch), fork.Epoch)
assert.Equal(t, genesisStr, fork.PreviousVersion)
assert.Equal(t, firstStr, fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", config.AltairForkEpoch), fork.Epoch)
fork = resp.Data[2]
assert.DeepEqual(t, hexutil.Encode(secondForkVersion), fork.PreviousVersion)
assert.DeepEqual(t, hexutil.Encode(thirdForkVersion), fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", thirdForkEpoch), fork.Epoch)
assert.Equal(t, firstStr, fork.PreviousVersion)
assert.Equal(t, secondStr, fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", config.BellatrixForkEpoch), fork.Epoch)
})
t.Run("correct number of forks", func(t *testing.T) {
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/fork_schedule", nil)
@@ -639,8 +628,8 @@ func TestForkSchedule_Ok(t *testing.T) {
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetForkScheduleResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
os := forks.NewOrderedSchedule(params.BeaconConfig())
assert.Equal(t, os.Len(), len(resp.Data))
os := params.SortedForkSchedule()
assert.Equal(t, len(os), len(resp.Data))
})
}

@@ -12,12 +12,10 @@ go_library(
"//api:go_default_library",
"//api/server/structs:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//network/httputil:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",

@@ -7,12 +7,10 @@ import (
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/server/structs"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/network/httputil"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -111,17 +109,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R

updateSlot := update.AttestedHeader().Beacon().Slot
updateEpoch := slots.ToEpoch(updateSlot)
updateFork, err := forks.Fork(updateEpoch)
if err != nil {
httputil.HandleError(w, "Could not get fork Version: "+err.Error(), http.StatusInternalServerError)
return
}

forkDigest, err := signing.ComputeForkDigest(updateFork.CurrentVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
if err != nil {
httputil.HandleError(w, "Could not compute fork digest: "+err.Error(), http.StatusInternalServerError)
return
}
updateEntry := params.GetNetworkScheduleEntry(updateEpoch)
updateSSZ, err := update.MarshalSSZ()
if err != nil {
httputil.HandleError(w, "Could not marshal update to SSZ: "+err.Error(), http.StatusInternalServerError)
@@ -133,7 +121,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
if _, err := w.Write(chunkLength); err != nil {
httputil.HandleError(w, "Could not write chunk length: "+err.Error(), http.StatusInternalServerError)
}
if _, err := w.Write(forkDigest[:]); err != nil {
if _, err := w.Write(updateEntry.ForkDigest[:]); err != nil {
httputil.HandleError(w, "Could not write fork digest: "+err.Error(), http.StatusInternalServerError)
}
if _, err := w.Write(updateSSZ); err != nil {

@@ -12,6 +12,7 @@ import (
)

func TestServer_GetBeaconConfig(t *testing.T) {
t.Skip("this is a weird test")
ctx := t.Context()
bs := &Server{}
res, err := bs.GetBeaconConfig(ctx, &emptypb.Empty{})

@@ -85,10 +85,10 @@ go_library(
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//genesis:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -159,6 +159,7 @@ common_deps = [
"//crypto/bls/blst:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//genesis:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

@@ -4,6 +4,7 @@
package validator

import (
	"bytes"
	"context"
	"time"

@@ -28,10 +29,11 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	"github.com/OffchainLabs/prysm/v6/genesis"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/emptypb"
@@ -156,32 +158,31 @@ func (vs *Server) ValidatorIndex(ctx context.Context, req *ethpb.ValidatorIndexR
//
// DomainData fetches the current domain version information from the beacon state.
func (vs *Server) DomainData(ctx context.Context, request *ethpb.DomainRequest) (*ethpb.DomainResponse, error) {
	fork, err := forks.Fork(request.Epoch)
	if err != nil {
		return nil, err
	}
	headGenesisValidatorsRoot := vs.HeadFetcher.HeadGenesisValidatorsRoot()
	isExitDomain := [4]byte(request.Domain) == params.BeaconConfig().DomainVoluntaryExit
	if isExitDomain {
	epoch := request.Epoch
	rd := bytesutil.ToBytes4(request.Domain)
	if bytes.Equal(request.Domain, params.BeaconConfig().DomainVoluntaryExit[:]) {
		hs, err := vs.HeadFetcher.HeadStateReadOnly(ctx)
		if err != nil {
			return nil, err
		}
		if hs.Version() >= version.Deneb {
			fork = &ethpb.Fork{
		if slots.ToEpoch(hs.Slot()) >= params.BeaconConfig().DenebForkEpoch {
			return computeDomainData(rd, epoch, &ethpb.Fork{
				PreviousVersion: params.BeaconConfig().CapellaForkVersion,
				CurrentVersion:  params.BeaconConfig().CapellaForkVersion,
				Epoch:           params.BeaconConfig().CapellaForkEpoch,
			}
			})
		}
	}
	dv, err := signing.Domain(fork, request.Epoch, bytesutil.ToBytes4(request.Domain), headGenesisValidatorsRoot[:])
	return computeDomainData(rd, request.Epoch, params.ForkFromConfig(params.BeaconConfig(), epoch))
}

func computeDomainData(domain [4]byte, epoch primitives.Epoch, fork *ethpb.Fork) (*ethpb.DomainResponse, error) {
	gvr := genesis.ValidatorsRoot()
	domainData, err := signing.Domain(fork, epoch, domain, gvr[:])
	if err != nil {
		return nil, err
	}
	return &ethpb.DomainResponse{
		SignatureDomain: dv,
	}, nil
	return &ethpb.DomainResponse{SignatureDomain: domainData}, nil
}

// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will eventually be removed in favor of the REST API.

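For context on the DomainData rewrite above: from Deneb onward (per EIP-7044), voluntary exit signatures are verified against the Capella fork domain regardless of the head fork, which is why the handler pins the fork instead of deriving it from the epoch. A minimal sketch of that selection rule, using only names that appear in this changeset; the helper itself is illustrative, not the server's literal implementation:

	// exitDomainFork sketches the fork selection for voluntary exit domains.
	// Illustrative only; assumes the config fields referenced in the hunk above.
	func exitDomainFork(cfg *params.BeaconChainConfig, epoch primitives.Epoch) *ethpb.Fork {
		if epoch >= cfg.DenebForkEpoch {
			// EIP-7044: exits stay verifiable against the Capella domain
			// on every fork from Deneb onward.
			return &ethpb.Fork{
				PreviousVersion: cfg.CapellaForkVersion,
				CurrentVersion:  cfg.CapellaForkVersion,
				Epoch:           cfg.CapellaForkEpoch,
			}
		}
		return params.ForkFromConfig(cfg, epoch)
	}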
@@ -2,6 +2,7 @@ package validator

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"
@@ -17,11 +18,13 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/crypto/bls"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/genesis"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
	"github.com/OffchainLabs/prysm/v6/testing/mock"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
	"github.com/ethereum/go-ethereum/common/hexutil"
	logTest "github.com/sirupsen/logrus/hooks/test"
	"go.uber.org/mock/gomock"
	"google.golang.org/grpc/codes"
@@ -317,55 +320,63 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) {
	require.LogsContain(t, hook, "Sending genesis time")
}

func TestServer_DomainData_Exits(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
		[4]byte(cfg.GenesisForkVersion):   primitives.Epoch(0),
		[4]byte(cfg.AltairForkVersion):    primitives.Epoch(5),
		[4]byte(cfg.BellatrixForkVersion): primitives.Epoch(10),
		[4]byte(cfg.CapellaForkVersion):   primitives.Epoch(15),
		[4]byte(cfg.DenebForkVersion):     primitives.Epoch(20),
	}
	params.OverrideBeaconConfig(cfg)
	beaconState := &ethpb.BeaconStateBellatrix{
		Slot: 4000,
	}
	block := util.NewBeaconBlock()
	genesisRoot, err := block.Block.HashTreeRoot()
	require.NoError(t, err, "Could not get signing root")
	s, err := state_native.InitializeFromProtoUnsafeBellatrix(beaconState)
func testSigDomainForSlot(t *testing.T, domain [4]byte, chsrv *mockChain.ChainService, epoch primitives.Epoch) *ethpb.DomainResponse {
	cfg := params.BeaconConfig()
	gvr := genesis.ValidatorsRoot()
	s, err := state_native.InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{
		Slot:                  primitives.Slot(epoch) * cfg.SlotsPerEpoch,
		GenesisValidatorsRoot: gvr[:],
	})
	require.NoError(t, err)
	chsrv.State = s
	vs := &Server{
		Ctx:               t.Context(),
		ChainStartFetcher: &mockExecution.Chain{},
		HeadFetcher:       &mockChain.ChainService{State: s, Root: genesisRoot[:]},
		HeadFetcher:       chsrv,
	}

	reqDomain, err := vs.DomainData(t.Context(), &ethpb.DomainRequest{
		Epoch:  100,
		Domain: params.BeaconConfig().DomainDeposit[:],
	})
	assert.NoError(t, err)
	wantedDomain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, params.BeaconConfig().DenebForkVersion, make([]byte, 32))
	assert.NoError(t, err)
	assert.DeepEqual(t, reqDomain.SignatureDomain, wantedDomain)

	beaconStateNew := &ethpb.BeaconStateDeneb{
		Slot: 4000,
	}
	s, err = state_native.InitializeFromProtoUnsafeDeneb(beaconStateNew)
	require.NoError(t, err)
	vs.HeadFetcher = &mockChain.ChainService{State: s, Root: genesisRoot[:]}

	reqDomain, err = vs.DomainData(t.Context(), &ethpb.DomainRequest{
		Epoch:  100,
		Domain: params.BeaconConfig().DomainVoluntaryExit[:],
	domainResp, err := vs.DomainData(t.Context(), &ethpb.DomainRequest{
		Epoch:  epoch,
		Domain: domain[:],
	})
	require.NoError(t, err)

	wantedDomain, err = signing.ComputeDomain(params.BeaconConfig().DomainVoluntaryExit, params.BeaconConfig().CapellaForkVersion, make([]byte, 32))
	require.NoError(t, err)

	assert.DeepEqual(t, reqDomain.SignatureDomain, wantedDomain)
	return domainResp
}

func requireSigningEqual(t *testing.T, name string, domain [4]byte, req, want primitives.Epoch, chsrv *mockChain.ChainService) {
	t.Run(fmt.Sprintf("%s_%#x", name, domain), func(t *testing.T) {
		gvr := genesis.ValidatorsRoot()
		resp := testSigDomainForSlot(t, domain, chsrv, req)
		entry := params.GetNetworkScheduleEntry(want)
		wanted, err := signing.ComputeDomain(domain, entry.ForkVersion[:], gvr[:])
		assert.NoError(t, err)
		assert.Equal(t, hexutil.Encode(wanted), hexutil.Encode(resp.SignatureDomain))
	})
}

func TestServer_DomainData_Exits(t *testing.T) {
	// This test makes 2 sets of assertions:
	// - the deposit domain is always computed wrt the fork version at the given epoch
	// - the exit domain is the same until deneb, at which point it is always computed wrt the capella fork version
	params.SetActiveTestCleanup(t, params.MainnetConfig())
	params.BeaconConfig().InitializeForkSchedule()
	cfg := params.BeaconConfig()

	block := util.NewBeaconBlock()
	genesisRoot, err := block.Block.HashTreeRoot()
	require.NoError(t, err, "Could not get signing root")
	chsrv := &mockChain.ChainService{Root: genesisRoot[:]}
	last := params.LastForkEpoch()
	requireSigningEqual(t, "genesis deposit", cfg.DomainDeposit, cfg.GenesisEpoch, cfg.GenesisEpoch, chsrv)
	requireSigningEqual(t, "altair deposit", cfg.DomainDeposit, cfg.AltairForkEpoch, cfg.AltairForkEpoch, chsrv)
	requireSigningEqual(t, "bellatrix deposit", cfg.DomainDeposit, cfg.BellatrixForkEpoch, cfg.BellatrixForkEpoch, chsrv)
	requireSigningEqual(t, "capella deposit", cfg.DomainDeposit, cfg.CapellaForkEpoch, cfg.CapellaForkEpoch, chsrv)
	requireSigningEqual(t, "deneb deposit", cfg.DomainDeposit, cfg.DenebForkEpoch, cfg.DenebForkEpoch, chsrv)
	requireSigningEqual(t, "last epoch deposit", cfg.DomainDeposit, last, last, chsrv)

	requireSigningEqual(t, "genesis exit", cfg.DomainVoluntaryExit, cfg.GenesisEpoch, cfg.GenesisEpoch, chsrv)
	requireSigningEqual(t, "altair exit", cfg.DomainVoluntaryExit, cfg.AltairForkEpoch, cfg.AltairForkEpoch, chsrv)
	requireSigningEqual(t, "bellatrix exit", cfg.DomainVoluntaryExit, cfg.BellatrixForkEpoch, cfg.BellatrixForkEpoch, chsrv)
	requireSigningEqual(t, "capella exit", cfg.DomainVoluntaryExit, cfg.CapellaForkEpoch, cfg.CapellaForkEpoch, chsrv)
	requireSigningEqual(t, "deneb exit", cfg.DomainVoluntaryExit, cfg.DenebForkEpoch, cfg.CapellaForkEpoch, chsrv)
	requireSigningEqual(t, "last epoch exit", cfg.DomainVoluntaryExit, last, cfg.CapellaForkEpoch, chsrv)
}

@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "clock.go",
        "synchronizer.go",
        "testing.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/startup",
    visibility = ["//visibility:public"],

@@ -35,12 +35,21 @@ func (g *Clock) GenesisValidatorsRoot() [32]byte {
	return g.vr
}

// GenesisValidatorsRootSlice returns the genesis validators root as a slice for convenience.
func (g *Clock) GenesisValidatorsRootSlice() []byte {
	return g.vr[:]
}

// CurrentSlot returns the current slot relative to the time.Time value that Clock embeds.
func (g *Clock) CurrentSlot() types.Slot {
	now := g.now()
	return slots.Duration(g.t, now)
}

// CurrentEpoch returns the epoch containing the current slot.
func (g *Clock) CurrentEpoch() types.Epoch {
	return slots.ToEpoch(g.CurrentSlot())
}

// SlotStart computes the time the given slot begins.
func (g *Clock) SlotStart(slot types.Slot) (time.Time, error) {
	return slots.StartTime(g.t, slot)

beacon-chain/startup/testing.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package startup

import (
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/time/slots"
)

// MockNower is a mock implementation of the Nower interface for use in tests.
type MockNower struct {
	t time.Time
}

// Now satisfies the Nower interface using a mocked time value.
func (m *MockNower) Now() time.Time {
	return m.t
}

// SetSlot sets the current time to the start of the given slot.
func (m *MockNower) SetSlot(t *testing.T, c *Clock, s primitives.Slot) {
	now, err := slots.StartTime(c.GenesisTime(), s)
	if err != nil {
		t.Fatalf("failed to set slot: %v", err)
	}
	m.t = now
}

// Set sets the current time to the given time.
func (m *MockNower) Set(now time.Time) {
	m.t = now
}
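A usage sketch for the new helper, mirroring how defaultMockChain later in this changeset wires a MockNower into a Clock; the genesis time and validators root here are placeholders:

	// newPinnedClock returns a Clock whose "now" is pinned to the start of
	// the given slot. Sketch only; values are placeholders.
	func newPinnedClock(t *testing.T, slot primitives.Slot) *startup.Clock {
		mockNow := &startup.MockNower{}
		clock := startup.NewClock(time.Now(), [32]byte{}, startup.WithNower(mockNow.Now))
		mockNow.SetSlot(t, clock, slot) // "now" becomes the start of the slot
		return clock
	}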
@@ -1,34 +0,0 @@
package genesis

import (
	_ "embed"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/golang/snappy"
)

var embeddedStates = map[string]*[]byte{}

// State returns a copy of the genesis state from a hardcoded value.
func State(name string) (state.BeaconState, error) {
	sb, exists := embeddedStates[name]
	if exists {
		return load(*sb)
	}
	return nil, nil
}

// load a compressed ssz state file into a beacon state struct.
func load(b []byte) (state.BeaconState, error) {
	st := &ethpb.BeaconState{}
	b, err := snappy.Decode(nil /*dst*/, b)
	if err != nil {
		return nil, err
	}
	if err := st.UnmarshalSSZ(b); err != nil {
		return nil, err
	}
	return state_native.InitializeFromProtoUnsafePhase0(st)
}
@@ -351,3 +351,7 @@ type WriteOnlyDeposits interface {
type WriteOnlyProposerLookahead interface {
	SetProposerLookahead([]primitives.ValidatorIndex) error
}

func IsNil(s BeaconState) bool {
	return s == nil || s.IsNil()
}

@@ -34,3 +34,7 @@ var fieldMap map[types.FieldIndex]types.DataType
func errNotSupported(funcName string, ver int) error {
	return fmt.Errorf("%s is not supported for %s", funcName, version.String(ver))
}

func IsNil(s state.BeaconState) bool {
	return s == nil || s.IsNil()
}

@@ -14,6 +14,7 @@ go_library(
    srcs = [
        "decode_pubsub.go",
        "doc.go",
        "error.go",
        "fetch_data_column_sidecars.go",
        "fork_watcher.go",
        "fuzz_exports.go", # keep
        "log.go",
@@ -127,7 +128,6 @@ go_library(
        "//math:go_default_library",
        "//monitoring/tracing:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//network/forks:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/attestation:go_default_library",
        "//proto/prysm/v1alpha1/metadata:go_default_library",
@@ -267,7 +267,6 @@ go_test(
        "//crypto/rand:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//encoding/ssz/equality:go_default_library",
        "//network/forks:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/attestation:go_default_library",

@@ -35,7 +35,6 @@ go_library(
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//network/forks:go_default_library",
        "//proto/dbval:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime:go_default_library",
@@ -80,7 +79,6 @@ go_test(
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//network/forks:go_default_library",
        "//proto/dbval:go_default_library",
        "//runtime/interop:go_default_library",
        "//testing/require:go_default_library",

@@ -9,7 +9,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/crypto/bls"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/pkg/errors"
@@ -107,8 +106,7 @@ func (vr verifier) blockSignatureBatch(b blocks.ROBlock) (*bls.SignatureBatch, e
}

func newBackfillVerifier(vr []byte, keys [][fieldparams.BLSPubkeyLength]byte) (*verifier, error) {
	dc, err := newDomainCache(vr, params.BeaconConfig().DomainBeaconProposer,
		forks.NewOrderedSchedule(params.BeaconConfig()))
	dc, err := newDomainCache(vr, params.BeaconConfig().DomainBeaconProposer)
	if err != nil {
		return nil, err
	}
@@ -122,33 +120,31 @@ func newBackfillVerifier(vr []byte, keys [][fieldparams.BLSPubkeyLength]byte) (*

// domainCache provides a fast signing domain lookup by epoch.
type domainCache struct {
	fsched      forks.OrderedSchedule
	forkDomains map[[4]byte][]byte
	dType       [bls.DomainByteLength]byte
}

func newDomainCache(vRoot []byte, dType [bls.DomainByteLength]byte, fsched forks.OrderedSchedule) (*domainCache, error) {
func newDomainCache(vRoot []byte, dType [bls.DomainByteLength]byte) (*domainCache, error) {
	dc := &domainCache{
		fsched:      fsched,
		forkDomains: make(map[[4]byte][]byte),
		dType:       dType,
	}
	for _, entry := range fsched {
		d, err := signing.ComputeDomain(dc.dType, entry.Version[:], vRoot)
	for _, entry := range params.SortedForkSchedule() {
		d, err := signing.ComputeDomain(dc.dType, entry.ForkVersion[:], vRoot)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to pre-compute signing domain for fork version=%#x", entry.Version)
			return nil, errors.Wrapf(err, "failed to pre-compute signing domain for fork version=%#x", entry.ForkVersion)
		}
		dc.forkDomains[entry.Version] = d
		dc.forkDomains[entry.ForkVersion] = d
	}
	return dc, nil
}

func (dc *domainCache) forEpoch(e primitives.Epoch) ([]byte, error) {
	fork, err := dc.fsched.VersionForEpoch(e)
	fork, err := params.Fork(e)
	if err != nil {
		return nil, err
	}
	d, ok := dc.forkDomains[fork]
	d, ok := dc.forkDomains[[4]byte(fork.CurrentVersion)]
	if !ok {
		return nil, errors.Wrapf(errUnknownDomain, "fork version=%#x, epoch=%d", fork, e)
	}

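The rewritten cache precomputes one signing domain per fork version from params.SortedForkSchedule at construction time, so per-block verification reduces to a map lookup. A hedged usage sketch (the helper name is illustrative):

	// proposerDomainAt resolves a precomputed signing domain for a slot.
	// Sketch only; assumes a cache built as in newDomainCache above.
	func proposerDomainAt(dc *domainCache, slot primitives.Slot) ([]byte, error) {
		// forEpoch maps epoch -> fork version -> precomputed 32-byte domain.
		return dc.forEpoch(slots.ToEpoch(slot))
	}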
@@ -12,7 +12,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/crypto/bls"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	"github.com/OffchainLabs/prysm/v6/runtime/interop"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -30,18 +29,17 @@ func TestDomainCache(t *testing.T) {
	}

	vRoot, err := hexutil.Decode("0x0011223344556677889900112233445566778899001122334455667788990011")
	require.NoError(t, err)
	dType := cfg.DomainBeaconProposer
	require.NoError(t, err)
	require.Equal(t, 32, len(vRoot))
	fsched := forks.NewOrderedSchedule(cfg)
	dc, err := newDomainCache(vRoot, dType, fsched)
	dc, err := newDomainCache(vRoot, dType)
	require.NoError(t, err)
	require.Equal(t, len(fsched), len(dc.forkDomains))
	for i := range fsched {
		e := fsched[i].Epoch
		ad, err := dc.forEpoch(e)
	schedule := params.SortedForkSchedule()
	require.Equal(t, len(schedule), len(dc.forkDomains))
	for _, entry := range schedule {
		ad, err := dc.forEpoch(entry.Epoch)
		require.NoError(t, err)
		ed, err := signing.ComputeDomain(dType, fsched[i].Version[:], vRoot)
		ed, err := signing.ComputeDomain(dType, entry.ForkVersion[:], vRoot)
		require.NoError(t, err)
		require.DeepEqual(t, ed, ad)
	}

@@ -18,10 +18,10 @@ import (
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	types "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -48,6 +48,7 @@ type blobsTestCase struct {
	topic        protocol.ID
	oldestSlot   oldestSlotCallback
	streamReader expectedRequirer
	currentSlot  primitives.Slot
}

type testHandler func(s *Service) rpcHandler
@@ -156,11 +157,7 @@ func (r *expectedBlobChunk) requireExpected(t *testing.T, s *Service, stream net

	c, err := readContextFromStream(stream)
	require.NoError(t, err)

	valRoot := s.cfg.chain.GenesisValidatorsRoot()
	ctxBytes, err := forks.ForkDigestFromEpoch(slots.ToEpoch(r.sidecar.Slot()), valRoot[:])
	require.NoError(t, err)
	require.Equal(t, ctxBytes, bytesutil.ToBytes4(c))
	require.Equal(t, params.ForkDigest(slots.ToEpoch(r.sidecar.Slot())), bytesutil.ToBytes4(c))

	sc := &ethpb.BlobSidecar{}
	require.NoError(t, encoding.DecodeWithMaxLength(stream, sc))
@@ -180,7 +177,7 @@ func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func())
		params.OverrideBeaconConfig(cfg)
	}
	maxBlobs := int(params.BeaconConfig().MaxBlobsPerBlock(0))
	chain, clock := defaultMockChain(t, 0)
	chain, clock := defaultMockChain(t, c.currentSlot)
	if c.chain == nil {
		c.chain = chain
	}
@@ -270,36 +267,28 @@ func (c *blobsTestCase) run(t *testing.T) {
// we use max uints for future forks, but this causes overflows when computing slots
// so it is helpful in tests to temporarily reposition the epochs to give room for some math.
func repositionFutureEpochs(cfg *params.BeaconChainConfig) {
	if cfg.CapellaForkEpoch == math.MaxUint64 {
		cfg.CapellaForkEpoch = cfg.BellatrixForkEpoch + 100
	}
	if cfg.DenebForkEpoch == math.MaxUint64 {
		cfg.DenebForkEpoch = cfg.CapellaForkEpoch + 100
	if cfg.FuluForkEpoch == math.MaxUint64 {
		cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 100
	}
}

func defaultMockChain(t *testing.T, currentSlot uint64) (*mock.ChainService, *startup.Clock) {
func defaultMockChain(t *testing.T, currentSlot primitives.Slot) (*mock.ChainService, *startup.Clock) {
	de := params.BeaconConfig().DenebForkEpoch
	df, err := forks.Fork(de)
	df, err := params.Fork(de)
	require.NoError(t, err)
	denebBuffer := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1000
	ce := de + denebBuffer
	fe := ce - 2
	cs, err := slots.EpochStart(ce)
	require.NoError(t, err)
	now := time.Now()
	genOffset := types.Slot(params.BeaconConfig().SecondsPerSlot) * cs
	genesisTime := now.Add(-1 * time.Second * time.Duration(int64(genOffset)))

	clock := startup.NewClock(genesisTime, [32]byte{}, startup.WithNower(
		func() time.Time {
			return genesisTime.Add(time.Duration(currentSlot*params.BeaconConfig().SecondsPerSlot) * time.Second)
		},
	))

	genesis := time.Now()
	mockNow := startup.MockNower{}
	clock := startup.NewClock(genesis, params.BeaconConfig().GenesisValidatorsRoot, startup.WithNower(mockNow.Now))
	mockNow.SetSlot(t, clock, primitives.Slot(currentSlot))
	currentSlotPtr := new(primitives.Slot)
	*currentSlotPtr = currentSlot
	chain := &mock.ChainService{
		FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: fe},
		Fork:                df,
		Slot:                currentSlotPtr,
	}

	return chain, clock

@@ -46,7 +46,6 @@ go_test(
        "//consensus-types/blocks/testing:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/ssz/detect:go_default_library",
        "//network/forks:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/require:go_default_library",

@@ -12,7 +12,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	blocktest "github.com/OffchainLabs/prysm/v6/consensus-types/blocks/testing"
	"github.com/OffchainLabs/prysm/v6/encoding/ssz/detect"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
	"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -22,6 +21,7 @@ import (
func TestDownloadFinalizedData(t *testing.T) {
	ctx := t.Context()
	cfg := params.MainnetConfig()
	cfg.InitializeForkSchedule()

	// avoid the altair zone because genesis tests are easier to set up
	epoch := cfg.AltairForkEpoch - 1
@@ -30,7 +30,7 @@ func TestDownloadFinalizedData(t *testing.T) {
	require.NoError(t, err)
	st, err := util.NewBeaconState()
	require.NoError(t, err)
	fork, err := forks.ForkForEpochFromConfig(cfg, epoch)
	fork := params.ForkFromConfig(cfg, epoch)
	require.NoError(t, err)
	require.NoError(t, st.SetFork(fork))
	require.NoError(t, st.SetSlot(slot))

@@ -16,7 +16,6 @@ import (
	blocktest "github.com/OffchainLabs/prysm/v6/consensus-types/blocks/testing"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/ssz/detect"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -83,7 +82,7 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
	require.NoError(t, err)
	wst, err := util.NewBeaconState()
	require.NoError(t, err)
	fork, err := forkForEpoch(cfg, epoch)
	fork, err := params.Fork(epoch)
	require.NoError(t, err)
	require.NoError(t, wst.SetFork(fork))

@@ -182,7 +181,7 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
	require.NoError(t, err)
	wst, err := util.NewBeaconState()
	require.NoError(t, err)
	fork, err := forkForEpoch(cfg, cfg.GenesisEpoch)
	fork, err := params.Fork(cfg.GenesisEpoch)
	require.NoError(t, err)
	require.NoError(t, wst.SetFork(fork))

@@ -279,33 +278,11 @@ func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
	require.Equal(t, expectedEpoch, actualEpoch)
}

func forkForEpoch(cfg *params.BeaconChainConfig, epoch primitives.Epoch) (*ethpb.Fork, error) {
	os := forks.NewOrderedSchedule(cfg)
	currentVersion, err := os.VersionForEpoch(epoch)
	if err != nil {
		return nil, err
	}
	prevVersion, err := os.Previous(currentVersion)
	if err != nil {
		if !errors.Is(err, forks.ErrNoPreviousVersion) {
			return nil, err
		}
		// use same version for both in the case of genesis
		prevVersion = currentVersion
	}
	forkEpoch := cfg.ForkVersionSchedule[currentVersion]
	return &ethpb.Fork{
		PreviousVersion: prevVersion[:],
		CurrentVersion:  currentVersion[:],
		Epoch:           forkEpoch,
	}, nil
}

func defaultTestHeadState(t *testing.T, cfg *params.BeaconChainConfig) (state.BeaconState, primitives.Epoch) {
	st, err := util.NewBeaconStateAltair()
	require.NoError(t, err)

	fork, err := forkForEpoch(cfg, cfg.AltairForkEpoch)
	fork, err := params.Fork(cfg.AltairForkEpoch)
	require.NoError(t, err)
	require.NoError(t, st.SetFork(fork))

@@ -3,7 +3,6 @@ package sync
import (
	"io"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/libp2p/go-libp2p/core/network"
@@ -87,12 +86,8 @@ type ContextByteVersions map[[4]byte]int
// and the runtime/version identifier for the corresponding fork.
func ContextByteVersionsForValRoot(valRoot [32]byte) (ContextByteVersions, error) {
	m := make(ContextByteVersions)
	for fv, v := range params.ConfigForkVersions(params.BeaconConfig()) {
		digest, err := signing.ComputeForkDigest(fv[:], valRoot[:])
		if err != nil {
			return nil, errors.Wrapf(err, "unable to compute fork digest for fork version %#x", fv)
		}
		m[digest] = v
	for _, entry := range params.SortedNetworkScheduleEntries() {
		m[entry.ForkDigest] = entry.VersionEnum
	}
	return m, nil
}

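A short sketch of how the resulting map is typically consumed when decoding a chunk's context bytes; the helper name is illustrative, not from the changeset:

	// versionForContext maps a stream's 4-byte context to a fork's version enum.
	// Sketch only, assuming a map built by ContextByteVersionsForValRoot above.
	func versionForContext(m ContextByteVersions, ctx []byte) (int, error) {
		if len(ctx) != 4 {
			return 0, fmt.Errorf("expected 4 context bytes, got %d", len(ctx))
		}
		v, ok := m[bytesutil.ToBytes4(ctx)]
		if !ok {
			return 0, fmt.Errorf("unknown fork digest %#x", ctx)
		}
		return v, nil
	}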
@@ -123,7 +123,7 @@ func (s *Service) scheduleMissingDataColumnSidecarsBroadcast(
	})

	// Get the time corresponding to the start of the slot.
	genesisTime := s.cfg.chain.GenesisTime()
	genesisTime := s.cfg.clock.GenesisTime()
	slotStartTime, err := slots.StartTime(genesisTime, slot)
	if err != nil {
		return errors.Wrap(err, "failed to calculate slot start time")

@@ -13,7 +13,6 @@ import (
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
	mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
@@ -30,7 +29,6 @@ import (
	leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
	ecdsaprysm "github.com/OffchainLabs/prysm/v6/crypto/ecdsa"
	pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
	"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
@@ -400,8 +398,9 @@ func TestRequestDataColumnSidecarsByRoot(t *testing.T) {
	cfg := params.BeaconConfig().Copy()
	cfg.FuluForkEpoch = 0
	params.OverrideBeaconConfig(cfg)
	params.BeaconConfig().InitializeForkSchedule()

	chainService, clock := defaultMockChain(t, 0)
	_, clock := defaultMockChain(t, 0)

	// Create test block with blobs
	pbSignedBeaconBlock := util.NewBeaconBlockDeneb()
@@ -584,11 +583,12 @@ func TestRequestDataColumnSidecarsByRoot(t *testing.T) {
		if cols, exists := tc.skipColumns[setup.offset]; exists {
			skipColumnsMap = cols
		}
		peerP2P := createAndConnectCustodyPeer(t, setup, dataColumnSidecars, chainService, hostP2P, tracker, skipColumnsMap)
		peerP2P := createAndConnectCustodyPeer(t, setup, dataColumnSidecars, clock, hostP2P, tracker, skipColumnsMap)
		peerIDs = append(peerIDs, peerP2P.PeerID())
	}

	ctxMap := map[[4]byte]int{{245, 165, 253, 66}: version.Fulu}
	ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
	require.NoError(t, err)
	verifier := func(cols []blocks.RODataColumn, reqs []verification.Requirement) verification.DataColumnsVerifier {
		clockSync := startup.NewClockSynchronizer()
		require.NoError(t, clockSync.SetClock(clock))
@@ -660,7 +660,7 @@ func TestRequestDataColumnSidecarsByRoot(t *testing.T) {
// createAndConnectCustodyPeer creates a new peer with a deterministic private key and connects it to the p2p service.
// It then sets up the peer to respond with data columns it custodies.
// TODO: Before merging into develop, make sure the blockRoot is taken into account in this mock.
func createAndConnectCustodyPeer(t *testing.T, setup peerSetup, dataColumnSidecars []*pb.DataColumnSidecar, chainService *mock.ChainService, hostP2P *p2ptest.TestP2P, tracker *requestTracker, skipColumns map[uint64]bool) *p2ptest.TestP2P {
func createAndConnectCustodyPeer(t *testing.T, setup peerSetup, dataColumnSidecars []*pb.DataColumnSidecar, clock *startup.Clock, hostP2P *p2ptest.TestP2P, tracker *requestTracker, skipColumns map[uint64]bool) *p2ptest.TestP2P {
	privateKeyBytes := make([]byte, 32)
	for i := range 32 {
		privateKeyBytes[i] = byte(setup.offset + i)
@@ -714,7 +714,7 @@ func createAndConnectCustodyPeer(t *testing.T, setup peerSetup, dataColumnSideca

	col := dataColumnSidecars[column]

	if err := WriteDataColumnSidecarChunk(stream, chainService, peerP2P.Encoding(), col); err != nil {
	if err := WriteDataColumnSidecarChunk(stream, clock, peerP2P.Encoding(), col); err != nil {
		log.WithError(err).Error("Failed to write data column sidecar chunk")
		closeStream(stream, log)
		return
@@ -1009,7 +1009,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
	fuluForkEpoch primitives.Epoch

	// Current slot.
	currentSlot uint64
	currentSlot primitives.Slot

	// Blocks with blobs parameters that will be used as `bwb` parameter.
	blocksParams []blockParams
@@ -1366,12 +1366,10 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := t.Context()
			// Consistency checks.
			require.Equal(t, len(tc.blocksParams), len(tc.addedRODataColumns))

			// Create a context.
			ctx := context.Background()

			// Create blocks, RO data columns and data columns sidecar from slot.
			roBlocks := make([]blocks.ROBlock, len(tc.blocksParams))
			roDatasColumns := make([][]blocks.RODataColumn, len(tc.blocksParams))
@@ -1431,6 +1429,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
			conf := params.BeaconConfig()
			conf.FuluForkEpoch = tc.fuluForkEpoch
			params.OverrideBeaconConfig(conf)
			params.BeaconConfig().InitializeForkSchedule()

			// Save the blocks in the store.
			storage := make(map[[fieldparams.RootLength]byte][]uint64)
@@ -1448,7 +1447,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
			dataColumnStorageSummarizer := filesystem.NewMockDataColumnStorageSummarizer(t, storage)

			// Create a chain and a clock.
			chain, clock := defaultMockChain(t, tc.currentSlot)
			_, clock := defaultMockChain(t, tc.currentSlot)

			// Create the P2P service.
			p2pSvc := p2ptest.NewTestP2P(t, libp2p.Identity(genFixedCustodyPeer(t)))
@@ -1459,7 +1458,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
			// Connect the peers.
			peers := make([]*p2ptest.TestP2P, 0, len(tc.peersParams))
			for i, peerParams := range tc.peersParams {
				peer := createAndConnectPeerForRange(t, p2pSvc, chain, dataColumnsSidecarBySlot, peerParams, i)
				peer := createAndConnectPeerForRange(t, p2pSvc, clock, dataColumnsSidecarBySlot, peerParams, i)
				peers = append(peers, peer)
			}

@@ -1475,11 +1474,8 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
				p2pSvc.Peers().SetChainState(peerID, status)
			}

			clockSync := startup.NewClockSynchronizer()
			require.NoError(t, clockSync.SetClock(clock))
			ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
			require.NoError(t, err)

			ctxMap := map[[4]byte]int{{245, 165, 253, 66}: version.Fulu}
			rateLimiter := leakybucket.NewCollector(1_000, 1_000, 1*time.Hour, false)

			// Fetch the data columns from the peers.
@@ -1531,7 +1527,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
func createAndConnectPeerForRange(
	t *testing.T,
	p2pService *p2ptest.TestP2P,
	chainService *mock.ChainService,
	clock *startup.Clock,
	dataColumnsSidecarFromSlot map[primitives.Slot][]*pb.DataColumnSidecar,
	peerParams peerParams,
	offset int,
@@ -1574,7 +1570,7 @@ func createAndConnectPeerForRange(
	dataColumn := dataColumnsSidecar[responseParams.columnIndex]

	// Send the response.
	err := WriteDataColumnSidecarChunk(stream, chainService, p2pService.Encoding(), dataColumn)
	err := WriteDataColumnSidecarChunk(stream, clock, p2pService.Encoding(), dataColumn)
	require.NoError(t, err)
}

@@ -1,12 +1,10 @@
package sync

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
@@ -105,29 +103,21 @@ func extractDataTypeFromTypeMap[T any](typeMap map[[4]byte]func() (T, error), di
	if len(digest) == 0 {
		f, ok := typeMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
		if !ok {
			return zero, fmt.Errorf("no %T type exists for the genesis fork version", zero)
			return zero, errors.Wrapf(errInvalidDigest, "no %T type exists for the genesis fork version", zero)
		}
		return f()
	}
	if len(digest) != forkDigestLength {
		return zero, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
		return zero, errors.Wrapf(errInvalidDigest, "invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
	}
	vRoot := tor.GenesisValidatorsRoot()
	for k, f := range typeMap {
		rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
		if err != nil {
			return zero, err
		}
		if rDigest == bytesutil.ToBytes4(digest) {
			return f()
		}
	forkVersion, _, err := params.ForkDataFromDigest([4]byte(digest))
	if err != nil {
		return zero, errors.Wrapf(ErrNoValidDigest, "could not extract %T data type, saw digest=%#x", zero, digest)
	}
	return zero, errors.Wrapf(
		ErrNoValidDigest,
		"could not extract %T data type, saw digest=%#x, genesis=%v, vr=%#x",
		zero,
		digest,
		tor.GenesisTime(),
		tor.GenesisValidatorsRoot(),
	)

	f, ok := typeMap[forkVersion]
	if ok {
		return f()
	}
	return zero, errors.Wrapf(ErrNoValidDigest, "could not extract %T data type, saw digest=%#x", zero, digest)
}

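The new path resolves the digest once through the params schedule instead of recomputing a digest for every candidate fork version. A hedged sketch of the happy path for one of the type maps; the constructor-map value shape is assumed from its use above, and the helper name is illustrative:

	// blockForDigest resolves a fork digest to a registered block constructor.
	// Sketch only; assumes types.BlockMap maps fork versions to constructors.
	func blockForDigest(digest [4]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
		forkVersion, _, err := params.ForkDataFromDigest(digest)
		if err != nil {
			return nil, err // digest not in this network's schedule
		}
		f, ok := types.BlockMap[forkVersion]
		if !ok {
			return nil, fmt.Errorf("no block type registered for fork version %#x", forkVersion)
		}
		return f()
	}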
@@ -4,13 +4,11 @@ import (
	"bytes"
	"fmt"
	"reflect"
	"strings"
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
	mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	p2ptesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
@@ -30,8 +28,9 @@ import (
)

func TestService_decodePubsubMessage(t *testing.T) {
	digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, make([]byte, 32))
	require.NoError(t, err)
	params.SetupTestConfigCleanup(t)
	params.BeaconConfig().InitializeForkSchedule()
	entry := params.GetNetworkScheduleEntry(params.BeaconConfig().GenesisEpoch)
	tests := []struct {
		name    string
		topic   string
@@ -56,7 +55,7 @@ func TestService_decodePubsubMessage(t *testing.T) {
		{
			name:    "invalid topic format",
			topic:   "foo",
			wantErr: errInvalidTopic,
			wantErr: p2p.ErrInvalidTopic,
		},
		{
			name: "topic not mapped to any message type",
@@ -65,7 +64,7 @@ func TestService_decodePubsubMessage(t *testing.T) {
		},
		{
			name:  "valid message -- beacon block",
			topic: fmt.Sprintf(p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlock{})], digest),
			topic: fmt.Sprintf(p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlock{})], entry.ForkDigest),
			input: &pubsub.Message{
				Message: &pb.Message{
					Data: func() []byte {
@@ -102,10 +101,11 @@ func TestService_decodePubsubMessage(t *testing.T) {
				tt.input.Message.Topic = &topic
			}
			got, err := s.decodePubsubMessage(tt.input)
			if err != nil && err != tt.wantErr && !strings.Contains(err.Error(), tt.wantErr.Error()) {
				t.Errorf("decodePubsubMessage() error = %v, wantErr %v", err, tt.wantErr)
			if tt.wantErr != nil {
				require.ErrorIs(t, err, tt.wantErr, "decodePubsubMessage() error mismatch")
				return
			}
			require.NoError(t, err, "decodePubsubMessage() unexpected error")
			if !reflect.DeepEqual(got, tt.want) {
				diff, _ := messagediff.PrettyDiff(got, tt.want)
				t.Log(diff)
@@ -116,24 +116,11 @@ func TestService_decodePubsubMessage(t *testing.T) {
}

func TestExtractDataType(t *testing.T) {
	// Precompute digests
	genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
	require.NoError(t, err)
	altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
	require.NoError(t, err)
	bellatrixDigest, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().ZeroHash[:])
	require.NoError(t, err)
	capellaDigest, err := signing.ComputeForkDigest(params.BeaconConfig().CapellaForkVersion, params.BeaconConfig().ZeroHash[:])
	require.NoError(t, err)
	denebDigest, err := signing.ComputeForkDigest(params.BeaconConfig().DenebForkVersion, params.BeaconConfig().ZeroHash[:])
	require.NoError(t, err)
	electraDigest, err := signing.ComputeForkDigest(params.BeaconConfig().ElectraForkVersion, params.BeaconConfig().ZeroHash[:])
	require.NoError(t, err)
	fuluDigest, err := signing.ComputeForkDigest(params.BeaconConfig().FuluForkVersion, params.BeaconConfig().ZeroHash[:])
	require.NoError(t, err)
	params.SetupTestConfigCleanup(t)
	params.BeaconConfig().InitializeForkSchedule()

	type args struct {
		digest []byte
		digest [4]byte
		chain  blockchain.ChainInfoFetcher
	}
	tests := []struct {
@@ -146,40 +133,10 @@ func TestExtractDataType(t *testing.T) {
		wantAttSlashing ethpb.AttSlashing
		wantErr         bool
	}{
		{
			name: "no digest",
			args: args{
				digest: []byte{},
				chain:  &mock.ChainService{ValidatorsRoot: [32]byte{}},
			},
			wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
				wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
				require.NoError(t, err)
				return wsb
			}(),
			wantMd:          wrapper.WrappedMetadataV0(&ethpb.MetaDataV0{}),
			wantAtt:         &ethpb.Attestation{},
			wantAggregate:   &ethpb.SignedAggregateAttestationAndProof{},
			wantAttSlashing: &ethpb.AttesterSlashing{},
			wantErr:         false,
		},
		{
			name: "invalid digest",
			args: args{
				digest: []byte{0x00, 0x01},
				chain:  &mock.ChainService{ValidatorsRoot: [32]byte{}},
			},
			wantBlock:       nil,
			wantMd:          nil,
			wantAtt:         nil,
			wantAggregate:   nil,
			wantAttSlashing: nil,
			wantErr:         true,
		},
		{
			name: "non existent digest",
			args: args{
				digest: []byte{0x00, 0x01, 0x02, 0x03},
				digest: [4]byte{},
				chain:  &mock.ChainService{ValidatorsRoot: [32]byte{}},
			},
			wantBlock: nil,
@@ -192,7 +149,7 @@ func TestExtractDataType(t *testing.T) {
		{
			name: "genesis fork version",
			args: args{
				digest: genDigest[:],
				digest: params.ForkDigest(params.BeaconConfig().GenesisEpoch),
				chain:  &mock.ChainService{ValidatorsRoot: [32]byte{}},
			},
			wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -208,7 +165,7 @@ func TestExtractDataType(t *testing.T) {
		{
			name: "altair fork version",
			args: args{
				digest: altairDigest[:],
				digest: params.ForkDigest(params.BeaconConfig().AltairForkEpoch),
				chain:  &mock.ChainService{ValidatorsRoot: [32]byte{}},
			},
			wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -225,7 +182,7 @@ func TestExtractDataType(t *testing.T) {
		{
			name: "bellatrix fork version",
			args: args{
				digest: bellatrixDigest[:],
				digest: params.ForkDigest(params.BeaconConfig().BellatrixForkEpoch),
				chain:  &mock.ChainService{ValidatorsRoot: [32]byte{}},
			},
			wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -242,7 +199,7 @@ func TestExtractDataType(t *testing.T) {
		{
			name: "capella fork version",
			args: args{
				digest: capellaDigest[:],
				digest: params.ForkDigest(params.BeaconConfig().CapellaForkEpoch),
				chain:  &mock.ChainService{ValidatorsRoot: [32]byte{}},
			},
			wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -259,7 +216,7 @@ func TestExtractDataType(t *testing.T) {
		{
			name: "deneb fork version",
			args: args{
				digest: denebDigest[:],
				digest: params.ForkDigest(params.BeaconConfig().DenebForkEpoch),
				chain:  &mock.ChainService{ValidatorsRoot: [32]byte{}},
			},
			wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -276,7 +233,7 @@ func TestExtractDataType(t *testing.T) {
		{
			name: "electra fork version",
			args: args{
				digest: electraDigest[:],
				digest: params.ForkDigest(params.BeaconConfig().ElectraForkEpoch),
				chain:  &mock.ChainService{ValidatorsRoot: [32]byte{}},
			},
			wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -293,7 +250,7 @@ func TestExtractDataType(t *testing.T) {
		{
			name: "fulu fork version",
			args: args{
				digest: fuluDigest[:],
				digest: params.ForkDigest(params.BeaconConfig().FuluForkEpoch),
				chain:  &mock.ChainService{ValidatorsRoot: [32]byte{}},
			},
			wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -310,7 +267,7 @@ func TestExtractDataType(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotBlock, err := extractDataTypeFromTypeMap(types.BlockMap, tt.args.digest, tt.args.chain)
			gotBlock, err := extractDataTypeFromTypeMap(types.BlockMap, tt.args.digest[:], tt.args.chain)
			if (err != nil) != tt.wantErr {
				t.Errorf("block: error = %v, wantErr %v", err, tt.wantErr)
				return
@@ -318,7 +275,7 @@ func TestExtractDataType(t *testing.T) {
			if !reflect.DeepEqual(gotBlock, tt.wantBlock) {
				t.Errorf("block: got = %v, want %v", gotBlock, tt.wantBlock)
			}
			gotAtt, err := extractDataTypeFromTypeMap(types.AttestationMap, tt.args.digest, tt.args.chain)
			gotAtt, err := extractDataTypeFromTypeMap(types.AttestationMap, tt.args.digest[:], tt.args.chain)
			if (err != nil) != tt.wantErr {
				t.Errorf("attestation: error = %v, wantErr %v", err, tt.wantErr)
				return
@@ -326,7 +283,7 @@ func TestExtractDataType(t *testing.T) {
			if !reflect.DeepEqual(gotAtt, tt.wantAtt) {
				t.Errorf("attestation: got = %v, want %v", gotAtt, tt.wantAtt)
			}
			gotAggregate, err := extractDataTypeFromTypeMap(types.AggregateAttestationMap, tt.args.digest, tt.args.chain)
			gotAggregate, err := extractDataTypeFromTypeMap(types.AggregateAttestationMap, tt.args.digest[:], tt.args.chain)
			if (err != nil) != tt.wantErr {
				t.Errorf("aggregate: error = %v, wantErr %v", err, tt.wantErr)
				return
@@ -334,7 +291,7 @@ func TestExtractDataType(t *testing.T) {
			if !reflect.DeepEqual(gotAggregate, tt.wantAggregate) {
				t.Errorf("aggregate: got = %v, want %v", gotAggregate, tt.wantAggregate)
			}
			gotAttSlashing, err := extractDataTypeFromTypeMap(types.AttesterSlashingMap, tt.args.digest, tt.args.chain)
			gotAttSlashing, err := extractDataTypeFromTypeMap(types.AttesterSlashingMap, tt.args.digest[:], tt.args.chain)
			if (err != nil) != tt.wantErr {
				t.Errorf("attester slashing: error = %v, wantErr %v", err, tt.wantErr)
				return
@@ -345,3 +302,11 @@ func TestExtractDataType(t *testing.T) {
		})
	}
}

func TestExtractDataTypeFromTypeMapInvalid(t *testing.T) {
	chain := &mock.ChainService{ValidatorsRoot: [32]byte{}}
	_, err := extractDataTypeFromTypeMap(types.BlockMap, []byte{0x00, 0x01}, chain)
	require.ErrorIs(t, err, errInvalidDigest)
	_, err = extractDataTypeFromTypeMap(types.AttestationMap, []byte{0x00, 0x01}, chain)
	require.ErrorIs(t, err, errInvalidDigest)
}

@@ -4,7 +4,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/libp2p/go-libp2p/core/protocol"
	"github.com/pkg/errors"
@@ -42,67 +41,46 @@ func (s *Service) forkWatcher() {

// registerForUpcomingFork registers the appropriate gossip and RPC topics if there is a fork in the next epoch.
func (s *Service) registerForUpcomingFork(currentEpoch primitives.Epoch) error {
	// Get the genesis validators root.
	genesisValidatorsRoot := s.cfg.clock.GenesisValidatorsRoot()

	nextEntry := params.GetNetworkScheduleEntry(currentEpoch + 1)
	// Check if there is a fork in the next epoch.
	isForkNextEpoch, err := forks.IsForkNextEpoch(s.cfg.clock.GenesisTime(), genesisValidatorsRoot[:])
	if err != nil {
		return errors.Wrap(err, "Could not retrieve next fork epoch")
	}

	// Exit early if there is no fork in the next epoch.
	if !isForkNextEpoch {
	if nextEntry.ForkDigest == s.registeredNetworkEntry.ForkDigest {
		return nil
	}

	beforeForkEpoch := currentEpoch
	forkEpoch := beforeForkEpoch + 1

	// Get the fork afterForkDigest for the next epoch.
	afterForkDigest, err := forks.ForkDigestFromEpoch(forkEpoch, genesisValidatorsRoot[:])
	if err != nil {
		return errors.Wrap(err, "could not retrieve fork digest")
	}

	// Exit early if the topics for the next epoch are already registered.
	// That is likely to be the case for every slot of the epoch except the first one.
	if s.subHandler.digestExists(afterForkDigest) {
	if s.subHandler.digestExists(nextEntry.ForkDigest) {
		return nil
	}

	// Register the subscribers (gossipsub) for the next epoch.
	s.registerSubscribers(forkEpoch, afterForkDigest)
	s.registerSubscribers(nextEntry.Epoch, nextEntry.ForkDigest)

	// Get the handlers for the current and next fork.
	beforeForkHandlerByTopic, err := s.rpcHandlerByTopicFromEpoch(beforeForkEpoch)
	currentHandler, err := s.rpcHandlerByTopicFromEpoch(currentEpoch)
	if err != nil {
		return errors.Wrap(err, "RPC handler by topic from before fork epoch")
	}

	forkHandlerByTopic, err := s.rpcHandlerByTopicFromEpoch(forkEpoch)
	nextHandler, err := s.rpcHandlerByTopicFromEpoch(nextEntry.Epoch)
	if err != nil {
		return errors.Wrap(err, "RPC handler by topic from fork epoch")
	}

	// Compute newly added topics.
	newRPCHandlerByTopic := addedRPCHandlerByTopic(beforeForkHandlerByTopic, forkHandlerByTopic)
	newHandlersByTopic := addedRPCHandlerByTopic(currentHandler, nextHandler)

	// Register the new RPC handlers.
	for topic, handler := range newRPCHandlerByTopic {
	for topic, handler := range newHandlersByTopic {
		s.registerRPC(topic, handler)
	}

	s.registeredNetworkEntry = nextEntry
	return nil
}

// deregisterFromPastFork deregisters the appropriate gossip and RPC topics if there was a fork in the current epoch.
func (s *Service) deregisterFromPastFork(currentEpoch primitives.Epoch) error {
	// Extract the genesis validators root.
	genesisValidatorsRoot := s.cfg.clock.GenesisValidatorsRoot()

	// Get the fork.
	currentFork, err := forks.Fork(currentEpoch)
	currentFork, err := params.Fork(currentEpoch)
	if err != nil {
		return errors.Wrap(err, "genesis validators root")
	}
@@ -123,10 +101,7 @@ func (s *Service) deregisterFromPastFork(currentEpoch primitives.Epoch) error {
	// Look at the previous fork's digest.
	beforeForkEpoch := currentFork.Epoch - 1

	beforeForkDigest, err := forks.ForkDigestFromEpoch(beforeForkEpoch, genesisValidatorsRoot[:])
	if err != nil {
		return errors.Wrap(err, "fork digest from epoch")
	}
	beforeForkDigest := params.ForkDigest(beforeForkEpoch)

	// Exit early if there are no topics with that particular digest.
	if !s.subHandler.digestExists(beforeForkDigest) {

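The watcher now keys rollover on the network schedule entry rather than on recomputed digests: a digest mismatch between the registered entry and the next epoch's entry is the fork signal. A minimal sketch of that predicate; the entry type name is an assumption, since the diff only shows the ForkDigest and Epoch fields of whatever params.GetNetworkScheduleEntry returns:

	// crossesForkBoundary reports whether the next epoch's topics still need
	// registering. Sketch only; params.NetworkScheduleEntry is an assumed name.
	func crossesForkBoundary(registered, next params.NetworkScheduleEntry) bool {
		// Equal digests mean no fork boundary between the two epochs, so the
		// currently registered gossip/RPC topics remain valid.
		return next.ForkDigest != registered.ForkDigest
	}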
@@ -14,7 +14,6 @@ import (
	mockSync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync/testing"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
)
@@ -91,9 +90,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
			currEpoch: 4,
			wantErr:   false,
			postSvcCheck: func(t *testing.T, s *Service) {
				genRoot := s.cfg.clock.GenesisValidatorsRoot()
				digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
				assert.NoError(t, err)
				digest := params.ForkDigest(5)
				assert.Equal(t, true, s.subHandler.digestExists(digest))
				rpcMap := make(map[string]bool)
				for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
@@ -135,9 +132,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
			currEpoch: 4,
			wantErr:   false,
			postSvcCheck: func(t *testing.T, s *Service) {
				genRoot := s.cfg.clock.GenesisValidatorsRoot()
				digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
				assert.NoError(t, err)
				digest := params.ForkDigest(5)
				assert.Equal(t, true, s.subHandler.digestExists(digest))
				rpcMap := make(map[string]bool)
				for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
@@ -177,9 +172,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
			currEpoch: 4,
			wantErr:   false,
			postSvcCheck: func(t *testing.T, s *Service) {
				genRoot := s.cfg.clock.GenesisValidatorsRoot()
				digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
				assert.NoError(t, err)
				digest := params.ForkDigest(5)
				assert.Equal(t, true, s.subHandler.digestExists(digest))
				rpcMap := make(map[string]bool)
				for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
@@ -221,9 +214,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
			currEpoch: 4,
			wantErr:   false,
			postSvcCheck: func(t *testing.T, s *Service) {
				genRoot := s.cfg.clock.GenesisValidatorsRoot()
				digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
				assert.NoError(t, err)
				digest := params.ForkDigest(5)
				assert.Equal(t, true, s.subHandler.digestExists(digest))
				rpcMap := make(map[string]bool)
				for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
@@ -266,9 +257,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
			currEpoch: 4,
			wantErr:   false,
			postSvcCheck: func(t *testing.T, s *Service) {
				genRoot := s.cfg.clock.GenesisValidatorsRoot()
				digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
				assert.NoError(t, err)
				digest := params.ForkDigest(5)
				assert.Equal(t, true, s.subHandler.digestExists(digest))
				rpcMap := make(map[string]bool)
				for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
@@ -387,14 +376,12 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
				r.registerRPC(topic, handler)
			}

			genRoot := r.cfg.clock.GenesisValidatorsRoot()
			digest, err := forks.ForkDigestFromEpoch(0, genRoot[:])
			digest := params.ForkDigest(0)
			assert.NoError(t, err)
			r.registerSubscribers(0, digest)
			assert.Equal(t, true, r.subHandler.digestExists(digest))

			digest, err = forks.ForkDigestFromEpoch(3, genRoot[:])
			assert.NoError(t, err)
			digest = params.ForkDigest(3)
			r.registerSubscribers(3, digest)
			assert.Equal(t, true, r.subHandler.digestExists(digest))

@@ -403,12 +390,9 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
			currEpoch: 4,
			wantErr:   false,
			postSvcCheck: func(t *testing.T, s *Service) {
				genRoot := s.cfg.clock.GenesisValidatorsRoot()
				digest, err := forks.ForkDigestFromEpoch(0, genRoot[:])
				assert.NoError(t, err)
				digest := params.ForkDigest(0)
				assert.Equal(t, false, s.subHandler.digestExists(digest))
				digest, err = forks.ForkDigestFromEpoch(3, genRoot[:])
				assert.NoError(t, err)
				digest = params.ForkDigest(3)
				assert.Equal(t, true, s.subHandler.digestExists(digest))

				ptcls := s.cfg.p2p.Host().Mux().Protocols()
@@ -455,14 +439,11 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
				chainStarted: abool.New(),
				subHandler:   newSubTopicHandler(),
			}
			genRoot := r.cfg.clock.GenesisValidatorsRoot()
			digest, err := forks.ForkDigestFromEpoch(1, genRoot[:])
			assert.NoError(t, err)
			digest := params.ForkDigest(1)
			r.registerSubscribers(1, digest)
			assert.Equal(t, true, r.subHandler.digestExists(digest))

			digest, err = forks.ForkDigestFromEpoch(3, genRoot[:])
			assert.NoError(t, err)
			digest = params.ForkDigest(3)
			r.registerSubscribers(3, digest)
			assert.Equal(t, true, r.subHandler.digestExists(digest))

@@ -471,12 +452,9 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
			currEpoch: 4,
			wantErr:   false,
			postSvcCheck: func(t *testing.T, s *Service) {
				genRoot := s.cfg.clock.GenesisValidatorsRoot()
				digest, err := forks.ForkDigestFromEpoch(1, genRoot[:])
				assert.NoError(t, err)
				digest := params.ForkDigest(1)
				assert.Equal(t, false, s.subHandler.digestExists(digest))
				digest, err = forks.ForkDigestFromEpoch(3, genRoot[:])
				assert.NoError(t, err)
				digest = params.ForkDigest(3)
				assert.Equal(t, true, s.subHandler.digestExists(digest))
			},
		},

@@ -1,21 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"api.go",
|
||||
"file.go",
|
||||
"log.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/genesis",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/client:go_default_library",
|
||||
"//api/client/beacon:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,48 +0,0 @@
|
||||
package genesis
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api/client"
|
||||
"github.com/OffchainLabs/prysm/v6/api/client/beacon"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// APIInitializer manages initializing the genesis state and block to prepare the beacon node for syncing.
|
||||
// The genesis state is retrieved from the remote beacon node api, using the debug state retrieval endpoint.
|
||||
type APIInitializer struct {
|
||||
c *beacon.Client
|
||||
}
|
||||
|
||||
// NewAPIInitializer creates an APIInitializer, handling the set up of a beacon node api client
|
||||
// using the provided host string.
|
||||
func NewAPIInitializer(beaconNodeHost string) (*APIInitializer, error) {
|
||||
c, err := beacon.NewClient(beaconNodeHost, client.WithMaxBodySize(client.MaxBodySizeState))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to parse beacon node url or hostname - %s", beaconNodeHost)
|
||||
}
|
||||
return &APIInitializer{c: c}, nil
|
||||
}
|
||||
|
||||
// Initialize downloads origin state and block for checkpoint sync and initializes database records to
|
||||
// prepare the node to begin syncing from that point.
|
||||
func (dl *APIInitializer) Initialize(ctx context.Context, d db.Database) error {
|
||||
existing, err := d.GenesisState(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if existing != nil && !existing.IsNil() {
|
||||
htr, err := existing.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error while computing hash_tree_root of existing genesis state")
|
||||
}
|
||||
log.Warnf("database contains genesis with htr=%#x, ignoring remote genesis state parameter", htr)
|
||||
return nil
|
||||
}
|
||||
sb, err := dl.c.GetState(ctx, beacon.IdGenesis)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error retrieving genesis state from %s", dl.c.NodeURL())
|
||||
}
|
||||
return d.LoadGenesis(ctx, sb)
|
||||
}
|
||||
@@ -1,62 +0,0 @@
|
||||
package genesis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v6/io/file"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Initializer describes a type that is able to obtain the checkpoint sync data (BeaconState and SignedBeaconBlock)
|
||||
// in some way and perform database setup to prepare the beacon node for syncing from the given checkpoint.
|
||||
// See FileInitializer and APIInitializer.
|
||||
type Initializer interface {
|
||||
Initialize(ctx context.Context, d db.Database) error
|
||||
}
|
||||
|
||||
// NewFileInitializer validates the given path information and creates an Initializer which will
|
||||
// use the provided state and block files to prepare the node for checkpoint sync.
|
||||
func NewFileInitializer(statePath string) (*FileInitializer, error) {
|
||||
var err error
|
||||
if err = existsAndIsFile(statePath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// stat just to make sure it actually exists and is a file
|
||||
return &FileInitializer{statePath: statePath}, nil
|
||||
}
|
||||
|
||||
// FileInitializer initializes a beacon-node database genesis state and block
|
||||
// using ssz-encoded state data stored in files on the local filesystem.
|
||||
type FileInitializer struct {
|
||||
statePath string
|
||||
}
|
||||
|
||||
// Initialize is called in the BeaconNode db startup code if an Initializer is present.
|
||||
// Initialize prepares the beacondb using the provided genesis state.
|
||||
func (fi *FileInitializer) Initialize(ctx context.Context, d db.Database) error {
|
||||
serState, err := file.ReadFileAsBytes(fi.statePath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading state file %s for checkpoint sync init", fi.statePath)
|
||||
}
|
||||
log.WithField(
|
||||
"hash", fmt.Sprintf("%#x", hash.FastSum256(serState)),
|
||||
).Info("Loading genesis state from disk.")
|
||||
return d.LoadGenesis(ctx, serState)
|
||||
}
|
||||
|
||||
var _ Initializer = &FileInitializer{}
|
||||
|
||||
func existsAndIsFile(path string) error {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error checking existence of ssz-encoded file %s for genesis state init", path)
|
||||
}
|
||||
if info.IsDir() {
|
||||
return fmt.Errorf("%s is a directory, please specify full path to file", path)
|
||||
}
|
||||
return nil
|
||||
}
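Note: the three deletions above remove the genesis `Initializer` abstraction (BUILD file, `api.go`, `file.go`) from `beacon-chain/sync/genesis`. For orientation, a minimal sketch of the selection pattern those files supported, using only the deleted API shown above; the function and flag plumbing here are hypothetical, not part of the diff:

```go
package genesisinit

import "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/genesis"

// pickInitializer sketches how a caller could have chosen between the two
// implementations: a local ssz-encoded state file when a path is supplied,
// otherwise the remote beacon node API.
func pickInitializer(statePath, beaconNodeHost string) (genesis.Initializer, error) {
	if statePath != "" {
		// FileInitializer reads the genesis state from disk.
		return genesis.NewFileInitializer(statePath)
	}
	// APIInitializer downloads the genesis state from a remote beacon node.
	return genesis.NewAPIInitializer(beaconNodeHost)
}
```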
@@ -266,31 +266,23 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra(t *testing.T) {
func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen(t *testing.T) {
// Setup configuration and fork version schedule.
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
fvs := map[[fieldparams.VersionLength]byte]primitives.Epoch{
bytesutil.ToBytes4(cfg.GenesisForkVersion): 1,
bytesutil.ToBytes4(cfg.AltairForkVersion): 2,
bytesutil.ToBytes4(cfg.BellatrixForkVersion): 3,
bytesutil.ToBytes4(cfg.CapellaForkVersion): 4,
bytesutil.ToBytes4(cfg.DenebForkVersion): 5,
bytesutil.ToBytes4(cfg.FuluForkVersion): 6,
bytesutil.ToBytes4(cfg.ElectraForkVersion): 0,
}
cfg.ForkVersionSchedule = fvs
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()

// Initialize logging, database, and P2P components.
hook := logTest.NewGlobal()
db := dbtest.SetupDB(t)
p1 := p2ptest.NewTestP2P(t)
validators := uint64(256)
currentSlot := 1 + (primitives.Slot(params.BeaconConfig().ElectraForkEpoch) * params.BeaconConfig().SlotsPerEpoch)
genesisOffset := time.Duration(currentSlot) * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
clock := startup.NewClock(time.Now().Add(-1*genesisOffset), params.BeaconConfig().GenesisValidatorsRoot)

// Create genesis state and associated keys.
beaconState, privKeys := util.DeterministicGenesisStateElectra(t, validators)
require.NoError(t, beaconState.SetSlot(1))
require.NoError(t, beaconState.SetSlot(clock.CurrentSlot()))

// Create and save a new Beacon block.
sb := util.NewBeaconBlockElectra()
sb.Block.Slot = clock.CurrentSlot()
util.SaveBlock(t, t.Context(), db, sb)

// Save state with block root.
@@ -301,10 +293,10 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
att := &ethpb.SingleAttestation{
CommitteeId: 8, // choose a non 0
Data: &ethpb.AttestationData{
Slot: 1,
Slot: clock.CurrentSlot(),
BeaconBlockRoot: root[:],
Source: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: root[:]},
Source: &ethpb.Checkpoint{Epoch: clock.CurrentEpoch() - 1, Root: make([]byte, fieldparams.RootLength)},
Target: &ethpb.Checkpoint{Epoch: clock.CurrentEpoch(), Root: root[:]},
CommitteeIndex: 0,
},
}
@@ -318,7 +310,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
att.AttesterIndex = committee[0]

// Compute attester domain and signature.
attesterDomain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
attesterDomain, err := signing.Domain(beaconState.Fork(), clock.CurrentEpoch(), params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
hashTreeRoot, err := signing.ComputeSigningRoot(att.Data, attesterDomain)
assert.NoError(t, err)
@@ -333,7 +325,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot,
Epoch: 0,
Epoch: clock.CurrentEpoch() - 2,
},
}

@@ -354,7 +346,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
p2p: p1,
beaconDB: db,
chain: chain,
clock: startup.NewClock(chain.Genesis.Add(time.Duration(-1*int(params.BeaconConfig().SecondsPerSlot))*time.Second), chain.ValidatorsRoot),
clock: clock,
attPool: attestations.NewPool(),
attestationNotifier: &mock.SimpleNotifier{Feed: opn},
},
@@ -365,7 +357,8 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
go r.verifierRoutine()

// Save a new beacon state and link it with the block root.
s, err := util.NewBeaconStateElectra()
slotOpt := func(s *ethpb.BeaconStateElectra) error { s.Slot = clock.CurrentSlot(); return nil }
s, err := util.NewBeaconStateElectra(slotOpt)
require.NoError(t, err)
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))


@@ -40,7 +40,7 @@ func (s *Service) streamBlobBatch(ctx context.Context, batch blockBatch, wQuota
return wQuota, errors.Wrapf(err, "could not retrieve sidecar: index %d, block root %#x", i, root)
}
SetStreamWriteDeadline(stream, defaultWriteDuration)
if chunkErr := WriteBlobSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), sc); chunkErr != nil {
if chunkErr := WriteBlobSidecarChunk(stream, s.cfg.clock, s.cfg.p2p.Encoding(), sc); chunkErr != nil {
log.WithError(chunkErr).Debug("Could not send a chunked response")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, chunkErr)
@@ -77,12 +77,10 @@ func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interfa
// return err
// }

remotePeer := stream.Conn().RemotePeer()

rp, err := validateBlobsByRange(r, s.cfg.chain.CurrentSlot())
rp, err := validateBlobsByRange(r, s.cfg.clock.CurrentSlot())
if err != nil {
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
s.downscorePeer(remotePeer, "blobSidecarsByRangeRpcHandlerValidationError")
s.downscorePeer(stream.Conn().RemotePeer(), "blobSidecarsByRangeRpcHandlerValidationError")
tracing.AnnotateError(span, err)
return err
}

@@ -6,6 +6,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
types "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -13,7 +14,10 @@ import (
)

func (c *blobsTestCase) defaultOldestSlotByRange(t *testing.T) types.Slot {
currentEpoch := slots.ToEpoch(c.chain.CurrentSlot())
currentEpoch := c.clock.CurrentEpoch()
if currentEpoch < params.BeaconConfig().MinEpochsForBlobsSidecarsRequest {
t.Fatal("bad test setup, current epoch is less than MinEpochsForBlobsSidecarsRequest")
}
oldestEpoch := currentEpoch - params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
if oldestEpoch < params.BeaconConfig().DenebForkEpoch {
oldestEpoch = params.BeaconConfig().DenebForkEpoch
@@ -99,10 +103,12 @@ func TestBlobByRangeOK(t *testing.T) {
nc.MaxRequestBlobSidecars = 100
params.OverrideBeaconConfig(nc)

defaultSlot := primitives.Slot(params.BeaconConfig().DenebForkEpoch+1000) * params.BeaconConfig().SlotsPerEpoch
cases := []*blobsTestCase{
{
name: "beginning of window + 10",
nblocks: 10,
name: "beginning of window + 10",
nblocks: 10,
currentSlot: defaultSlot,
},
{
name: "10 slots before window, 10 slots after, count = 20",
@@ -113,6 +119,7 @@ func TestBlobByRangeOK(t *testing.T) {
Count: 20,
}
},
currentSlot: defaultSlot,
},
{
name: "request before window, empty response",
@@ -123,7 +130,8 @@ func TestBlobByRangeOK(t *testing.T) {
Count: 10,
}
},
total: func() *int { x := 0; return &x }(),
total: func() *int { x := 0; return &x }(),
currentSlot: defaultSlot,
},
{
name: "10 blocks * 4 blobs = 40",
@@ -134,7 +142,8 @@ func TestBlobByRangeOK(t *testing.T) {
Count: 20,
}
},
total: func() *int { x := params.BeaconConfig().MaxBlobsPerBlock(0) * 10; return &x }(), // 10 blocks * 4 blobs = 40
total: func() *int { x := params.BeaconConfig().MaxBlobsPerBlock(0) * 10; return &x }(), // 10 blocks * 4 blobs = 40
currentSlot: defaultSlot,
},
{
name: "when request count > MAX_REQUEST_BLOCKS_DENEB, MAX_REQUEST_BLOBS_SIDECARS sidecars in response",
@@ -145,7 +154,8 @@ func TestBlobByRangeOK(t *testing.T) {
Count: params.BeaconConfig().MaxRequestBlocksDeneb + 1,
}
},
total: func() *int { x := int(params.BeaconConfig().MaxRequestBlobSidecars); return &x }(),
total: func() *int { x := int(params.BeaconConfig().MaxRequestBlobSidecars); return &x }(),
currentSlot: defaultSlot,
},
}
for _, c := range cases {
@@ -183,7 +193,6 @@ func TestBlobsByRangeValidation(t *testing.T) {
name string
current types.Slot
req *ethpb.BlobSidecarsByRangeRequest
// chain := defaultMockChain(t)

start types.Slot
end types.Slot

@@ -97,7 +97,7 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
}

SetStreamWriteDeadline(stream, defaultWriteDuration)
if chunkErr := WriteBlobSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), sc); chunkErr != nil {
if chunkErr := WriteBlobSidecarChunk(stream, s.cfg.clock, s.cfg.p2p.Encoding(), sc); chunkErr != nil {
log.WithError(chunkErr).Debug("Could not send a chunked response")
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, chunkErr)

@@ -8,9 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/pkg/errors"
@@ -30,57 +28,9 @@ func WriteBlockChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle, en
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return err
}
var obtainedCtx []byte

valRoot := tor.GenesisValidatorsRoot()
switch blk.Version() {
case version.Phase0:
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().GenesisEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
case version.Altair:
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
case version.Bellatrix:
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
case version.Capella:
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
case version.Deneb:
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
case version.Electra:
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
case version.Fulu:
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().FuluForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
default:
return errors.Wrapf(ErrUnrecognizedVersion, "block version %d is not recognized", blk.Version())
}

if err := writeContextToStream(obtainedCtx, stream); err != nil {
digest := params.ForkDigest(slots.ToEpoch(blk.Block().Slot()))
if err := writeContextToStream(digest[:], stream); err != nil {
return err
}
_, err := encoding.EncodeWithMaxLength(stream, blk)
@@ -150,16 +100,11 @@ func WriteBlobSidecarChunk(stream libp2pcore.Stream, tor blockchain.TemporalOrac
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return err
}
valRoot := tor.GenesisValidatorsRoot()
ctxBytes, err := forks.ForkDigestFromEpoch(slots.ToEpoch(sidecar.Slot()), valRoot[:])
if err != nil {
return err
}

ctxBytes := params.ForkDigest(slots.ToEpoch(sidecar.Slot()))
if err := writeContextToStream(ctxBytes[:], stream); err != nil {
return err
}
_, err = encoding.EncodeWithMaxLength(stream, sidecar)
_, err := encoding.EncodeWithMaxLength(stream, sidecar)
return err
}
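The hunks above are the heart of this change: the per-version switch over `forks.ForkDigestFromEpoch`, which needed the genesis validators root and could fail, collapses into a single lookup keyed by epoch. A minimal before/after sketch of the pattern; the function names are illustrative, the two APIs are the ones the diff swaps between, and the new form assumes `params.BeaconConfig().InitializeForkSchedule()` has already run:

```go
package ctxbytes

import (
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	"github.com/OffchainLabs/prysm/v6/time/slots"
)

// Old path: per-call derivation from the genesis validators root, fallible,
// so every caller had to thread an error.
func contextBytesOld(slot primitives.Slot, valRoot [32]byte) ([]byte, error) {
	digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(slot), valRoot[:])
	if err != nil {
		return nil, err
	}
	return digest[:], nil
}

// New path: a plain lookup into the precomputed fork schedule, no error path.
func contextBytesNew(slot primitives.Slot) []byte {
	digest := params.ForkDigest(slots.ToEpoch(slot))
	return digest[:]
}
```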
@@ -168,18 +113,12 @@ func WriteLightClientBootstrapChunk(stream libp2pcore.Stream, tor blockchain.Tem
return err
}

valRoot := tor.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(bootstrap.Header().Beacon().Slot), valRoot[:])
if err != nil {
digest := params.ForkDigest(slots.ToEpoch(bootstrap.Header().Beacon().Slot))
if err := writeContextToStream(digest[:], stream); err != nil {
return err
}

obtainedCtx := digest[:]
if err = writeContextToStream(obtainedCtx, stream); err != nil {
return err
}

_, err = encoding.EncodeWithMaxLength(stream, bootstrap)
_, err := encoding.EncodeWithMaxLength(stream, bootstrap)
return err
}

@@ -188,17 +127,11 @@ func WriteLightClientUpdateChunk(stream libp2pcore.Stream, tor blockchain.Tempor
return err
}

valRoot := tor.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), valRoot[:])
if err != nil {
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := writeContextToStream(digest[:], stream); err != nil {
return err
}
obtainedCtx := digest[:]

if err = writeContextToStream(obtainedCtx, stream); err != nil {
return err
}
_, err = encoding.EncodeWithMaxLength(stream, update)
_, err := encoding.EncodeWithMaxLength(stream, update)
return err
}

@@ -207,17 +140,12 @@ func WriteLightClientOptimisticUpdateChunk(stream libp2pcore.Stream, tor blockch
return err
}

valRoot := tor.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), valRoot[:])
if err != nil {
return err
}
obtainedCtx := digest[:]
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))

if err = writeContextToStream(obtainedCtx, stream); err != nil {
if err := writeContextToStream(digest[:], stream); err != nil {
return err
}
_, err = encoding.EncodeWithMaxLength(stream, update)
_, err := encoding.EncodeWithMaxLength(stream, update)
return err
}

@@ -226,17 +154,12 @@ func WriteLightClientFinalityUpdateChunk(stream libp2pcore.Stream, tor blockchai
return err
}

valRoot := tor.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), valRoot[:])
if err != nil {
return err
}
obtainedCtx := digest[:]
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))

if err = writeContextToStream(obtainedCtx, stream); err != nil {
if err := writeContextToStream(digest[:], stream); err != nil {
return err
}
_, err = encoding.EncodeWithMaxLength(stream, update)
_, err := encoding.EncodeWithMaxLength(stream, update)
return err
}

@@ -247,20 +170,13 @@ func WriteDataColumnSidecarChunk(stream libp2pcore.Stream, tor blockchain.Tempor
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return errors.Wrap(err, "stream write")
}

// Fork digest.
genesisValidatorsRoot := tor.GenesisValidatorsRoot()
ctxBytes, err := forks.ForkDigestFromEpoch(slots.ToEpoch(sidecar.SignedBlockHeader.Header.Slot), genesisValidatorsRoot[:])
if err != nil {
return errors.Wrap(err, "fork digest from epoch")
}

ctxBytes := params.ForkDigest(slots.ToEpoch(sidecar.SignedBlockHeader.Header.Slot))
if err := writeContextToStream(ctxBytes[:], stream); err != nil {
return errors.Wrap(err, "write context to stream")
}

// Sidecar.
if _, err = encoding.EncodeWithMaxLength(stream, sidecar); err != nil {
if _, err := encoding.EncodeWithMaxLength(stream, sidecar); err != nil {
return errors.Wrap(err, "encode with max length")
}


@@ -62,7 +62,7 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i
}

// Validate the request regarding its parameters.
rangeParameters, err := validateDataColumnsByRange(request, s.cfg.chain.CurrentSlot())
rangeParameters, err := validateDataColumnsByRange(request, s.cfg.clock.CurrentSlot())
if err != nil {
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
s.downscorePeer(remotePeer, "dataColumnSidecarsByRangeRpcHandlerValidationError")
@@ -151,7 +151,7 @@ func (s *Service) streamDataColumnBatch(ctx context.Context, batch blockBatch, q
sidecar := verifiedRODataColumn.DataColumnSidecar
SetStreamWriteDeadline(stream, defaultWriteDuration)

if err := WriteDataColumnSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), sidecar); err != nil {
if err := WriteDataColumnSidecarChunk(stream, s.cfg.clock, s.cfg.p2p.Encoding(), sidecar); err != nil {
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)
return quota, errors.Wrap(err, "write data column sidecar chunk")

@@ -19,26 +19,37 @@ import (
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
consensusblocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/util"
)

func TestDataColumnSidecarsByRangeRPCHandler(t *testing.T) {
params.SetupTestConfigCleanup(t)
beaconConfig := params.BeaconConfig()
//beaconConfig.FuluForkEpoch = beaconConfig.ElectraForkEpoch + 100
beaconConfig.FuluForkEpoch = 0
params.OverrideBeaconConfig(beaconConfig)
params.BeaconConfig().InitializeForkSchedule()
ctx := context.Background()
t.Run("wrong message type", func(t *testing.T) {
service := &Service{}
err := service.dataColumnSidecarsByRangeRPCHandler(ctx, nil, nil)
require.ErrorIs(t, err, notDataColumnsByRangeIdentifiersError)
})
mockNower := &startup.MockNower{}
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot, startup.WithNower(mockNower.Now))

ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
require.NoError(t, err)
t.Run("invalid request", func(t *testing.T) {
slot := primitives.Slot(400)
mockNower.SetSlot(t, clock, slot)

localP2P, remoteP2P := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)

@@ -48,6 +59,7 @@ func TestDataColumnSidecarsByRangeRPCHandler(t *testing.T) {
chain: &chainMock.ChainService{
Slot: &slot,
},
clock: clock,
},
rateLimiter: newRateLimiter(localP2P),
}
@@ -83,11 +95,6 @@ func TestDataColumnSidecarsByRangeRPCHandler(t *testing.T) {
})

t.Run("nominal", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
beaconConfig := params.BeaconConfig()
beaconConfig.FuluForkEpoch = 0
params.OverrideBeaconConfig(beaconConfig)

slot := primitives.Slot(400)

params := []util.DataColumnParam{
@@ -99,7 +106,7 @@ func TestDataColumnSidecarsByRangeRPCHandler(t *testing.T) {
_, verifiedRODataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, params)

storage := filesystem.NewEphemeralDataColumnStorage(t)
err := storage.Save(verifiedRODataColumns)
err = storage.Save(verifiedRODataColumns)
require.NoError(t, err)

localP2P, remoteP2P := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
@@ -140,22 +147,18 @@ func TestDataColumnSidecarsByRangeRPCHandler(t *testing.T) {
err = beaconDB.SaveROBlocks(ctx, roBlocks, false /*cache*/)
require.NoError(t, err)

mockNower.SetSlot(t, clock, slot)
service := &Service{
cfg: &config{
p2p: localP2P,
beaconDB: beaconDB,
chain: &chainMock.ChainService{
Slot: &slot,
},
p2p: localP2P,
beaconDB: beaconDB,
chain: &chainMock.ChainService{},
dataColumnStorage: storage,
clock: clock,
},
rateLimiter: newRateLimiter(localP2P),
}

ctxMap := ContextByteVersions{
[4]byte{245, 165, 253, 66}: version.Fulu,
}

root0 := verifiedRODataColumns[0].BlockRoot()
root3 := verifiedRODataColumns[3].BlockRoot()
root5 := verifiedRODataColumns[5].BlockRoot()

@@ -133,7 +133,7 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
}

SetStreamWriteDeadline(stream, defaultWriteDuration)
if chunkErr := WriteDataColumnSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), verifiedRODataColumn.DataColumnSidecar); chunkErr != nil {
if chunkErr := WriteDataColumnSidecarChunk(stream, s.cfg.clock, s.cfg.p2p.Encoding(), verifiedRODataColumn.DataColumnSidecar); chunkErr != nil {
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, chunkErr)
return chunkErr

@@ -19,7 +19,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/libp2p/go-libp2p/core/network"
@@ -28,10 +27,17 @@ import (
)

func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
params.SetupTestConfigCleanup(t)
beaconConfig := params.BeaconConfig()
beaconConfig.FuluForkEpoch = 0
params.OverrideBeaconConfig(beaconConfig)
params.BeaconConfig().InitializeForkSchedule()
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
require.NoError(t, err)
ctx := context.Background()
t.Run("wrong message type", func(t *testing.T) {
service := &Service{}
err := service.dataColumnSidecarByRootRPCHandler(ctx, nil, nil)
err := service.dataColumnSidecarByRootRPCHandler(t.Context(), nil, nil)
require.ErrorIs(t, err, notDataColumnsByRootIdentifiersError)
})

@@ -59,13 +65,13 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
})

localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(ctx, remoteP2P.BHost.ID(), protocolID)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)

msg := types.DataColumnsByRootIdentifiers{{Columns: []uint64{1, 2, 3}}}
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)

err = service.dataColumnSidecarByRootRPCHandler(ctx, msg, stream)
err = service.dataColumnSidecarByRootRPCHandler(t.Context(), msg, stream)
require.NotNil(t, err)
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) < 0)

@@ -125,10 +131,6 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
var wg sync.WaitGroup
wg.Add(1)

ctxMap := ContextByteVersions{
[4]byte{245, 165, 253, 66}: version.Fulu,
}

root0 := verifiedRODataColumns[0].BlockRoot()
root3 := verifiedRODataColumns[3].BlockRoot()
root5 := verifiedRODataColumns[5].BlockRoot()

@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -62,17 +61,11 @@ func TestRPC_LightClientBootstrap(t *testing.T) {
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, time.Second, false)

altairDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
bellatrixDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
capellaDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
denebDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
electraDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)

altairDigest := params.ForkDigest(params.BeaconConfig().AltairForkEpoch)
bellatrixDigest := params.ForkDigest(params.BeaconConfig().BellatrixForkEpoch)
capellaDigest := params.ForkDigest(params.BeaconConfig().CapellaForkEpoch)
denebDigest := params.ForkDigest(params.BeaconConfig().DenebForkEpoch)
electraDigest := params.ForkDigest(params.BeaconConfig().ElectraForkEpoch)
for i := 1; i <= 5; i++ {
t.Run(version.String(i), func(t *testing.T) {
l := util.NewTestLightClient(t, i)
@@ -184,16 +177,11 @@ func TestRPC_LightClientOptimisticUpdate(t *testing.T) {
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, time.Second, false)

altairDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
bellatrixDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
capellaDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
denebDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
electraDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
altairDigest := params.ForkDigest(params.BeaconConfig().AltairForkEpoch)
bellatrixDigest := params.ForkDigest(params.BeaconConfig().BellatrixForkEpoch)
capellaDigest := params.ForkDigest(params.BeaconConfig().CapellaForkEpoch)
denebDigest := params.ForkDigest(params.BeaconConfig().DenebForkEpoch)
electraDigest := params.ForkDigest(params.BeaconConfig().ElectraForkEpoch)

for i := 1; i <= 5; i++ {
t.Run(version.String(i), func(t *testing.T) {
@@ -304,16 +292,11 @@ func TestRPC_LightClientFinalityUpdate(t *testing.T) {
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, time.Second, false)

altairDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
bellatrixDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
capellaDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
denebDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
electraDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
altairDigest := params.ForkDigest(params.BeaconConfig().AltairForkEpoch)
bellatrixDigest := params.ForkDigest(params.BeaconConfig().BellatrixForkEpoch)
capellaDigest := params.ForkDigest(params.BeaconConfig().CapellaForkEpoch)
denebDigest := params.ForkDigest(params.BeaconConfig().DenebForkEpoch)
electraDigest := params.ForkDigest(params.BeaconConfig().ElectraForkEpoch)

for i := 1; i <= 5; i++ {
t.Run(version.String(i), func(t *testing.T) {
@@ -424,16 +407,11 @@ func TestRPC_LightClientUpdatesByRange(t *testing.T) {
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, time.Second, false)

altairDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
bellatrixDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
capellaDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
denebDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
electraDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
altairDigest := params.ForkDigest(params.BeaconConfig().AltairForkEpoch)
bellatrixDigest := params.ForkDigest(params.BeaconConfig().BellatrixForkEpoch)
capellaDigest := params.ForkDigest(params.BeaconConfig().CapellaForkEpoch)
denebDigest := params.ForkDigest(params.BeaconConfig().DenebForkEpoch)
electraDigest := params.ForkDigest(params.BeaconConfig().ElectraForkEpoch)

for i := 1; i <= 5; i++ {
t.Run(version.String(i), func(t *testing.T) {

@@ -5,8 +5,8 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -181,17 +181,9 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, peerID peer.ID) (meta
return nil, errors.New(errMsg)
}

// Get the genesis validators root.
valRoot := s.cfg.clock.GenesisValidatorsRoot()

// Get the fork digest from the current epoch and the genesis validators root.
rpcCtx, err := forks.ForkDigestFromEpoch(currentEpoch, valRoot[:])
if err != nil {
return nil, errors.Wrap(err, "fork digest from epoch")
}

digest := params.ForkDigest(currentEpoch)
// Instantiate zero value of the metadata.
msg, err := extractDataTypeFromTypeMap(types.MetaDataMap, rpcCtx[:], s.cfg.clock)
msg, err := extractDataTypeFromTypeMap(types.MetaDataMap, digest[:], s.cfg.clock)
if err != nil {
return nil, errors.Wrap(err, "extract data type from type map")
}
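Several tests in the hunks above trade hand-built context maps for `ContextByteVersionsForValRoot`. Both forms associate the 4 context bytes sent on the wire with a consensus version; an in-package sketch (types and helpers are the sync package's own, as used above, and the literal digest bytes are the Fulu digest for this test config, copied verbatim from the diff):

```go
// exampleContextMap sketches the two equivalent ways the tests build a
// context map. The wrapping function is illustrative.
func exampleContextMap() (ContextByteVersions, error) {
	// Hand-built form, pinning one digest to one version.
	manual := ContextByteVersions{
		[4]byte{245, 165, 253, 66}: version.Fulu, // example digest from the diff
	}
	_ = manual

	// Generated form, covering every fork for the configured validators root.
	// Assumes params.BeaconConfig().InitializeForkSchedule() has run.
	return ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
}
```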
@@ -888,6 +888,13 @@ func TestErrInvalidFetchedDataDistinction(t *testing.T) {
}

func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
beaconConfig := params.BeaconConfig()
beaconConfig.FuluForkEpoch = 0
params.OverrideBeaconConfig(beaconConfig)
params.BeaconConfig().InitializeForkSchedule()
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
require.NoError(t, err)
nilTestCases := []struct {
name string
request *ethpb.DataColumnSidecarsByRangeRequest
@@ -1033,10 +1040,7 @@ func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
assert.NoError(t, err)
})

ctx := t.Context()
ctxMap := ContextByteVersions{[4]byte{245, 165, 253, 66}: version.Fulu}

actual, err := SendDataColumnSidecarsByRangeRequest(ctx, clock, p1, p2.PeerID(), ctxMap, requestSent)
actual, err := SendDataColumnSidecarsByRangeRequest(t.Context(), clock, p1, p2.PeerID(), ctxMap, requestSent)
if tc.expectedError != nil {
require.ErrorContains(t, tc.expectedError.Error(), err)
if util.WaitTimeout(&wg, time.Second) {
@@ -1181,6 +1185,13 @@ func TestIsSidecarIndexRequested(t *testing.T) {
}

func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
beaconConfig := params.BeaconConfig()
beaconConfig.FuluForkEpoch = 0
params.OverrideBeaconConfig(beaconConfig)
params.BeaconConfig().InitializeForkSchedule()
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
require.NoError(t, err)
nilTestCases := []struct {
name string
request p2ptypes.DataColumnsByRootIdentifiers
@@ -1335,10 +1346,7 @@ func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
assert.NoError(t, err)
})

ctx := t.Context()
ctxMap := ContextByteVersions{[4]byte{245, 165, 253, 66}: version.Fulu}

actual, err := SendDataColumnSidecarsByRootRequest(ctx, clock, p1, p2.PeerID(), ctxMap, sentRequest)
actual, err := SendDataColumnSidecarsByRootRequest(t.Context(), clock, p1, p2.PeerID(), ctxMap, sentRequest)
if tc.expectedError != nil {
require.ErrorContains(t, tc.expectedError.Error(), err)
if util.WaitTimeout(&wg, time.Second) {

@@ -178,6 +178,7 @@ type Service struct {
slasherEnabled bool
lcStore *lightClient.Store
dataColumnLogCh chan dataColumnLogEntry
registeredNetworkEntry params.NetworkScheduleEntry
}

// NewService initializes new regular sync service.

@@ -15,14 +15,13 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/messagehandler"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -58,21 +57,17 @@ type (
// but for which no subscriptions are needed.
getSubnetsRequiringPeers func(currentSlot primitives.Slot) map[uint64]bool
}

// parameters used for the `subscribeToSubnets` function.
subscribeToSubnetsParameters struct {
subscriptionBySubnet map[uint64]*pubsub.Subscription
topicFormat string
digest [4]byte
genesisValidatorsRoot [fieldparams.RootLength]byte
genesisTime time.Time
currentSlot primitives.Slot
validate wrappedVal
handle subHandler
getSubnetsToJoin func(currentSlot primitives.Slot) map[uint64]bool
}
)

type subscribeToSubnetsParameters struct {
subscriptionBySubnet map[uint64]*pubsub.Subscription
topicFormat string
digest [4]byte
validate wrappedVal
handle subHandler
getSubnetsToJoin func(currentSlot primitives.Slot) map[uint64]bool
}
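The parameters struct loses its `genesisValidatorsRoot`, `genesisTime`, and `currentSlot` fields because the service clock now supplies all three on demand. A sketch of filling the slimmed struct from inside the service, mirroring the test code later in this diff; `validator` and `handler` stand in for a concrete `wrappedVal` and `subHandler`:

```go
// In-package sketch: time-derived values are no longer carried in the
// parameters struct; they are read from s.cfg.clock when needed.
sp := subscribeToSubnetsParameters{
	subscriptionBySubnet: make(map[uint64]*pubsub.Subscription),
	topicFormat:          p2p.SyncCommitteeSubnetTopicFormat,
	digest:               params.ForkDigest(s.cfg.clock.CurrentEpoch()),
	validate:             validator, // hypothetical wrappedVal
	handle:               handler,   // hypothetical subHandler
	getSubnetsToJoin:     s.activeSyncSubnetIndices,
}
if err := s.subscribeToSubnets(sp); err != nil {
	log.WithError(err).Error("Could not subscribe to subnets")
}
```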
var errInvalidDigest = errors.New("invalid digest")

// noopValidator is a no-op that only decodes the message, but does not check its contents.
@@ -244,8 +239,7 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
// subscribe to a given topic with a given validator and subscription handler.
// The base protobuf message is used to initialize new messages for decoding.
func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandler, digest [4]byte) *pubsub.Subscription {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
_, e, err := params.ForkDataFromDigest(digest)
if err != nil {
// Impossible condition as it would mean digest does not exist.
panic(err) // lint:nopanic -- Impossible condition.
@@ -454,8 +448,7 @@ func (s *Service) subscribeToSubnets(p subscribeToSubnetsParameters) error {
return nil
}

// Check the validity of the digest.
valid, err := isDigestValid(p.digest, p.genesisTime, p.genesisValidatorsRoot)
valid, err := isDigestValid(p.digest, s.cfg.clock)
if err != nil {
return errors.Wrap(err, "is digest valid")
}
@@ -467,13 +460,8 @@ func (s *Service) subscribeToSubnets(p subscribeToSubnetsParameters) error {
return errInvalidDigest
}

// Retrieve the subnets we want to join.
subnetsToJoin := p.getSubnetsToJoin(p.currentSlot)

// Remove subscriptions that are no longer wanted.
subnetsToJoin := p.getSubnetsToJoin(s.cfg.clock.CurrentSlot())
s.pruneSubscriptions(p.subscriptionBySubnet, subnetsToJoin, p.topicFormat, p.digest)

// Subscribe to wanted and not already registered subnets.
for subnet := range subnetsToJoin {
subnetTopic := fmt.Sprintf(p.topicFormat, p.digest, subnet)

@@ -488,44 +476,34 @@ func (s *Service) subscribeToSubnets(p subscribeToSubnetsParameters) error {

// subscribeWithParameters subscribes to a list of subnets.
func (s *Service) subscribeWithParameters(p subscribeParameters) {
minimumPeersPerSubnet := flags.Get().MinimumPeersPerSubnet
subscriptionBySubnet := make(map[uint64]*pubsub.Subscription)
genesisValidatorsRoot := s.cfg.clock.GenesisValidatorsRoot()
genesisTime := s.cfg.clock.GenesisTime()
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
secondsPerSlotDuration := time.Duration(secondsPerSlot) * time.Second
currentSlot := s.cfg.clock.CurrentSlot()
neededSubnets := computeAllNeededSubnets(currentSlot, p.getSubnetsToJoin, p.getSubnetsRequiringPeers)

shortTopicFormat := p.topicFormat
shortTopicFormatLen := len(shortTopicFormat)
if shortTopicFormatLen >= 3 && shortTopicFormat[shortTopicFormatLen-3:] == "_%d" {
shortTopicFormat = shortTopicFormat[:shortTopicFormatLen-3]
}

shortTopic := fmt.Sprintf(shortTopicFormat, p.digest)

parameters := subscribeToSubnetsParameters{
subscriptionBySubnet: subscriptionBySubnet,
topicFormat: p.topicFormat,
digest: p.digest,
genesisValidatorsRoot: genesisValidatorsRoot,
genesisTime: genesisTime,
currentSlot: currentSlot,
validate: p.validate,
handle: p.handle,
getSubnetsToJoin: p.getSubnetsToJoin,
subscriptionBySubnet: make(map[uint64]*pubsub.Subscription),
topicFormat: p.topicFormat,
digest: p.digest,
validate: p.validate,
handle: p.handle,
getSubnetsToJoin: p.getSubnetsToJoin,
}

err := s.subscribeToSubnets(parameters)
if err != nil {
log.WithError(err).Error("Could not subscribe to subnets")
}

current := s.cfg.clock.CurrentSlot()
slotDuration := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
neededSubnets := computeAllNeededSubnets(current, p.getSubnetsToJoin, p.getSubnetsRequiringPeers)
minimumPeersPerSubnet := flags.Get().MinimumPeersPerSubnet
// Subscribe to expected subnets and search for peers if needed at every slot.
go func() {
func() {
ctx, cancel := context.WithTimeout(s.ctx, secondsPerSlotDuration)
ctx, cancel := context.WithTimeout(s.ctx, slotDuration)
defer cancel()

if err := s.cfg.p2p.FindAndDialPeersWithSubnets(ctx, p.topicFormat, p.digest, minimumPeersPerSubnet, neededSubnets); err != nil && !errors.Is(err, context.DeadlineExceeded) {
@@ -533,25 +511,23 @@ func (s *Service) subscribeWithParameters(p subscribeParameters) {
}
}()

slotTicker := slots.NewSlotTicker(genesisTime, secondsPerSlot)
slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
defer slotTicker.Done()

for {
select {
case currentSlot := <-slotTicker.C():
parameters.currentSlot = currentSlot
case <-slotTicker.C():
if err := s.subscribeToSubnets(parameters); err != nil {
if errors.Is(err, errInvalidDigest) {
log.WithField("topics", shortTopic).Debug("Digest is invalid, stopping subscription")
return
}

log.WithError(err).Error("Could not subscribe to subnets")
continue
}

func() {
ctx, cancel := context.WithTimeout(s.ctx, secondsPerSlotDuration)
ctx, cancel := context.WithTimeout(s.ctx, slotDuration)
defer cancel()

if err := s.cfg.p2p.FindAndDialPeersWithSubnets(ctx, p.topicFormat, p.digest, minimumPeersPerSubnet, neededSubnets); err != nil && !errors.Is(err, context.DeadlineExceeded) {
@@ -574,7 +550,7 @@ func (s *Service) subscribeWithParameters(p subscribeParameters) {
for {
select {
case <-logTicker.C:
subnetsToFindPeersIndex := computeAllNeededSubnets(currentSlot, p.getSubnetsToJoin, p.getSubnetsRequiringPeers)
subnetsToFindPeersIndex := computeAllNeededSubnets(current, p.getSubnetsToJoin, p.getSubnetsRequiringPeers)

isSubnetWithMissingPeers := false
// Find new peers for wanted subnets if needed.
@@ -750,27 +726,19 @@ func (*Service) addDigestAndIndexToTopic(topic string, digest [4]byte, idx uint6
}

func (s *Service) currentForkDigest() ([4]byte, error) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
return forks.CreateForkDigest(s.cfg.clock.GenesisTime(), genRoot[:])
return params.ForkDigest(s.cfg.clock.CurrentEpoch()), nil
}

// Checks if the provided digest matches up with the current supposed digest.
func isDigestValid(digest [4]byte, genesis time.Time, genValRoot [32]byte) (bool, error) {
retDigest, err := forks.CreateForkDigest(genesis, genValRoot[:])
if err != nil {
return false, err
}
isNextEpoch, err := forks.IsForkNextEpoch(genesis, genValRoot[:])
if err != nil {
return false, err
}
func isDigestValid(digest [4]byte, clock *startup.Clock) (bool, error) {
current := clock.CurrentEpoch()
// In the event there is a fork the next epoch,
// we skip the check, as we subscribe subnets an
// epoch in advance.
if isNextEpoch {
if params.DigestChangesAfter(current) {
return true, nil
}
return retDigest == digest, nil
return params.ForkDigest(current) == digest, nil
}
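The rewritten `isDigestValid` captures the subscription policy in two lines: when a fork lands next epoch any digest passes, because subnets are subscribed an epoch in advance; otherwise the digest must match the current epoch's. A standalone sketch of that rule, using the `params` APIs this diff introduces and assuming an initialized fork schedule:

```go
package digestcheck

import (
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// digestOK is an illustrative helper; in the diff the epoch comes from the
// service clock (clock.CurrentEpoch()).
func digestOK(digest [4]byte, epoch primitives.Epoch) bool {
	if params.DigestChangesAfter(epoch) {
		// A fork activates next epoch; subscriptions run one epoch ahead,
		// so both the old and the new digest are acceptable here.
		return true
	}
	return params.ForkDigest(epoch) == digest
}
```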
|
||||
|
||||
// computeAllNeededSubnets computes the subnets we want to join
|
||||
|
||||
@@ -78,6 +78,10 @@ func (s *Service) processSidecarsFromExecution(ctx context.Context, block interf
|
||||
// processDataColumnSidecarsFromExecution retrieves (if available) data column sidecars data from the execution client,
|
||||
// builds corresponding sidecars, save them to the storage, and broadcasts them over P2P if necessary.
|
||||
func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, roSignedBlock interfaces.ReadOnlySignedBeaconBlock) {
|
||||
if features.Get().DisableGetBlobs {
|
||||
return
|
||||
}
|
||||
|
||||
block := roSignedBlock.Block()
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
@@ -159,7 +163,7 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, ro
|
||||
// processBlobSidecarsFromExecution retrieves (if available) blob sidecars data from the execution client,
|
||||
// builds corresponding sidecars, save them to the storage, and broadcasts them over P2P if necessary.
|
||||
func (s *Service) processBlobSidecarsFromExecution(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) {
|
||||
startTime, err := slots.StartTime(s.cfg.chain.GenesisTime(), block.Block().Slot())
|
||||
startTime, err := slots.StartTime(s.cfg.clock.GenesisTime(), block.Block().Slot())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to convert slot to time")
|
||||
}
|
||||
|
||||
@@ -26,7 +26,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
@@ -319,8 +318,8 @@ func Test_wrapAndReportValidation(t *testing.T) {
|
||||
Genesis: time.Now(),
|
||||
ValidatorsRoot: [32]byte{0x01},
|
||||
}
|
||||
fd, err := forks.CreateForkDigest(mChain.GenesisTime(), mChain.ValidatorsRoot[:])
|
||||
assert.NoError(t, err)
|
||||
clock := startup.NewClock(mChain.Genesis, mChain.ValidatorsRoot)
|
||||
fd := params.ForkDigest(clock.CurrentEpoch())
|
||||
mockTopic := fmt.Sprintf(p2p.BlockSubnetTopicFormat, fd) + encoder.SszNetworkEncoder{}.ProtocolSuffix()
|
||||
type args struct {
|
||||
topic string
|
||||
@@ -560,26 +559,21 @@ func TestSubscribeWithSyncSubnets_DynamicOK(t *testing.T) {
|
||||
|
||||
func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 1
|
||||
cfg.SecondsPerSlot = 1
|
||||
cfg.SlotsPerEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
currSlot := primitives.Slot(100)
|
||||
gt := time.Now().Add(-time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
vr := [32]byte{'A'}
|
||||
defer cancel()
|
||||
vr := params.BeaconConfig().GenesisValidatorsRoot
|
||||
mockNow := &startup.MockNower{}
|
||||
clock := startup.NewClock(time.Now(), vr, startup.WithNower(mockNow.Now))
|
||||
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
require.NoError(t, err)
|
||||
mockNow.SetSlot(t, clock, denebSlot)
|
||||
r := Service{
|
||||
ctx: ctx,
|
||||
cfg: &config{
|
||||
chain: &mockChain.ChainService{
|
||||
Genesis: gt,
|
||||
ValidatorsRoot: vr,
|
||||
Slot: &currSlot,
|
||||
},
|
||||
clock: startup.NewClock(gt, vr),
|
||||
chain: &mockChain.ChainService{},
|
||||
clock: clock,
|
||||
p2p: p,
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
@@ -588,16 +582,19 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
    // Empty cache at the end of the test.
    defer cache.SyncSubnetIDs.EmptyAllCaches()
    cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte("pubkey"), 0, []uint64{0, 1}, 10*time.Second)
    genRoot := r.cfg.clock.GenesisValidatorsRoot()
    digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, genRoot[:])
    assert.NoError(t, err)
    digest := params.ForkDigest(r.cfg.clock.CurrentEpoch())
    version, e, err := params.ForkDataFromDigest(digest)
    require.NoError(t, err)
    require.Equal(t, [4]byte(params.BeaconConfig().DenebForkVersion), version)
    require.Equal(t, params.BeaconConfig().DenebForkEpoch, e)

    r.subscribeWithParameters(subscribeParameters{
        topicFormat:      p2p.SyncCommitteeSubnetTopicFormat,
        digest:           digest,
        getSubnetsToJoin: r.activeSyncSubnetIndices,
    })
    time.Sleep(2 * time.Second)
    sp := subscribeToSubnetsParameters{
        subscriptionBySubnet: make(map[uint64]*pubsub.Subscription),
        topicFormat:          p2p.SyncCommitteeSubnetTopicFormat,
        digest:               digest,
        getSubnetsToJoin:     r.activeSyncSubnetIndices,
    }
    require.NoError(t, r.subscribeToSubnets(sp))
    assert.Equal(t, 2, len(r.cfg.p2p.PubSub().GetTopics()))
    topicMap := map[string]bool{}
    for _, t := range r.cfg.p2p.PubSub().GetTopics() {
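Instead of back-dating genesis so that "now" lands on a target slot, the rewritten test injects a controllable time source (startup.MockNower) into the clock and jumps it directly to the Deneb, and later Electra, fork slots. A minimal sketch of that injectable-clock pattern; the types below only mirror the shape of Prysm's startup package and are illustrative:

package main

import (
    "fmt"
    "sync"
    "time"
)

// nower supplies the current time; production code passes time.Now,
// tests pass a controllable stub (the role MockNower plays above).
type nower func() time.Time

type clock struct {
    genesis time.Time
    now     nower
}

func newClock(genesis time.Time, now nower) *clock {
    return &clock{genesis: genesis, now: now}
}

// currentSlot derives the slot from the injected time source.
func (c *clock) currentSlot(secondsPerSlot uint64) uint64 {
    return uint64(c.now().Sub(c.genesis)/time.Second) / secondsPerSlot
}

// mockNower is a thread-safe, settable time source for tests.
type mockNower struct {
    mu sync.Mutex
    t  time.Time
}

func (m *mockNower) Now() time.Time { m.mu.Lock(); defer m.mu.Unlock(); return m.t }

// setSlot positions the mock so the clock reports exactly the given slot.
func (m *mockNower) setSlot(c *clock, slot, secondsPerSlot uint64) {
    m.mu.Lock()
    defer m.mu.Unlock()
    m.t = c.genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
}

func main() {
    mn := &mockNower{}
    c := newClock(time.Now(), mn.Now)
    mn.setSlot(c, 32, 12)
    fmt.Println(c.currentSlot(12)) // 32
}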
@@ -609,25 +606,37 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
    secondSub := fmt.Sprintf(p2p.SyncCommitteeSubnetTopicFormat, digest, 1) + r.cfg.p2p.Encoding().ProtocolSuffix()
    assert.Equal(t, true, topicMap[secondSub])

    // Expect that all old topics will be unsubscribed.
    time.Sleep(2 * time.Second)
    assert.Equal(t, 0, len(r.cfg.p2p.PubSub().GetTopics()))
    electraSlot, err := slots.EpochStart(params.BeaconConfig().ElectraForkEpoch)
    require.NoError(t, err)
    mockNow.SetSlot(t, clock, electraSlot)
    digest = params.ForkDigest(r.cfg.clock.CurrentEpoch())
    version, e, err = params.ForkDataFromDigest(digest)
    require.NoError(t, err)
    require.Equal(t, [4]byte(params.BeaconConfig().ElectraForkVersion), version)
    require.Equal(t, params.BeaconConfig().ElectraForkEpoch, e)

    cancel()
    sp.digest = digest
    // Clear the cache and re-subscribe to the subnets.
    // This should result in the subscriptions being removed.
    cache.SyncSubnetIDs.EmptyAllCaches()
    require.NoError(t, r.subscribeToSubnets(sp))
    assert.Equal(t, 0, len(r.cfg.p2p.PubSub().GetTopics()))
}

func TestIsDigestValid(t *testing.T) {
    genRoot := [32]byte{'A'}
    digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, genRoot[:])
    params.SetupTestConfigCleanup(t)
    params.BeaconConfig().InitializeForkSchedule()
    clock := startup.NewClock(time.Now().Add(-100*time.Second), params.BeaconConfig().GenesisValidatorsRoot)
    digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
    assert.NoError(t, err)
    valid, err := isDigestValid(digest, time.Now().Add(-100*time.Second), genRoot)
    valid, err := isDigestValid(digest, clock)
    assert.NoError(t, err)
    assert.Equal(t, true, valid)

    // Compute a future fork digest that is currently invalid.
    digest, err = signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, genRoot[:])
    digest, err = signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
    assert.NoError(t, err)
    valid, err = isDigestValid(digest, time.Now().Add(-100*time.Second), genRoot)
    valid, err = isDigestValid(digest, clock)
    assert.NoError(t, err)
    assert.Equal(t, false, valid)
}

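The updated assertions resolve a digest back to its (version, epoch) pair via params.ForkDataFromDigest. One plausible way such a reverse lookup works is to precompute the digest of every scheduled fork into a map; a sketch under that assumption (the entry type, function names, and the sample schedule are illustrative, not Prysm's internals):

package main

import (
    "crypto/sha256"
    "errors"
    "fmt"
)

// forkEntry is an illustrative schedule entry: a 4-byte version and the
// epoch at which it activates.
type forkEntry struct {
    Version [4]byte
    Epoch   uint64
}

// digestIndex precomputes digest -> fork for every scheduled fork, enabling
// O(1) reverse lookup of a gossip topic digest.
func digestIndex(schedule []forkEntry, root [32]byte) map[[4]byte]forkEntry {
    idx := make(map[[4]byte]forkEntry, len(schedule))
    for _, f := range schedule {
        var chunk [32]byte
        copy(chunk[:4], f.Version[:])
        h := sha256.Sum256(append(chunk[:], root[:]...)) // hash_tree_root(ForkData)
        var d [4]byte
        copy(d[:], h[:4])
        idx[d] = f
    }
    return idx
}

// forkDataFromDigest resolves a digest back to (version, epoch), failing on
// digests that are not in the schedule.
func forkDataFromDigest(idx map[[4]byte]forkEntry, d [4]byte) (forkEntry, error) {
    f, ok := idx[d]
    if !ok {
        return forkEntry{}, errors.New("unknown fork digest")
    }
    return f, nil
}

func main() {
    schedule := []forkEntry{
        {Version: [4]byte{0, 0, 0, 0}, Epoch: 0},     // genesis (mainnet)
        {Version: [4]byte{1, 0, 0, 0}, Epoch: 74240}, // altair (mainnet)
    }
    idx := digestIndex(schedule, [32]byte{})
    for d := range idx {
        f, err := forkDataFromDigest(idx, d)
        if err != nil {
            panic(err)
        }
        fmt.Printf("digest %x -> version %x at epoch %d\n", d, f.Version, f.Epoch)
    }
}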
@@ -7,7 +7,8 @@ import (

    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
    "github.com/OffchainLabs/prysm/v6/network/forks"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
    pubsub "github.com/libp2p/go-libp2p-pubsub"
)
@@ -18,8 +19,8 @@ func TestSubTopicHandler_CRUD(t *testing.T) {
    assert.Equal(t, false, h.topicExists("junk"))
    assert.Equal(t, false, h.digestExists([4]byte{}))

    digest, err := forks.CreateForkDigest(time.Now(), make([]byte, 32))
    assert.NoError(t, err)
    clock := startup.NewClock(time.Now(), [32]byte{})
    digest := params.ForkDigest(clock.CurrentEpoch())
    enc := encoder.SszNetworkEncoder{}

    // Valid topic added in.

@@ -309,24 +309,15 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {

func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig()
    fvs := map[[fieldparams.VersionLength]byte]primitives.Epoch{}
    fvs[bytesutil.ToBytes4(cfg.GenesisForkVersion)] = 1
    fvs[bytesutil.ToBytes4(cfg.AltairForkVersion)] = 2
    fvs[bytesutil.ToBytes4(cfg.BellatrixForkVersion)] = 3
    fvs[bytesutil.ToBytes4(cfg.CapellaForkVersion)] = 4
    fvs[bytesutil.ToBytes4(cfg.DenebForkVersion)] = 5
    fvs[bytesutil.ToBytes4(cfg.FuluForkVersion)] = 6
    fvs[bytesutil.ToBytes4(cfg.ElectraForkVersion)] = 0
    cfg.ForkVersionSchedule = fvs
    params.OverrideBeaconConfig(cfg)
    params.BeaconConfig().InitializeForkSchedule()

    p := p2ptest.NewTestP2P(t)
    db := dbtest.SetupDB(t)
    currentSlot := 1 + (primitives.Slot(params.BeaconConfig().ElectraForkEpoch) * params.BeaconConfig().SlotsPerEpoch)
    genesisOffset := time.Duration(currentSlot) * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
    chain := &mockChain.ChainService{
        // 1 slot ago.
        Genesis:        time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second),
        ValidatorsRoot: [32]byte{'A'},
        Genesis:        time.Now().Add(-1 * genesisOffset),
        ValidatorsRoot: params.BeaconConfig().GenesisValidatorsRoot,
        ValidAttestation: true,
        DB:               db,
        Optimistic:       true,
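Note the setup trick in this hunk: genesisOffset places the mock chain's genesis exactly currentSlot slots in the past, so the service clock derives precisely the first post-Electra slot. Spelled out as standalone arithmetic (the constants here are illustrative, not the test's config):

package main

import (
    "fmt"
    "time"
)

func main() {
    const secondsPerSlot = 12
    targetSlot := uint64(100)

    // Place genesis far enough in the past that "now" falls inside targetSlot:
    // genesis = now - targetSlot * secondsPerSlot.
    genesis := time.Now().Add(-time.Duration(targetSlot) * secondsPerSlot * time.Second)

    // A clock built on this genesis reports the elapsed slot count as current.
    currentSlot := uint64(time.Since(genesis).Seconds()) / secondsPerSlot
    fmt.Println(currentSlot == targetSlot) // true (modulo sub-second timing)
}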
@@ -347,6 +338,7 @@ func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) {
        seenUnAggregatedAttestationCache: lruwrpr.New(10),
        signatureChan:                    make(chan *signatureVerifier, verifierLimit),
    }
    require.Equal(t, currentSlot, s.cfg.clock.CurrentSlot())
    s.initCaches()
    go s.verifierRoutine()

@@ -354,7 +346,7 @@ func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) {
    require.NoError(t, err)

    blk := util.NewBeaconBlock()
    blk.Block.Slot = 1
    blk.Block.Slot = s.cfg.clock.CurrentSlot()
    util.SaveBlock(t, ctx, db, blk)

    validBlockRoot, err := blk.Block.HashTreeRoot()
@@ -366,10 +358,10 @@ func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) {

    validators := uint64(64)
    savedState, keys := util.DeterministicGenesisState(t, validators)
    require.NoError(t, savedState.SetSlot(1))
    require.NoError(t, savedState.SetSlot(s.cfg.clock.CurrentSlot()))
    require.NoError(t, db.SaveState(t.Context(), savedState, validBlockRoot))
    chain.State = savedState
    committee, err := helpers.BeaconCommitteeFromState(ctx, savedState, 1, 0)
    committee, err := helpers.BeaconCommitteeFromState(ctx, savedState, s.cfg.clock.CurrentSlot(), 0)
    require.NoError(t, err)

    tests := []struct {
@@ -383,9 +375,9 @@ func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) {
            Data: &ethpb.AttestationData{
                BeaconBlockRoot: validBlockRoot[:],
                CommitteeIndex:  0,
                Slot:            1,
                Slot:            s.cfg.clock.CurrentSlot(),
                Target: &ethpb.Checkpoint{
                    Epoch: 0,
                    Epoch: s.cfg.clock.CurrentEpoch(),
                    Root:  validBlockRoot[:],
                },
                Source: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
@@ -400,9 +392,9 @@ func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) {
            Data: &ethpb.AttestationData{
                BeaconBlockRoot: validBlockRoot[:],
                CommitteeIndex:  1,
                Slot:            1,
                Slot:            s.cfg.clock.CurrentSlot(),
                Target: &ethpb.Checkpoint{
                    Epoch: 0,
                    Epoch: s.cfg.clock.CurrentEpoch(),
                    Root:  validBlockRoot[:],
                },
                Source: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
@@ -417,9 +409,9 @@ func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) {
            Data: &ethpb.AttestationData{
                BeaconBlockRoot: validBlockRoot[:],
                CommitteeIndex:  1,
                Slot:            1,
                Slot:            s.cfg.clock.CurrentSlot(),
                Target: &ethpb.Checkpoint{
                    Epoch: 0,
                    Epoch: s.cfg.clock.CurrentEpoch(),
                    Root:  validBlockRoot[:],
                },
                Source: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},

@@ -779,6 +779,8 @@ func TestValidateBeaconBlockPubSub_SeenProposerSlot(t *testing.T) {
}

func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    params.BeaconConfig().InitializeForkSchedule()
    hook := logTest.NewGlobal()
    db := dbtest.SetupDB(t)
    p := p2ptest.NewTestP2P(t)
@@ -791,7 +793,7 @@ func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) {
        FinalizedCheckPoint: &ethpb.Checkpoint{
            Epoch: 1,
        },
        ValidatorsRoot: [32]byte{},
        ValidatorsRoot: params.BeaconConfig().GenesisValidatorsRoot,
    }

    r := &Service{
@@ -814,7 +816,7 @@ func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) {
    buf := new(bytes.Buffer)
    _, err = p.Encoding().EncodeGossip(buf, b)
    require.NoError(t, err)
    digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, make([]byte, 32))
    digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
    assert.NoError(t, err)
    topic := fmt.Sprintf(p2p.GossipTypeMapping[reflect.TypeOf(b)], digest)
    m := &pubsub.Message{
@@ -1187,6 +1189,8 @@ func TestService_isBlockQueueable(t *testing.T) {
}

func TestValidateBeaconBlockPubSub_ValidExecutionPayload(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    params.BeaconConfig().InitializeForkSchedule()
    db := dbtest.SetupDB(t)
    p := p2ptest.NewTestP2P(t)
    ctx := t.Context()
@@ -1219,7 +1223,8 @@ func TestValidateBeaconBlockPubSub_ValidExecutionPayload(t *testing.T) {

    stateGen := stategen.New(db, doublylinkedtree.New())
    chainService := &mock.ChainService{Genesis: now.Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
        DB: db,
        ValidatorsRoot: params.BeaconConfig().GenesisValidatorsRoot,
        DB:             db,
        FinalizedCheckPoint: &ethpb.Checkpoint{
            Epoch: 0,
            Root:  make([]byte, 32),
@@ -1419,6 +1424,8 @@ func Test_validateBellatrixBeaconBlockParentValidation(t *testing.T) {
}

func Test_validateBeaconBlockProcessingWhenParentIsOptimistic(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    params.BeaconConfig().InitializeForkSchedule()
    db := dbtest.SetupDB(t)
    p := p2ptest.NewTestP2P(t)
    ctx := t.Context()
@@ -1450,8 +1457,9 @@ func Test_validateBeaconBlockProcessingWhenParentIsOptimistic(t *testing.T) {
    require.NoError(t, err)

    chainService := &mock.ChainService{Genesis: beaconState.GenesisTime(),
        DB:         db,
        Optimistic: true,
        ValidatorsRoot: params.BeaconConfig().GenesisValidatorsRoot,
        DB:             db,
        Optimistic:     true,
        FinalizedCheckPoint: &ethpb.Checkpoint{
            Epoch: 0,
            Root:  make([]byte, 32),

@@ -68,7 +68,7 @@ func (s *Service) validateBlob(ctx context.Context, pid peer.ID, msg *pubsub.Mes
        return pubsub.ValidationIgnore, err
    }

    startTime, err := slots.StartTime(s.cfg.chain.GenesisTime(), blob.Slot())
    startTime, err := slots.StartTime(s.cfg.clock.GenesisTime(), blob.Slot())
    if err != nil {
        return pubsub.ValidationIgnore, err
    }

@@ -142,10 +142,12 @@ func TestValidateBlob_AlreadySeenInCache(t *testing.T) {
}

func TestValidateBlob_InvalidTopicIndex(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    params.BeaconConfig().InitializeForkSchedule()
    ctx := t.Context()
    p := p2ptest.NewTestP2P(t)
    chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0)}
    s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
    s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, params.BeaconConfig().GenesisValidatorsRoot)}}
    s.newBlobVerifier = testNewBlobVerifier()

    _, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, chainService.CurrentSlot()+1, 1)
@@ -163,7 +165,7 @@ func TestValidateBlob_InvalidTopicIndex(t *testing.T) {
        Data:  buf.Bytes(),
        Topic: &topic,
    }})
    require.ErrorContains(t, "/eth2/f5a5fd42/blob_sidecar_1", err)
    require.ErrorContains(t, "blob_sidecar_1", err)
    require.Equal(t, result, pubsub.ValidationReject)
}

@@ -195,7 +195,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
    dataColumnSidecarVerificationSuccessesCounter.Inc()

    // Get the time at slot start.
    startTime, err := slots.StartTime(s.cfg.chain.GenesisTime(), roDataColumn.SignedBlockHeader.Header.Slot)
    startTime, err := slots.StartTime(s.cfg.clock.GenesisTime(), roDataColumn.SignedBlockHeader.Header.Slot)
    if err != nil {
        return pubsub.ValidationIgnore, err
    }

@@ -22,7 +22,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/network/forks"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
    "github.com/OffchainLabs/prysm/v6/testing/require"
@@ -224,8 +223,7 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) {
    gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(slots.PrevSlot(hState.Slot())))
    vr := [32]byte{'A'}
    clock := startup.NewClock(gt, vr)
    digest, err := forks.CreateForkDigest(gt, vr[:])
    assert.NoError(t, err)
    digest := params.ForkDigest(slots.ToEpoch(clock.CurrentSlot()))
    actualTopic := fmt.Sprintf(defaultTopic, digest, 5)

    return s, actualTopic, clock
@@ -270,8 +268,8 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) {

    gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(slots.PrevSlot(hState.Slot())))
    vr := [32]byte{'A'}
    digest, err := forks.CreateForkDigest(gt, vr[:])
    assert.NoError(t, err)
    clock := startup.NewClock(gt, vr)
    digest := params.ForkDigest(clock.CurrentEpoch())
    actualTopic := fmt.Sprintf(defaultTopic, digest, 5)

    return s, actualTopic, startup.NewClock(gt, vr)
@@ -324,8 +322,8 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) {
    // Set Topic and Subnet
    gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(slots.PrevSlot(hState.Slot())))
    vr := [32]byte{'A'}
    digest, err := forks.CreateForkDigest(gt, vr[:])
    assert.NoError(t, err)
    clock := startup.NewClock(gt, vr)
    digest := params.ForkDigest(slots.ToEpoch(clock.CurrentSlot()))
    actualTopic := fmt.Sprintf(defaultTopic, digest, 5)

    return s, actualTopic, startup.NewClock(gt, vr)
@@ -382,8 +380,8 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) {
    // Set Topic and Subnet
    gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(slots.PrevSlot(hState.Slot())))
    vr := [32]byte{'A'}
    digest, err := forks.CreateForkDigest(gt, vr[:])
    assert.NoError(t, err)
    clock := startup.NewClock(gt, vr)
    digest := params.ForkDigest(slots.ToEpoch(clock.CurrentSlot()))
    actualTopic := fmt.Sprintf(defaultTopic, digest, 1)

    return s, actualTopic, startup.NewClock(gt, vr)

@@ -35,7 +35,6 @@ go_library(
    "//consensus-types/primitives:go_default_library",
    "//crypto/bls:go_default_library",
    "//encoding/bytesutil:go_default_library",
    "//network/forks:go_default_library",
    "//proto/prysm/v1alpha1:go_default_library",
    "//runtime/logging:go_default_library",
    "//time/slots:go_default_library",

@@ -13,7 +13,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/crypto/bls"
    "github.com/OffchainLabs/prysm/v6/network/forks"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    lru "github.com/hashicorp/golang-lru"
@@ -63,7 +62,7 @@ func (d signatureData) logFields() logrus.Fields {

func newSigCache(vr []byte, size int, gf forkLookup) *sigCache {
    if gf == nil {
        gf = forks.Fork
        gf = params.Fork
    }
    return &sigCache{Cache: lruwrpr.New(size), valRoot: vr, getFork: gf}
}

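The last hunk swaps the default fork-lookup from forks.Fork to params.Fork while keeping the nil-check seam that lets tests inject their own lookup. A minimal sketch of that dependency-injection pattern, using stand-in types rather than Prysm's:

package main

import "fmt"

// fork is a stand-in for the consensus Fork object; illustrative only.
type fork struct{ CurrentVersion [4]byte }

// forkLookup resolves the fork active at a given epoch.
type forkLookup func(epoch uint64) (*fork, error)

// defaultForkLookup plays the role params.Fork plays in the hunk above.
func defaultForkLookup(epoch uint64) (*fork, error) {
    return &fork{CurrentVersion: [4]byte{0, 0, 0, 0}}, nil
}

type sigCache struct {
    valRoot []byte
    getFork forkLookup
}

// newSigCache falls back to the package default when no lookup is injected,
// which is exactly the seam the diff preserves while changing only the default.
func newSigCache(vr []byte, gf forkLookup) *sigCache {
    if gf == nil {
        gf = defaultForkLookup
    }
    return &sigCache{valRoot: vr, getFork: gf}
}

func main() {
    c := newSigCache(make([]byte, 32), nil)
    f, _ := c.getFork(0)
    fmt.Printf("%x\n", f.CurrentVersion)
}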
Some files were not shown because too many files have changed in this diff.