Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-08 21:08:10 -05:00)
Refactor fork schedules (#15490)
* overhaul fork schedule management for bpos
* Unify log
* Radek's comments
* Use arg config to determine previous epoch, with regression test
* Remove unnecessary NewClock. @potuz feedback
* Continuation of previous commit: Remove unnecessary NewClock. @potuz feedback
* Remove VerifyBlockHeaderSignatureUsingCurrentFork
* cosmetic changes
* Remove unnecessary copy. entryWithForkDigest passes by value, not by pointer, so it should be fine
* Reuse ErrInvalidTopic from p2p package
* Unskip TestServer_GetBeaconConfig
* Resolve TODO about forkwatcher in local mode
* remove Copy()

---------

Co-authored-by: Kasey <kasey@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: rkapka <radoslaw.kapka@gmail.com>
Co-authored-by: Preston Van Loon <preston@pvl.dev>
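At a glance, the commit replaces on-the-fly fork digest computation in network/forks (which needed the genesis validators root and could fail) with lookups into a fork schedule precomputed by config/params. Below is a minimal sketch of the new call shape, inferred from the hunks that follow: params.ForkDigest, params.GetNetworkScheduleEntry, params.NextNetworkScheduleEntry, the NetworkScheduleEntry fields, and the updateENR signature all appear in the diff, while the wrapper function itself is hypothetical.

// Hypothetical wrapper, for illustration only; the params calls and the
// updateENR signature are taken from the hunks below.
func refreshForkENR(node *enode.LocalNode, genesisTime time.Time) error {
	epoch := slots.ToEpoch(slots.CurrentSlot(genesisTime))
	// Old path: forks.CreateForkDigest / forks.NextForkData, both fallible and
	// dependent on the genesis validators root. New path: infallible lookups,
	// valid once params.BeaconConfig().InitializeForkSchedule() has run.
	current := params.GetNetworkScheduleEntry(epoch) // carries ForkDigest, ForkVersion, Epoch
	next := params.NextNetworkScheduleEntry(epoch)
	return updateENR(node, current, next)
}
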
@@ -16,7 +16,6 @@ go_library(
"//api/server/structs:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",

@@ -9,7 +9,6 @@ import (
"net/url"
"path"
"regexp"
"sort"
"strconv"

"github.com/OffchainLabs/prysm/v6/api/client"
@@ -17,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
@@ -137,24 +135,6 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
return fr.ToConsensus()
}

// GetForkSchedule retrieves all forks, past, present, and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
body, err := c.Get(ctx, getForkSchedulePath)
if err != nil {
return nil, errors.Wrap(err, "error requesting fork schedule")
}
fsr := &forkScheduleResponse{}
err = json.Unmarshal(body, fsr)
if err != nil {
return nil, err
}
ofs, err := fsr.OrderedForkSchedule()
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
}
return ofs, nil
}

// GetConfigSpec retrieves the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*structs.GetSpecResponse, error) {
body, err := c.Get(ctx, getConfigSpecPath)
@@ -334,31 +314,3 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToEx
}
return poolResponse, nil
}

type forkScheduleResponse struct {
Data []structs.Fork
}

func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
ofs := make(forks.OrderedSchedule, 0)
for _, d := range fsr.Data {
epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
}
vSlice, err := hexutil.Decode(d.CurrentVersion)
if err != nil {
return nil, err
}
if len(vSlice) != 4 {
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
}
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: primitives.Epoch(epoch),
})
}
sort.Sort(ofs)
return ofs, nil
}

@@ -41,7 +41,6 @@ go_library(
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",

@@ -11,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -101,7 +100,7 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
// via the respective epoch.
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
currentEpoch := slots.ToEpoch(blk.Block().Slot())
fork, err := forks.Fork(currentEpoch)
fork, err := params.Fork(currentEpoch)
if err != nil {
return err
}

@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"domain.go",
"signature.go",
"signing_root.go",
],
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing",
@@ -25,7 +24,6 @@ go_test(
name = "go_default_test",
srcs = [
"domain_test.go",
"signature_test.go",
"signing_root_test.go",
],
embed = [":go_default_library"],

@@ -1,34 +0,0 @@
package signing

import (
"github.com/OffchainLabs/prysm/v6/config/params"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)

var ErrNilRegistration = errors.New("nil signed registration")

// VerifyRegistrationSignature verifies the signature of a validator's registration.
func VerifyRegistrationSignature(
sr *ethpb.SignedValidatorRegistrationV1,
) error {
if sr == nil || sr.Message == nil {
return ErrNilRegistration
}

d := params.BeaconConfig().DomainApplicationBuilder
// Per spec, we want the fork version and genesis validator to be nil.
// Which is genesis value and zero by default.
sd, err := ComputeDomain(
d,
nil, /* fork version */
nil /* genesis val root */)
if err != nil {
return err
}

if err := VerifySigningRoot(sr.Message, sr.Message.Pubkey, sr.Signature, sd); err != nil {
return ErrSigFailedToVerify
}
return nil
}
@@ -1,42 +0,0 @@
package signing_test

import (
"testing"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestVerifyRegistrationSignature(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
reg := &ethpb.ValidatorRegistrationV1{
FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
GasLimit: 123456,
Timestamp: uint64(time.Now().Unix()),
Pubkey: sk.PublicKey().Marshal(),
}
d := params.BeaconConfig().DomainApplicationBuilder
domain, err := signing.ComputeDomain(d, nil, nil)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(reg, domain)
require.NoError(t, err)
sk.Sign(sr[:]).Marshal()

sReg := &ethpb.SignedValidatorRegistrationV1{
Message: reg,
Signature: sk.Sign(sr[:]).Marshal(),
}
require.NoError(t, signing.VerifyRegistrationSignature(sReg))

sReg.Signature = []byte("bad")
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrSigFailedToVerify)

sReg.Message = nil
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrNilRegistration)
}
@@ -186,6 +186,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
beacon.ConfigOptions = append([]params.Option{params.WithGenesisValidatorsRoot(genesis.ValidatorsRoot())}, beacon.ConfigOptions...)
params.BeaconConfig().ApplyOptions(beacon.ConfigOptions...)
params.BeaconConfig().InitializeForkSchedule()
params.LogDigests(params.BeaconConfig())

synchronizer := startup.NewClockSynchronizer()
beacon.clockWaiter = synchronizer

@@ -72,7 +72,6 @@ go_library(
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
"//runtime:go_default_library",
@@ -169,7 +168,6 @@ go_test(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network:go_default_library",
"//network/forks:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
@@ -179,6 +177,7 @@ go_test(
"//testing/util:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",

@@ -15,7 +15,6 @@ import (
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
@@ -274,14 +273,8 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
return errors.New("attempted to broadcast nil light client optimistic update")
}

forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest")
tracing.AnnotateError(span, err)
return err
}

if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(forkDigest)); err != nil {
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client optimistic update")
err := errors.Wrap(err, "could not publish message")
tracing.AnnotateError(span, err)
@@ -300,13 +293,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
return errors.New("attempted to broadcast nil light client finality update")
}

forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest")
tracing.AnnotateError(span, err)
return err
}

forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client finality update")
err := errors.Wrap(err, "could not publish message")

@@ -15,12 +15,13 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
testpb "github.com/OffchainLabs/prysm/v6/proto/testing"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -59,6 +60,7 @@ func TestService_Broadcast(t *testing.T) {
topic := "/eth2/%x/testing"
// Set a test gossip mapping for testpb.TestSimpleMessage.
GossipTypeMapping[reflect.TypeOf(msg)] = topic
p.clock = startup.NewClock(p.genesisTime, bytesutil.ToBytes32(p.genesisValidatorsRoot))
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic = fmt.Sprintf(topic, digest)
@@ -551,9 +553,7 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
require.NoError(t, err)

GossipTypeMapping[reflect.TypeOf(msg)] = LightClientOptimisticUpdateTopicFormat
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
require.NoError(t, err)
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, digest)
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))

// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
@@ -617,9 +617,7 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
require.NoError(t, err)

GossipTypeMapping[reflect.TypeOf(msg)] = LightClientFinalityUpdateTopicFormat
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
require.NoError(t, err)
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, digest)
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))

// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()

@@ -585,8 +585,11 @@ func (s *Service) createLocalNode(
localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)

localNode, err = addForkEntry(localNode, s.genesisTime, s.genesisValidatorsRoot)
if err != nil {
currentSlot := slots.CurrentSlot(s.genesisTime)
currentEpoch := slots.ToEpoch(currentSlot)
current := params.GetNetworkScheduleEntry(currentEpoch)
next := params.NextNetworkScheduleEntry(currentEpoch)
if err := updateENR(localNode, current, next); err != nil {
return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
}

@@ -707,7 +710,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {
// Ignore nodes that don't match our fork digest.
nodeENR := node.Record()
if s.genesisValidatorsRoot != nil {
if err := s.compareForkENR(nodeENR); err != nil {
if err := compareForkENR(s.dv5Listener.LocalNode().Node().Record(), nodeENR); err != nil {
log.WithError(err).Trace("Fork ENR mismatches between peer and local node")
return false
}

@@ -3,12 +3,9 @@ package p2p
import (
"bytes"
"fmt"
"time"

"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
@@ -16,6 +13,8 @@ import (
"github.com/sirupsen/logrus"
)

var errEth2ENRDigestMismatch = errors.New("fork digest of peer does not match local value")

// ENR key used for Ethereum consensus-related fork data.
var eth2ENRKey = params.BeaconNetworkConfig().ETH2Key

@@ -25,29 +24,31 @@ func (s *Service) currentForkDigest() ([4]byte, error) {
if !s.isInitialized() {
return [4]byte{}, errors.New("state is not initialized")
}
return forks.CreateForkDigest(s.genesisTime, s.genesisValidatorsRoot)

currentSlot := slots.CurrentSlot(s.genesisTime)
currentEpoch := slots.ToEpoch(currentSlot)
return params.ForkDigest(currentEpoch), nil
}

// Compares fork ENRs between an incoming peer's record and our node's
// local record values for current and next fork version/epoch.
func (s *Service) compareForkENR(record *enr.Record) error {
currentRecord := s.dv5Listener.LocalNode().Node().Record()
peerForkENR, err := forkEntry(record)
func compareForkENR(self, peer *enr.Record) error {
peerForkENR, err := forkEntry(peer)
if err != nil {
return err
}
currentForkENR, err := forkEntry(currentRecord)
currentForkENR, err := forkEntry(self)
if err != nil {
return err
}
enrString, err := SerializeENR(record)
enrString, err := SerializeENR(peer)
if err != nil {
return err
}
// Clients SHOULD connect to peers with current_fork_digest, next_fork_version,
// and next_fork_epoch that match local values.
if !bytes.Equal(peerForkENR.CurrentForkDigest, currentForkENR.CurrentForkDigest) {
return fmt.Errorf(
return errors.Wrapf(errEth2ENRDigestMismatch,
"fork digest of peer with ENR %s: %v, does not match local value: %v",
enrString,
peerForkENR.CurrentForkDigest,
@@ -74,41 +75,24 @@ func (s *Service) compareForkENR(record *enr.Record) error {
return nil
}

// Adds a fork entry as an ENR record under the Ethereum consensus EnrKey for
// the local node. The fork entry is an ssz-encoded enrForkID type
// which takes into account the current fork version from the current
// epoch to create a fork digest, the next fork version,
// and the next fork epoch.
func addForkEntry(
node *enode.LocalNode,
genesisTime time.Time,
genesisValidatorsRoot []byte,
) (*enode.LocalNode, error) {
digest, err := forks.CreateForkDigest(genesisTime, genesisValidatorsRoot)
if err != nil {
return nil, err
}
currentSlot := slots.CurrentSlot(genesisTime)
currentEpoch := slots.ToEpoch(currentSlot)
if prysmTime.Now().Before(genesisTime) {
currentEpoch = 0
}
nextForkVersion, nextForkEpoch, err := forks.NextForkData(currentEpoch)
if err != nil {
return nil, err
}
func updateENR(node *enode.LocalNode, entry, next params.NetworkScheduleEntry) error {
enrForkID := &pb.ENRForkID{
CurrentForkDigest: digest[:],
NextForkVersion: nextForkVersion[:],
NextForkEpoch: nextForkEpoch,
CurrentForkDigest: entry.ForkDigest[:],
NextForkVersion: next.ForkVersion[:],
NextForkEpoch: next.Epoch,
}
log.
WithField("CurrentForkDigest", fmt.Sprintf("%#x", enrForkID.CurrentForkDigest)).
WithField("NextForkVersion", fmt.Sprintf("%#x", enrForkID.NextForkVersion)).
WithField("NextForkEpoch", fmt.Sprintf("%d", enrForkID.NextForkEpoch)).
Info("Updating ENR Fork ID")
enc, err := enrForkID.MarshalSSZ()
if err != nil {
return nil, err
return err
}
forkEntry := enr.WithEntry(eth2ENRKey, enc)
node.Set(forkEntry)
return node, nil
return nil
}

// Retrieves an enrForkID from an ENR record by key lookup

@@ -8,254 +8,121 @@ import (
"testing"
"time"

mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
ma "github.com/multiformats/go-multiaddr"
"github.com/sirupsen/logrus"
logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
const port = 2000

ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, fieldparams.RootLength)
db := testDB.SetupDB(t)

s := &Service{
cfg: &Config{
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
DB: db,
},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
custodyInfo: &custodyInfo{},
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()

// Allow bootnode's table to have its initial refresh. This allows
// inbound nodes to be added in.
time.Sleep(5 * time.Second)

bootNode := bootListener.Self()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
DB: db,
}

var listeners []*listenerWrapper
for i := 1; i <= 5; i++ {
port := 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)

// We give every peer a different genesis validators root, which
// will cause each peer to have a different ForkDigest, preventing
// them from connecting according to our discovery rules for Ethereum consensus.
root := make([]byte, 32)
copy(root, strconv.Itoa(port))
s = &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: root,
custodyInfo: &custodyInfo{},
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")
listeners = append(listeners, listener)
}
defer func() {
// Close down all peers.
for _, listener := range listeners {
listener.Close()
}
}()

// Wait for the nodes to have their local routing tables to be populated with the other nodes
time.Sleep(discoveryWaitTime)

lastListener := listeners[len(listeners)-1]
nodes := lastListener.Lookup(bootNode.ID())
if len(nodes) < 4 {
t.Errorf("The node's local table doesn't have the expected number of nodes. "+
"Expected more than or equal to %d but got %d", 4, len(nodes))
}

// Now, we start a new p2p service. It should have no peers aside from the
// bootnode given all nodes provided by discv5 will have different fork digests.
cfg.UDPPort = 14000
cfg.TCPPort = 14001
cfg.MaxPeers = 30
s, err = NewService(t.Context(), cfg)
require.NoError(t, err)
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener

addrs := make([]ma.Multiaddr, 0)

for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
require.NoError(t, err)
addrs = append(addrs, nodeAddrs...)
}
}

// We should not have valid peers if the fork digest mismatched.
assert.Equal(t, 0, len(addrs), "Expected 0 valid peers")
require.NoError(t, s.Stop())
}

func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
const port = 2000

func TestCompareForkENR(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
params.BeaconConfig().InitializeForkSchedule()
logrus.SetLevel(logrus.TraceLevel)
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
db := testDB.SetupDB(t)

s := &Service{
cfg: &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true, DB: db},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
custodyInfo: &custodyInfo{},
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()
db, err := enode.OpenDB("")
assert.NoError(t, err)
_, k := createAddrAndPrivKey(t)
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
self := enode.NewLocalNode(db, k)
require.NoError(t, updateENR(self, current, next))

// Allow bootnode's table to have its initial refresh. This allows
// inbound nodes to be added in.
time.Sleep(5 * time.Second)

bootNode := bootListener.Self()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
PingInterval: testPingInterval,
DisableLivenessCheck: true,
DB: db,
cases := []struct {
name string
expectErr error
expectLog string
node func(t *testing.T) *enode.Node
}{
{
name: "match",
node: func(t *testing.T) *enode.Node {
// Create a peer with the same current fork digest and next fork version/epoch.
peer := enode.NewLocalNode(db, k)
require.NoError(t, updateENR(peer, current, next))
return peer.Node()
},
},
{
name: "current digest mismatch",
node: func(t *testing.T) *enode.Node {
// Create a peer with the same current fork digest and next fork version/epoch.
peer := enode.NewLocalNode(db, k)
testDigest := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
require.NotEqual(t, current.ForkDigest, testDigest, "ensure test fork digest is unique")
currentCopy := current
currentCopy.ForkDigest = testDigest
require.NoError(t, updateENR(peer, currentCopy, next))
return peer.Node()
},
expectErr: errEth2ENRDigestMismatch,
},
{
name: "next fork version mismatch",
node: func(t *testing.T) *enode.Node {
// Create a peer with the same current fork digest and next fork version/epoch.
peer := enode.NewLocalNode(db, k)
testVersion := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
require.NotEqual(t, next.ForkVersion, testVersion, "ensure test fork version is unique")
nextCopy := next
nextCopy.ForkVersion = testVersion
require.NoError(t, updateENR(peer, current, nextCopy))
return peer.Node()
},
expectLog: "Peer matches fork digest but has different next fork version",
},
{
name: "next fork epoch mismatch",
node: func(t *testing.T) *enode.Node {
// Create a peer with the same current fork digest and next fork version/epoch.
peer := enode.NewLocalNode(db, k)
nextCopy := next
nextCopy.Epoch = nextCopy.Epoch + 1
require.NoError(t, updateENR(peer, current, nextCopy))
return peer.Node()
},
expectLog: "Peer matches fork digest but has different next fork epoch",
},
}

var listeners []*listenerWrapper
for i := 1; i <= 5; i++ {
port := 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)
c := params.BeaconConfig().Copy()
nextForkEpoch := primitives.Epoch(i)
c.ForkVersionSchedule[[4]byte{'A', 'B', 'C', 'D'}] = nextForkEpoch
params.OverrideBeaconConfig(c)

// We give every peer a different genesis validators root, which
// will cause each peer to have a different ForkDigest, preventing
// them from connecting according to our discovery rules for Ethereum consensus.
s = &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
custodyInfo: &custodyInfo{},
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")
listeners = append(listeners, listener)
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
hook := logTest.NewGlobal()
peer := c.node(t)
err := compareForkENR(self.Node().Record(), peer.Record())
if c.expectErr != nil {
require.ErrorIs(t, err, c.expectErr, "Expected error to match")
} else {
require.NoError(t, err, "Expected no error comparing fork ENRs")
}
if c.expectLog != "" {
require.LogsContain(t, hook, c.expectLog, "Expected log message not found")
}
})
}
defer func() {
// Close down all peers.
for _, listener := range listeners {
listener.Close()
}
}()

// Wait for the nodes to have their local routing tables to be populated with the other nodes
time.Sleep(discoveryWaitTime)

lastListener := listeners[len(listeners)-1]
nodes := lastListener.Lookup(bootNode.ID())
if len(nodes) < 4 {
t.Errorf("The node's local table doesn't have the expected number of nodes. "+
"Expected more than or equal to %d but got %d", 4, len(nodes))
}

// Now, we start a new p2p service. It should have no peers aside from the
// bootnode given all nodes provided by discv5 will have different fork digests.
cfg.UDPPort = 14000
cfg.TCPPort = 14001
cfg.MaxPeers = 30
cfg.StateNotifier = &mock.MockStateNotifier{}
s, err = NewService(t.Context(), cfg)
require.NoError(t, err)

s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
addrs := make([]ma.Multiaddr, 0, len(nodes))

for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
require.NoError(t, err)
addrs = append(addrs, nodeAddrs...)
}
}
if len(addrs) == 0 {
t.Error("Expected to have valid peers, got 0")
}

require.LogsContain(t, hook, "Peer matches fork digest but has different next fork epoch")
require.NoError(t, s.Stop())
}

func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
c.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): 0,
{0, 0, 0, 1}: 1,
}
nextForkEpoch := primitives.Epoch(1)
nextForkVersion := []byte{0, 0, 0, 1}
params.OverrideBeaconConfig(c)
params.BeaconConfig().InitializeForkSchedule()

genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
digest, err := forks.CreateForkDigest(genesisTime, make([]byte, 32))
require.NoError(t, err)
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
enrForkID := &pb.ENRForkID{
CurrentForkDigest: digest[:],
NextForkVersion: nextForkVersion,
NextForkEpoch: nextForkEpoch,
CurrentForkDigest: current.ForkDigest[:],
NextForkVersion: next.ForkVersion[:],
NextForkEpoch: next.Epoch,
}
enc, err := enrForkID.MarshalSSZ()
require.NoError(t, err)
entry := enr.WithEntry(eth2ENRKey, enc)
// In epoch 1 of current time, the fork version should be
// {0, 0, 0, 1} according to the configuration override above.
temp := t.TempDir()
randNum := rand.Int()
tempPath := path.Join(temp, strconv.Itoa(randNum))
@@ -267,18 +134,16 @@ func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
localNode := enode.NewLocalNode(db, pkey)
localNode.Set(entry)

want, err := signing.ComputeForkDigest([]byte{0, 0, 0, 0}, genesisValidatorsRoot)
require.NoError(t, err)

resp, err := forkEntry(localNode.Node().Record())
require.NoError(t, err)
assert.DeepEqual(t, want[:], resp.CurrentForkDigest)
assert.DeepEqual(t, nextForkVersion, resp.NextForkVersion)
assert.Equal(t, nextForkEpoch, resp.NextForkEpoch, "Unexpected next fork epoch")
assert.Equal(t, hexutil.Encode(current.ForkDigest[:]), hexutil.Encode(resp.CurrentForkDigest))
assert.Equal(t, hexutil.Encode(next.ForkVersion[:]), hexutil.Encode(resp.NextForkVersion))
assert.Equal(t, next.Epoch, resp.NextForkEpoch, "Unexpected next fork epoch")
}

func TestAddForkEntry_Genesis(t *testing.T) {
func TestAddForkEntry_NextForkVersion(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
temp := t.TempDir()
randNum := rand.Int()
tempPath := path.Join(temp, strconv.Itoa(randNum))
@@ -288,17 +153,27 @@ func TestAddForkEntry_Genesis(t *testing.T) {
db, err := enode.OpenDB("")
require.NoError(t, err)

bCfg := params.MainnetConfig()
bCfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{}
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)] = bCfg.GenesisEpoch
params.OverrideBeaconConfig(bCfg)

localNode := enode.NewLocalNode(db, pkey)
localNode, err = addForkEntry(localNode, time.Now().Add(10*time.Second), bytesutil.PadTo([]byte{'A', 'B', 'C', 'D'}, 32))
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
// Add the fork entry to the local node's ENR.
require.NoError(t, updateENR(localNode, current, next))
fe, err := forkEntry(localNode.Node().Record())
require.NoError(t, err)
forkEntry, err := forkEntry(localNode.Node().Record())
require.NoError(t, err)
assert.DeepEqual(t,
params.BeaconConfig().GenesisForkVersion, forkEntry.NextForkVersion,
assert.Equal(t,
hexutil.Encode(params.BeaconConfig().AltairForkVersion), hexutil.Encode(fe.NextForkVersion),
"Wanted Next Fork Version to be equal to genesis fork version")

last := params.LastForkEpoch()
current = params.GetNetworkScheduleEntry(last)
next = params.NextNetworkScheduleEntry(last)
require.NoError(t, updateENR(localNode, current, next))
entry := params.NextNetworkScheduleEntry(last)
fe, err = forkEntry(localNode.Node().Record())
require.NoError(t, err)
assert.Equal(t,
hexutil.Encode(entry.ForkVersion[:]), hexutil.Encode(fe.NextForkVersion),
"Wanted Next Fork Version to be equal to last entry in schedule")

}

@@ -9,27 +9,26 @@ import (
// updates the node's discovery service to reflect any new fork version
// changes.
func (s *Service) forkWatcher() {
// Exit early if discovery is disabled - there's no ENR to update
if s.dv5Listener == nil {
log.Debug("Discovery disabled, exiting fork watcher")
return
}

slotTicker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
var scheduleEntry params.NetworkScheduleEntry
for {
select {
case currSlot := <-slotTicker.C():
currEpoch := slots.ToEpoch(currSlot)
if currEpoch == params.BeaconConfig().AltairForkEpoch ||
currEpoch == params.BeaconConfig().BellatrixForkEpoch ||
currEpoch == params.BeaconConfig().CapellaForkEpoch ||
currEpoch == params.BeaconConfig().DenebForkEpoch ||
currEpoch == params.BeaconConfig().ElectraForkEpoch ||
currEpoch == params.BeaconConfig().FuluForkEpoch {
// If we are in the fork epoch, we update our enr with
// the updated fork digest. These repeatedly does
// this over the epoch, which might be slightly wasteful
// but is fine nonetheless.
if s.dv5Listener != nil { // make sure it's not a local network
_, err := addForkEntry(s.dv5Listener.LocalNode(), s.genesisTime, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not add fork entry")
}
currentEpoch := slots.ToEpoch(currSlot)
newEntry := params.GetNetworkScheduleEntry(currentEpoch)
if newEntry.ForkDigest != scheduleEntry.ForkDigest {
nextEntry := params.NextNetworkScheduleEntry(currentEpoch)
if err := updateENR(s.dv5Listener.LocalNode(), newEntry, nextEntry); err != nil {
log.WithFields(newEntry.LogFields()).WithError(err).Error("Could not add fork entry")
continue // don't replace scheduleEntry until this succeeds
}
scheduleEntry = newEntry
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")

@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
"github.com/OffchainLabs/prysm/v6/network/forks"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
)

@@ -39,7 +38,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
copy(msg, "invalid")
return bytesutil.UnsafeCastToString(msg)
}
_, fEpoch, err := forks.RetrieveForkDataFromDigest(digest, genesisValidatorsRoot)
_, fEpoch, err := params.ForkDataFromDigest(digest)
if err != nil {
// Impossible condition that should
// never be hit.

@@ -7,10 +7,10 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/golang/snappy"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
@@ -18,28 +18,27 @@ import (

func TestMsgID_HashesCorrectly(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
d, err := forks.CreateForkDigest(time.Now(), genesisValidatorsRoot)
assert.NoError(t, err)
clock := startup.NewClock(time.Now(), bytesutil.ToBytes32([]byte{'A'}))
valRoot := clock.GenesisValidatorsRoot()
d := params.ForkDigest(clock.CurrentEpoch())
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
invalidSnappy := [32]byte{'J', 'U', 'N', 'K'}
pMsg := &pubsubpb.Message{Data: invalidSnappy[:], Topic: &tpc}
hashedData := hash.Hash(append(params.BeaconConfig().MessageDomainInvalidSnappy[:], pMsg.Data...))
msgID := string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(valRoot[:], pMsg), "Got incorrect msg id")

validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
enc := snappy.Encode(nil, validObj[:])
nMsg := &pubsubpb.Message{Data: enc, Topic: &tpc}
hashedData = hash.Hash(append(params.BeaconConfig().MessageDomainValidSnappy[:], validObj[:]...))
msgID = string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(valRoot[:], nMsg), "Got incorrect msg id")
}

func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
d, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, genesisValidatorsRoot)
d, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
assert.NoError(t, err)
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
topicLen := uint64(len(tpc))
@@ -52,7 +51,7 @@ func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
combinedObj = append(combinedObj, pMsg.Data...)
hashedData := hash.Hash(combinedObj)
msgID := string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], pMsg), "Got incorrect msg id")

validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
enc := snappy.Encode(nil, validObj[:])
@@ -63,13 +62,12 @@ func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
combinedObj = append(combinedObj, validObj[:]...)
hashedData = hash.Hash(combinedObj)
msgID = string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], nMsg), "Got incorrect msg id")
}

func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
d, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, genesisValidatorsRoot)
d, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
assert.NoError(t, err)
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
topicLen := uint64(len(tpc))
@@ -82,7 +80,7 @@ func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
combinedObj = append(combinedObj, pMsg.Data...)
hashedData := hash.Hash(combinedObj)
msgID := string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], pMsg), "Got incorrect msg id")

validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
enc := snappy.Encode(nil, validObj[:])
@@ -93,7 +91,7 @@ func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
combinedObj = append(combinedObj, validObj[:]...)
hashedData = hash.Hash(combinedObj)
msgID = string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], nMsg), "Got incorrect msg id")
}

func TestMsgID_WithNilTopic(t *testing.T) {

@@ -40,7 +40,7 @@ const (
rSubD = 8 // random gossip target
)

var errInvalidTopic = errors.New("invalid topic format")
var ErrInvalidTopic = errors.New("invalid topic format")

// Specifies the fixed size context length.
const digestLength = 4
@@ -219,12 +219,12 @@ func convertTopicScores(topicMap map[string]*pubsub.TopicScoreSnapshot) map[stri
func ExtractGossipDigest(topic string) ([4]byte, error) {
// Ensure the topic prefix is correct.
if len(topic) < len(gossipTopicPrefix)+1 || topic[:len(gossipTopicPrefix)] != gossipTopicPrefix {
return [4]byte{}, errInvalidTopic
return [4]byte{}, ErrInvalidTopic
}
start := len(gossipTopicPrefix)
end := strings.Index(topic[start:], "/")
if end == -1 { // Ensure a topic suffix exists.
return [4]byte{}, errInvalidTopic
return [4]byte{}, ErrInvalidTopic
}
end += start
strDigest := topic[start:end]

@@ -1,12 +1,12 @@
package p2p

import (
"encoding/hex"
"fmt"
"strings"

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/network/forks"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
@@ -32,6 +32,14 @@ var _ pubsub.SubscriptionFilter = (*Service)(nil)
// (Note: BlobSidecar is not included in this list since it is superseded by DataColumnSidecar)
const pubsubSubscriptionRequestLimit = 500

func (s *Service) setAllForkDigests() {
entries := params.SortedNetworkScheduleEntries()
s.allForkDigests = make(map[[4]byte]struct{}, len(entries))
for _, entry := range entries {
s.allForkDigests[entry.ForkDigest] = struct{}{}
}
}

// CanSubscribe returns true if the topic is of interest and we could subscribe to it.
func (s *Service) CanSubscribe(topic string) bool {
if !s.isInitialized() {
@@ -48,50 +56,18 @@ func (s *Service) CanSubscribe(topic string) bool {
if parts[1] != "eth2" {
return false
}
phase0ForkDigest, err := s.currentForkDigest()

var digest [4]byte
dl, err := hex.Decode(digest[:], []byte(parts[2]))
if err == nil && dl != 4 {
err = fmt.Errorf("expected 4 bytes, got %d", dl)
}
if err != nil {
log.WithError(err).Error("Could not determine fork digest")
log.WithError(err).WithField("topic", topic).WithField("digest", parts[2]).Error("CanSubscribe failed to parse message")
return false
}
altairForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine altair fork digest")
return false
}
bellatrixForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Bellatrix fork digest")
return false
}
capellaForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Capella fork digest")
return false
}
denebForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Deneb fork digest")
return false
}
electraForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Electra fork digest")
return false
}
fuluForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().FuluForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Fulu fork digest")
return false
}
switch parts[2] {
case fmt.Sprintf("%x", phase0ForkDigest):
case fmt.Sprintf("%x", altairForkDigest):
case fmt.Sprintf("%x", bellatrixForkDigest):
case fmt.Sprintf("%x", capellaForkDigest):
case fmt.Sprintf("%x", denebForkDigest):
case fmt.Sprintf("%x", electraForkDigest):
case fmt.Sprintf("%x", fuluForkDigest):
default:
if _, ok := s.allForkDigests[digest]; !ok {
log.WithField("topic", topic).WithField("digest", fmt.Sprintf("%#x", digest)).Error("CanSubscribe failed to find digest in allForkDigests")
return false
}

@@ -12,8 +12,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
@@ -22,12 +20,11 @@ import (

func TestService_CanSubscribe(t *testing.T) {
params.SetupTestConfigCleanup(t)
currentFork := [4]byte{0x01, 0x02, 0x03, 0x04}
params.BeaconConfig().InitializeForkSchedule()
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
genesisTime := time.Now()
var valRoot [32]byte
digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
assert.NoError(t, err)
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
currentFork := params.GetNetworkScheduleEntry(clock.CurrentEpoch()).ForkDigest
digest := params.ForkDigest(clock.CurrentEpoch())
type test struct {
name string
topic string
@@ -109,12 +106,14 @@ func TestService_CanSubscribe(t *testing.T) {
}
tests = append(tests, tt)
}
valRoot := clock.GenesisValidatorsRoot()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
genesisValidatorsRoot: valRoot[:],
genesisTime: genesisTime,
genesisTime: clock.GenesisTime(),
}
s.setAllForkDigests()
if got := s.CanSubscribe(tt.topic); got != tt.want {
t.Errorf("CanSubscribe(%s) = %v, want %v", tt.topic, got, tt.want)
}
@@ -220,11 +219,10 @@ func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testi

func TestService_FilterIncomingSubscriptions(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
digest := params.ForkDigest(clock.CurrentEpoch())
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
genesisTime := time.Now()
var valRoot [32]byte
digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
assert.NoError(t, err)
type args struct {
id peer.ID
subs []*pubsubpb.RPC_SubOpts
@@ -321,12 +319,14 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
},
},
}
valRoot := clock.GenesisValidatorsRoot()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
genesisValidatorsRoot: valRoot[:],
genesisTime: genesisTime,
genesisTime: clock.GenesisTime(),
}
s.setAllForkDigests()
got, err := s.FilterIncomingSubscriptions(tt.args.id, tt.args.subs)
if (err != nil) != tt.wantErr {
t.Errorf("FilterIncomingSubscriptions() error = %v, wantErr %v", err, tt.wantErr)

@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -63,42 +64,42 @@ var (
)

// Service for managing peer to peer (p2p) networking.
type (
Service struct {
started bool
isPreGenesis bool
pingMethod func(ctx context.Context, id peer.ID) error
pingMethodLock sync.RWMutex
cancel context.CancelFunc
cfg *Config
peers *peers.Status
addrFilter *multiaddr.Filters
ipLimiter *leakybucket.Collector
privKey *ecdsa.PrivateKey
metaData metadata.Metadata
pubsub *pubsub.PubSub
joinedTopics map[string]*pubsub.Topic
joinedTopicsLock sync.RWMutex
subnetsLock map[uint64]*sync.RWMutex
subnetsLockLock sync.Mutex // Lock access to subnetsLock
initializationLock sync.Mutex
dv5Listener ListenerRebooter
startupErr error
ctx context.Context
host host.Host
genesisTime time.Time
genesisValidatorsRoot []byte
activeValidatorCount uint64
peerDisconnectionTime *cache.Cache
custodyInfo *custodyInfo
custodyInfoLock sync.RWMutex // Lock access to custodyInfo
}
type Service struct {
started bool
isPreGenesis bool
pingMethod func(ctx context.Context, id peer.ID) error
pingMethodLock sync.RWMutex
cancel context.CancelFunc
cfg *Config
peers *peers.Status
addrFilter *multiaddr.Filters
ipLimiter *leakybucket.Collector
privKey *ecdsa.PrivateKey
metaData metadata.Metadata
pubsub *pubsub.PubSub
joinedTopics map[string]*pubsub.Topic
joinedTopicsLock sync.RWMutex
subnetsLock map[uint64]*sync.RWMutex
subnetsLockLock sync.Mutex // Lock access to subnetsLock
initializationLock sync.Mutex
dv5Listener ListenerRebooter
startupErr error
ctx context.Context
host host.Host
genesisTime time.Time
genesisValidatorsRoot []byte
activeValidatorCount uint64
peerDisconnectionTime *cache.Cache
custodyInfo *custodyInfo
custodyInfoLock sync.RWMutex // Lock access to custodyInfo
clock *startup.Clock
allForkDigests map[[4]byte]struct{}
}

custodyInfo struct {
earliestAvailableSlot primitives.Slot
groupCount uint64
}
)
type custodyInfo struct {
earliestAvailableSlot primitives.Slot
groupCount uint64
}

// NewService initializes a new p2p service compatible with shared.Service interface. No
// connections are made until the Start function is called during the service registry startup.
@@ -202,6 +203,7 @@ func (s *Service) Start() {
// Waits until the state is initialized via an event feed.
// Used for fork-related data when connecting peers.
s.awaitStateInitialized()
s.setAllForkDigests()
s.isPreGenesis = false

var relayNodes []string
@@ -455,7 +457,7 @@ func (s *Service) awaitStateInitialized() {
s.genesisTime = clock.GenesisTime()
gvr := clock.GenesisValidatorsRoot()
s.genesisValidatorsRoot = gvr[:]
_, err = s.currentForkDigest() // initialize fork digest cache
_, err = s.currentForkDigest()
if err != nil {
log.WithError(err).Error("Could not initialize fork digest")
}

@@ -16,8 +16,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v6/time"
|
||||
@@ -346,14 +344,16 @@ func TestPeer_Disconnect(t *testing.T) {
|
||||
|
||||
func TestService_JoinLeaveTopic(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
|
||||
defer cancel()
|
||||
gs := startup.NewClockSynchronizer()
|
||||
s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}, ClockWaiter: gs, DB: testDB.SetupDB(t)})
|
||||
require.NoError(t, err)
|
||||
|
||||
go s.awaitStateInitialized()
|
||||
fd := initializeStateWithForkDigest(ctx, t, gs)
|
||||
s.setAllForkDigests()
|
||||
s.awaitStateInitialized()
|
||||
|
||||
assert.Equal(t, 0, len(s.joinedTopics))
|
||||
|
||||
@@ -382,15 +382,13 @@ func TestService_JoinLeaveTopic(t *testing.T) {
|
||||
// digest associated with that genesis event.
|
||||
func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.ClockSetter) [4]byte {
|
||||
gt := prysmTime.Now()
|
||||
gvr := bytesutil.ToBytes32(bytesutil.PadTo([]byte("genesis validators root"), 32))
|
||||
require.NoError(t, gs.SetClock(startup.NewClock(gt, gvr)))
|
||||
|
||||
fd, err := forks.CreateForkDigest(gt, gvr[:])
|
||||
require.NoError(t, err)
|
||||
gvr := params.BeaconConfig().GenesisValidatorsRoot
|
||||
clock := startup.NewClock(gt, gvr)
|
||||
require.NoError(t, gs.SetClock(clock))
|
||||
|
||||
time.Sleep(50 * time.Millisecond) // wait for pubsub filter to initialize.
|
||||
|
||||
return fd
|
||||
return params.ForkDigest(clock.CurrentEpoch())
|
||||
}
|
||||
|
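The rewritten test helper above shows the new lookup path in miniature: instead of recomputing a digest from genesis time and validators root (forks.CreateForkDigest), callers resolve the clock's current epoch against the schedule. A hedged sketch of that pattern, assuming the signatures used in this diff, namely startup.NewClock(time.Time, [32]byte) and params.ForkDigest(epoch) returning [4]byte:

package example

import (
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/config/params"
)

// currentDigest resolves the fork digest for "now" from the precomputed
// schedule instead of recomputing it from genesis data.
func currentDigest(genesis time.Time) [4]byte {
	clock := startup.NewClock(genesis, params.BeaconConfig().GenesisValidatorsRoot)
	return params.ForkDigest(clock.CurrentEpoch())
}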
func TestService_connectWithPeer(t *testing.T) {

@@ -3,7 +3,6 @@ package p2p
import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"testing"
	"time"

@@ -36,17 +35,8 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
	// find and connect to a node already subscribed to a specific subnet.
	// In our case: The node i is subscribed to subnet i, with i = 1, 2, 3

	// Define the genesis validators root, to ensure everybody is on the same network.
	const (
		genesisValidatorRootStr = "0xdeadbeefcafecafedeadbeefcafecafedeadbeefcafecafedeadbeefcafecafe"
		subnetCount = 3
		minimumPeersPerSubnet = 1
	)

	genesisValidatorsRoot, err := hex.DecodeString(genesisValidatorRootStr[2:])
	require.NoError(t, err)

	// Create a context.
	const subnetCount = 3
	const minimumPeersPerSubnet = 1
	ctx := t.Context()

	// Use shorter period for testing.

@@ -58,6 +48,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {

	// Create flags.
	params.SetupTestConfigCleanup(t)
	params.BeaconConfig().InitializeForkSchedule()
	gFlags := new(flags.GlobalFlags)
	gFlags.MinimumPeersPerSubnet = 1
	flags.Init(gFlags)

@@ -74,7 +65,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
	bootNodeService := &Service{
		cfg: &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000, DisableLivenessCheck: true, PingInterval: testPingInterval},
		genesisTime: genesisTime,
		genesisValidatorsRoot: genesisValidatorsRoot,
		genesisValidatorsRoot: params.BeaconConfig().GenesisValidatorsRoot[:],
		custodyInfo: &custodyInfo{},
	}

@@ -111,7 +102,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
	require.NoError(t, err)

	service.genesisTime = genesisTime
	service.genesisValidatorsRoot = genesisValidatorsRoot
	service.genesisValidatorsRoot = params.BeaconConfig().GenesisValidatorsRoot[:]
	service.custodyInfo = &custodyInfo{}

	nodeForkDigest, err := service.currentForkDigest()

@@ -158,11 +149,11 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
		DB: db,
	}

	service, err := NewService(ctx, cfg)
	service, err := NewService(t.Context(), cfg)
	require.NoError(t, err)

	service.genesisTime = genesisTime
	service.genesisValidatorsRoot = genesisValidatorsRoot
	service.genesisValidatorsRoot = params.BeaconConfig().GenesisValidatorsRoot[:]
	service.custodyInfo = &custodyInfo{}

	service.Start()

@@ -9,7 +9,6 @@ go_library(
	"//api/server/structs:go_default_library",
	"//config/params:go_default_library",
	"//monitoring/tracing/trace:go_default_library",
	"//network/forks:go_default_library",
	"//network/httputil:go_default_library",
	"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
	"@com_github_sirupsen_logrus//:go_default_library",

@@ -24,8 +23,6 @@ go_test(
	"//api/server/structs:go_default_library",
	"//config/params:go_default_library",
	"//consensus-types/primitives:go_default_library",
	"//encoding/bytesutil:go_default_library",
	"//network/forks:go_default_library",
	"//testing/assert:go_default_library",
	"//testing/require:go_default_library",
	"@com_github_ethereum_go_ethereum//common:go_default_library",

@@ -11,7 +11,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/api/server/structs"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	"github.com/OffchainLabs/prysm/v6/network/httputil"
	"github.com/ethereum/go-ethereum/common/hexutil"
	log "github.com/sirupsen/logrus"

@@ -35,34 +34,25 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
	_, span := trace.StartSpan(r.Context(), "config.GetForkSchedule")
	defer span.End()

	schedule := params.BeaconConfig().ForkVersionSchedule
	schedule := params.SortedForkSchedule()
	data := make([]*structs.Fork, 0, len(schedule))
	if len(schedule) == 0 {
		httputil.WriteJson(w, &structs.GetForkScheduleResponse{
			Data: make([]*structs.Fork, 0),
			Data: data,
		})
		return
	}

	versions := forks.SortedForkVersions(schedule)
	chainForks := make([]*structs.Fork, len(schedule))
	var previous, current []byte
	for i, v := range versions {
		if i == 0 {
			previous = params.BeaconConfig().GenesisForkVersion
		} else {
			previous = current
		}
		copyV := v
		current = copyV[:]
		chainForks[i] = &structs.Fork{
			PreviousVersion: hexutil.Encode(previous),
			CurrentVersion: hexutil.Encode(current),
			Epoch: fmt.Sprintf("%d", schedule[v]),
		}
	previous := schedule[0]
	for _, entry := range schedule {
		data = append(data, &structs.Fork{
			PreviousVersion: hexutil.Encode(previous.ForkVersion[:]),
			CurrentVersion: hexutil.Encode(entry.ForkVersion[:]),
			Epoch: fmt.Sprintf("%d", entry.Epoch),
		})
		previous = entry
	}

	httputil.WriteJson(w, &structs.GetForkScheduleResponse{
		Data: chainForks,
		Data: data,
	})
}

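The rewritten handler above replaces the sort-then-index dance with a single pass over an already-sorted schedule, pairing each entry with its predecessor; the first entry pairs with itself, which covers genesis. A standalone, runnable sketch of just that pairing logic; Entry and Fork here are stand-ins modeling only the fields the handler touches, not the real params/structs types:

package main

import "fmt"

type Entry struct {
	ForkVersion [4]byte
	Epoch       uint64
}

type Fork struct {
	Previous, Current [4]byte
	Epoch             uint64
}

// pair walks a sorted schedule once; the first entry's previous version is
// its own version, mirroring the handler's `previous := schedule[0]` seed.
func pair(schedule []Entry) []Fork {
	out := make([]Fork, 0, len(schedule))
	if len(schedule) == 0 {
		return out
	}
	previous := schedule[0]
	for _, entry := range schedule {
		out = append(out, Fork{Previous: previous.ForkVersion, Current: entry.ForkVersion, Epoch: entry.Epoch})
		previous = entry
	}
	return out
}

func main() {
	s := []Entry{{[4]byte{0}, 0}, {[4]byte{1}, 100}, {[4]byte{2}, 200}}
	for _, f := range pair(s) {
		fmt.Printf("prev=%x cur=%x epoch=%d\n", f.Previous, f.Current, f.Epoch)
	}
}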
@@ -13,8 +13,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/api/server/structs"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/ethereum/go-ethereum/common"

@@ -592,43 +590,34 @@ func TestGetSpec(t *testing.T) {

func TestForkSchedule_Ok(t *testing.T) {
	t.Run("ok", func(t *testing.T) {
		genesisForkVersion := []byte("Genesis")
		firstForkVersion, firstForkEpoch := []byte("Firs"), primitives.Epoch(100)
		secondForkVersion, secondForkEpoch := []byte("Seco"), primitives.Epoch(200)
		thirdForkVersion, thirdForkEpoch := []byte("Thir"), primitives.Epoch(300)

		params.SetupTestConfigCleanup(t)
		config := params.BeaconConfig().Copy()
		config.GenesisForkVersion = genesisForkVersion
		// Create fork schedule adding keys in non-sorted order.
		schedule := make(map[[4]byte]primitives.Epoch, 3)
		schedule[bytesutil.ToBytes4(secondForkVersion)] = secondForkEpoch
		schedule[bytesutil.ToBytes4(firstForkVersion)] = firstForkEpoch
		schedule[bytesutil.ToBytes4(thirdForkVersion)] = thirdForkEpoch
		config.ForkVersionSchedule = schedule
		params.OverrideBeaconConfig(config)
		config.InitializeForkSchedule()

		request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/fork_schedule", nil)
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}

		genesisStr, firstStr, secondStr := hexutil.Encode(config.GenesisForkVersion), hexutil.Encode(config.AltairForkVersion), hexutil.Encode(config.BellatrixForkVersion)
		GetForkSchedule(writer, request)
		require.Equal(t, http.StatusOK, writer.Code)
		resp := &structs.GetForkScheduleResponse{}
		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
		require.Equal(t, 3, len(resp.Data))
		schedule := params.SortedForkSchedule()
		require.Equal(t, len(schedule), len(resp.Data))
		fork := resp.Data[0]
		assert.DeepEqual(t, hexutil.Encode(genesisForkVersion), fork.PreviousVersion)
		assert.DeepEqual(t, hexutil.Encode(firstForkVersion), fork.CurrentVersion)
		assert.Equal(t, fmt.Sprintf("%d", firstForkEpoch), fork.Epoch)
		assert.Equal(t, genesisStr, fork.PreviousVersion)
		assert.Equal(t, genesisStr, fork.CurrentVersion)
		assert.Equal(t, fmt.Sprintf("%d", config.GenesisEpoch), fork.Epoch)
		fork = resp.Data[1]
		assert.DeepEqual(t, hexutil.Encode(firstForkVersion), fork.PreviousVersion)
		assert.DeepEqual(t, hexutil.Encode(secondForkVersion), fork.CurrentVersion)
		assert.Equal(t, fmt.Sprintf("%d", secondForkEpoch), fork.Epoch)
		assert.Equal(t, genesisStr, fork.PreviousVersion)
		assert.Equal(t, firstStr, fork.CurrentVersion)
		assert.Equal(t, fmt.Sprintf("%d", config.AltairForkEpoch), fork.Epoch)
		fork = resp.Data[2]
		assert.DeepEqual(t, hexutil.Encode(secondForkVersion), fork.PreviousVersion)
		assert.DeepEqual(t, hexutil.Encode(thirdForkVersion), fork.CurrentVersion)
		assert.Equal(t, fmt.Sprintf("%d", thirdForkEpoch), fork.Epoch)
		assert.Equal(t, firstStr, fork.PreviousVersion)
		assert.Equal(t, secondStr, fork.CurrentVersion)
		assert.Equal(t, fmt.Sprintf("%d", config.BellatrixForkEpoch), fork.Epoch)
	})
	t.Run("correct number of forks", func(t *testing.T) {
		request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/fork_schedule", nil)

@@ -639,8 +628,8 @@ func TestForkSchedule_Ok(t *testing.T) {
		require.Equal(t, http.StatusOK, writer.Code)
		resp := &structs.GetForkScheduleResponse{}
		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
		os := forks.NewOrderedSchedule(params.BeaconConfig())
		assert.Equal(t, os.Len(), len(resp.Data))
		os := params.SortedForkSchedule()
		assert.Equal(t, len(os), len(resp.Data))
	})
}

@@ -12,12 +12,10 @@ go_library(
	"//api:go_default_library",
	"//api/server/structs:go_default_library",
	"//beacon-chain/core/light-client:go_default_library",
	"//beacon-chain/core/signing:go_default_library",
	"//beacon-chain/rpc/eth/shared:go_default_library",
	"//config/params:go_default_library",
	"//encoding/bytesutil:go_default_library",
	"//monitoring/tracing/trace:go_default_library",
	"//network/forks:go_default_library",
	"//network/httputil:go_default_library",
	"//runtime/version:go_default_library",
	"//time/slots:go_default_library",

@@ -7,12 +7,10 @@ import (
	"github.com/OffchainLabs/prysm/v6/api"
	"github.com/OffchainLabs/prysm/v6/api/server/structs"
	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	"github.com/OffchainLabs/prysm/v6/network/httputil"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"

@@ -111,17 +109,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R

		updateSlot := update.AttestedHeader().Beacon().Slot
		updateEpoch := slots.ToEpoch(updateSlot)
		updateFork, err := forks.Fork(updateEpoch)
		if err != nil {
			httputil.HandleError(w, "Could not get fork Version: "+err.Error(), http.StatusInternalServerError)
			return
		}

		forkDigest, err := signing.ComputeForkDigest(updateFork.CurrentVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
		if err != nil {
			httputil.HandleError(w, "Could not compute fork digest: "+err.Error(), http.StatusInternalServerError)
			return
		}
		updateEntry := params.GetNetworkScheduleEntry(updateEpoch)
		updateSSZ, err := update.MarshalSSZ()
		if err != nil {
			httputil.HandleError(w, "Could not marshal update to SSZ: "+err.Error(), http.StatusInternalServerError)

@@ -133,7 +121,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
		if _, err := w.Write(chunkLength); err != nil {
			httputil.HandleError(w, "Could not write chunk length: "+err.Error(), http.StatusInternalServerError)
		}
		if _, err := w.Write(forkDigest[:]); err != nil {
		if _, err := w.Write(updateEntry.ForkDigest[:]); err != nil {
			httputil.HandleError(w, "Could not write fork digest: "+err.Error(), http.StatusInternalServerError)
		}
		if _, err := w.Write(updateSSZ); err != nil {

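Framing a light client response chunk now costs one schedule lookup instead of a fork resolution plus a digest computation. A sketch of the write path, assuming params.GetNetworkScheduleEntry(epoch) returns an entry with a precomputed ForkDigest as the hunk above uses it; writeChunk is a hypothetical helper, not part of the commit:

package lightclient

import (
	"io"

	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// writeChunk prefixes an SSZ payload with the fork digest active at epoch.
func writeChunk(w io.Writer, epoch primitives.Epoch, payload []byte) error {
	entry := params.GetNetworkScheduleEntry(epoch)
	if _, err := w.Write(entry.ForkDigest[:]); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}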
@@ -19,7 +19,10 @@ func (_ *Server) GetBeaconConfig(_ context.Context, _ *emptypb.Empty) (*ethpb.Be
|
||||
numFields := val.Type().NumField()
|
||||
res := make(map[string]string, numFields)
|
||||
for i := 0; i < numFields; i++ {
|
||||
res[val.Type().Field(i).Name] = fmt.Sprintf("%v", val.Field(i).Interface())
|
||||
field := val.Type().Field(i)
|
||||
if field.IsExported() {
|
||||
res[field.Name] = fmt.Sprintf("%v", val.Field(i).Interface())
|
||||
}
|
||||
}
|
||||
return ðpb.BeaconConfig{
|
||||
Config: res,
|
||||
|
||||
@@ -17,10 +17,19 @@ func TestServer_GetBeaconConfig(t *testing.T) {
|
||||
res, err := bs.GetBeaconConfig(ctx, &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
conf := params.BeaconConfig()
|
||||
numFields := reflect.TypeOf(conf).Elem().NumField()
|
||||
confType := reflect.TypeOf(conf).Elem()
|
||||
numFields := confType.NumField()
|
||||
|
||||
// Count only exported fields, as unexported fields are not included in the config
|
||||
exportedFields := 0
|
||||
for i := 0; i < numFields; i++ {
|
||||
if confType.Field(i).IsExported() {
|
||||
exportedFields++
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the result has the same number of items as our config struct.
|
||||
assert.Equal(t, numFields, len(res.Config), "Unexpected number of items in config")
|
||||
// Check if the result has the same number of items as exported fields in our config struct.
|
||||
assert.Equal(t, exportedFields, len(res.Config), "Unexpected number of items in config")
|
||||
want := fmt.Sprintf("%d", conf.Eth1FollowDistance)
|
||||
|
||||
// Check that an element is properly populated from the config.
|
||||
|
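The IsExported guard above matters because reflect.Value.Interface() panics on unexported struct fields, and after this refactor the beacon config presumably carries unexported members (such as the precomputed schedule), so the old unconditional loop would crash. A self-contained, runnable illustration of the pattern; the config type here is invented for the demo:

package main

import (
	"fmt"
	"reflect"
)

type config struct {
	Eth1FollowDistance uint64
	forkSchedule       map[string]uint64 // unexported: Interface() would panic
}

func main() {
	val := reflect.ValueOf(&config{Eth1FollowDistance: 2048}).Elem()
	res := make(map[string]string)
	for i := 0; i < val.Type().NumField(); i++ {
		field := val.Type().Field(i)
		if field.IsExported() {
			res[field.Name] = fmt.Sprintf("%v", val.Field(i).Interface())
		}
	}
	fmt.Println(res) // map[Eth1FollowDistance:2048]
}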
@@ -81,10 +81,10 @@ go_library(
	"//crypto/rand:go_default_library",
	"//encoding/bytesutil:go_default_library",
	"//encoding/ssz:go_default_library",
	"//genesis:go_default_library",
	"//math:go_default_library",
	"//monitoring/tracing:go_default_library",
	"//monitoring/tracing/trace:go_default_library",
	"//network/forks:go_default_library",
	"//proto/engine/v1:go_default_library",
	"//proto/eth/v1:go_default_library",
	"//proto/prysm/v1alpha1:go_default_library",

@@ -155,6 +155,7 @@ common_deps = [
	"//crypto/bls/blst:go_default_library",
	"//encoding/bytesutil:go_default_library",
	"//encoding/ssz:go_default_library",
	"//genesis:go_default_library",
	"//proto/engine/v1:go_default_library",
	"//proto/eth/v1:go_default_library",
	"//proto/prysm/v1alpha1:go_default_library",

@@ -18,7 +18,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/encoding/ssz"
	"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
	"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"

@@ -220,16 +219,11 @@ func (vs *Server) getPayloadHeaderFromBuilder(
		return nil, errors.New("builder returned nil bid")
	}
	bidVersion := signedBid.Version()
	fork, err := forks.Fork(slots.ToEpoch(slot))
	if err != nil {
		return nil, errors.Wrap(err, "unable to get fork information")
	}
	forkVersion, ok := params.ConfigForkVersions(params.BeaconConfig())[bytesutil.ToBytes4(fork.CurrentVersion)]
	if !ok {
		return nil, errors.New("unable to find current fork in schedule")
	}
	epoch := slots.ToEpoch(slot)
	entry := params.GetNetworkScheduleEntry(epoch)
	forkVersion := entry.VersionEnum
	if !isVersionCompatible(bidVersion, forkVersion) {
		return nil, fmt.Errorf("builder bid response version: %d is not compatible with expected version: %d for epoch %d", bidVersion, forkVersion, slots.ToEpoch(slot))
		return nil, fmt.Errorf("builder bid response version: %d is not compatible with expected version: %d for epoch %d", bidVersion, forkVersion, epoch)
	}

	bid, err := signedBid.Message()

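Bid validation above now derives the expected fork directly from the epoch: one GetNetworkScheduleEntry call replaces a fork lookup plus a version-bytes-to-enum map. A sketch of the check in isolation, assuming entry.VersionEnum is the runtime/version value for the fork active at that epoch; strict equality here stands in for the looser isVersionCompatible check in the real code:

package validator

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/time/slots"
)

// checkBidVersion rejects a builder bid whose version does not match the
// fork scheduled for the bid's slot.
func checkBidVersion(bidVersion int, slot primitives.Slot) error {
	epoch := slots.ToEpoch(slot)
	entry := params.GetNetworkScheduleEntry(epoch)
	if bidVersion != entry.VersionEnum {
		return fmt.Errorf("bid version %d does not match expected %d for epoch %d", bidVersion, entry.VersionEnum, epoch)
	}
	return nil
}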
@@ -4,6 +4,7 @@
package validator

import (
	"bytes"
	"context"
	"time"

@@ -28,10 +29,11 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	"github.com/OffchainLabs/prysm/v6/genesis"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/emptypb"

@@ -155,32 +157,31 @@ func (vs *Server) ValidatorIndex(ctx context.Context, req *ethpb.ValidatorIndexR
//
// DomainData fetches the current domain version information from the beacon state.
func (vs *Server) DomainData(ctx context.Context, request *ethpb.DomainRequest) (*ethpb.DomainResponse, error) {
	fork, err := forks.Fork(request.Epoch)
	if err != nil {
		return nil, err
	}
	headGenesisValidatorsRoot := vs.HeadFetcher.HeadGenesisValidatorsRoot()
	isExitDomain := [4]byte(request.Domain) == params.BeaconConfig().DomainVoluntaryExit
	if isExitDomain {
	epoch := request.Epoch
	rd := bytesutil.ToBytes4(request.Domain)
	if bytes.Equal(request.Domain, params.BeaconConfig().DomainVoluntaryExit[:]) {
		hs, err := vs.HeadFetcher.HeadStateReadOnly(ctx)
		if err != nil {
			return nil, err
		}
		if hs.Version() >= version.Deneb {
			fork = &ethpb.Fork{
		if slots.ToEpoch(hs.Slot()) >= params.BeaconConfig().DenebForkEpoch {
			return computeDomainData(rd, epoch, &ethpb.Fork{
				PreviousVersion: params.BeaconConfig().CapellaForkVersion,
				CurrentVersion: params.BeaconConfig().CapellaForkVersion,
				Epoch: params.BeaconConfig().CapellaForkEpoch,
			}
			})
		}
	}
	dv, err := signing.Domain(fork, request.Epoch, bytesutil.ToBytes4(request.Domain), headGenesisValidatorsRoot[:])
	return computeDomainData(rd, epoch, params.ForkFromConfig(params.BeaconConfig(), epoch))
}

func computeDomainData(domain [4]byte, epoch primitives.Epoch, fork *ethpb.Fork) (*ethpb.DomainResponse, error) {
	gvr := genesis.ValidatorsRoot()
	domainData, err := signing.Domain(fork, epoch, domain, gvr[:])
	if err != nil {
		return nil, err
	}
	return &ethpb.DomainResponse{
		SignatureDomain: dv,
	}, nil
	return &ethpb.DomainResponse{SignatureDomain: domainData}, nil
}

// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.

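All of DomainData now funnels through computeDomainData, so the voluntary-exit special case is just a different *ethpb.Fork argument. A sketch of the common path, under the signatures shown above (params.ForkFromConfig returning the fork active at an epoch, and signing.Domain combining fork, epoch, domain type, and validators root):

package validator

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/genesis"
)

// domainFor computes the signature domain for a domain type at an epoch,
// mirroring what computeDomainData does for the non-exit case.
func domainFor(epoch primitives.Epoch, domain [4]byte) ([]byte, error) {
	fork := params.ForkFromConfig(params.BeaconConfig(), epoch)
	gvr := genesis.ValidatorsRoot()
	return signing.Domain(fork, epoch, domain, gvr[:])
}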
@@ -2,6 +2,7 @@ package validator

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

@@ -17,11 +18,13 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/crypto/bls"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/genesis"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
	"github.com/OffchainLabs/prysm/v6/testing/mock"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
	"github.com/ethereum/go-ethereum/common/hexutil"
	logTest "github.com/sirupsen/logrus/hooks/test"
	"go.uber.org/mock/gomock"
	"google.golang.org/grpc/codes"

@@ -317,55 +320,63 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) {
	require.LogsContain(t, hook, "Sending genesis time")
}

func TestServer_DomainData_Exits(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
		[4]byte(cfg.GenesisForkVersion): primitives.Epoch(0),
		[4]byte(cfg.AltairForkVersion): primitives.Epoch(5),
		[4]byte(cfg.BellatrixForkVersion): primitives.Epoch(10),
		[4]byte(cfg.CapellaForkVersion): primitives.Epoch(15),
		[4]byte(cfg.DenebForkVersion): primitives.Epoch(20),
	}
	params.OverrideBeaconConfig(cfg)
	beaconState := &ethpb.BeaconStateBellatrix{
		Slot: 4000,
	}
	block := util.NewBeaconBlock()
	genesisRoot, err := block.Block.HashTreeRoot()
	require.NoError(t, err, "Could not get signing root")
	s, err := state_native.InitializeFromProtoUnsafeBellatrix(beaconState)
func testSigDomainForSlot(t *testing.T, domain [4]byte, chsrv *mockChain.ChainService, epoch primitives.Epoch) *ethpb.DomainResponse {
	cfg := params.BeaconConfig()
	gvr := genesis.ValidatorsRoot()
	s, err := state_native.InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{
		Slot: primitives.Slot(epoch) * cfg.SlotsPerEpoch,
		GenesisValidatorsRoot: gvr[:],
	})
	require.NoError(t, err)
	chsrv.State = s
	vs := &Server{
		Ctx: t.Context(),
		ChainStartFetcher: &mockExecution.Chain{},
		HeadFetcher: &mockChain.ChainService{State: s, Root: genesisRoot[:]},
		HeadFetcher: chsrv,
	}

	reqDomain, err := vs.DomainData(t.Context(), &ethpb.DomainRequest{
		Epoch: 100,
		Domain: params.BeaconConfig().DomainDeposit[:],
	})
	assert.NoError(t, err)
	wantedDomain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, params.BeaconConfig().DenebForkVersion, make([]byte, 32))
	assert.NoError(t, err)
	assert.DeepEqual(t, reqDomain.SignatureDomain, wantedDomain)

	beaconStateNew := &ethpb.BeaconStateDeneb{
		Slot: 4000,
	}
	s, err = state_native.InitializeFromProtoUnsafeDeneb(beaconStateNew)
	require.NoError(t, err)
	vs.HeadFetcher = &mockChain.ChainService{State: s, Root: genesisRoot[:]}

	reqDomain, err = vs.DomainData(t.Context(), &ethpb.DomainRequest{
		Epoch: 100,
		Domain: params.BeaconConfig().DomainVoluntaryExit[:],
	domainResp, err := vs.DomainData(t.Context(), &ethpb.DomainRequest{
		Epoch: epoch,
		Domain: domain[:],
	})
	require.NoError(t, err)

	wantedDomain, err = signing.ComputeDomain(params.BeaconConfig().DomainVoluntaryExit, params.BeaconConfig().CapellaForkVersion, make([]byte, 32))
	require.NoError(t, err)

	assert.DeepEqual(t, reqDomain.SignatureDomain, wantedDomain)
	return domainResp
}

func requireSigningEqual(t *testing.T, name string, domain [4]byte, req, want primitives.Epoch, chsrv *mockChain.ChainService) {
	t.Run(fmt.Sprintf("%s_%#x", name, domain), func(t *testing.T) {
		gvr := genesis.ValidatorsRoot()
		resp := testSigDomainForSlot(t, domain, chsrv, req)
		entry := params.GetNetworkScheduleEntry(want)
		wanted, err := signing.ComputeDomain(domain, entry.ForkVersion[:], gvr[:])
		assert.NoError(t, err)
		assert.Equal(t, hexutil.Encode(wanted), hexutil.Encode(resp.SignatureDomain))
	})
}

func TestServer_DomainData_Exits(t *testing.T) {
	// This test makes 2 sets of assertions:
	// - the deposit domain is always computed wrt the fork version at the given epoch
	// - the exit domain is the same until deneb, at which point it is always computed wrt the capella fork version
	params.SetActiveTestCleanup(t, params.MainnetConfig())
	params.BeaconConfig().InitializeForkSchedule()
	cfg := params.BeaconConfig()

	block := util.NewBeaconBlock()
	genesisRoot, err := block.Block.HashTreeRoot()
	require.NoError(t, err, "Could not get signing root")
	chsrv := &mockChain.ChainService{Root: genesisRoot[:]}
	last := params.LastForkEpoch()
	requireSigningEqual(t, "genesis deposit", cfg.DomainDeposit, cfg.GenesisEpoch, cfg.GenesisEpoch, chsrv)
	requireSigningEqual(t, "altair deposit", cfg.DomainDeposit, cfg.AltairForkEpoch, cfg.AltairForkEpoch, chsrv)
	requireSigningEqual(t, "bellatrix deposit", cfg.DomainDeposit, cfg.BellatrixForkEpoch, cfg.BellatrixForkEpoch, chsrv)
	requireSigningEqual(t, "capella deposit", cfg.DomainDeposit, cfg.CapellaForkEpoch, cfg.CapellaForkEpoch, chsrv)
	requireSigningEqual(t, "deneb deposit", cfg.DomainDeposit, cfg.DenebForkEpoch, cfg.DenebForkEpoch, chsrv)
	requireSigningEqual(t, "last epoch deposit", cfg.DomainDeposit, last, last, chsrv)

	requireSigningEqual(t, "genesis exit", cfg.DomainVoluntaryExit, cfg.GenesisEpoch, cfg.GenesisEpoch, chsrv)
	requireSigningEqual(t, "altair exit", cfg.DomainVoluntaryExit, cfg.AltairForkEpoch, cfg.AltairForkEpoch, chsrv)
	requireSigningEqual(t, "bellatrix exit", cfg.DomainVoluntaryExit, cfg.BellatrixForkEpoch, cfg.BellatrixForkEpoch, chsrv)
	requireSigningEqual(t, "capella exit", cfg.DomainVoluntaryExit, cfg.CapellaForkEpoch, cfg.CapellaForkEpoch, chsrv)
	requireSigningEqual(t, "deneb exit", cfg.DomainVoluntaryExit, cfg.DenebForkEpoch, cfg.CapellaForkEpoch, chsrv)
	requireSigningEqual(t, "last epoch exit", cfg.DomainVoluntaryExit, last, cfg.CapellaForkEpoch, chsrv)
}

@@ -5,6 +5,7 @@ go_library(
	srcs = [
		"clock.go",
		"synchronizer.go",
		"testing.go",
	],
	importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/startup",
	visibility = ["//visibility:public"],

@@ -41,6 +41,11 @@ func (g *Clock) CurrentSlot() types.Slot {
	return slots.Duration(g.t, now)
}

// CurrentEpoch returns the current epoch relative to the time.Time value that Clock embeds.
func (g *Clock) CurrentEpoch() types.Epoch {
	return slots.ToEpoch(g.CurrentSlot())
}

// SlotStart computes the time the given slot begins.
func (g *Clock) SlotStart(slot types.Slot) (time.Time, error) {
	return slots.StartTime(g.t, slot)

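CurrentEpoch is pure composition of conversions the clock already exposed, which keeps epoch math in one place for callers like the fork watcher. An equivalence sketch (types.Epoch being the primitives alias clock.go imports):

package startup

import (
	types "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/time/slots"
)

// epochOf is identical to (*Clock).CurrentEpoch: wall time -> slot -> epoch.
func epochOf(c *Clock) types.Epoch {
	return slots.ToEpoch(c.CurrentSlot())
}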
beacon-chain/startup/testing.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package startup

import (
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/time/slots"
)

// MockNower is a mock implementation of the Nower interface for use in tests.
type MockNower struct {
	t time.Time
}

// Now satisfies the Nower interface using a mocked time value
func (m *MockNower) Now() time.Time {
	return m.t
}

// SetSlot sets the current time to the start of the given slot.
func (m *MockNower) SetSlot(t *testing.T, c *Clock, s primitives.Slot) {
	now, err := slots.StartTime(c.GenesisTime(), s)
	if err != nil {
		t.Fatalf("failed to set slot: %v", err)
	}
	m.t = now
}

// Set sets the current time to the given time.
func (m *MockNower) Set(now time.Time) {
	m.t = now
}
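A usage sketch for the new helper, mirroring how defaultMockChain wires it later in this diff: the clock reads time through the mock's Now, and SetSlot pins "now" to the start of a chosen slot. startup.WithNower is assumed to be the clock option this commit uses for injecting the time source:

package example

import (
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// pinnedClock returns a clock frozen at the start of the given slot.
func pinnedClock(t *testing.T, genesis time.Time, valRoot [32]byte, slot primitives.Slot) *startup.Clock {
	mockNow := startup.MockNower{}
	clock := startup.NewClock(genesis, valRoot, startup.WithNower(mockNow.Now))
	mockNow.SetSlot(t, clock, slot)
	return clock
}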
@@ -126,7 +126,6 @@ go_library(
	"//math:go_default_library",
	"//monitoring/tracing:go_default_library",
	"//monitoring/tracing/trace:go_default_library",
	"//network/forks:go_default_library",
	"//proto/prysm/v1alpha1:go_default_library",
	"//proto/prysm/v1alpha1/attestation:go_default_library",
	"//proto/prysm/v1alpha1/metadata:go_default_library",

@@ -265,7 +264,6 @@ go_test(
	"//crypto/rand:go_default_library",
	"//encoding/bytesutil:go_default_library",
	"//encoding/ssz/equality:go_default_library",
	"//network/forks:go_default_library",
	"//proto/engine/v1:go_default_library",
	"//proto/prysm/v1alpha1:go_default_library",
	"//proto/prysm/v1alpha1/attestation:go_default_library",

@@ -35,7 +35,6 @@ go_library(
	"//consensus-types/primitives:go_default_library",
	"//crypto/bls:go_default_library",
	"//encoding/bytesutil:go_default_library",
	"//network/forks:go_default_library",
	"//proto/dbval:go_default_library",
	"//proto/prysm/v1alpha1:go_default_library",
	"//runtime:go_default_library",

@@ -80,7 +79,6 @@ go_test(
	"//consensus-types/primitives:go_default_library",
	"//crypto/bls:go_default_library",
	"//encoding/bytesutil:go_default_library",
	"//network/forks:go_default_library",
	"//proto/dbval:go_default_library",
	"//runtime/interop:go_default_library",
	"//testing/require:go_default_library",

@@ -9,7 +9,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/crypto/bls"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/pkg/errors"

@@ -107,8 +106,7 @@ func (vr verifier) blockSignatureBatch(b blocks.ROBlock) (*bls.SignatureBatch, e
}

func newBackfillVerifier(vr []byte, keys [][fieldparams.BLSPubkeyLength]byte) (*verifier, error) {
	dc, err := newDomainCache(vr, params.BeaconConfig().DomainBeaconProposer,
		forks.NewOrderedSchedule(params.BeaconConfig()))
	dc, err := newDomainCache(vr, params.BeaconConfig().DomainBeaconProposer)
	if err != nil {
		return nil, err
	}

@@ -122,33 +120,31 @@ func newBackfillVerifier(vr []byte, keys [][fieldparams.BLSPubkeyLength]byte) (*

// domainCache provides a fast signing domain lookup by epoch.
type domainCache struct {
	fsched forks.OrderedSchedule
	forkDomains map[[4]byte][]byte
	dType [bls.DomainByteLength]byte
}

func newDomainCache(vRoot []byte, dType [bls.DomainByteLength]byte, fsched forks.OrderedSchedule) (*domainCache, error) {
func newDomainCache(vRoot []byte, dType [bls.DomainByteLength]byte) (*domainCache, error) {
	dc := &domainCache{
		fsched: fsched,
		forkDomains: make(map[[4]byte][]byte),
		dType: dType,
	}
	for _, entry := range fsched {
		d, err := signing.ComputeDomain(dc.dType, entry.Version[:], vRoot)
	for _, entry := range params.SortedForkSchedule() {
		d, err := signing.ComputeDomain(dc.dType, entry.ForkVersion[:], vRoot)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to pre-compute signing domain for fork version=%#x", entry.Version)
			return nil, errors.Wrapf(err, "failed to pre-compute signing domain for fork version=%#x", entry.ForkVersion)
		}
		dc.forkDomains[entry.Version] = d
		dc.forkDomains[entry.ForkVersion] = d
	}
	return dc, nil
}

func (dc *domainCache) forEpoch(e primitives.Epoch) ([]byte, error) {
	fork, err := dc.fsched.VersionForEpoch(e)
	fork, err := params.Fork(e)
	if err != nil {
		return nil, err
	}
	d, ok := dc.forkDomains[fork]
	d, ok := dc.forkDomains[[4]byte(fork.CurrentVersion)]
	if !ok {
		return nil, errors.Wrapf(errUnknownDomain, "fork version=%#x, epoch=%d", fork, e)
	}

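The cache above trades one ComputeDomain per schedule entry at construction for a map hit per epoch at verification time; dropping the injected OrderedSchedule also removes a parameter from every call site. A standalone miniature of the precompute step, using the same signing.ComputeDomain and params.SortedForkSchedule calls as the hunk:

package backfill

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/crypto/bls"
)

// precomputeDomains returns one signing domain per scheduled fork version.
func precomputeDomains(vRoot []byte, dType [bls.DomainByteLength]byte) (map[[4]byte][]byte, error) {
	out := make(map[[4]byte][]byte)
	for _, entry := range params.SortedForkSchedule() {
		d, err := signing.ComputeDomain(dType, entry.ForkVersion[:], vRoot)
		if err != nil {
			return nil, err
		}
		out[entry.ForkVersion] = d
	}
	return out, nil
}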
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/interop"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
@@ -30,18 +29,17 @@ func TestDomainCache(t *testing.T) {
|
||||
}
|
||||
|
||||
vRoot, err := hexutil.Decode("0x0011223344556677889900112233445566778899001122334455667788990011")
|
||||
require.NoError(t, err)
|
||||
dType := cfg.DomainBeaconProposer
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 32, len(vRoot))
|
||||
fsched := forks.NewOrderedSchedule(cfg)
|
||||
dc, err := newDomainCache(vRoot, dType, fsched)
|
||||
dc, err := newDomainCache(vRoot, dType)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(fsched), len(dc.forkDomains))
|
||||
for i := range fsched {
|
||||
e := fsched[i].Epoch
|
||||
ad, err := dc.forEpoch(e)
|
||||
schedule := params.SortedForkSchedule()
|
||||
require.Equal(t, len(schedule), len(dc.forkDomains))
|
||||
for _, entry := range schedule {
|
||||
ad, err := dc.forEpoch(entry.Epoch)
|
||||
require.NoError(t, err)
|
||||
ed, err := signing.ComputeDomain(dType, fsched[i].Version[:], vRoot)
|
||||
ed, err := signing.ComputeDomain(dType, entry.ForkVersion[:], vRoot)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, ed, ad)
|
||||
}
|
||||
|
||||
@@ -21,7 +21,6 @@ import (
|
||||
types "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
@@ -156,11 +155,7 @@ func (r *expectedBlobChunk) requireExpected(t *testing.T, s *Service, stream net
|
||||
|
||||
c, err := readContextFromStream(stream)
|
||||
require.NoError(t, err)
|
||||
|
||||
valRoot := s.cfg.chain.GenesisValidatorsRoot()
|
||||
ctxBytes, err := forks.ForkDigestFromEpoch(slots.ToEpoch(r.sidecar.Slot()), valRoot[:])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, ctxBytes, bytesutil.ToBytes4(c))
|
||||
require.Equal(t, params.ForkDigest(slots.ToEpoch(r.sidecar.Slot())), bytesutil.ToBytes4(c))
|
||||
|
||||
sc := ðpb.BlobSidecar{}
|
||||
require.NoError(t, encoding.DecodeWithMaxLength(stream, sc))
|
||||
@@ -270,27 +265,24 @@ func (c *blobsTestCase) run(t *testing.T) {
|
||||
// we use max uints for future forks, but this causes overflows when computing slots
|
||||
// so it is helpful in tests to temporarily reposition the epochs to give room for some math.
|
||||
func repositionFutureEpochs(cfg *params.BeaconChainConfig) {
|
||||
if cfg.CapellaForkEpoch == math.MaxUint64 {
|
||||
cfg.CapellaForkEpoch = cfg.BellatrixForkEpoch + 100
|
||||
}
|
||||
if cfg.DenebForkEpoch == math.MaxUint64 {
|
||||
cfg.DenebForkEpoch = cfg.CapellaForkEpoch + 100
|
||||
if cfg.FuluForkEpoch == math.MaxUint64 {
|
||||
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 100
|
||||
}
|
||||
}
|
||||
|
||||
func defaultMockChain(t *testing.T) (*mock.ChainService, *startup.Clock) {
|
||||
de := params.BeaconConfig().DenebForkEpoch
|
||||
df, err := forks.Fork(de)
|
||||
df, err := params.Fork(de)
|
||||
require.NoError(t, err)
|
||||
denebBuffer := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1000
|
||||
ce := de + denebBuffer
|
||||
fe := ce - 2
|
||||
cs, err := slots.EpochStart(ce)
|
||||
require.NoError(t, err)
|
||||
now := time.Now()
|
||||
genOffset := types.Slot(params.BeaconConfig().SecondsPerSlot) * cs
|
||||
genesis := now.Add(-1 * time.Second * time.Duration(int64(genOffset)))
|
||||
clock := startup.NewClock(genesis, [32]byte{})
|
||||
genesis := time.Now()
|
||||
mockNow := startup.MockNower{}
|
||||
clock := startup.NewClock(genesis, params.BeaconConfig().GenesisValidatorsRoot, startup.WithNower(mockNow.Now))
|
||||
mockNow.SetSlot(t, clock, cs)
|
||||
chain := &mock.ChainService{
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{Epoch: fe},
|
||||
Fork: df,
|
||||
|
@@ -46,7 +46,6 @@ go_test(
	"//consensus-types/blocks/testing:go_default_library",
	"//consensus-types/primitives:go_default_library",
	"//encoding/ssz/detect:go_default_library",
	"//network/forks:go_default_library",
	"//proto/prysm/v1alpha1:go_default_library",
	"//runtime/version:go_default_library",
	"//testing/require:go_default_library",

@@ -12,7 +12,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	blocktest "github.com/OffchainLabs/prysm/v6/consensus-types/blocks/testing"
	"github.com/OffchainLabs/prysm/v6/encoding/ssz/detect"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
	"github.com/OffchainLabs/prysm/v6/time/slots"

@@ -22,6 +21,7 @@ import (
func TestDownloadFinalizedData(t *testing.T) {
	ctx := t.Context()
	cfg := params.MainnetConfig()
	cfg.InitializeForkSchedule()

	// avoid the altair zone because genesis tests are easier to set up
	epoch := cfg.AltairForkEpoch - 1

@@ -30,7 +30,7 @@ func TestDownloadFinalizedData(t *testing.T) {
	require.NoError(t, err)
	st, err := util.NewBeaconState()
	require.NoError(t, err)
	fork, err := forks.ForkForEpochFromConfig(cfg, epoch)
	fork := params.ForkFromConfig(cfg, epoch)
	require.NoError(t, err)
	require.NoError(t, st.SetFork(fork))
	require.NoError(t, st.SetSlot(slot))

@@ -16,7 +16,6 @@ import (
	blocktest "github.com/OffchainLabs/prysm/v6/consensus-types/blocks/testing"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/ssz/detect"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/testing/require"

@@ -83,7 +82,7 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
	require.NoError(t, err)
	wst, err := util.NewBeaconState()
	require.NoError(t, err)
	fork, err := forkForEpoch(cfg, epoch)
	fork, err := params.Fork(epoch)
	require.NoError(t, err)
	require.NoError(t, wst.SetFork(fork))

@@ -182,7 +181,7 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
	require.NoError(t, err)
	wst, err := util.NewBeaconState()
	require.NoError(t, err)
	fork, err := forkForEpoch(cfg, cfg.GenesisEpoch)
	fork, err := params.Fork(cfg.GenesisEpoch)
	require.NoError(t, err)
	require.NoError(t, wst.SetFork(fork))

@@ -279,33 +278,11 @@ func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
	require.Equal(t, expectedEpoch, actualEpoch)
}

func forkForEpoch(cfg *params.BeaconChainConfig, epoch primitives.Epoch) (*ethpb.Fork, error) {
	os := forks.NewOrderedSchedule(cfg)
	currentVersion, err := os.VersionForEpoch(epoch)
	if err != nil {
		return nil, err
	}
	prevVersion, err := os.Previous(currentVersion)
	if err != nil {
		if !errors.Is(err, forks.ErrNoPreviousVersion) {
			return nil, err
		}
		// use same version for both in the case of genesis
		prevVersion = currentVersion
	}
	forkEpoch := cfg.ForkVersionSchedule[currentVersion]
	return &ethpb.Fork{
		PreviousVersion: prevVersion[:],
		CurrentVersion: currentVersion[:],
		Epoch: forkEpoch,
	}, nil
}

func defaultTestHeadState(t *testing.T, cfg *params.BeaconChainConfig) (state.BeaconState, primitives.Epoch) {
	st, err := util.NewBeaconStateAltair()
	require.NoError(t, err)

	fork, err := forkForEpoch(cfg, cfg.AltairForkEpoch)
	fork, err := params.Fork(cfg.AltairForkEpoch)
	require.NoError(t, err)
	require.NoError(t, st.SetFork(fork))

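The deleted forkForEpoch helper above reconstructed previous/current version pairs by hand; params.Fork(epoch) now returns a fully populated *ethpb.Fork, including the genesis case where the previous version equals the current one. A small usage sketch matching the updated tests:

package checkpoint

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// setForkAt stamps a state with the fork active at the given epoch.
func setForkAt(st state.BeaconState, epoch primitives.Epoch) error {
	fork, err := params.Fork(epoch) // PreviousVersion, CurrentVersion, Epoch all populated
	if err != nil {
		return err
	}
	return st.SetFork(fork)
}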
@@ -3,7 +3,6 @@ package sync
import (
	"io"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/libp2p/go-libp2p/core/network"

@@ -87,12 +86,8 @@ type ContextByteVersions map[[4]byte]int
// and the runtime/version identifier for the corresponding fork.
func ContextByteVersionsForValRoot(valRoot [32]byte) (ContextByteVersions, error) {
	m := make(ContextByteVersions)
	for fv, v := range params.ConfigForkVersions(params.BeaconConfig()) {
		digest, err := signing.ComputeForkDigest(fv[:], valRoot[:])
		if err != nil {
			return nil, errors.Wrapf(err, "unable to compute fork digest for fork version %#x", fv)
		}
		m[digest] = v
	for _, entry := range params.SortedNetworkScheduleEntries() {
		m[entry.ForkDigest] = entry.VersionEnum
	}
	return m, nil
}

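Mapping context bytes to runtime versions above no longer computes anything per call; note the valRoot parameter appears unused after this change and is presumably retained for API compatibility. The reverse lookup can be sketched without a map at all, under the same entry-field assumptions:

package sync

import "github.com/OffchainLabs/prysm/v6/config/params"

// versionForDigest resolves a gossip/RPC context digest to its runtime
// version enum by scanning the precomputed schedule.
func versionForDigest(digest [4]byte) (int, bool) {
	for _, entry := range params.SortedNetworkScheduleEntries() {
		if entry.ForkDigest == digest {
			return entry.VersionEnum, true
		}
	}
	return 0, false
}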
@@ -127,7 +127,7 @@ func (s *Service) scheduleMissingDataColumnSidecarsBroadcast(
	})

	// Get the time corresponding to the start of the slot.
	genesisTime := s.cfg.chain.GenesisTime()
	genesisTime := s.cfg.clock.GenesisTime()
	slotStartTime, err := slots.StartTime(genesisTime, slot)
	if err != nil {
		return errors.Wrap(err, "failed to calculate slot start time")

@@ -1,12 +1,10 @@
package sync

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"

@@ -19,7 +17,6 @@ import (
)

var errNilPubsubMessage = errors.New("nil pubsub message")
var errInvalidTopic = errors.New("invalid topic format")

func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, error) {
	if msg == nil || msg.Topic == nil || *msg.Topic == "" {

@@ -75,7 +72,7 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
func (*Service) replaceForkDigest(topic string) (string, error) {
	subStrings := strings.Split(topic, "/")
	if len(subStrings) != 4 {
		return "", errInvalidTopic
		return "", p2p.ErrInvalidTopic
	}
	subStrings[2] = "%x"
	return strings.Join(subStrings, "/"), nil

@@ -105,29 +102,21 @@ func extractDataTypeFromTypeMap[T any](typeMap map[[4]byte]func() (T, error), di
	if len(digest) == 0 {
		f, ok := typeMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
		if !ok {
			return zero, fmt.Errorf("no %T type exists for the genesis fork version", zero)
			return zero, errors.Wrapf(errInvalidDigest, "no %T type exists for the genesis fork version", zero)
		}
		return f()
	}
	if len(digest) != forkDigestLength {
		return zero, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
		return zero, errors.Wrapf(errInvalidDigest, "invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
	}
	vRoot := tor.GenesisValidatorsRoot()
	for k, f := range typeMap {
		rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
		if err != nil {
			return zero, err
		}
		if rDigest == bytesutil.ToBytes4(digest) {
			return f()
		}
	forkVersion, _, err := params.ForkDataFromDigest([4]byte(digest))
	if err != nil {
		return zero, errors.Wrapf(ErrNoValidDigest, "could not extract %T data type, saw digest=%#x", zero, digest)
	}
	return zero, errors.Wrapf(
		ErrNoValidDigest,
		"could not extract %T data type, saw digest=%#x, genesis=%v, vr=%#x",
		zero,
		digest,
		tor.GenesisTime(),
		tor.GenesisValidatorsRoot(),
	)

	f, ok := typeMap[forkVersion]
	if ok {
		return f()
	}
	return zero, errors.Wrapf(ErrNoValidDigest, "could not extract %T data type, saw digest=%#x", zero, digest)
}

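Digest-to-type resolution above collapses a loop of per-entry ComputeForkDigest calls into one reverse lookup. A compact sketch of the new shape, assuming the signature used in the hunk: params.ForkDataFromDigest([4]byte) returns the fork version, a second value (ignored here, as in the hunk), and an error for unknown digests:

package sync

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/config/params"
)

// resolveByDigest picks the constructor registered for the fork version
// behind a digest.
func resolveByDigest[T any](typeMap map[[4]byte]func() (T, error), digest [4]byte) (T, error) {
	var zero T
	forkVersion, _, err := params.ForkDataFromDigest(digest)
	if err != nil {
		return zero, err
	}
	f, ok := typeMap[forkVersion]
	if !ok {
		return zero, fmt.Errorf("no %T constructor for fork version %#x", zero, forkVersion)
	}
	return f()
}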
||||
@@ -4,13 +4,11 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
p2ptesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
@@ -30,8 +28,9 @@ import (
|
||||
)
|
||||
|
||||
func TestService_decodePubsubMessage(t *testing.T) {
|
||||
digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, make([]byte, 32))
|
||||
require.NoError(t, err)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
entry := params.GetNetworkScheduleEntry(params.BeaconConfig().GenesisEpoch)
|
||||
tests := []struct {
|
||||
name string
|
||||
topic string
|
||||
@@ -56,7 +55,7 @@ func TestService_decodePubsubMessage(t *testing.T) {
|
||||
{
|
||||
name: "invalid topic format",
|
||||
topic: "foo",
|
||||
wantErr: errInvalidTopic,
|
||||
wantErr: p2p.ErrInvalidTopic,
|
||||
},
|
||||
{
|
||||
name: "topic not mapped to any message type",
|
||||
@@ -65,7 +64,7 @@ func TestService_decodePubsubMessage(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "valid message -- beacon block",
|
||||
topic: fmt.Sprintf(p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlock{})], digest),
|
||||
topic: fmt.Sprintf(p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlock{})], entry.ForkDigest),
|
||||
input: &pubsub.Message{
|
||||
Message: &pb.Message{
|
||||
Data: func() []byte {
|
||||
@@ -102,10 +101,11 @@ func TestService_decodePubsubMessage(t *testing.T) {
|
||||
tt.input.Message.Topic = &topic
|
||||
}
|
||||
got, err := s.decodePubsubMessage(tt.input)
|
||||
if err != nil && err != tt.wantErr && !strings.Contains(err.Error(), tt.wantErr.Error()) {
|
||||
t.Errorf("decodePubsubMessage() error = %v, wantErr %v", err, tt.wantErr)
|
||||
if tt.wantErr != nil {
|
||||
require.ErrorIs(t, err, tt.wantErr, "decodePubsubMessage() error mismatch")
|
||||
return
|
||||
}
|
||||
require.NoError(t, err, "decodePubsubMessage() unexpected error")
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
diff, _ := messagediff.PrettyDiff(got, tt.want)
|
||||
t.Log(diff)
|
||||
@@ -116,24 +116,11 @@ func TestService_decodePubsubMessage(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestExtractDataType(t *testing.T) {
|
||||
// Precompute digests
|
||||
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
bellatrixDigest, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
capellaDigest, err := signing.ComputeForkDigest(params.BeaconConfig().CapellaForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
denebDigest, err := signing.ComputeForkDigest(params.BeaconConfig().DenebForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
electraDigest, err := signing.ComputeForkDigest(params.BeaconConfig().ElectraForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
fuluDigest, err := signing.ComputeForkDigest(params.BeaconConfig().FuluForkVersion, params.BeaconConfig().ZeroHash[:])
|
||||
require.NoError(t, err)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
type args struct {
|
||||
digest []byte
|
||||
digest [4]byte
|
||||
chain blockchain.ChainInfoFetcher
|
||||
}
|
||||
tests := []struct {
|
||||
@@ -146,40 +133,10 @@ func TestExtractDataType(t *testing.T) {
|
||||
wantAttSlashing ethpb.AttSlashing
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "no digest",
|
||||
args: args{
|
||||
digest: []byte{},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}})
|
||||
require.NoError(t, err)
|
||||
return wsb
|
||||
}(),
|
||||
wantMd: wrapper.WrappedMetadataV0(ðpb.MetaDataV0{}),
|
||||
wantAtt: ðpb.Attestation{},
|
||||
wantAggregate: ðpb.SignedAggregateAttestationAndProof{},
|
||||
wantAttSlashing: ðpb.AttesterSlashing{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: nil,
|
||||
wantMd: nil,
|
||||
wantAtt: nil,
|
||||
wantAggregate: nil,
|
||||
wantAttSlashing: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "non existent digest",
|
||||
args: args{
|
||||
digest: []byte{0x00, 0x01, 0x02, 0x03},
|
||||
digest: [4]byte{},
|
||||
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
|
||||
},
|
||||
wantBlock: nil,
|
||||
@@ -192,7 +149,7 @@ func TestExtractDataType(t *testing.T) {
{
name: "genesis fork version",
args: args{
digest: genDigest[:],
digest: params.ForkDigest(params.BeaconConfig().GenesisEpoch),
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -208,7 +165,7 @@ func TestExtractDataType(t *testing.T) {
{
name: "altair fork version",
args: args{
digest: altairDigest[:],
digest: params.ForkDigest(params.BeaconConfig().AltairForkEpoch),
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -225,7 +182,7 @@ func TestExtractDataType(t *testing.T) {
{
name: "bellatrix fork version",
args: args{
digest: bellatrixDigest[:],
digest: params.ForkDigest(params.BeaconConfig().BellatrixForkEpoch),
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -242,7 +199,7 @@ func TestExtractDataType(t *testing.T) {
{
name: "capella fork version",
args: args{
digest: capellaDigest[:],
digest: params.ForkDigest(params.BeaconConfig().CapellaForkEpoch),
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -259,7 +216,7 @@ func TestExtractDataType(t *testing.T) {
{
name: "deneb fork version",
args: args{
digest: denebDigest[:],
digest: params.ForkDigest(params.BeaconConfig().DenebForkEpoch),
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -276,7 +233,7 @@ func TestExtractDataType(t *testing.T) {
{
name: "electra fork version",
args: args{
digest: electraDigest[:],
digest: params.ForkDigest(params.BeaconConfig().ElectraForkEpoch),
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -293,7 +250,7 @@ func TestExtractDataType(t *testing.T) {
{
name: "fulu fork version",
args: args{
digest: fuluDigest[:],
digest: params.ForkDigest(params.BeaconConfig().FuluForkEpoch),
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -310,7 +267,7 @@ func TestExtractDataType(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotBlock, err := extractDataTypeFromTypeMap(types.BlockMap, tt.args.digest, tt.args.chain)
gotBlock, err := extractDataTypeFromTypeMap(types.BlockMap, tt.args.digest[:], tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("block: error = %v, wantErr %v", err, tt.wantErr)
return
@@ -318,7 +275,7 @@ func TestExtractDataType(t *testing.T) {
if !reflect.DeepEqual(gotBlock, tt.wantBlock) {
t.Errorf("block: got = %v, want %v", gotBlock, tt.wantBlock)
}
gotAtt, err := extractDataTypeFromTypeMap(types.AttestationMap, tt.args.digest, tt.args.chain)
gotAtt, err := extractDataTypeFromTypeMap(types.AttestationMap, tt.args.digest[:], tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("attestation: error = %v, wantErr %v", err, tt.wantErr)
return
@@ -326,7 +283,7 @@ func TestExtractDataType(t *testing.T) {
if !reflect.DeepEqual(gotAtt, tt.wantAtt) {
t.Errorf("attestation: got = %v, want %v", gotAtt, tt.wantAtt)
}
gotAggregate, err := extractDataTypeFromTypeMap(types.AggregateAttestationMap, tt.args.digest, tt.args.chain)
gotAggregate, err := extractDataTypeFromTypeMap(types.AggregateAttestationMap, tt.args.digest[:], tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("aggregate: error = %v, wantErr %v", err, tt.wantErr)
return
@@ -334,7 +291,7 @@ func TestExtractDataType(t *testing.T) {
if !reflect.DeepEqual(gotAggregate, tt.wantAggregate) {
t.Errorf("aggregate: got = %v, want %v", gotAggregate, tt.wantAggregate)
}
gotAttSlashing, err := extractDataTypeFromTypeMap(types.AttesterSlashingMap, tt.args.digest, tt.args.chain)
gotAttSlashing, err := extractDataTypeFromTypeMap(types.AttesterSlashingMap, tt.args.digest[:], tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("attester slashing: error = %v, wantErr %v", err, tt.wantErr)
return
@@ -345,3 +302,11 @@ func TestExtractDataType(t *testing.T) {
})
}
}

func TestExtractDataTypeFromTypeMapInvalid(t *testing.T) {
chain := &mock.ChainService{ValidatorsRoot: [32]byte{}}
_, err := extractDataTypeFromTypeMap(types.BlockMap, []byte{0x00, 0x01}, chain)
require.ErrorIs(t, err, errInvalidDigest)
_, err = extractDataTypeFromTypeMap(types.AttestationMap, []byte{0x00, 0x01}, chain)
require.ErrorIs(t, err, errInvalidDigest)
}

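// Editor's note: a minimal sketch of the lookup these tests exercise, assuming a
// hypothetical constructor map keyed by fork epoch. extractDataTypeFromTypeMap,
// errInvalidDigest, and params.ForkDataFromDigest are real names from this diff;
// the generic helper below is illustrative only and omits the temporal-oracle argument.
func lookupByDigest[T any](ctors map[primitives.Epoch]func() T, digest []byte) (T, error) {
	var zero T
	// A context digest must be exactly 4 bytes; anything else is rejected up front.
	if len(digest) != 4 {
		return zero, errInvalidDigest
	}
	// Map the digest back to the fork epoch it encodes via the precomputed schedule.
	_, epoch, err := params.ForkDataFromDigest([4]byte(digest))
	if err != nil {
		return zero, errInvalidDigest
	}
	ctor, ok := ctors[epoch]
	if !ok {
		return zero, errInvalidDigest
	}
	return ctor(), nil
}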
@@ -4,7 +4,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/pkg/errors"
@@ -42,67 +41,46 @@ func (s *Service) forkWatcher() {

// registerForUpcomingFork registers the appropriate gossip and RPC topics if there is a fork in the next epoch.
func (s *Service) registerForUpcomingFork(currentEpoch primitives.Epoch) error {
// Get the genesis validators root.
genesisValidatorsRoot := s.cfg.clock.GenesisValidatorsRoot()

nextEntry := params.GetNetworkScheduleEntry(currentEpoch + 1)
// Check if there is a fork in the next epoch.
isForkNextEpoch, err := forks.IsForkNextEpoch(s.cfg.clock.GenesisTime(), genesisValidatorsRoot[:])
if err != nil {
return errors.Wrap(err, "Could not retrieve next fork epoch")
}

// Exit early if there is no fork in the next epoch.
if !isForkNextEpoch {
if nextEntry.ForkDigest == s.registeredNetworkEntry.ForkDigest {
return nil
}

beforeForkEpoch := currentEpoch
forkEpoch := beforeForkEpoch + 1

// Get the fork afterForkDigest for the next epoch.
afterForkDigest, err := forks.ForkDigestFromEpoch(forkEpoch, genesisValidatorsRoot[:])
if err != nil {
return errors.Wrap(err, "could not retrieve fork digest")
}

// Exit early if the topics for the next epoch are already registered.
// It is likely to be the case for all slots of the epoch that are not the first one.
if s.subHandler.digestExists(afterForkDigest) {
if s.subHandler.digestExists(nextEntry.ForkDigest) {
return nil
}

// Register the subscribers (gossipsub) for the next epoch.
s.registerSubscribers(forkEpoch, afterForkDigest)
s.registerSubscribers(nextEntry.Epoch, nextEntry.ForkDigest)

// Get the handlers for the current and next fork.
beforeForkHandlerByTopic, err := s.rpcHandlerByTopicFromEpoch(beforeForkEpoch)
currentHandler, err := s.rpcHandlerByTopicFromEpoch(currentEpoch)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from before fork epoch")
}

forkHandlerByTopic, err := s.rpcHandlerByTopicFromEpoch(forkEpoch)
nextHandler, err := s.rpcHandlerByTopicFromEpoch(nextEntry.Epoch)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from fork epoch")
}

// Compute newly added topics.
newRPCHandlerByTopic := addedRPCHandlerByTopic(beforeForkHandlerByTopic, forkHandlerByTopic)
newHandlersByTopic := addedRPCHandlerByTopic(currentHandler, nextHandler)

// Register the new RPC handlers.
for topic, handler := range newRPCHandlerByTopic {
for topic, handler := range newHandlersByTopic {
s.registerRPC(topic, handler)
}

s.registeredNetworkEntry = nextEntry
return nil
}

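// Editor's note: addedRPCHandlerByTopic is called above but not shown in this diff.
// A plausible sketch of its contract, assuming rpcHandler is the handler type that
// registerRPC accepts: return the topics present in the next fork's map that are
// absent from the current fork's map.
func addedRPCHandlerByTopicSketch(current, next map[string]rpcHandler) map[string]rpcHandler {
	added := make(map[string]rpcHandler, len(next))
	for topic, handler := range next {
		if _, ok := current[topic]; !ok {
			added[topic] = handler
		}
	}
	return added
}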
// deregisterFromPastFork deregisters the appropriate gossip and RPC topics if there is a fork in the current epoch.
func (s *Service) deregisterFromPastFork(currentEpoch primitives.Epoch) error {
// Extract the genesis validators root.
genesisValidatorsRoot := s.cfg.clock.GenesisValidatorsRoot()

// Get the fork.
currentFork, err := forks.Fork(currentEpoch)
currentFork, err := params.Fork(currentEpoch)
if err != nil {
return errors.Wrap(err, "genesis validators root")
}
@@ -123,10 +101,7 @@ func (s *Service) deregisterFromPastFork(currentEpoch primitives.Epoch) error {
// Look at the previous fork's digest.
beforeForkEpoch := currentFork.Epoch - 1

beforeForkDigest, err := forks.ForkDigestFromEpoch(beforeForkEpoch, genesisValidatorsRoot[:])
if err != nil {
return errors.Wrap(err, "fork digest from epoch")
}
beforeForkDigest := params.ForkDigest(beforeForkEpoch)

// Exit early if there are no topics with that particular digest.
if !s.subHandler.digestExists(beforeForkDigest) {

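// Editor's note: the core substitution of this refactor, side by side. Both forms are
// taken from the diff; the new one assumes params.BeaconConfig().InitializeForkSchedule()
// has run, after which the schedule lookup cannot fail.
//
//	// Before: recompute the digest from genesis data, with an error path to handle.
//	digest, err := forks.ForkDigestFromEpoch(epoch, genesisValidatorsRoot[:])
//	if err != nil {
//		return errors.Wrap(err, "fork digest from epoch")
//	}
//
//	// After: an infallible lookup against the precomputed network schedule.
//	digest := params.ForkDigest(epoch)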
@@ -14,7 +14,6 @@ import (
mockSync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync/testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/assert"
)
@@ -91,9 +90,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(5)
assert.Equal(t, true, s.subHandler.digestExists(digest))
rpcMap := make(map[string]bool)
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
@@ -135,9 +132,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(5)
assert.Equal(t, true, s.subHandler.digestExists(digest))
rpcMap := make(map[string]bool)
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
@@ -177,9 +172,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(5)
assert.Equal(t, true, s.subHandler.digestExists(digest))
rpcMap := make(map[string]bool)
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
@@ -221,9 +214,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(5)
assert.Equal(t, true, s.subHandler.digestExists(digest))
rpcMap := make(map[string]bool)
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
@@ -266,9 +257,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(5)
assert.Equal(t, true, s.subHandler.digestExists(digest))
rpcMap := make(map[string]bool)
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
@@ -387,14 +376,12 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
r.registerRPC(topic, handler)
}

genRoot := r.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(0, genRoot[:])
digest := params.ForkDigest(0)
assert.NoError(t, err)
r.registerSubscribers(0, digest)
assert.Equal(t, true, r.subHandler.digestExists(digest))

digest, err = forks.ForkDigestFromEpoch(3, genRoot[:])
assert.NoError(t, err)
digest = params.ForkDigest(3)
r.registerSubscribers(3, digest)
assert.Equal(t, true, r.subHandler.digestExists(digest))

@@ -403,12 +390,9 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(0, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(0)
assert.Equal(t, false, s.subHandler.digestExists(digest))
digest, err = forks.ForkDigestFromEpoch(3, genRoot[:])
assert.NoError(t, err)
digest = params.ForkDigest(3)
assert.Equal(t, true, s.subHandler.digestExists(digest))

ptcls := s.cfg.p2p.Host().Mux().Protocols()
@@ -455,14 +439,11 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}
genRoot := r.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(1, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(1)
r.registerSubscribers(1, digest)
assert.Equal(t, true, r.subHandler.digestExists(digest))

digest, err = forks.ForkDigestFromEpoch(3, genRoot[:])
assert.NoError(t, err)
digest = params.ForkDigest(3)
r.registerSubscribers(3, digest)
assert.Equal(t, true, r.subHandler.digestExists(digest))

@@ -471,12 +452,9 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(1, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(1)
assert.Equal(t, false, s.subHandler.digestExists(digest))
digest, err = forks.ForkDigestFromEpoch(3, genRoot[:])
assert.NoError(t, err)
digest = params.ForkDigest(3)
assert.Equal(t, true, s.subHandler.digestExists(digest))
},
},

@@ -263,31 +263,23 @@ func TestProcessPendingAtts_HasBlockSaveUnaggregatedAttElectra(t *testing.T) {
func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen(t *testing.T) {
// Setup configuration and fork version schedule.
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
fvs := map[[fieldparams.VersionLength]byte]primitives.Epoch{
bytesutil.ToBytes4(cfg.GenesisForkVersion): 1,
bytesutil.ToBytes4(cfg.AltairForkVersion): 2,
bytesutil.ToBytes4(cfg.BellatrixForkVersion): 3,
bytesutil.ToBytes4(cfg.CapellaForkVersion): 4,
bytesutil.ToBytes4(cfg.DenebForkVersion): 5,
bytesutil.ToBytes4(cfg.FuluForkVersion): 6,
bytesutil.ToBytes4(cfg.ElectraForkVersion): 0,
}
cfg.ForkVersionSchedule = fvs
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()

// Initialize logging, database, and P2P components.
hook := logTest.NewGlobal()
db := dbtest.SetupDB(t)
p1 := p2ptest.NewTestP2P(t)
validators := uint64(256)
currentSlot := 1 + (primitives.Slot(params.BeaconConfig().ElectraForkEpoch) * params.BeaconConfig().SlotsPerEpoch)
genesisOffset := time.Duration(currentSlot) * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
clock := startup.NewClock(time.Now().Add(-1*genesisOffset), params.BeaconConfig().GenesisValidatorsRoot)

// Create genesis state and associated keys.
beaconState, privKeys := util.DeterministicGenesisStateElectra(t, validators)
require.NoError(t, beaconState.SetSlot(1))
require.NoError(t, beaconState.SetSlot(clock.CurrentSlot()))

// Create and save a new Beacon block.
sb := util.NewBeaconBlockElectra()
sb.Block.Slot = clock.CurrentSlot()
util.SaveBlock(t, t.Context(), db, sb)

// Save state with block root.
@@ -298,10 +290,10 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
att := &ethpb.SingleAttestation{
CommitteeId: 8, // choose a non-zero value
Data: &ethpb.AttestationData{
Slot: 1,
Slot: clock.CurrentSlot(),
BeaconBlockRoot: root[:],
Source: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: root[:]},
Source: &ethpb.Checkpoint{Epoch: clock.CurrentEpoch() - 1, Root: make([]byte, fieldparams.RootLength)},
Target: &ethpb.Checkpoint{Epoch: clock.CurrentEpoch(), Root: root[:]},
CommitteeIndex: 0,
},
}
@@ -312,7 +304,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
att.AttesterIndex = committee[0]

// Compute attester domain and signature.
attesterDomain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
attesterDomain, err := signing.Domain(beaconState.Fork(), clock.CurrentEpoch(), params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
hashTreeRoot, err := signing.ComputeSigningRoot(att.Data, attesterDomain)
assert.NoError(t, err)
@@ -327,7 +319,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
Root: att.Data.BeaconBlockRoot,
Epoch: 0,
Epoch: clock.CurrentEpoch() - 2,
},
}

@@ -348,7 +340,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
p2p: p1,
beaconDB: db,
chain: chain,
clock: startup.NewClock(chain.Genesis.Add(time.Duration(-1*int(params.BeaconConfig().SecondsPerSlot))*time.Second), chain.ValidatorsRoot),
clock: clock,
attPool: attestations.NewPool(),
attestationNotifier: &mock.SimpleNotifier{Feed: opn},
},
@@ -359,7 +351,8 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
go r.verifierRoutine()

// Save a new beacon state and link it with the block root.
s, err := util.NewBeaconStateElectra()
slotOpt := func(s *ethpb.BeaconStateElectra) error { s.Slot = clock.CurrentSlot(); return nil }
s, err := util.NewBeaconStateElectra(slotOpt)
require.NoError(t, err)
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))

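// Editor's note: the clock arithmetic used by the test above, isolated into a helper
// for illustration (the test inlines it; the expressions are copied from the diff).
// The idea is to backdate genesis so the wall clock sits one slot past the Electra
// fork epoch, letting clock.CurrentSlot() drive the test instead of hard-coded slots.
func newElectraClockSketch() *startup.Clock {
	currentSlot := 1 + (primitives.Slot(params.BeaconConfig().ElectraForkEpoch) * params.BeaconConfig().SlotsPerEpoch)
	genesisOffset := time.Duration(currentSlot) * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
	// Genesis is placed genesisOffset in the past, so CurrentSlot() == currentSlot now.
	return startup.NewClock(time.Now().Add(-1*genesisOffset), params.BeaconConfig().GenesisValidatorsRoot)
}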
@@ -40,7 +40,7 @@ func (s *Service) streamBlobBatch(ctx context.Context, batch blockBatch, wQuota
return wQuota, errors.Wrapf(err, "could not retrieve sidecar: index %d, block root %#x", i, root)
}
SetStreamWriteDeadline(stream, defaultWriteDuration)
if chunkErr := WriteBlobSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), sc); chunkErr != nil {
if chunkErr := WriteBlobSidecarChunk(stream, s.cfg.clock, s.cfg.p2p.Encoding(), sc); chunkErr != nil {
log.WithError(chunkErr).Debug("Could not send a chunked response")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, chunkErr)
@@ -77,7 +77,7 @@ func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interfa

remotePeer := stream.Conn().RemotePeer()

rp, err := validateBlobsByRange(r, s.cfg.chain.CurrentSlot())
rp, err := validateBlobsByRange(r, s.cfg.clock.CurrentSlot())
if err != nil {
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
s.downscorePeer(remotePeer, "blobSidecarsByRangeRpcHandlerValidationError")
@@ -99,7 +99,7 @@ func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interfa
var batch blockBatch

wQuota := params.BeaconConfig().MaxRequestBlobSidecars
if slots.ToEpoch(s.cfg.chain.CurrentSlot()) >= params.BeaconConfig().ElectraForkEpoch {
if slots.ToEpoch(s.cfg.clock.CurrentSlot()) >= params.BeaconConfig().ElectraForkEpoch {
wQuota = params.BeaconConfig().MaxRequestBlobSidecarsElectra
}
for batch, ok = batcher.next(ctx, stream); ok; batch, ok = batcher.next(ctx, stream) {

@@ -13,7 +13,7 @@ import (
)

func (c *blobsTestCase) defaultOldestSlotByRange(t *testing.T) types.Slot {
currentEpoch := slots.ToEpoch(c.chain.CurrentSlot())
currentEpoch := c.clock.CurrentEpoch()
oldestEpoch := currentEpoch - params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
if oldestEpoch < params.BeaconConfig().DenebForkEpoch {
oldestEpoch = params.BeaconConfig().DenebForkEpoch
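// Editor's note: a worked example of the window computed above, assuming the mainnet
// value MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 4096 (an assumption of this note, not
// stated in the diff). At currentEpoch = 10000 the oldest servable epoch is
// 10000 - 4096 = 5904; at currentEpoch = 4100 the subtraction would land before Deneb,
// so the clamp pins the result to DenebForkEpoch instead.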
@@ -183,7 +183,6 @@ func TestBlobsByRangeValidation(t *testing.T) {
name string
current types.Slot
req *ethpb.BlobSidecarsByRangeRequest
// chain := defaultMockChain(t)

start types.Slot
end types.Slot

@@ -97,7 +97,7 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
}

SetStreamWriteDeadline(stream, defaultWriteDuration)
if chunkErr := WriteBlobSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), sc); chunkErr != nil {
if chunkErr := WriteBlobSidecarChunk(stream, s.cfg.clock, s.cfg.p2p.Encoding(), sc); chunkErr != nil {
log.WithError(chunkErr).Debug("Could not send a chunked response")
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, chunkErr)

@@ -8,9 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/pkg/errors"
@@ -30,57 +28,9 @@ func WriteBlockChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle, en
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return err
}
var obtainedCtx []byte

valRoot := tor.GenesisValidatorsRoot()
switch blk.Version() {
case version.Phase0:
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().GenesisEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
case version.Altair:
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
case version.Bellatrix:
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
case version.Capella:
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
case version.Deneb:
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
case version.Electra:
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
case version.Fulu:
digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().FuluForkEpoch, valRoot[:])
if err != nil {
return err
}
obtainedCtx = digest[:]
default:
return errors.Wrapf(ErrUnrecognizedVersion, "block version %d is not recognized", blk.Version())
}

if err := writeContextToStream(obtainedCtx, stream); err != nil {
digest := params.ForkDigest(slots.ToEpoch(blk.Block().Slot()))
if err := writeContextToStream(digest[:], stream); err != nil {
return err
}
_, err := encoding.EncodeWithMaxLength(stream, blk)
@@ -150,16 +100,11 @@ func WriteBlobSidecarChunk(stream libp2pcore.Stream, tor blockchain.TemporalOrac
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return err
}
valRoot := tor.GenesisValidatorsRoot()
ctxBytes, err := forks.ForkDigestFromEpoch(slots.ToEpoch(sidecar.Slot()), valRoot[:])
if err != nil {
return err
}

ctxBytes := params.ForkDigest(slots.ToEpoch(sidecar.Slot()))
if err := writeContextToStream(ctxBytes[:], stream); err != nil {
return err
}
_, err = encoding.EncodeWithMaxLength(stream, sidecar)
_, err := encoding.EncodeWithMaxLength(stream, sidecar)
return err
}

@@ -168,18 +113,12 @@ func WriteLightClientBootstrapChunk(stream libp2pcore.Stream, tor blockchain.Tem
return err
}

valRoot := tor.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(bootstrap.Header().Beacon().Slot), valRoot[:])
if err != nil {
digest := params.ForkDigest(slots.ToEpoch(bootstrap.Header().Beacon().Slot))
if err := writeContextToStream(digest[:], stream); err != nil {
return err
}

obtainedCtx := digest[:]
if err = writeContextToStream(obtainedCtx, stream); err != nil {
return err
}

_, err = encoding.EncodeWithMaxLength(stream, bootstrap)
_, err := encoding.EncodeWithMaxLength(stream, bootstrap)
return err
}

@@ -188,17 +127,11 @@ func WriteLightClientUpdateChunk(stream libp2pcore.Stream, tor blockchain.Tempor
return err
}

valRoot := tor.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), valRoot[:])
if err != nil {
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := writeContextToStream(digest[:], stream); err != nil {
return err
}
obtainedCtx := digest[:]

if err = writeContextToStream(obtainedCtx, stream); err != nil {
return err
}
_, err = encoding.EncodeWithMaxLength(stream, update)
_, err := encoding.EncodeWithMaxLength(stream, update)
return err
}

@@ -207,17 +140,12 @@ func WriteLightClientOptimisticUpdateChunk(stream libp2pcore.Stream, tor blockch
return err
}

valRoot := tor.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), valRoot[:])
if err != nil {
return err
}
obtainedCtx := digest[:]
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))

if err = writeContextToStream(obtainedCtx, stream); err != nil {
if err := writeContextToStream(digest[:], stream); err != nil {
return err
}
_, err = encoding.EncodeWithMaxLength(stream, update)
_, err := encoding.EncodeWithMaxLength(stream, update)
return err
}

@@ -226,17 +154,12 @@ func WriteLightClientFinalityUpdateChunk(stream libp2pcore.Stream, tor blockchai
return err
}

valRoot := tor.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), valRoot[:])
if err != nil {
return err
}
obtainedCtx := digest[:]
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))

if err = writeContextToStream(obtainedCtx, stream); err != nil {
if err := writeContextToStream(digest[:], stream); err != nil {
return err
}
_, err = encoding.EncodeWithMaxLength(stream, update)
_, err := encoding.EncodeWithMaxLength(stream, update)
return err
}

@@ -247,20 +170,13 @@ func WriteDataColumnSidecarChunk(stream libp2pcore.Stream, tor blockchain.Tempor
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return errors.Wrap(err, "stream write")
}

// Fork digest.
genesisValidatorsRoot := tor.GenesisValidatorsRoot()
ctxBytes, err := forks.ForkDigestFromEpoch(slots.ToEpoch(sidecar.SignedBlockHeader.Header.Slot), genesisValidatorsRoot[:])
if err != nil {
return errors.Wrap(err, "fork digest from epoch")
}

ctxBytes := params.ForkDigest(slots.ToEpoch(sidecar.SignedBlockHeader.Header.Slot))
if err := writeContextToStream(ctxBytes[:], stream); err != nil {
return errors.Wrap(err, "write context to stream")
}

// Sidecar.
if _, err = encoding.EncodeWithMaxLength(stream, sidecar); err != nil {
if _, err := encoding.EncodeWithMaxLength(stream, sidecar); err != nil {
return errors.Wrap(err, "encode with max length")
}

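// Editor's note: every chunk writer in this file now reduces to the same shape. A
// sketch with a hypothetical helper name; the real code inlines this per writer
// exactly as shown above, and writeContextToStream is the package's own helper.
func writeForkContextSketch(stream libp2pcore.Stream, slot primitives.Slot) error {
	// One schedule lookup replaces the per-version switch that previously rebuilt
	// the digest from the genesis validators root for every block version.
	digest := params.ForkDigest(slots.ToEpoch(slot))
	return writeContextToStream(digest[:], stream)
}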
@@ -62,7 +62,7 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i
}

// Validate the request regarding its parameters.
rangeParameters, err := validateDataColumnsByRange(request, s.cfg.chain.CurrentSlot())
rangeParameters, err := validateDataColumnsByRange(request, s.cfg.clock.CurrentSlot())
if err != nil {
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
s.downscorePeer(remotePeer, "dataColumnSidecarsByRangeRpcHandlerValidationError")
@@ -151,7 +151,7 @@ func (s *Service) streamDataColumnBatch(ctx context.Context, batch blockBatch, q
sidecar := verifiedRODataColumn.DataColumnSidecar
SetStreamWriteDeadline(stream, defaultWriteDuration)

if err := WriteDataColumnSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), sidecar); err != nil {
if err := WriteDataColumnSidecarChunk(stream, s.cfg.clock, s.cfg.p2p.Encoding(), sidecar); err != nil {
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)
return quota, errors.Wrap(err, "write data column sidecar chunk")

@@ -19,26 +19,37 @@ import (
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
consensusblocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/util"
)

func TestDataColumnSidecarsByRangeRPCHandler(t *testing.T) {
params.SetupTestConfigCleanup(t)
beaconConfig := params.BeaconConfig()
//beaconConfig.FuluForkEpoch = beaconConfig.ElectraForkEpoch + 100
beaconConfig.FuluForkEpoch = 0
params.OverrideBeaconConfig(beaconConfig)
params.BeaconConfig().InitializeForkSchedule()
ctx := context.Background()
t.Run("wrong message type", func(t *testing.T) {
service := &Service{}
err := service.dataColumnSidecarsByRangeRPCHandler(ctx, nil, nil)
require.ErrorIs(t, err, notDataColumnsByRangeIdentifiersError)
})
mockNower := &startup.MockNower{}
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot, startup.WithNower(mockNower.Now))

ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
require.NoError(t, err)
t.Run("invalid request", func(t *testing.T) {
slot := primitives.Slot(400)
mockNower.SetSlot(t, clock, slot)

localP2P, remoteP2P := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)

@@ -48,6 +59,7 @@ func TestDataColumnSidecarsByRangeRPCHandler(t *testing.T) {
chain: &chainMock.ChainService{
Slot: &slot,
},
clock: clock,
},
rateLimiter: newRateLimiter(localP2P),
}
@@ -83,11 +95,6 @@ func TestDataColumnSidecarsByRangeRPCHandler(t *testing.T) {
})

t.Run("nominal", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
beaconConfig := params.BeaconConfig()
beaconConfig.FuluForkEpoch = 0
params.OverrideBeaconConfig(beaconConfig)

slot := primitives.Slot(400)

params := []util.DataColumnParam{
@@ -99,7 +106,7 @@ func TestDataColumnSidecarsByRangeRPCHandler(t *testing.T) {
_, verifiedRODataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, params)

storage := filesystem.NewEphemeralDataColumnStorage(t)
err := storage.Save(verifiedRODataColumns)
err = storage.Save(verifiedRODataColumns)
require.NoError(t, err)

localP2P, remoteP2P := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
@@ -140,22 +147,18 @@ func TestDataColumnSidecarsByRangeRPCHandler(t *testing.T) {
err = beaconDB.SaveROBlocks(ctx, roBlocks, false /*cache*/)
require.NoError(t, err)

mockNower.SetSlot(t, clock, slot)
service := &Service{
cfg: &config{
p2p: localP2P,
beaconDB: beaconDB,
chain: &chainMock.ChainService{
Slot: &slot,
},
p2p: localP2P,
beaconDB: beaconDB,
chain: &chainMock.ChainService{},
dataColumnStorage: storage,
clock: clock,
},
rateLimiter: newRateLimiter(localP2P),
}

ctxMap := ContextByteVersions{
[4]byte{245, 165, 253, 66}: version.Fulu,
}

root0 := verifiedRODataColumns[0].BlockRoot()
root3 := verifiedRODataColumns[3].BlockRoot()
root5 := verifiedRODataColumns[5].BlockRoot()

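// Editor's note: the clock-injection pattern these tests share, condensed from the diff
// into an illustrative helper. MockNower supplies a fake "now" and SetSlot advances it,
// so clock.CurrentSlot() moves without sleeping.
func newMockedClockSketch(t *testing.T, slot primitives.Slot) *startup.Clock {
	mockNower := &startup.MockNower{}
	clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot, startup.WithNower(mockNower.Now))
	mockNower.SetSlot(t, clock, slot) // from here on, clock.CurrentSlot() == slot
	return clock
}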
@@ -133,7 +133,7 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
}

SetStreamWriteDeadline(stream, defaultWriteDuration)
if chunkErr := WriteDataColumnSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), verifiedRODataColumn.DataColumnSidecar); chunkErr != nil {
if chunkErr := WriteDataColumnSidecarChunk(stream, s.cfg.clock, s.cfg.p2p.Encoding(), verifiedRODataColumn.DataColumnSidecar); chunkErr != nil {
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, chunkErr)
return chunkErr

@@ -19,7 +19,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/libp2p/go-libp2p/core/network"
@@ -28,10 +27,17 @@ import (
)

func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
params.SetupTestConfigCleanup(t)
beaconConfig := params.BeaconConfig()
beaconConfig.FuluForkEpoch = 0
params.OverrideBeaconConfig(beaconConfig)
params.BeaconConfig().InitializeForkSchedule()
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
require.NoError(t, err)
ctx := context.Background()
t.Run("wrong message type", func(t *testing.T) {
service := &Service{}
err := service.dataColumnSidecarByRootRPCHandler(ctx, nil, nil)
err := service.dataColumnSidecarByRootRPCHandler(t.Context(), nil, nil)
require.ErrorIs(t, err, notDataColumnsByRootIdentifiersError)
})

@@ -59,13 +65,13 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
})

localP2P.Connect(remoteP2P)
stream, err := localP2P.BHost.NewStream(ctx, remoteP2P.BHost.ID(), protocolID)
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
require.NoError(t, err)

msg := &types.DataColumnsByRootIdentifiers{{Columns: []uint64{1, 2, 3}}}
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)

err = service.dataColumnSidecarByRootRPCHandler(ctx, msg, stream)
err = service.dataColumnSidecarByRootRPCHandler(t.Context(), msg, stream)
require.NotNil(t, err)
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) < 0)

@@ -125,10 +131,6 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
var wg sync.WaitGroup
wg.Add(1)

ctxMap := ContextByteVersions{
[4]byte{245, 165, 253, 66}: version.Fulu,
}

root0 := verifiedRODataColumns[0].BlockRoot()
root3 := verifiedRODataColumns[3].BlockRoot()
root5 := verifiedRODataColumns[5].BlockRoot()

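// Editor's note: the two ways these tests built the context-byte map, both taken from
// the diff. Deriving the map keeps it in sync with the active config, whereas the
// literal silently goes stale when fork epochs move.
//
//	// Before: a hard-coded Fulu digest.
//	ctxMap := ContextByteVersions{[4]byte{245, 165, 253, 66}: version.Fulu}
//
//	// After: derived from the genesis validators root of the current config.
//	ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
//	require.NoError(t, err)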
@@ -17,7 +17,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -63,17 +62,11 @@ func TestRPC_LightClientBootstrap(t *testing.T) {
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, time.Second, false)

altairDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
bellatrixDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
capellaDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
denebDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
electraDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)

altairDigest := params.ForkDigest(params.BeaconConfig().AltairForkEpoch)
bellatrixDigest := params.ForkDigest(params.BeaconConfig().BellatrixForkEpoch)
capellaDigest := params.ForkDigest(params.BeaconConfig().CapellaForkEpoch)
denebDigest := params.ForkDigest(params.BeaconConfig().DenebForkEpoch)
electraDigest := params.ForkDigest(params.BeaconConfig().ElectraForkEpoch)
for i := 1; i <= 5; i++ {
t.Run(version.String(i), func(t *testing.T) {
l := util.NewTestLightClient(t, i)
@@ -185,16 +178,11 @@ func TestRPC_LightClientOptimisticUpdate(t *testing.T) {
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, time.Second, false)

altairDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
bellatrixDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
capellaDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
denebDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
electraDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
altairDigest := params.ForkDigest(params.BeaconConfig().AltairForkEpoch)
bellatrixDigest := params.ForkDigest(params.BeaconConfig().BellatrixForkEpoch)
capellaDigest := params.ForkDigest(params.BeaconConfig().CapellaForkEpoch)
denebDigest := params.ForkDigest(params.BeaconConfig().DenebForkEpoch)
electraDigest := params.ForkDigest(params.BeaconConfig().ElectraForkEpoch)

for i := 1; i <= 5; i++ {
t.Run(version.String(i), func(t *testing.T) {
@@ -305,16 +293,11 @@ func TestRPC_LightClientFinalityUpdate(t *testing.T) {
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, time.Second, false)

altairDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
bellatrixDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
capellaDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
denebDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
electraDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
altairDigest := params.ForkDigest(params.BeaconConfig().AltairForkEpoch)
bellatrixDigest := params.ForkDigest(params.BeaconConfig().BellatrixForkEpoch)
capellaDigest := params.ForkDigest(params.BeaconConfig().CapellaForkEpoch)
denebDigest := params.ForkDigest(params.BeaconConfig().DenebForkEpoch)
electraDigest := params.ForkDigest(params.BeaconConfig().ElectraForkEpoch)

for i := 1; i <= 5; i++ {
t.Run(version.String(i), func(t *testing.T) {
@@ -425,16 +408,11 @@ func TestRPC_LightClientUpdatesByRange(t *testing.T) {
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, time.Second, false)

altairDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
bellatrixDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
capellaDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
denebDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
electraDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, chainService.ValidatorsRoot[:])
require.NoError(t, err)
altairDigest := params.ForkDigest(params.BeaconConfig().AltairForkEpoch)
bellatrixDigest := params.ForkDigest(params.BeaconConfig().BellatrixForkEpoch)
capellaDigest := params.ForkDigest(params.BeaconConfig().CapellaForkEpoch)
denebDigest := params.ForkDigest(params.BeaconConfig().DenebForkEpoch)
electraDigest := params.ForkDigest(params.BeaconConfig().ElectraForkEpoch)

for i := 1; i <= 5; i++ {
t.Run(version.String(i), func(t *testing.T) {

@@ -5,8 +5,8 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -181,17 +181,9 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, peerID peer.ID) (meta
return nil, errors.New(errMsg)
}

// Get the genesis validators root.
valRoot := s.cfg.clock.GenesisValidatorsRoot()

// Get the fork digest from the current epoch and the genesis validators root.
rpcCtx, err := forks.ForkDigestFromEpoch(currentEpoch, valRoot[:])
if err != nil {
return nil, errors.Wrap(err, "fork digest from epoch")
}

digest := params.ForkDigest(currentEpoch)
// Instantiate zero value of the metadata.
msg, err := extractDataTypeFromTypeMap(types.MetaDataMap, rpcCtx[:], s.cfg.clock)
msg, err := extractDataTypeFromTypeMap(types.MetaDataMap, digest[:], s.cfg.clock)
if err != nil {
return nil, errors.Wrap(err, "extract data type from type map")
}

@@ -888,6 +888,13 @@ func TestErrInvalidFetchedDataDistinction(t *testing.T) {
}

func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
beaconConfig := params.BeaconConfig()
beaconConfig.FuluForkEpoch = 0
params.OverrideBeaconConfig(beaconConfig)
params.BeaconConfig().InitializeForkSchedule()
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
require.NoError(t, err)
nilTestCases := []struct {
name string
request *ethpb.DataColumnSidecarsByRangeRequest
@@ -1033,10 +1040,7 @@ func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
assert.NoError(t, err)
})

ctx := t.Context()
ctxMap := ContextByteVersions{[4]byte{245, 165, 253, 66}: version.Fulu}

actual, err := SendDataColumnSidecarsByRangeRequest(ctx, clock, p1, p2.PeerID(), ctxMap, requestSent)
actual, err := SendDataColumnSidecarsByRangeRequest(t.Context(), clock, p1, p2.PeerID(), ctxMap, requestSent)
if tc.expectedError != nil {
require.ErrorContains(t, tc.expectedError.Error(), err)
if util.WaitTimeout(&wg, time.Second) {
@@ -1181,6 +1185,13 @@ func TestIsSidecarIndexRequested(t *testing.T) {
}

func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
beaconConfig := params.BeaconConfig()
beaconConfig.FuluForkEpoch = 0
params.OverrideBeaconConfig(beaconConfig)
params.BeaconConfig().InitializeForkSchedule()
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
require.NoError(t, err)
nilTestCases := []struct {
name string
request p2ptypes.DataColumnsByRootIdentifiers
@@ -1335,10 +1346,7 @@ func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
assert.NoError(t, err)
})

ctx := t.Context()
ctxMap := ContextByteVersions{[4]byte{245, 165, 253, 66}: version.Fulu}

actual, err := SendDataColumnSidecarsByRootRequest(ctx, clock, p1, p2.PeerID(), ctxMap, sentRequest)
actual, err := SendDataColumnSidecarsByRootRequest(t.Context(), clock, p1, p2.PeerID(), ctxMap, sentRequest)
if tc.expectedError != nil {
require.ErrorContains(t, tc.expectedError.Error(), err)
if util.WaitTimeout(&wg, time.Second) {

@@ -177,6 +177,7 @@ type Service struct {
slasherEnabled bool
lcStore *lightClient.Store
dataColumnLogCh chan dataColumnLogEntry
registeredNetworkEntry params.NetworkScheduleEntry
}

// NewService initializes new regular sync service.

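// Editor's note: params.NetworkScheduleEntry is referenced throughout this diff but its
// definition is not part of it. From the call sites (params.GetNetworkScheduleEntry(epoch),
// .Epoch, .ForkDigest), a plausible shape is:
//
//	type NetworkScheduleEntry struct {
//		Epoch      primitives.Epoch // activation epoch of the fork
//		ForkDigest [4]byte          // digest that gossip and RPC topics are keyed on
//	}
//
// Any further fields are assumptions, not taken from the diff.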
@@ -14,14 +14,13 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/messagehandler"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -58,17 +57,13 @@ type (
getSubnetsRequiringPeers func(currentSlot primitives.Slot) map[uint64]bool
}

// parameters used for the `subscribeToSubnets` function.
subscribeToSubnetsParameters struct {
subscriptionBySubnet map[uint64]*pubsub.Subscription
topicFormat string
digest [4]byte
genesisValidatorsRoot [fieldparams.RootLength]byte
genesisTime time.Time
currentSlot primitives.Slot
validate wrappedVal
handle subHandler
getSubnetsToJoin func(currentSlot primitives.Slot) map[uint64]bool
subscriptionBySubnet map[uint64]*pubsub.Subscription
topicFormat string
digest [4]byte
validate wrappedVal
handle subHandler
getSubnetsToJoin func(currentSlot primitives.Slot) map[uint64]bool
}
)

@@ -242,8 +237,7 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
// subscribe to a given topic with a given validator and subscription handler.
// The base protobuf message is used to initialize new messages for decoding.
func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandler, digest [4]byte) *pubsub.Subscription {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
_, e, err := params.ForkDataFromDigest(digest)
if err != nil {
// Impossible condition as it would mean digest does not exist.
panic(err) // lint:nopanic -- Impossible condition.
@@ -452,8 +446,7 @@ func (s *Service) subscribeToSubnets(p subscribeToSubnetsParameters) error {
return nil
}

// Check the validity of the digest.
valid, err := isDigestValid(p.digest, p.genesisTime, p.genesisValidatorsRoot)
valid, err := isDigestValid(p.digest, s.cfg.clock)
if err != nil {
return errors.Wrap(err, "is digest valid")
}
@@ -465,13 +458,8 @@ func (s *Service) subscribeToSubnets(p subscribeToSubnetsParameters) error {
return errInvalidDigest
}

// Retrieve the subnets we want to join.
subnetsToJoin := p.getSubnetsToJoin(p.currentSlot)

// Remove subscriptions that are no longer wanted.
subnetsToJoin := p.getSubnetsToJoin(s.cfg.clock.CurrentSlot())
s.pruneSubscriptions(p.subscriptionBySubnet, subnetsToJoin, p.topicFormat, p.digest)

// Subscribe to wanted and not already registered subnets.
for subnet := range subnetsToJoin {
subnetTopic := fmt.Sprintf(p.topicFormat, p.digest, subnet)

@@ -486,44 +474,34 @@ func (s *Service) subscribeToSubnets(p subscribeToSubnetsParameters) error {

// subscribeWithParameters subscribes to a list of subnets.
func (s *Service) subscribeWithParameters(p subscribeParameters) {
minimumPeersPerSubnet := flags.Get().MinimumPeersPerSubnet
subscriptionBySubnet := make(map[uint64]*pubsub.Subscription)
genesisValidatorsRoot := s.cfg.clock.GenesisValidatorsRoot()
genesisTime := s.cfg.clock.GenesisTime()
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
secondsPerSlotDuration := time.Duration(secondsPerSlot) * time.Second
currentSlot := s.cfg.clock.CurrentSlot()
neededSubnets := computeAllNeededSubnets(currentSlot, p.getSubnetsToJoin, p.getSubnetsRequiringPeers)

shortTopicFormat := p.topicFormat
shortTopicFormatLen := len(shortTopicFormat)
if shortTopicFormatLen >= 3 && shortTopicFormat[shortTopicFormatLen-3:] == "_%d" {
shortTopicFormat = shortTopicFormat[:shortTopicFormatLen-3]
}

shortTopic := fmt.Sprintf(shortTopicFormat, p.digest)

parameters := subscribeToSubnetsParameters{
subscriptionBySubnet: subscriptionBySubnet,
topicFormat: p.topicFormat,
digest: p.digest,
genesisValidatorsRoot: genesisValidatorsRoot,
genesisTime: genesisTime,
currentSlot: currentSlot,
validate: p.validate,
handle: p.handle,
getSubnetsToJoin: p.getSubnetsToJoin,
subscriptionBySubnet: make(map[uint64]*pubsub.Subscription),
topicFormat: p.topicFormat,
digest: p.digest,
validate: p.validate,
handle: p.handle,
getSubnetsToJoin: p.getSubnetsToJoin,
}

err := s.subscribeToSubnets(parameters)
if err != nil {
log.WithError(err).Error("Could not subscribe to subnets")
}

currentSlot := s.cfg.clock.CurrentSlot()
slotDuration := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
neededSubnets := computeAllNeededSubnets(currentSlot, p.getSubnetsToJoin, p.getSubnetsRequiringPeers)
minimumPeersPerSubnet := flags.Get().MinimumPeersPerSubnet
// Subscribe to expected subnets and search for peers if needed at every slot.
go func() {
func() {
ctx, cancel := context.WithTimeout(s.ctx, secondsPerSlotDuration)
ctx, cancel := context.WithTimeout(s.ctx, slotDuration)
defer cancel()

if err := s.cfg.p2p.FindAndDialPeersWithSubnets(ctx, p.topicFormat, p.digest, minimumPeersPerSubnet, neededSubnets); err != nil && !errors.Is(err, context.DeadlineExceeded) {
@@ -531,25 +509,23 @@ func (s *Service) subscribeWithParameters(p subscribeParameters) {
}
}()

slotTicker := slots.NewSlotTicker(genesisTime, secondsPerSlot)
slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
defer slotTicker.Done()

for {
select {
case currentSlot := <-slotTicker.C():
parameters.currentSlot = currentSlot
case <-slotTicker.C():
if err := s.subscribeToSubnets(parameters); err != nil {
if errors.Is(err, errInvalidDigest) {
log.WithField("topics", shortTopic).Debug("Digest is invalid, stopping subscription")
return
}

log.WithError(err).Error("Could not subscribe to subnets")
continue
}

func() {
ctx, cancel := context.WithTimeout(s.ctx, secondsPerSlotDuration)
ctx, cancel := context.WithTimeout(s.ctx, slotDuration)
defer cancel()

if err := s.cfg.p2p.FindAndDialPeersWithSubnets(ctx, p.topicFormat, p.digest, minimumPeersPerSubnet, neededSubnets); err != nil && !errors.Is(err, context.DeadlineExceeded) {
@@ -753,27 +729,19 @@ func (*Service) addDigestAndIndexToTopic(topic string, digest [4]byte, idx uint6
}

func (s *Service) currentForkDigest() ([4]byte, error) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
return forks.CreateForkDigest(s.cfg.clock.GenesisTime(), genRoot[:])
return params.ForkDigest(s.cfg.clock.CurrentEpoch()), nil
}

// Checks if the provided digest matches up with the current supposed digest.
func isDigestValid(digest [4]byte, genesis time.Time, genValRoot [32]byte) (bool, error) {
retDigest, err := forks.CreateForkDigest(genesis, genValRoot[:])
if err != nil {
return false, err
}
isNextEpoch, err := forks.IsForkNextEpoch(genesis, genValRoot[:])
if err != nil {
return false, err
}
func isDigestValid(digest [4]byte, clock *startup.Clock) (bool, error) {
current := clock.CurrentEpoch()
// In the event there is a fork in the next epoch,
// we skip the check, as we subscribe to subnets an
// epoch in advance.
if isNextEpoch {
if params.DigestChangesAfter(current) {
return true, nil
}
return retDigest == digest, nil
return params.ForkDigest(current) == digest, nil
}

// computeAllNeededSubnets computes the subnets we want to join

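// Editor's note: a usage sketch of the new isDigestValid, mirroring its call site in
// subscribeToSubnets above. params.DigestChangesAfter(epoch) comes from the diff and
// presumably reports whether a fork (hence a new digest) activates in the next epoch,
// which is why the check is skipped there: subnets are subscribed an epoch in advance.
//
//	valid, err := isDigestValid(p.digest, s.cfg.clock)
//	if err != nil {
//		return errors.Wrap(err, "is digest valid")
//	}
//	if !valid {
//		return errInvalidDigest
//	}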
@@ -164,7 +164,7 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, ro
// processBlobSidecarsFromExecution retrieves (if available) blob sidecar data from the execution client,
// builds the corresponding sidecars, saves them to storage, and broadcasts them over P2P if necessary.
func (s *Service) processBlobSidecarsFromExecution(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) {
startTime, err := slots.StartTime(s.cfg.chain.GenesisTime(), block.Block().Slot())
startTime, err := slots.StartTime(s.cfg.clock.GenesisTime(), block.Block().Slot())
if err != nil {
log.WithError(err).Error("Failed to convert slot to time")
}

@@ -27,7 +27,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -320,8 +319,8 @@ func Test_wrapAndReportValidation(t *testing.T) {
Genesis: time.Now(),
ValidatorsRoot: [32]byte{0x01},
}
fd, err := forks.CreateForkDigest(mChain.GenesisTime(), mChain.ValidatorsRoot[:])
assert.NoError(t, err)
clock := startup.NewClock(mChain.Genesis, mChain.ValidatorsRoot)
fd := params.ForkDigest(clock.CurrentEpoch())
mockTopic := fmt.Sprintf(p2p.BlockSubnetTopicFormat, fd) + encoder.SszNetworkEncoder{}.ProtocolSuffix()
type args struct {
topic string
@@ -561,26 +560,21 @@ func TestSubscribeWithSyncSubnets_DynamicOK(t *testing.T) {

func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
params.SetupTestConfigCleanup(t)
p := p2ptest.NewTestP2P(t)
cfg := params.BeaconConfig().Copy()
cfg.AltairForkEpoch = 1
cfg.SecondsPerSlot = 1
cfg.SlotsPerEpoch = 4
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()
p := p2ptest.NewTestP2P(t)
ctx, cancel := context.WithCancel(t.Context())
currSlot := primitives.Slot(100)
gt := time.Now().Add(-time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
vr := [32]byte{'A'}
defer cancel()
vr := params.BeaconConfig().GenesisValidatorsRoot
mockNow := &startup.MockNower{}
clock := startup.NewClock(time.Now(), vr, startup.WithNower(mockNow.Now))
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
mockNow.SetSlot(t, clock, denebSlot)
r := Service{
ctx: ctx,
cfg: &config{
chain: &mockChain.ChainService{
Genesis: gt,
ValidatorsRoot: vr,
Slot: &currSlot,
},
clock: startup.NewClock(gt, vr),
chain: &mockChain.ChainService{},
clock: clock,
p2p: p,
},
chainStarted: abool.New(),
@@ -589,16 +583,19 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
// Empty cache at the end of the test.
defer cache.SyncSubnetIDs.EmptyAllCaches()
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte("pubkey"), 0, []uint64{0, 1}, 10*time.Second)
genRoot := r.cfg.clock.GenesisValidatorsRoot()
digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(r.cfg.clock.CurrentEpoch())
version, e, err := params.ForkDataFromDigest(digest)
require.NoError(t, err)
require.Equal(t, [4]byte(params.BeaconConfig().DenebForkVersion), version)
require.Equal(t, params.BeaconConfig().DenebForkEpoch, e)

r.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
digest: digest,
getSubnetsToJoin: r.activeSyncSubnetIndices,
})
time.Sleep(2 * time.Second)
sp := subscribeToSubnetsParameters{
subscriptionBySubnet: make(map[uint64]*pubsub.Subscription),
topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
digest: digest,
getSubnetsToJoin: r.activeSyncSubnetIndices,
}
require.NoError(t, r.subscribeToSubnets(sp))
assert.Equal(t, 2, len(r.cfg.p2p.PubSub().GetTopics()))
topicMap := map[string]bool{}
for _, t := range r.cfg.p2p.PubSub().GetTopics() {
@@ -610,25 +607,37 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
secondSub := fmt.Sprintf(p2p.SyncCommitteeSubnetTopicFormat, digest, 1) + r.cfg.p2p.Encoding().ProtocolSuffix()
assert.Equal(t, true, topicMap[secondSub])

// Expect that all old topics will be unsubscribed.
time.Sleep(2 * time.Second)
assert.Equal(t, 0, len(r.cfg.p2p.PubSub().GetTopics()))
electraSlot, err := slots.EpochStart(params.BeaconConfig().ElectraForkEpoch)
require.NoError(t, err)
mockNow.SetSlot(t, clock, electraSlot)
digest = params.ForkDigest(r.cfg.clock.CurrentEpoch())
version, e, err = params.ForkDataFromDigest(digest)
require.NoError(t, err)
require.Equal(t, [4]byte(params.BeaconConfig().ElectraForkVersion), version)
require.Equal(t, params.BeaconConfig().ElectraForkEpoch, e)

cancel()
sp.digest = digest
// clear the cache and re-subscribe to subnets.
// this should result in the subscriptions being removed
cache.SyncSubnetIDs.EmptyAllCaches()
require.NoError(t, r.subscribeToSubnets(sp))
assert.Equal(t, 0, len(r.cfg.p2p.PubSub().GetTopics()))
}

func TestIsDigestValid(t *testing.T) {
genRoot := [32]byte{'A'}
digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, genRoot[:])
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
clock := startup.NewClock(time.Now().Add(-100*time.Second), params.BeaconConfig().GenesisValidatorsRoot)
digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
assert.NoError(t, err)
valid, err := isDigestValid(digest, time.Now().Add(-100*time.Second), genRoot)
valid, err := isDigestValid(digest, clock)
assert.NoError(t, err)
assert.Equal(t, true, valid)

// Compute future fork digest that will be invalid currently.
digest, err = signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, genRoot[:])
digest, err = signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
assert.NoError(t, err)
valid, err = isDigestValid(digest, time.Now().Add(-100*time.Second), genRoot)
valid, err = isDigestValid(digest, clock)
assert.NoError(t, err)
assert.Equal(t, false, valid)
}

@@ -7,7 +7,8 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/testing/assert"
pubsub "github.com/libp2p/go-libp2p-pubsub"
)
@@ -18,8 +19,8 @@ func TestSubTopicHandler_CRUD(t *testing.T) {
assert.Equal(t, false, h.topicExists("junk"))
assert.Equal(t, false, h.digestExists([4]byte{}))

digest, err := forks.CreateForkDigest(time.Now(), make([]byte, 32))
assert.NoError(t, err)
clock := startup.NewClock(time.Now(), [32]byte{})
digest := params.ForkDigest(clock.CurrentEpoch())
enc := encoder.SszNetworkEncoder{}

// Valid topic added in.

@@ -52,7 +52,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(
defer span.End()

if msg.Topic == nil {
return pubsub.ValidationReject, errInvalidTopic
return pubsub.ValidationReject, p2p.ErrInvalidTopic
}

m, err := s.decodePubsubMessage(msg)

@@ -309,24 +309,15 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {

func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
fvs := map[[fieldparams.VersionLength]byte]primitives.Epoch{}
fvs[bytesutil.ToBytes4(cfg.GenesisForkVersion)] = 1
fvs[bytesutil.ToBytes4(cfg.AltairForkVersion)] = 2
fvs[bytesutil.ToBytes4(cfg.BellatrixForkVersion)] = 3
fvs[bytesutil.ToBytes4(cfg.CapellaForkVersion)] = 4
fvs[bytesutil.ToBytes4(cfg.DenebForkVersion)] = 5
fvs[bytesutil.ToBytes4(cfg.FuluForkVersion)] = 6
fvs[bytesutil.ToBytes4(cfg.ElectraForkVersion)] = 0
cfg.ForkVersionSchedule = fvs
params.OverrideBeaconConfig(cfg)
params.BeaconConfig().InitializeForkSchedule()

p := p2ptest.NewTestP2P(t)
db := dbtest.SetupDB(t)
currentSlot := 1 + (primitives.Slot(params.BeaconConfig().ElectraForkEpoch) * params.BeaconConfig().SlotsPerEpoch)
genesisOffset := time.Duration(currentSlot) * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
chain := &mockChain.ChainService{
// 1 slot ago.
Genesis: time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second),
ValidatorsRoot: [32]byte{'A'},
Genesis: time.Now().Add(-1 * genesisOffset),
ValidatorsRoot: params.BeaconConfig().GenesisValidatorsRoot,
ValidAttestation: true,
DB: db,
Optimistic: true,
@@ -347,6 +338,7 @@ func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) {
seenUnAggregatedAttestationCache: lruwrpr.New(10),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}
require.Equal(t, currentSlot, s.cfg.clock.CurrentSlot())
s.initCaches()
go s.verifierRoutine()

@@ -354,7 +346,7 @@ func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) {
require.NoError(t, err)

blk := util.NewBeaconBlock()
blk.Block.Slot = 1
blk.Block.Slot = s.cfg.clock.CurrentSlot()
util.SaveBlock(t, ctx, db, blk)

validBlockRoot, err := blk.Block.HashTreeRoot()
@@ -366,10 +358,10 @@ func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) {

validators := uint64(64)
savedState, keys := util.DeterministicGenesisState(t, validators)
require.NoError(t, savedState.SetSlot(1))
require.NoError(t, savedState.SetSlot(s.cfg.clock.CurrentSlot()))
require.NoError(t, db.SaveState(t.Context(), savedState, validBlockRoot))
chain.State = savedState
committee, err := helpers.BeaconCommitteeFromState(ctx, savedState, 1, 0)
committee, err := helpers.BeaconCommitteeFromState(ctx, savedState, s.cfg.clock.CurrentSlot(), 0)
require.NoError(t, err)

tests := []struct {
@@ -383,9 +375,9 @@ func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) {
Data: &ethpb.AttestationData{
BeaconBlockRoot: validBlockRoot[:],
CommitteeIndex: 0,
Slot: 1,
Slot: s.cfg.clock.CurrentSlot(),
Target: &ethpb.Checkpoint{
Epoch: 0,
Epoch: s.cfg.clock.CurrentEpoch(),
Root: validBlockRoot[:],
},
Source: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
@@ -400,9 +392,9 @@ func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) {
Data: &ethpb.AttestationData{
BeaconBlockRoot: validBlockRoot[:],
CommitteeIndex: 1,
Slot: 1,
Slot: s.cfg.clock.CurrentSlot(),
Target: &ethpb.Checkpoint{
Epoch: 0,
Epoch: s.cfg.clock.CurrentEpoch(),
Root: validBlockRoot[:],
},
Source: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
@@ -417,9 +409,9 @@ func TestService_validateCommitteeIndexBeaconAttestationElectra(t *testing.T) {
Data: &ethpb.AttestationData{
BeaconBlockRoot: validBlockRoot[:],
CommitteeIndex: 1,
Slot: 1,
Slot: s.cfg.clock.CurrentSlot(),
Target: &ethpb.Checkpoint{
Epoch: 0,
Epoch: s.cfg.clock.CurrentEpoch(),
Root: validBlockRoot[:],
},
Source: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},

@@ -779,6 +779,8 @@ func TestValidateBeaconBlockPubSub_SeenProposerSlot(t *testing.T) {
}

func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
hook := logTest.NewGlobal()
db := dbtest.SetupDB(t)
p := p2ptest.NewTestP2P(t)
@@ -791,7 +793,7 @@ func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) {
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 1,
},
ValidatorsRoot: [32]byte{},
ValidatorsRoot: params.BeaconConfig().GenesisValidatorsRoot,
}

r := &Service{
@@ -814,7 +816,7 @@ func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) {
buf := new(bytes.Buffer)
_, err = p.Encoding().EncodeGossip(buf, b)
require.NoError(t, err)
digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, make([]byte, 32))
digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
assert.NoError(t, err)
topic := fmt.Sprintf(p2p.GossipTypeMapping[reflect.TypeOf(b)], digest)
m := &pubsub.Message{
@@ -1187,6 +1189,8 @@ func TestService_isBlockQueueable(t *testing.T) {
}

func TestValidateBeaconBlockPubSub_ValidExecutionPayload(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
db := dbtest.SetupDB(t)
p := p2ptest.NewTestP2P(t)
ctx := t.Context()
@@ -1219,7 +1223,8 @@ func TestValidateBeaconBlockPubSub_ValidExecutionPayload(t *testing.T) {

stateGen := stategen.New(db, doublylinkedtree.New())
chainService := &mock.ChainService{Genesis: now.Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
DB: db,
ValidatorsRoot: params.BeaconConfig().GenesisValidatorsRoot,
DB: db,
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
@@ -1419,6 +1424,8 @@ func Test_validateBellatrixBeaconBlockParentValidation(t *testing.T) {
}

func Test_validateBeaconBlockProcessingWhenParentIsOptimistic(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
db := dbtest.SetupDB(t)
p := p2ptest.NewTestP2P(t)
ctx := t.Context()
@@ -1450,8 +1457,9 @@ func Test_validateBeaconBlockProcessingWhenParentIsOptimistic(t *testing.T) {
require.NoError(t, err)

chainService := &mock.ChainService{Genesis: beaconState.GenesisTime(),
DB: db,
Optimistic: true,
ValidatorsRoot: params.BeaconConfig().GenesisValidatorsRoot,
DB: db,
Optimistic: true,
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),

@@ -7,6 +7,7 @@ import (
"path"
"strings"

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -34,7 +35,7 @@ func (s *Service) validateBlob(ctx context.Context, pid peer.ID, msg *pubsub.Mes
return pubsub.ValidationIgnore, nil
}
if msg.Topic == nil {
return pubsub.ValidationReject, errInvalidTopic
return pubsub.ValidationReject, p2p.ErrInvalidTopic
}
m, err := s.decodePubsubMessage(msg)
if err != nil {
@@ -68,7 +69,7 @@ func (s *Service) validateBlob(ctx context.Context, pid peer.ID, msg *pubsub.Mes
return pubsub.ValidationIgnore, err
}

startTime, err := slots.StartTime(s.cfg.chain.GenesisTime(), blob.Slot())
startTime, err := slots.StartTime(s.cfg.clock.GenesisTime(), blob.Slot())
if err != nil {
return pubsub.ValidationIgnore, err
}

@@ -51,7 +51,7 @@ func TestValidateBlob_InvalidTopic(t *testing.T) {
result, err := s.validateBlob(ctx, "", &pubsub.Message{
Message: &pb.Message{},
})
require.ErrorIs(t, errInvalidTopic, err)
require.ErrorIs(t, p2p.ErrInvalidTopic, err)
require.Equal(t, result, pubsub.ValidationReject)
}

@@ -142,10 +142,12 @@ func TestValidateBlob_AlreadySeenInCache(t *testing.T) {
}

func TestValidateBlob_InvalidTopicIndex(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
ctx := t.Context()
p := p2ptest.NewTestP2P(t)
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0)}
s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}}
s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, params.BeaconConfig().GenesisValidatorsRoot)}}
s.newBlobVerifier = testNewBlobVerifier()

_, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, chainService.CurrentSlot()+1, 1)
@@ -163,7 +165,7 @@ func TestValidateBlob_InvalidTopicIndex(t *testing.T) {
Data: buf.Bytes(),
Topic: &topic,
}})
require.ErrorContains(t, "/eth2/f5a5fd42/blob_sidecar_1", err)
require.ErrorContains(t, "blob_sidecar_1", err)
require.Equal(t, result, pubsub.ValidationReject)
}

@@ -8,6 +8,7 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -44,7 +45,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs

// Reject messages with a nil topic.
if msg.Topic == nil {
return pubsub.ValidationReject, errInvalidTopic
return pubsub.ValidationReject, p2p.ErrInvalidTopic
}

// Decode the message, reject if it fails.
@@ -180,7 +181,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
dataColumnSidecarVerificationSuccessesCounter.Inc()

// Get the time at slot start.
startTime, err := slots.StartTime(s.cfg.chain.GenesisTime(), roDataColumn.SignedBlockHeader.Header.Slot)
startTime, err := slots.StartTime(s.cfg.clock.GenesisTime(), roDataColumn.SignedBlockHeader.Header.Slot)
if err != nil {
return pubsub.ValidationIgnore, err
}

@@ -50,7 +50,7 @@ func TestValidateDataColumn(t *testing.T) {
s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}}

result, err := s.validateDataColumn(ctx, "", &pubsub.Message{Message: &pb.Message{}})
require.ErrorIs(t, errInvalidTopic, err)
require.ErrorIs(t, p2p.ErrInvalidTopic, err)
require.Equal(t, result, pubsub.ValidationReject)
})

@@ -58,7 +58,7 @@ func (s *Service) validateSyncCommitteeMessage(
}

if msg.Topic == nil {
return pubsub.ValidationReject, errInvalidTopic
return pubsub.ValidationReject, p2p.ErrInvalidTopic
}

// Read the data from the pubsub message, and reject if there is an error.

@@ -22,7 +22,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -224,8 +223,7 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) {
gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(slots.PrevSlot(hState.Slot())))
vr := [32]byte{'A'}
clock := startup.NewClock(gt, vr)
digest, err := forks.CreateForkDigest(gt, vr[:])
assert.NoError(t, err)
digest := params.ForkDigest(slots.ToEpoch(clock.CurrentSlot()))
actualTopic := fmt.Sprintf(defaultTopic, digest, 5)

return s, actualTopic, clock
@@ -270,8 +268,8 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) {

gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(slots.PrevSlot(hState.Slot())))
vr := [32]byte{'A'}
digest, err := forks.CreateForkDigest(gt, vr[:])
assert.NoError(t, err)
clock := startup.NewClock(gt, vr)
digest := params.ForkDigest(clock.CurrentEpoch())
actualTopic := fmt.Sprintf(defaultTopic, digest, 5)

return s, actualTopic, startup.NewClock(gt, vr)
@@ -324,8 +322,8 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) {
// Set Topic and Subnet
gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(slots.PrevSlot(hState.Slot())))
vr := [32]byte{'A'}
digest, err := forks.CreateForkDigest(gt, vr[:])
assert.NoError(t, err)
clock := startup.NewClock(gt, vr)
digest := params.ForkDigest(slots.ToEpoch(clock.CurrentSlot()))
actualTopic := fmt.Sprintf(defaultTopic, digest, 5)

return s, actualTopic, startup.NewClock(gt, vr)
@@ -382,8 +380,8 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) {
// Set Topic and Subnet
gt := time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(slots.PrevSlot(hState.Slot())))
vr := [32]byte{'A'}
digest, err := forks.CreateForkDigest(gt, vr[:])
assert.NoError(t, err)
clock := startup.NewClock(gt, vr)
digest := params.ForkDigest(slots.ToEpoch(clock.CurrentSlot()))
actualTopic := fmt.Sprintf(defaultTopic, digest, 1)

return s, actualTopic, startup.NewClock(gt, vr)

@@ -35,7 +35,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/logging:go_default_library",
"//time/slots:go_default_library",

@@ -13,7 +13,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
lru "github.com/hashicorp/golang-lru"
@@ -63,7 +62,7 @@ func (d signatureData) logFields() logrus.Fields {

func newSigCache(vr []byte, size int, gf forkLookup) *sigCache {
if gf == nil {
gf = forks.Fork
gf = params.Fork
}
return &sigCache{Cache: lruwrpr.New(size), valRoot: vr, getFork: gf}
}

@@ -10,9 +10,9 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)

@@ -108,7 +108,7 @@ func NewInitializerWaiter(cw startup.ClockWaiter, fc Forkchoicer, sr StateByRoot
o(iw)
}
if iw.getFork == nil {
iw.getFork = forks.Fork
iw.getFork = params.Fork
}
return iw
}

changelog/kasey_refactor-fork-schedules.md (new file)
@@ -0,0 +1,2 @@
### Ignored
- Refactor of fork schedule code to remove alternate methods of doing the same thing and support BPO digests.
@@ -31,7 +31,6 @@ go_library(
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
"//runtime/version:go_default_library",

@@ -9,13 +9,13 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
ecdsaprysm "github.com/OffchainLabs/prysm/v6/crypto/ecdsa"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -178,7 +178,7 @@ func (c *client) initializeMockChainService(ctx context.Context) (*mockChain, er
return nil, err
}
currEpoch := slots.ToEpoch(slots.CurrentSlot(genesisResp.GenesisTime.AsTime()))
currFork, err := forks.Fork(currEpoch)
currFork, err := params.Fork(currEpoch)
if err != nil {
return nil, err
}

@@ -4,8 +4,8 @@ import (
"context"

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
libp2pcore "github.com/libp2p/go-libp2p/core"
@@ -50,11 +50,10 @@ func (c *client) statusRPCHandler(ctx context.Context, _ interface{}, stream lib
if err != nil {
return err
}
digest, err := forks.CreateForkDigest(resp.GenesisTime.AsTime(), resp.GenesisValidatorsRoot)
if err != nil {
return err
}
kindOfFork, err := forks.Fork(slots.ToEpoch(chainHead.HeadSlot))
currentSlot := slots.CurrentSlot(resp.GenesisTime.AsTime())
currentEpoch := slots.ToEpoch(currentSlot)
digest := params.ForkDigest(currentEpoch)
kindOfFork, err := params.Fork(slots.ToEpoch(chainHead.HeadSlot))
if err != nil {
return err
}

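The handler above now derives the digest from wall-clock time instead of recomputing it from genesis data. A sketch of that pattern in isolation, mirroring the slots helpers as they are called in this diff; the currentDigest wrapper is illustrative only.

package main

import (
	"time"

	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/time/slots"
)

// currentDigest computes the fork digest for "now" from the genesis time
// alone: the digest itself is precomputed in the network schedule, so no
// genesis validators root has to be threaded through at the call site.
func currentDigest(genesisTime time.Time) [4]byte {
	currentEpoch := slots.ToEpoch(slots.CurrentSlot(genesisTime))
	return params.ForkDigest(currentEpoch)
}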
@@ -7,6 +7,8 @@ go_library(
"config_utils_develop.go", # keep
"config_utils_prod.go",
"configset.go",
"errors.go",
"fork.go",
"init.go",
"interop.go",
"io_config.go",
@@ -29,9 +31,11 @@ go_library(
deps = [
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//params:go_default_library",
@@ -49,6 +53,8 @@ go_test(
"checktags_test.go",
"config_test.go",
"configset_test.go",
"export_test.go",
"fork_test.go",
"loader_test.go",
"mainnet_config_export_test.go",
"mainnet_config_test.go",
@@ -75,8 +81,10 @@ go_test(
"//encoding/bytesutil:go_default_library",
"//genesis:go_default_library",
"//io/file:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@in_gopkg_yaml_v2//:go_default_library",
"@io_bazel_rules_go//go/tools/bazel:go_default_library",

@@ -2,16 +2,24 @@
package params

import (
"encoding/binary"
"fmt"
"math"
"slices"
"sort"
"strings"
"time"

log "github.com/sirupsen/logrus"

fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
)

// BeaconChainConfig contains constant configs for node to participate in beacon chain.
@@ -314,6 +322,10 @@ type BeaconChainConfig struct {
// DeprecatedMaxBlobsPerBlockFulu defines the max blobs that could exist in a block post Fulu hard fork.
// Deprecated: This field is no longer supported. Avoid using it.
DeprecatedMaxBlobsPerBlockFulu int `yaml:"MAX_BLOBS_PER_BLOCK_FULU" spec:"true"`

forkSchedule *NetworkSchedule
bpoSchedule *NetworkSchedule
networkSchedule *NetworkSchedule
}

func (b *BeaconChainConfig) VersionToForkEpochMap() map[int]primitives.Epoch {
@@ -335,22 +347,262 @@ func (b *BeaconChainConfig) ExecutionRequestLimits() enginev1.ExecutionRequestLi
}
}

type BlobScheduleEntry struct {
Epoch primitives.Epoch `yaml:"EPOCH" json:"EPOCH"`
type NetworkScheduleEntry struct {
ForkVersion [fieldparams.VersionLength]byte
ForkDigest [4]byte
MaxBlobsPerBlock uint64 `yaml:"MAX_BLOBS_PER_BLOCK" json:"MAX_BLOBS_PER_BLOCK"`
Epoch primitives.Epoch `yaml:"EPOCH" json:"EPOCH"`
BPOEpoch primitives.Epoch
VersionEnum int
isFork bool
}

func (e NetworkScheduleEntry) LogFields() log.Fields {
gvr := BeaconConfig().GenesisValidatorsRoot
root, err := computeForkDataRoot(e.ForkVersion, gvr)
if err != nil {
log.WithField("version", fmt.Sprintf("%#x", e.ForkVersion)).
WithField("genesisValidatorsRoot", fmt.Sprintf("%#x", gvr)).
WithError(err).Error("Failed to compute fork data root")
}
fields := log.Fields{
"forkVersion": fmt.Sprintf("%#x", e.ForkVersion),
"forkDigest": fmt.Sprintf("%#x", e.ForkDigest),
"maxBlobsPerBlock": e.MaxBlobsPerBlock,
"epoch": e.Epoch,
"bpoEpoch": e.BPOEpoch,
"isFork": e.isFork,
"forkEnum": version.String(e.VersionEnum),
"sanity": fmt.Sprintf("%#x", root),
"gvr": fmt.Sprintf("%#x", gvr),
}
return fields
}

type BlobScheduleEntry NetworkScheduleEntry

func (b *BeaconChainConfig) ApplyOptions(opts ...Option) {
for _, opt := range opts {
opt(b)
}
}

// TODO: this needs to be able to return an error
// InitializeForkSchedule initializes the fork schedules baked into the config.
func (b *BeaconChainConfig) InitializeForkSchedule() {
// Reset Fork Version Schedule.
b.ForkVersionSchedule = configForkSchedule(b)
b.ForkVersionNames = configForkNames(b)
b.forkSchedule = initForkSchedule(b)
b.bpoSchedule = initBPOSchedule(b)
combined := b.forkSchedule.merge(b.bpoSchedule)
if err := combined.prepare(b); err != nil {
log.WithError(err).Error("Failed to prepare network schedule")
}
b.networkSchedule = combined
}

func LogDigests(b *BeaconChainConfig) {
schedule := b.networkSchedule
for _, e := range schedule.entries {
log.WithFields(e.LogFields()).Debug("Network schedule entry initialized")
digests := make([]string, 0, len(schedule.byDigest))
for k := range schedule.byDigest {
digests = append(digests, fmt.Sprintf("%#x", k))
}
log.WithField("digest_keys", strings.Join(digests, ", ")).Debug("Digests seen")
}
}

type NetworkSchedule struct {
entries []NetworkScheduleEntry
byEpoch map[primitives.Epoch]*NetworkScheduleEntry
byVersion map[[fieldparams.VersionLength]byte]*NetworkScheduleEntry
byDigest map[[4]byte]*NetworkScheduleEntry
}

func newNetworkSchedule(entries []NetworkScheduleEntry) *NetworkSchedule {
return &NetworkSchedule{
entries: entries,
byEpoch: make(map[primitives.Epoch]*NetworkScheduleEntry),
byVersion: make(map[[fieldparams.VersionLength]byte]*NetworkScheduleEntry),
byDigest: make(map[[4]byte]*NetworkScheduleEntry),
}
}

func (ns *NetworkSchedule) epochIdx(epoch primitives.Epoch) int {
for i := len(ns.entries) - 1; i >= 0; i-- {
if ns.entries[i].Epoch <= epoch {
return i
}
}
return -1
}

func (ns *NetworkSchedule) Next(epoch primitives.Epoch) NetworkScheduleEntry {
lastIdx := len(ns.entries) - 1
idx := ns.epochIdx(epoch)
if idx < 0 {
return ns.entries[0]
}
if idx == lastIdx {
return ns.entries[lastIdx]
}
return ns.entries[idx+1]
}

func (ns *NetworkSchedule) LastEntry() NetworkScheduleEntry {
for i := len(ns.entries) - 1; i >= 0; i-- {
if ns.entries[i].Epoch != BeaconConfig().FarFutureEpoch {
return ns.entries[i]
}
}
return ns.entries[0]
}

// LastFork is the last full fork (this is used by e2e testing)
func (ns *NetworkSchedule) LastFork() NetworkScheduleEntry {
for i := len(ns.entries) - 1; i >= 0; i-- {
if ns.entries[i].isFork && ns.entries[i].Epoch != BeaconConfig().FarFutureEpoch {
return ns.entries[i]
}
}
return ns.entries[0]
}

func (ns *NetworkSchedule) ForEpoch(epoch primitives.Epoch) NetworkScheduleEntry {
idx := ns.epochIdx(epoch)
if idx < 0 {
return ns.entries[0]
}
if idx >= len(ns.entries)-1 {
return ns.entries[len(ns.entries)-1]
}
return ns.entries[idx]
}

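To make the lookup semantics concrete: epochIdx finds the last entry whose epoch is at or below the query, ForEpoch returns that entry (clamping at both ends of the schedule), and Next returns the entry that follows it. A sketch using the package-level accessors added in fork.go later in this diff; the digestWindow helper is illustrative only.

package main

import (
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// digestWindow reports the digest in force at the given epoch and the epoch
// at which the next schedule entry (fork or BPO step) would change it.
func digestWindow(epoch primitives.Epoch) ([4]byte, primitives.Epoch) {
	entry := params.GetNetworkScheduleEntry(epoch) // last entry with Epoch <= epoch
	next := params.NextNetworkScheduleEntry(epoch) // the entry after it, if any
	return entry.ForkDigest, next.Epoch
}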
func (ns *NetworkSchedule) activatedAt(epoch primitives.Epoch) (*NetworkScheduleEntry, bool) {
entry, ok := ns.byEpoch[epoch]
return entry, ok
}

func (ns *NetworkSchedule) merge(other *NetworkSchedule) *NetworkSchedule {
merged := make([]NetworkScheduleEntry, 0, len(ns.entries)+len(other.entries))
merged = append(merged, ns.entries...)
merged = append(merged, other.entries...)
sort.Slice(merged, func(i, j int) bool {
if merged[i].Epoch == merged[j].Epoch {
if merged[i].VersionEnum == merged[j].VersionEnum {
return merged[i].isFork
}
return merged[i].VersionEnum < merged[j].VersionEnum
}
return merged[i].Epoch < merged[j].Epoch
})
return newNetworkSchedule(merged)
}

func (ns *NetworkSchedule) index(e NetworkScheduleEntry) {
if _, ok := ns.byDigest[e.ForkDigest]; !ok {
ns.byDigest[e.ForkDigest] = &e
}
if _, ok := ns.byVersion[e.ForkVersion]; !ok {
ns.byVersion[e.ForkVersion] = &e
}
if _, ok := ns.byEpoch[e.Epoch]; !ok {
ns.byEpoch[e.Epoch] = &e
}
}

func (ns *NetworkSchedule) prepare(b *BeaconChainConfig) error {
if len(ns.entries) == 0 {
return errors.New("cannot compute digests for an empty network schedule")
}
if !ns.entries[0].isFork {
return errors.New("cannot compute digests for a network schedule without a fork entry")
}
lastFork, err := entryWithForkDigest(ns.entries[0], b)
if err != nil {
return err
}
ns.entries[0] = lastFork
ns.index(ns.entries[0])
var lastBlobs *NetworkScheduleEntry
for i := 1; i < len(ns.entries); i++ {
entry := ns.entries[i]

if entry.isFork {
lastFork = entry
} else {
entry.ForkVersion = lastFork.ForkVersion
entry.VersionEnum = lastFork.VersionEnum
}

if entry.MaxBlobsPerBlock > 0 || !entry.isFork {
entry.BPOEpoch = entry.Epoch
lastBlobs = &entry
} else if lastBlobs != nil {
entry.MaxBlobsPerBlock = lastBlobs.MaxBlobsPerBlock
entry.BPOEpoch = lastBlobs.BPOEpoch
}

entry, err = entryWithForkDigest(entry, b)
if err != nil {
return err
}
ns.entries[i] = entry
ns.index(entry)
}
return nil
}

func entryWithForkDigest(entry NetworkScheduleEntry, b *BeaconChainConfig) (NetworkScheduleEntry, error) {
root, err := computeForkDataRoot(entry.ForkVersion, b.GenesisValidatorsRoot)
if err != nil {
return entry, err
}
entry.ForkDigest = to4(root[:])
if entry.Epoch < b.FuluForkEpoch {
return entry, nil
}
if entry.MaxBlobsPerBlock > math.MaxUint32 {
return entry, fmt.Errorf("max blobs per block exceeds maximum uint32 value")
}
hb := make([]byte, 16)
binary.LittleEndian.PutUint64(hb[0:8], uint64(entry.BPOEpoch))
binary.LittleEndian.PutUint64(hb[8:], entry.MaxBlobsPerBlock)
bpoHash := hash.Hash(hb)
entry.ForkDigest[0] = entry.ForkDigest[0] ^ bpoHash[0]
entry.ForkDigest[1] = entry.ForkDigest[1] ^ bpoHash[1]
entry.ForkDigest[2] = entry.ForkDigest[2] ^ bpoHash[2]
entry.ForkDigest[3] = entry.ForkDigest[3] ^ bpoHash[3]
return entry, nil
}

var to4 = bytesutil.ToBytes4

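The XOR step above is what distinguishes BPO (blob-parameter-only) digests from plain fork digests: from Fulu onward, the first four bytes of the fork data root are XORed with the first four bytes of hash(bpoEpoch || maxBlobsPerBlock), both fields encoded as little-endian uint64. A standalone restatement of just that step; the bpoDigest name is illustrative, while hash.Hash is the same helper imported by this file.

package main

import (
	"encoding/binary"

	"github.com/OffchainLabs/prysm/v6/crypto/hash"
)

// bpoDigest folds the blob-schedule parameters into a plain fork digest so
// that every BPO change yields a distinct gossip digest.
func bpoDigest(forkDigest [4]byte, bpoEpoch, maxBlobsPerBlock uint64) [4]byte {
	hb := make([]byte, 16)
	binary.LittleEndian.PutUint64(hb[0:8], bpoEpoch)
	binary.LittleEndian.PutUint64(hb[8:], maxBlobsPerBlock)
	bpoHash := hash.Hash(hb)
	for i := range forkDigest {
		forkDigest[i] ^= bpoHash[i]
	}
	return forkDigest
}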
func initForkSchedule(b *BeaconChainConfig) *NetworkSchedule {
return newNetworkSchedule([]NetworkScheduleEntry{
{Epoch: b.GenesisEpoch, isFork: true, ForkVersion: to4(b.GenesisForkVersion), VersionEnum: version.Phase0},
{Epoch: b.AltairForkEpoch, isFork: true, ForkVersion: to4(b.AltairForkVersion), VersionEnum: version.Altair},
{Epoch: b.BellatrixForkEpoch, isFork: true, ForkVersion: to4(b.BellatrixForkVersion), VersionEnum: version.Bellatrix},
{Epoch: b.CapellaForkEpoch, isFork: true, ForkVersion: to4(b.CapellaForkVersion), VersionEnum: version.Capella},
{Epoch: b.DenebForkEpoch, isFork: true, ForkVersion: to4(b.DenebForkVersion), MaxBlobsPerBlock: uint64(b.DeprecatedMaxBlobsPerBlock), VersionEnum: version.Deneb},
{Epoch: b.ElectraForkEpoch, isFork: true, ForkVersion: to4(b.ElectraForkVersion), MaxBlobsPerBlock: uint64(b.DeprecatedMaxBlobsPerBlockElectra), VersionEnum: version.Electra},
{Epoch: b.FuluForkEpoch, isFork: true, ForkVersion: to4(b.FuluForkVersion), VersionEnum: version.Fulu},
})
}

func initBPOSchedule(b *BeaconChainConfig) *NetworkSchedule {
sort.Slice(b.BlobSchedule, func(i, j int) bool {
return b.BlobSchedule[i].Epoch < b.BlobSchedule[j].Epoch
})
entries := make([]NetworkScheduleEntry, len(b.BlobSchedule))
for i := range b.BlobSchedule {
entries[i] = NetworkScheduleEntry(b.BlobSchedule[i])
entries[i].BPOEpoch = entries[i].Epoch
}
return newNetworkSchedule(entries)
}

func configForkSchedule(b *BeaconChainConfig) map[[fieldparams.VersionLength]byte]primitives.Epoch {

@@ -2,6 +2,7 @@ package params_test

import (
"bytes"
"fmt"
"math"
"sync"
"testing"
@@ -10,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/ethereum/go-ethereum/common/hexutil"
)

// Test cases can be executed in an arbitrary order. TestOverrideBeaconConfigTestTeardown checks
@@ -185,3 +187,82 @@ func Test_TargetBlobCount(t *testing.T) {
require.Equal(t, cfg.TargetBlobsPerBlock(primitives.Slot(cfg.ElectraForkEpoch)*cfg.SlotsPerEpoch), 6)
cfg.ElectraForkEpoch = math.MaxUint64
}

func fillGVR(value byte) [32]byte {
var gvr [32]byte
for i := 0; i < len(gvr); i++ {
gvr[i] = value
}
return gvr
}

func TestEntryWithForkDigest(t *testing.T) {
var zero [32]byte
one := fillGVR(byte(1))
two := fillGVR(byte(2))
three := fillGVR(byte(3))
configs := map[[32]byte]*params.BeaconChainConfig{
zero: testConfigForSchedule(zero),
one: testConfigForSchedule(one),
two: testConfigForSchedule(two),
three: testConfigForSchedule(three),
}
for _, cfg := range configs {
cfg.InitializeForkSchedule()
}
cases := []struct {
epoch primitives.Epoch
gvr [32]byte
expected string
}{
{epoch: 9, expected: "0x97b2c268"},
{epoch: 10, expected: "0x97b2c268"},
{epoch: 11, expected: "0x97b2c268"},
{epoch: 99, expected: "0x97b2c268"},
{epoch: 100, expected: "0x44a571e8"},
{epoch: 101, expected: "0x44a571e8"},
{epoch: 150, expected: "0x1171afca"},
{epoch: 199, expected: "0x1171afca"},
{epoch: 200, expected: "0x427a30ab"},
{epoch: 201, expected: "0x427a30ab"},
{epoch: 250, expected: "0xd5310ef1"},
{epoch: 299, expected: "0xd5310ef1"},
{epoch: 300, expected: "0x51d229f7"},
{epoch: 301, expected: "0x51d229f7"},
{epoch: 9, gvr: fillGVR(byte(1)), expected: "0x4a5c3011"},
{epoch: 9, gvr: fillGVR(byte(2)), expected: "0xe8332b52"},
{epoch: 9, gvr: fillGVR(byte(3)), expected: "0x0e38e75e"},
{epoch: 100, gvr: fillGVR(byte(1)), expected: "0xbfe98545"},
{epoch: 100, gvr: fillGVR(byte(2)), expected: "0x9b7e4788"},
{epoch: 100, gvr: fillGVR(byte(3)), expected: "0x8b5ce4af"},
}
for _, c := range cases {
t.Run(fmt.Sprintf("%d_%s", c.epoch, c.expected), func(t *testing.T) {
var expected [4]byte
err := hexutil.UnmarshalFixedText("ForkDigest", []byte(c.expected), expected[:])
require.NoError(t, err)
cfg := configs[c.gvr]
digest := params.ForkDigestUsingConfig(c.epoch, cfg)
require.Equal(t, expected, digest)
})
}
}

func testConfigForSchedule(gvr [32]byte) *params.BeaconChainConfig {
cfg := params.MinimalSpecConfig().Copy()
cfg.AltairForkEpoch = 0
cfg.BellatrixForkEpoch = 0
cfg.CapellaForkEpoch = 0
cfg.DenebForkEpoch = 0
cfg.ElectraForkEpoch = 9
cfg.FuluForkEpoch = 100
cfg.GenesisValidatorsRoot = gvr
cfg.BlobSchedule = []params.BlobScheduleEntry{
{Epoch: 100, MaxBlobsPerBlock: 100},
{Epoch: 150, MaxBlobsPerBlock: 175},
{Epoch: 200, MaxBlobsPerBlock: 200},
{Epoch: 250, MaxBlobsPerBlock: 275},
{Epoch: 300, MaxBlobsPerBlock: 300},
}
return cfg
}

@@ -22,6 +22,7 @@ func BeaconConfig() *BeaconChainConfig {
// OverrideBeaconConfig(c). Any subsequent calls to params.BeaconConfig() will
// return this new configuration.
func OverrideBeaconConfig(c *BeaconChainConfig) {
c.InitializeForkSchedule()
cfgrw.Lock()
defer cfgrw.Unlock()
configs.active = c

@@ -16,6 +16,7 @@ func BeaconConfig() *BeaconChainConfig {
// OverrideBeaconConfig(c). Any subsequent calls to params.BeaconConfig() will
// return this new configuration.
func OverrideBeaconConfig(c *BeaconChainConfig) {
c.InitializeForkSchedule()
configs.active = c
}

@@ -1,4 +1,4 @@
package forks
package params

import "github.com/pkg/errors"

config/params/export_test.go (new file)
@@ -0,0 +1,3 @@
package params

var ComputeForkDataRoot = computeForkDataRoot
config/params/fork.go (new file)
@@ -0,0 +1,104 @@
package params

import (
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)

// DigestChangesAfter returns true if a schedule entry (fork or BPO step) activates in the epoch following e, i.e. the fork digest changes after epoch e.
func DigestChangesAfter(e primitives.Epoch) bool {
_, ok := BeaconConfig().networkSchedule.activatedAt(e + 1)
return ok
}

// ForkDigestUsingConfig retrieves the fork digest from the current schedule determined
// by the provided epoch.
func ForkDigestUsingConfig(epoch primitives.Epoch, cfg *BeaconChainConfig) [4]byte {
entry := cfg.networkSchedule.ForEpoch(epoch)
return entry.ForkDigest
}

func ForkDigest(epoch primitives.Epoch) [4]byte {
return ForkDigestUsingConfig(epoch, BeaconConfig())
}

func computeForkDataRoot(version [fieldparams.VersionLength]byte, root [32]byte) ([32]byte, error) {
r, err := (&ethpb.ForkData{
CurrentVersion: version[:],
GenesisValidatorsRoot: root[:],
}).HashTreeRoot()
if err != nil {
return [32]byte{}, err
}
return r, nil
}

// Fork returns the fork version for the given epoch.
func Fork(epoch primitives.Epoch) (*ethpb.Fork, error) {
cfg := BeaconConfig()
return ForkFromConfig(cfg, epoch), nil
}

func ForkFromConfig(cfg *BeaconChainConfig, epoch primitives.Epoch) *ethpb.Fork {
current := cfg.networkSchedule.ForEpoch(epoch)
previous := current
if current.Epoch > 0 {
previous = cfg.networkSchedule.ForEpoch(current.Epoch - 1)
}
return &ethpb.Fork{
PreviousVersion: previous.ForkVersion[:],
CurrentVersion: current.ForkVersion[:],
Epoch: current.Epoch,
}
}

// ForkDataFromDigest performs the inverse of ForkDigest: it determines the fork version
// and epoch for a provided digest by looking it up in the current network schedule.
func ForkDataFromDigest(digest [4]byte) ([fieldparams.VersionLength]byte, primitives.Epoch, error) {
cfg := BeaconConfig()
entry, ok := cfg.networkSchedule.byDigest[digest]
if !ok {
return [fieldparams.VersionLength]byte{}, 0, errors.Errorf("no fork exists for a digest of %#x", digest)
}
return entry.ForkVersion, entry.Epoch, nil
}

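A short usage sketch for the reverse lookup, e.g. when classifying a digest parsed out of a gossip topic string; the identifyTopicDigest wrapper is illustrative only.

package main

import (
	"github.com/OffchainLabs/prysm/v6/config/params"
)

// identifyTopicDigest resolves a digest back to the fork it belongs to, or
// reports that the digest is unknown to the current network schedule.
func identifyTopicDigest(digest [4]byte) error {
	version, epoch, err := params.ForkDataFromDigest(digest)
	if err != nil {
		return err // digest not present in the schedule
	}
	_ = version // e.g. select the SSZ types for this fork
	_ = epoch   // e.g. log the fork's activation epoch
	return nil
}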
// NextForkData retrieves the next fork data according to the
// provided current epoch.
func NextForkData(epoch primitives.Epoch) ([fieldparams.VersionLength]byte, primitives.Epoch) {
entry := BeaconConfig().networkSchedule.Next(epoch)
return entry.ForkVersion, entry.Epoch
}

func NextNetworkScheduleEntry(epoch primitives.Epoch) NetworkScheduleEntry {
entry := BeaconConfig().networkSchedule.Next(epoch)
return entry
}

func SortedNetworkScheduleEntries() []NetworkScheduleEntry {
return BeaconConfig().networkSchedule.entries
}

func SortedForkSchedule() []NetworkScheduleEntry {
entries := BeaconConfig().networkSchedule.entries
schedule := make([]NetworkScheduleEntry, 0, len(entries))
for _, entry := range entries {
if entry.isFork {
schedule = append(schedule, entry)
}
}
return schedule
}

// LastForkEpoch returns the last valid fork epoch that exists in our
// fork schedule.
func LastForkEpoch() primitives.Epoch {
return BeaconConfig().networkSchedule.LastFork().Epoch
}

func GetNetworkScheduleEntry(epoch primitives.Epoch) NetworkScheduleEntry {
entry := BeaconConfig().networkSchedule.ForEpoch(epoch)
return entry
}

config/params/fork_test.go (new file)
@@ -0,0 +1,188 @@
package params_test

import (
"reflect"
"testing"

"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestFork(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()

tests := []struct {
name string
targetEpoch primitives.Epoch
want *ethpb.Fork
wantErr bool
setConfig func()
}{
{
name: "genesis fork",
targetEpoch: 0,
want: &ethpb.Fork{
Epoch: cfg.GenesisEpoch,
CurrentVersion: cfg.GenesisForkVersion,
PreviousVersion: cfg.GenesisForkVersion,
},
},
{
name: "altair on fork",
targetEpoch: cfg.AltairForkEpoch,
want: &ethpb.Fork{
Epoch: cfg.AltairForkEpoch,
CurrentVersion: cfg.AltairForkVersion,
PreviousVersion: cfg.GenesisForkVersion,
},
},
{
name: "altair post fork",
targetEpoch: cfg.CapellaForkEpoch + 1,
want: &ethpb.Fork{
Epoch: cfg.CapellaForkEpoch,
CurrentVersion: cfg.CapellaForkVersion,
PreviousVersion: cfg.BellatrixForkVersion,
},
},
{
name: "3 forks, pre-fork",
targetEpoch: cfg.ElectraForkEpoch - 1,
want: &ethpb.Fork{
Epoch: cfg.DenebForkEpoch,
CurrentVersion: cfg.DenebForkVersion,
PreviousVersion: cfg.CapellaForkVersion,
},
},
{
name: "3 forks, on fork",
targetEpoch: cfg.ElectraForkEpoch,
want: &ethpb.Fork{
Epoch: cfg.ElectraForkEpoch,
CurrentVersion: cfg.ElectraForkVersion,
PreviousVersion: cfg.DenebForkVersion,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
copied := cfg.Copy()
params.OverrideBeaconConfig(copied)
got, err := params.Fork(tt.targetEpoch)
if (err != nil) != tt.wantErr {
t.Errorf("Fork() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("Fork() got = %v, want %v", got, tt.want)
}
})
}
}

func TestRetrieveForkDataFromDigest(t *testing.T) {
params.SetupTestConfigCleanup(t)
digest := params.ForkDigest(params.BeaconConfig().AltairForkEpoch)
version, epoch, err := params.ForkDataFromDigest(digest)
require.NoError(t, err)
require.Equal(t, [4]byte(params.BeaconConfig().AltairForkVersion), version)
require.Equal(t, params.BeaconConfig().AltairForkEpoch, epoch)
}

func TestIsForkNextEpoch(t *testing.T) {
// at
assert.Equal(t, false, params.DigestChangesAfter(params.BeaconConfig().ElectraForkEpoch))
// just before
assert.Equal(t, true, params.DigestChangesAfter(params.BeaconConfig().ElectraForkEpoch-1))
// just after
assert.Equal(t, false, params.DigestChangesAfter(params.BeaconConfig().ElectraForkEpoch+1))
}

func TestNextForkData(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
cfg := params.BeaconConfig()
require.Equal(t, true, params.LastForkEpoch() < cfg.FarFutureEpoch)
tests := []struct {
name string
setConfig func()
currEpoch primitives.Epoch
wantedForkVersion [4]byte
wantedEpoch primitives.Epoch
}{
{
name: "genesis",
currEpoch: 0,
wantedForkVersion: [4]byte(cfg.AltairForkVersion),
wantedEpoch: cfg.AltairForkEpoch,
},
{
name: "altair pre-fork",
currEpoch: cfg.AltairForkEpoch - 1,
wantedForkVersion: [4]byte(cfg.AltairForkVersion),
wantedEpoch: cfg.AltairForkEpoch,
},
{
name: "altair on fork",
currEpoch: cfg.AltairForkEpoch,
wantedForkVersion: [4]byte(cfg.BellatrixForkVersion),
wantedEpoch: cfg.BellatrixForkEpoch,
},

{
name: "altair post fork",
currEpoch: cfg.AltairForkEpoch + 1,
wantedForkVersion: [4]byte(cfg.BellatrixForkVersion),
wantedEpoch: cfg.BellatrixForkEpoch,
},
{
name: "after last bpo - should be far future epoch and 0x00000000",
currEpoch: params.LastForkEpoch() + 1,
wantedForkVersion: [4]byte(cfg.FuluForkVersion),
wantedEpoch: cfg.FarFutureEpoch,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
params.OverrideBeaconConfig(cfg.Copy())
fVersion, fEpoch := params.NextForkData(tt.currEpoch)
if fVersion != tt.wantedForkVersion {
t.Errorf("NextForkData() fork version = %v, want %v", fVersion, tt.wantedForkVersion)
}
if fEpoch != tt.wantedEpoch {
t.Errorf("NextForkData() fork epoch = %v, want %v", fEpoch, tt.wantedEpoch)
}
})
}
}

func TestLastForkEpoch(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
require.Equal(t, cfg.ElectraForkEpoch, params.LastForkEpoch())
}

func TestForkFromConfig_UsesPassedConfig(t *testing.T) {
testCfg := params.BeaconConfig().Copy()
testCfg.AltairForkVersion = []byte{0x02, 0x00, 0x00, 0x00}
testCfg.GenesisForkVersion = []byte{0x03, 0x00, 0x00, 0x00}
testCfg.AltairForkEpoch = 100
testCfg.InitializeForkSchedule()

// Test at Altair fork epoch - should use the passed config's versions
fork := params.ForkFromConfig(testCfg, testCfg.AltairForkEpoch)

want := &ethpb.Fork{
Epoch: testCfg.AltairForkEpoch,
CurrentVersion: testCfg.AltairForkVersion,
PreviousVersion: testCfg.GenesisForkVersion,
}

if !reflect.DeepEqual(fork, want) {
t.Errorf("ForkFromConfig() got = %v, want %v", fork, want)
}
}
@@ -11,6 +11,6 @@ type Option func(*BeaconChainConfig)
func WithGenesisValidatorsRoot(gvr [32]byte) Option {
return func(cfg *BeaconChainConfig) {
cfg.GenesisValidatorsRoot = gvr
log.WithField("genesis_validators_root", fmt.Sprintf("%#x", gvr)).Info("Overriding genesis validators root")
log.WithField("genesis_validators_root", fmt.Sprintf("%#x", gvr)).Info("Setting genesis validators root")
}
}

@@ -17,7 +17,6 @@ go_library(
         "//consensus-types/interfaces:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//encoding/bytesutil:go_default_library",
-        "//network/forks:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//runtime/version:go_default_library",
         "//time/slots:go_default_library",

@@ -11,7 +11,6 @@ import (
 	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
-	"github.com/OffchainLabs/prysm/v6/network/forks"
 	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v6/runtime/version"
 	"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -55,14 +54,12 @@ func FromBlock(marshaled []byte) (*VersionedUnmarshaler, error) {
 	if err != nil {
 		return nil, err
 	}
-	copiedCfg := params.BeaconConfig().Copy()
 	epoch := slots.ToEpoch(slot)
-	fs := forks.NewOrderedSchedule(copiedCfg)
-	ver, err := fs.VersionForEpoch(epoch)
+	fs, err := params.Fork(epoch)
 	if err != nil {
 		return nil, err
 	}
-	return FromForkVersion(ver)
+	return FromForkVersion([4]byte(fs.CurrentVersion))
 }
 
 var ErrForkNotFound = errors.New("version found in fork schedule but can't be matched to a named fork")
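The refactor replaces the per-call ordered schedule with a direct epoch lookup. A minimal sketch of the new call path, assuming params.Fork's signature as used in the hunk above (the versionForSlot wrapper is hypothetical):

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/time/slots"
)

// versionForSlot resolves slot -> epoch -> 4-byte fork version via params.Fork.
func versionForSlot(slot primitives.Slot) ([4]byte, error) {
	epoch := slots.ToEpoch(slot)
	f, err := params.Fork(epoch)
	if err != nil {
		return [4]byte{}, err
	}
	// CurrentVersion is a 4-byte slice; Go 1.20+ permits the direct array conversion.
	return [4]byte(f.CurrentVersion), nil
}

func main() {
	v, err := versionForSlot(0)
	fmt.Println(v, err)
}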
@@ -282,13 +279,13 @@ func (cf *VersionedUnmarshaler) UnmarshalBlindedBeaconBlock(marshaled []byte) (i
 // VersionedUnmarshaler.
 func (cf *VersionedUnmarshaler) validateVersion(slot primitives.Slot) error {
 	epoch := slots.ToEpoch(slot)
-	fs := forks.NewOrderedSchedule(cf.Config)
-	ver, err := fs.VersionForEpoch(epoch)
+	fork, err := params.Fork(epoch)
 	if err != nil {
 		return err
 	}
+	ver := [4]byte(fork.CurrentVersion)
 	if ver != cf.Version {
-		return errors.Wrapf(errBlockForkMismatch, "slot=%d, epoch=%d, version=%#x", slot, epoch, ver)
+		return errors.Wrapf(errBlockForkMismatch, "slot=%d, epoch=%d, version=%#x", slot, epoch, fork.CurrentVersion)
 	}
 	return nil
 }

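Since fork versions are fixed-size [4]byte arrays, the mismatch check in validateVersion reduces to a plain comparable-array equality test. A standalone sketch (the matches helper is illustrative, not Prysm code):

package main

import "fmt"

func matches(expected, got [4]byte) error {
	if expected != got { // [4]byte is comparable, so == and != work directly
		return fmt.Errorf("fork version mismatch: expected %#x, got %#x", expected, got)
	}
	return nil
}

func main() {
	fmt.Println(matches([4]byte{1}, [4]byte{1})) // <nil>
	fmt.Println(matches([4]byte{1}, [4]byte{2})) // fork version mismatch
}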
@@ -112,11 +112,9 @@ func TestByState(t *testing.T) {
 	for _, c := range cases {
 		st, err := stateForVersion(c.version)
 		require.NoError(t, err)
-		require.NoError(t, st.SetFork(&ethpb.Fork{
-			PreviousVersion: make([]byte, 4),
-			CurrentVersion:  c.forkversion[:],
-			Epoch:           0,
-		}))
+		fork, err := params.Fork(slots.ToEpoch(c.slot))
+		require.NoError(t, err)
+		require.NoError(t, st.SetFork(fork))
 		require.NoError(t, st.SetSlot(c.slot))
 		m, err := st.MarshalSSZ()
 		require.NoError(t, err)

@@ -1,39 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "errors.go",
        "fork.go",
        "ordered.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/network/forks",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/signing:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "fork_test.go",
        "ordered_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/core/signing:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
    ],
)
@@ -1,202 +0,0 @@
// Package forks contains useful helpers for Ethereum consensus fork-related functionality.
package forks

import (
	"bytes"
	"math"
	"sort"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/pkg/errors"
)

// IsForkNextEpoch checks if an allotted fork is in the following epoch.
func IsForkNextEpoch(genesisTime time.Time, genesisValidatorsRoot []byte) (bool, error) {
	if genesisTime.IsZero() {
		return false, errors.New("genesis time is not set")
	}
	if len(genesisValidatorsRoot) == 0 {
		return false, errors.New("genesis validators root is not set")
	}
	currentSlot := slots.CurrentSlot(genesisTime)
	currentEpoch := slots.ToEpoch(currentSlot)
	fSchedule := params.BeaconConfig().ForkVersionSchedule
	scheduledForks := SortedForkVersions(fSchedule)
	isForkEpoch := false
	for _, forkVersion := range scheduledForks {
		epoch := fSchedule[forkVersion]
		if currentEpoch+1 == epoch {
			isForkEpoch = true
			break
		}
	}
	return isForkEpoch, nil
}

// ForkDigestFromEpoch retrieves the fork digest from the current schedule determined
// by the provided epoch.
func ForkDigestFromEpoch(currentEpoch primitives.Epoch, genesisValidatorsRoot []byte) ([4]byte, error) {
	if len(genesisValidatorsRoot) == 0 {
		return [4]byte{}, errors.New("genesis validators root is not set")
	}
	forkData, err := Fork(currentEpoch)
	if err != nil {
		return [4]byte{}, err
	}
	return signing.ComputeForkDigest(forkData.CurrentVersion, genesisValidatorsRoot)
}

// CreateForkDigest creates a fork digest from a genesis time and genesis
// validators root, utilizing the current slot to determine
// the active fork version in the node.
func CreateForkDigest(
	genesisTime time.Time,
	genesisValidatorsRoot []byte,
) ([4]byte, error) {
	if genesisTime.IsZero() {
		return [4]byte{}, errors.New("genesis time is not set")
	}
	if len(genesisValidatorsRoot) == 0 {
		return [4]byte{}, errors.New("genesis validators root is not set")
	}
	currentSlot := slots.CurrentSlot(genesisTime)
	currentEpoch := slots.ToEpoch(currentSlot)

	forkData, err := Fork(currentEpoch)
	if err != nil {
		return [4]byte{}, err
	}

	digest, err := signing.ComputeForkDigest(forkData.CurrentVersion, genesisValidatorsRoot)
	if err != nil {
		return [4]byte{}, err
	}
	return digest, nil
}

// Fork given a target epoch,
// returns the active fork version during this epoch.
func Fork(
	targetEpoch primitives.Epoch,
) (*ethpb.Fork, error) {
	currentForkVersion := bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)
	previousForkVersion := bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)
	fSchedule := params.BeaconConfig().ForkVersionSchedule
	sortedForkVersions := SortedForkVersions(fSchedule)
	forkEpoch := primitives.Epoch(0)
	for _, forkVersion := range sortedForkVersions {
		epoch, ok := fSchedule[forkVersion]
		if !ok {
			return nil, errors.Errorf("fork version %x doesn't exist in schedule", forkVersion)
		}
		if targetEpoch >= epoch {
			previousForkVersion = currentForkVersion
			currentForkVersion = forkVersion
			forkEpoch = epoch
		}
	}
	return &ethpb.Fork{
		PreviousVersion: previousForkVersion[:],
		CurrentVersion:  currentForkVersion[:],
		Epoch:           forkEpoch,
	}, nil
}

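// Worked example (editor's sketch, not part of the deleted file): given a
// schedule {v0: 0, v1: 10} and targetEpoch 12, the walk in Fork above visits
// v0 (12 >= 0) and then v1 (12 >= 10), so it returns CurrentVersion = v1,
// PreviousVersion = v0, Epoch = 10.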
// RetrieveForkDataFromDigest performs the inverse, where it tries to determine the fork version
// and epoch from a provided digest by looping through our current fork schedule.
func RetrieveForkDataFromDigest(digest [4]byte, genesisValidatorsRoot []byte) ([4]byte, primitives.Epoch, error) {
	fSchedule := params.BeaconConfig().ForkVersionSchedule
	for v, e := range fSchedule {
		rDigest, err := signing.ComputeForkDigest(v[:], genesisValidatorsRoot)
		if err != nil {
			return [4]byte{}, 0, err
		}
		if rDigest == digest {
			return v, e, nil
		}
	}
	return [4]byte{}, 0, errors.Errorf("no fork exists for a digest of %#x", digest)
}

// NextForkData retrieves the next fork data according to the
// provided current epoch.
func NextForkData(currEpoch primitives.Epoch) ([4]byte, primitives.Epoch, error) {
	fSchedule := params.BeaconConfig().ForkVersionSchedule
	sortedForkVersions := SortedForkVersions(fSchedule)
	nextForkEpoch := primitives.Epoch(math.MaxUint64)
	var nextForkVersion [4]byte
	for _, forkVersion := range sortedForkVersions {
		epoch, ok := fSchedule[forkVersion]
		if !ok {
			return [4]byte{}, 0, errors.Errorf("fork version %x doesn't exist in schedule", forkVersion)
		}
		// If we get an epoch larger than our current epoch
		// we set this as our next fork epoch and exit the
		// loop.
		if epoch > currEpoch {
			nextForkEpoch = epoch
			nextForkVersion = forkVersion
			break
		}
		// In the event the retrieved epoch is less than
		// our current epoch, we mark the previous
		// fork's version as the next fork version.
		if epoch <= currEpoch {
			// The next fork version is updated to
			// always include the most current fork version.
			nextForkVersion = forkVersion
		}
	}
	return nextForkVersion, nextForkEpoch, nil
}

// SortedForkVersions sorts the provided fork schedule in ascending order
// by epoch.
func SortedForkVersions(forkSchedule map[[4]byte]primitives.Epoch) [][4]byte {
	sortedVersions := make([][4]byte, len(forkSchedule))
	i := 0
	for k := range forkSchedule {
		sortedVersions[i] = k
		i++
	}
	sort.Slice(sortedVersions, func(a, b int) bool {
		// va == "version" a, ie the [4]byte version id
		va, vb := sortedVersions[a], sortedVersions[b]
		// ea == "epoch" a, ie the types.Epoch corresponding to va
		ea, eb := forkSchedule[va], forkSchedule[vb]
		// Try to sort by epochs first, which works fine when epochs are all distinct.
		// In the case of testnets starting from a given fork, all epochs leading to the fork will be zero.
		if ea != eb {
			return ea < eb
		}
		// If the epochs are equal, break the tie with a lexicographic comparison of the fork version bytes.
		// eg 2 versions both with a fork epoch of 0, 0x00000000 would come before 0x01000000.
		// sort.Slice takes a 'less' func, ie `return a < b`, and when va < vb, bytes.Compare will return -1
		return bytes.Compare(va[:], vb[:]) < 0
	})
	return sortedVersions
}

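// Worked example (editor's sketch, not part of the deleted file): for a
// schedule {0x00000000: 5, 0x01000000: 5, 0x02000000: 9}, SortedForkVersions
// above sorts by epoch first and breaks the epoch-5 tie lexicographically,
// yielding 0x00000000, 0x01000000, 0x02000000.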
// LastForkEpoch returns the last valid fork epoch that exists in our
// fork schedule.
func LastForkEpoch() primitives.Epoch {
	fSchedule := params.BeaconConfig().ForkVersionSchedule
	sortedForkVersions := SortedForkVersions(fSchedule)
	lastValidEpoch := primitives.Epoch(0)
	numOfVersions := len(sortedForkVersions)
	for i := numOfVersions - 1; i >= 0; i-- {
		v := sortedForkVersions[i]
		fEpoch := fSchedule[v]
		if fEpoch != math.MaxUint64 {
			lastValidEpoch = fEpoch
			break
		}
	}
	return lastValidEpoch
}
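For readers skimming the diff: the core of what the deleted package did, and what the params package now owns, is a walk over an epoch-sorted fork schedule. The following standalone sketch reconstructs just that algorithm under simplified types (plain uint64 epochs, tie-breaking omitted); it is an illustration, not the replacement code.

package main

import (
	"fmt"
	"sort"
)

// forkAt takes the last fork activated at or before the target epoch,
// tracking its predecessor, mirroring the deleted Fork function above.
func forkAt(schedule map[[4]byte]uint64, target uint64) (current, previous [4]byte, epoch uint64) {
	versions := make([][4]byte, 0, len(schedule))
	for v := range schedule {
		versions = append(versions, v)
	}
	// Order activations by epoch (the deleted code also tie-breaks on bytes).
	sort.Slice(versions, func(i, j int) bool { return schedule[versions[i]] < schedule[versions[j]] })
	for _, v := range versions {
		if e := schedule[v]; target >= e {
			previous, current, epoch = current, v, e
		}
	}
	return current, previous, epoch
}

func main() {
	sched := map[[4]byte]uint64{{0}: 0, {1}: 10, {2}: 100}
	cur, prev, e := forkAt(sched, 50)
	// Prints current=0x01000000 previous=0x00000000 epoch=10.
	fmt.Printf("current=%#x previous=%#x epoch=%d\n", cur, prev, e)
}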
@@ -1,485 +0,0 @@
package forks

import (
	"math"
	"reflect"
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
)

func TestFork(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()

	tests := []struct {
		name        string
		targetEpoch primitives.Epoch
		want        *ethpb.Fork
		wantErr     bool
		setConfg    func()
	}{
		{
			name:        "no fork",
			targetEpoch: 0,
			want: &ethpb.Fork{
				Epoch:           0,
				CurrentVersion:  []byte{'A', 'B', 'C', 'D'},
				PreviousVersion: []byte{'A', 'B', 'C', 'D'},
			},
			wantErr: false,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{}
				params.OverrideBeaconConfig(cfg)
			},
		},
		{
			name:        "genesis fork",
			targetEpoch: 0,
			want: &ethpb.Fork{
				Epoch:           0,
				CurrentVersion:  []byte{'A', 'B', 'C', 'D'},
				PreviousVersion: []byte{'A', 'B', 'C', 'D'},
			},
			wantErr: false,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},
		{
			name:        "altair pre-fork",
			targetEpoch: 0,
			want: &ethpb.Fork{
				Epoch:           0,
				CurrentVersion:  []byte{'A', 'B', 'C', 'D'},
				PreviousVersion: []byte{'A', 'B', 'C', 'D'},
			},
			wantErr: false,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.AltairForkVersion = []byte{'A', 'B', 'C', 'F'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
					{'A', 'B', 'C', 'F'}: 10,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},
		{
			name:        "altair on fork",
			targetEpoch: 10,
			want: &ethpb.Fork{
				Epoch:           10,
				CurrentVersion:  []byte{'A', 'B', 'C', 'F'},
				PreviousVersion: []byte{'A', 'B', 'C', 'D'},
			},
			wantErr: false,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.AltairForkVersion = []byte{'A', 'B', 'C', 'F'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
					{'A', 'B', 'C', 'F'}: 10,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},

		{
			name:        "altair post fork",
			targetEpoch: 10,
			want: &ethpb.Fork{
				Epoch:           10,
				CurrentVersion:  []byte{'A', 'B', 'C', 'F'},
				PreviousVersion: []byte{'A', 'B', 'C', 'D'},
			},
			wantErr: false,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.AltairForkVersion = []byte{'A', 'B', 'C', 'F'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
					{'A', 'B', 'C', 'F'}: 10,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},

		{
			name:        "3 forks, pre-fork",
			targetEpoch: 20,
			want: &ethpb.Fork{
				Epoch:           10,
				CurrentVersion:  []byte{'A', 'B', 'C', 'F'},
				PreviousVersion: []byte{'A', 'B', 'C', 'D'},
			},
			wantErr: false,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
					{'A', 'B', 'C', 'F'}: 10,
					{'A', 'B', 'C', 'Z'}: 100,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},
		{
			name:        "3 forks, on fork",
			targetEpoch: 100,
			want: &ethpb.Fork{
				Epoch:           100,
				CurrentVersion:  []byte{'A', 'B', 'C', 'Z'},
				PreviousVersion: []byte{'A', 'B', 'C', 'F'},
			},
			wantErr: false,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
					{'A', 'B', 'C', 'F'}: 10,
					{'A', 'B', 'C', 'Z'}: 100,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.setConfg()
			got, err := Fork(tt.targetEpoch)
			if (err != nil) != tt.wantErr {
				t.Errorf("Fork() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Fork() got = %v, want %v", got, tt.want)
			}
		})
	}
}

func TestRetrieveForkDataFromDigest(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
	cfg.GenesisEpoch = 0
	cfg.AltairForkVersion = []byte{'A', 'B', 'C', 'F'}
	cfg.AltairForkEpoch = 10
	cfg.BellatrixForkVersion = []byte{'A', 'B', 'C', 'Z'}
	cfg.BellatrixForkEpoch = 100
	cfg.InitializeForkSchedule()
	params.OverrideBeaconConfig(cfg)
	genValRoot := [32]byte{'A', 'B', 'C', 'D'}
	digest, err := signing.ComputeForkDigest([]byte{'A', 'B', 'C', 'F'}, genValRoot[:])
	assert.NoError(t, err)

	version, epoch, err := RetrieveForkDataFromDigest(digest, genValRoot[:])
	assert.NoError(t, err)
	assert.Equal(t, [4]byte{'A', 'B', 'C', 'F'}, version)
	assert.Equal(t, epoch, primitives.Epoch(10))

	digest, err = signing.ComputeForkDigest([]byte{'A', 'B', 'C', 'Z'}, genValRoot[:])
	assert.NoError(t, err)

	version, epoch, err = RetrieveForkDataFromDigest(digest, genValRoot[:])
	assert.NoError(t, err)
	assert.Equal(t, [4]byte{'A', 'B', 'C', 'Z'}, version)
	assert.Equal(t, epoch, primitives.Epoch(100))
}

func TestIsForkNextEpoch(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
	cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
		{'A', 'B', 'C', 'D'}: 0,
		{'A', 'B', 'C', 'F'}: 10,
		{'A', 'B', 'C', 'Z'}: 100,
	}
	params.OverrideBeaconConfig(cfg)
	genTimeCreator := func(epoch primitives.Epoch) time.Time {
		return time.Now().Add(-time.Duration(uint64(params.BeaconConfig().SlotsPerEpoch)*uint64(epoch)*params.BeaconConfig().SecondsPerSlot) * time.Second)
	}
	// Is at Fork Epoch
	genesisTime := genTimeCreator(10)
	genRoot := [32]byte{'A'}

	isFork, err := IsForkNextEpoch(genesisTime, genRoot[:])
	assert.NoError(t, err)
	assert.Equal(t, false, isFork)

	// Is right before fork epoch
	genesisTime = genTimeCreator(9)

	isFork, err = IsForkNextEpoch(genesisTime, genRoot[:])
	assert.NoError(t, err)
	assert.Equal(t, true, isFork)

	// Is at fork epoch
	genesisTime = genTimeCreator(100)

	isFork, err = IsForkNextEpoch(genesisTime, genRoot[:])
	assert.NoError(t, err)
	assert.Equal(t, false, isFork)

	genesisTime = genTimeCreator(99)

	// Is right before fork epoch.
	isFork, err = IsForkNextEpoch(genesisTime, genRoot[:])
	assert.NoError(t, err)
	assert.Equal(t, true, isFork)
}

func TestNextForkData(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	tests := []struct {
		name              string
		setConfg          func()
		currEpoch         primitives.Epoch
		wantedForkVersion [4]byte
		wantedEpoch       primitives.Epoch
	}{
		{
			name:              "genesis fork",
			currEpoch:         0,
			wantedForkVersion: [4]byte{'A', 'B', 'C', 'D'},
			wantedEpoch:       math.MaxUint64,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},
		{
			name:              "altair pre-fork",
			currEpoch:         5,
			wantedForkVersion: [4]byte{'A', 'B', 'C', 'F'},
			wantedEpoch:       10,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.AltairForkVersion = []byte{'A', 'B', 'C', 'F'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
					{'A', 'B', 'C', 'F'}: 10,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},
		{
			name:              "altair on fork",
			currEpoch:         10,
			wantedForkVersion: [4]byte{'A', 'B', 'C', 'F'},
			wantedEpoch:       math.MaxUint64,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.AltairForkVersion = []byte{'A', 'B', 'C', 'F'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
					{'A', 'B', 'C', 'F'}: 10,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},

		{
			name:              "altair post fork",
			currEpoch:         20,
			wantedForkVersion: [4]byte{'A', 'B', 'C', 'F'},
			wantedEpoch:       math.MaxUint64,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.AltairForkVersion = []byte{'A', 'B', 'C', 'F'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
					{'A', 'B', 'C', 'F'}: 10,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},

		{
			name:              "3 forks, pre-fork, 1st fork",
			currEpoch:         5,
			wantedForkVersion: [4]byte{'A', 'B', 'C', 'F'},
			wantedEpoch:       10,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
					{'A', 'B', 'C', 'F'}: 10,
					{'A', 'B', 'C', 'Z'}: 100,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},
		{
			name:              "3 forks, pre-fork, 2nd fork",
			currEpoch:         50,
			wantedForkVersion: [4]byte{'A', 'B', 'C', 'Z'},
			wantedEpoch:       100,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
					{'A', 'B', 'C', 'F'}: 10,
					{'A', 'B', 'C', 'Z'}: 100,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},
		{
			name:              "3 forks, on fork",
			currEpoch:         100,
			wantedForkVersion: [4]byte{'A', 'B', 'C', 'Z'},
			wantedEpoch:       math.MaxUint64,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
					{'A', 'B', 'C', 'F'}: 10,
					{'A', 'B', 'C', 'Z'}: 100,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.setConfg()
			fVersion, fEpoch, err := NextForkData(tt.currEpoch)
			assert.NoError(t, err)
			if fVersion != tt.wantedForkVersion {
				t.Errorf("NextForkData() fork version = %v, want %v", fVersion, tt.wantedForkVersion)
			}
			if fEpoch != tt.wantedEpoch {
				t.Errorf("NextForkData() fork epoch = %v, want %v", fEpoch, tt.wantedEpoch)
			}
		})
	}
}

func TestLastForkEpoch(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	tests := []struct {
		name        string
		setConfg    func()
		wantedEpoch primitives.Epoch
	}{
		{
			name:        "no schedule",
			wantedEpoch: 0,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{}
				params.OverrideBeaconConfig(cfg)
			},
		},
		{
			name:        "genesis fork",
			wantedEpoch: 0,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},
		{
			name:        "altair post fork",
			wantedEpoch: 10,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.AltairForkVersion = []byte{'A', 'B', 'C', 'F'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
					{'A', 'B', 'C', 'F'}: 10,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},

		{
			name:        "3 forks, 1 valid fork",
			wantedEpoch: 5,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 5,
					{'A', 'B', 'C', 'F'}: math.MaxUint64,
					{'A', 'B', 'C', 'Z'}: math.MaxUint64,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},
		{
			name:        "3 forks, 2 valid ones",
			wantedEpoch: 10,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
					{'A', 'B', 'C', 'F'}: 10,
					{'A', 'B', 'C', 'Z'}: math.MaxUint64,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},
		{
			name:        "3 forks",
			wantedEpoch: 100,
			setConfg: func() {
				cfg = cfg.Copy()
				cfg.GenesisForkVersion = []byte{'A', 'B', 'C', 'D'}
				cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
					{'A', 'B', 'C', 'D'}: 0,
					{'A', 'B', 'C', 'F'}: 10,
					{'A', 'B', 'C', 'Z'}: 100,
				}
				params.OverrideBeaconConfig(cfg)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.setConfg()
			fEpoch := LastForkEpoch()
			if fEpoch != tt.wantedEpoch {
				t.Errorf("LastForkEpoch() fork epoch = %v, want %v", fEpoch, tt.wantedEpoch)
			}
		})
	}
}
Some files were not shown because too many files have changed in this diff.