Compare commits

...

29 Commits

Author SHA1 Message Date
Preston Van Loon
7d0a3566da Fixes required to sync fusaka-devnet-4 2025-08-11 17:48:58 -05:00
Preston Van Loon
65affd47ca Test devnet-4 changes with the ENR changes from PR 15501 2025-08-11 16:35:01 -05:00
Preston Van Loon
42fb56e498 Update log field to spell out 'nfd' as 'NextForkDigest' 2025-08-11 12:21:14 -05:00
Potuz
ab80865286 add testcase 2025-08-11 14:00:33 -03:00
Preston Van Loon
57d44bd33d Add tests for fulu NFD key 2025-08-11 11:48:30 -05:00
Kasey Kirkham
ff2994c24c fusaka fork digest enr changes 2025-08-11 11:18:11 -05:00
kasey
3da40ecd9c Refactor fork schedules (#15490)
* overhaul fork schedule management for bpos

* Unify log

* Radek's comments

* Use arg config to determine previous epoch, with regression test

* Remove unnecessary NewClock. @potuz feedback

* Continuation of previous commit: Remove unnecessary NewClock. @potuz feedback

* Remove VerifyBlockHeaderSignatureUsingCurrentFork

* cosmetic changes

* Remove unnecessary copy. entryWithForkDigest passes by value, not by pointer, so it should be fine (see the sketch after this entry)

* Reuse ErrInvalidTopic from p2p package

* Unskip TestServer_GetBeaconConfig

* Resolve TODO about forkwatcher in local mode

* remove Copy()

---------

Co-authored-by: Kasey <kasey@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: rkapka <radoslaw.kapka@gmail.com>
Co-authored-by: Preston Van Loon <preston@pvl.dev>
2025-08-11 16:08:53 +00:00
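The "remove unnecessary copy" item above leans on Go's value semantics: a struct argument passed by value is already a copy, so an explicit Copy() before the call buys nothing. A minimal sketch of that behavior, using a hypothetical entry type rather than Prysm's actual entryWithForkDigest:

package main

import "fmt"

// entry is a hypothetical stand-in for a small value type such as entryWithForkDigest.
type entry struct {
	epoch  uint64
	digest [4]byte
}

// bump receives its own copy of e, so mutations here never reach the caller.
func bump(e entry) {
	e.epoch = 99
}

func main() {
	e := entry{epoch: 1, digest: [4]byte{0xde, 0xad, 0xbe, 0xef}}
	bump(e)              // no explicit Copy() needed: passing by value already copies e
	fmt.Println(e.epoch) // prints 1; the caller's value is untouched
}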
terence
f7f992c256 gofmt: fix formatting issues in test files (#15577) 2025-08-11 15:56:53 +00:00
Manu NALEPA
978ffa4780 Add tests for sendDataColumnSidecarsRequest. 2025-08-11 00:35:40 +02:00
Manu NALEPA
407bf6785f Fix Potuz's comment. 2025-08-10 23:07:56 +02:00
Manu NALEPA
c45230b455 Fix Potuz's comment. 2025-08-10 22:47:00 +02:00
Manu NALEPA
0af6591001 Fix Potuz's comment. 2025-08-10 22:41:00 +02:00
Manu NALEPA
1af249da31 Fix Potuz's comment. 2025-08-10 22:02:22 +02:00
Manu NALEPA
901f6b6e6c Fix Potuz's comment. 2025-08-10 21:59:17 +02:00
Manu NALEPA
a3cdda56d9 Fix Potuz's comment. 2025-08-10 21:53:54 +02:00
Manu NALEPA
cf3200fa06 Fix Potuz's comment. 2025-08-10 21:47:55 +02:00
Manu NALEPA
04cafa1959 Partially fix Potuz's comment. 2025-08-10 21:39:27 +02:00
Manu NALEPA
498b945a61 Fix Satyajit's comment. 2025-08-10 21:35:39 +02:00
terence
09565e0c3a Refactor attestation cache key generation outside critical section (#15572)
* Refactor attestation cache key generation outside critical section

* Improve attestation cache key error handling and logging
2025-08-10 17:41:30 +00:00
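The refactor above follows a common locking pattern: do the potentially expensive key computation before acquiring the mutex, so the critical section only covers the map access. A minimal sketch of that pattern with hypothetical types, not Prysm's actual attestation cache:

package main

import (
	"crypto/sha256"
	"fmt"
	"sync"
)

// attCache is a hypothetical cache keyed by a digest of the attestation bytes.
type attCache struct {
	mu    sync.Mutex
	items map[[32]byte][]byte
}

func newAttCache() *attCache {
	return &attCache{items: make(map[[32]byte][]byte)}
}

// add computes the key before taking the lock, so the mutex only guards the map write.
func (c *attCache) add(att []byte) {
	key := sha256.Sum256(att) // potentially expensive work, done outside the critical section
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[key] = att
}

func main() {
	c := newAttCache()
	c.add([]byte("attestation bytes"))
	fmt.Println(len(c.items)) // prints 1
}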
Jun Song
05a3736310 refactor: removing redundant codes in htrutils.go (#15453)
* refactor: use auto-generated HashTreeRoot functions in htrutil.go

* refactor: use type alias for Transaction & use SliceRoot for TransactionsRoot

* changelog

* fix: TransactionsRoot receives raw 2d bytes as an argument

* fix: handle nil argument

* test: add nil test for fork and checkpoint

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-08-10 10:44:01 +00:00
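The "handle nil argument" and nil-test items describe guarding the auto-generated SSZ hashers against nil inputs. A minimal sketch of that guard, assuming a fastssz-generated Checkpoint type; the helper name is hypothetical:

package main

import (
	"fmt"

	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)

// checkpointRoot treats a nil checkpoint as the zero-value checkpoint rather
// than handing a nil pointer to the auto-generated SSZ hasher.
func checkpointRoot(cp *ethpb.Checkpoint) ([32]byte, error) {
	if cp == nil {
		cp = &ethpb.Checkpoint{}
	}
	return cp.HashTreeRoot() // fastssz-generated HashTreeRoot
}

func main() {
	root, err := checkpointRoot(nil)
	fmt.Println(root, err)
}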
kasey
84c8653a52 initialize genesis data asap at node start (#15470)
* initialize genesis data asap at node start

* add genesis validation tests with embedded state verification

* Add test for hardcoded mainnet genesis validator root and time from init() function

* Add test for UnmarshalState in encoding/ssz/detect/configfork.go

* Add tests for genesis.Initialize

* Move genesis/embedded to genesis/internal/embedded

* Gazelle / BUILD fix

* James feedback

* Fix lint

* Revert lock

---------

Co-authored-by: Kasey <kasey@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@pvl.dev>
2025-08-10 02:09:40 +00:00
Tomás Andróil
921ff23c6b refactor: use central flag name for validator HTTP server port (#15236)
* Update validator.go

* Create tomasandroil_replace_grpc_gateway_flag_name.md
2025-08-09 10:35:04 +00:00
Manu NALEPA
90317ba5b5 Fix Potuz's comment. 2025-08-09 01:24:00 +02:00
Radosław Kapka
87235fb384 Don't submit duplicate aggregated SignedContributionAndProof messages (#15571) 2025-08-08 22:57:36 +00:00
Manu NALEPA
b35358f440 Fix Potuz's comment. 2025-08-09 00:47:55 +02:00
Manu NALEPA
b926066495 Fix Potuz's comment. 2025-08-08 20:59:28 +02:00
Manu NALEPA
263ddf9a7b PeerDAS: Implement sync 2025-08-08 20:05:12 +02:00
james-prysm
2ec5914b4a fixing builder version check (#15568)
* adding fix

* fixing test
2025-08-08 01:42:55 +00:00
Potuz
fe000e5629 Fix validateConsensus (#15548)
* Fix validateConsensus

Reported by NuConstruct

The stater package looks up a state by state root, starting from the head
state held by the blockchain package. However, that state is very unlikely
to contain the post-state root, since that root is only added after slot
processing. I assume that essentially any REST endpoint that uses this
mechanism to get head is broken if it needs to fetch a state by state root.

This PR is a placeholder to verify this is the issue: here I just check
whether the NSC (next-slot cache) already has the post-state, since that
cache will already hold the post-processing state (see the sketch after this entry).

* Add changelog

* add fallback

* Fix tests
2025-08-07 13:08:40 +00:00
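A minimal sketch of the lookup order described above (head state first, then the next-slot cache, then a slow fallback), using hypothetical types and helpers rather than Prysm's actual stater API:

package main

import "context"

// State is a hypothetical stand-in for a beacon state handle.
type State interface {
	HashTreeRoot(ctx context.Context) ([32]byte, error)
}

// resolver bundles the hypothetical lookup sources described in the commit message.
type resolver struct {
	head       State                                           // current head state (pre slot-processing)
	nextSlot   map[[32]byte]State                              // next-slot cache, keyed by post-state root
	replayRoot func(context.Context, [32]byte) (State, error)  // slow fallback path
}

// stateByRoot checks the head state first, then the next-slot cache, then replays.
func (r *resolver) stateByRoot(ctx context.Context, root [32]byte) (State, error) {
	if r.head != nil {
		if hr, err := r.head.HashTreeRoot(ctx); err == nil && hr == root {
			return r.head, nil
		}
	}
	// The head state rarely carries the post-state root (it is only written
	// after slot processing), so consult the next-slot cache before the slow path.
	if st, ok := r.nextSlot[root]; ok {
		return st, nil
	}
	return r.replayRoot(ctx, root)
}

func main() {}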
229 changed files with 5955 additions and 4202 deletions

View File

@@ -16,7 +16,6 @@ go_library(
"//api/server/structs:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -9,7 +9,6 @@ import (
"net/url"
"path"
"regexp"
"sort"
"strconv"
"github.com/OffchainLabs/prysm/v6/api/client"
@@ -17,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
@@ -137,24 +135,6 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
return fr.ToConsensus()
}
// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
body, err := c.Get(ctx, getForkSchedulePath)
if err != nil {
return nil, errors.Wrap(err, "error requesting fork schedule")
}
fsr := &forkScheduleResponse{}
err = json.Unmarshal(body, fsr)
if err != nil {
return nil, err
}
ofs, err := fsr.OrderedForkSchedule()
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
}
return ofs, nil
}
// GetConfigSpec retrieve the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*structs.GetSpecResponse, error) {
body, err := c.Get(ctx, getConfigSpecPath)
@@ -334,31 +314,3 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToEx
}
return poolResponse, nil
}
type forkScheduleResponse struct {
Data []structs.Fork
}
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
ofs := make(forks.OrderedSchedule, 0)
for _, d := range fsr.Data {
epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
}
vSlice, err := hexutil.Decode(d.CurrentVersion)
if err != nil {
return nil, err
}
if len(vSlice) != 4 {
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
}
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: primitives.Epoch(epoch),
})
}
sort.Sort(ofs)
return ofs, nil
}

View File

@@ -1727,7 +1727,7 @@ func TestSubmitBlindedBlock_BlobsBundlerInterface(t *testing.T) {
t.Run("Interface signature verification", func(t *testing.T) {
// This test verifies that the SubmitBlindedBlock method signature
// has been updated to return BlobsBundler interface
client := &Client{}
// Verify the method exists with the correct signature

View File

@@ -182,6 +182,7 @@ go_test(
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//genesis:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -624,6 +625,7 @@ func Test_hashForGenesisRoot(t *testing.T) {
ctx := t.Context()
c := setupBeaconChain(t, beaconDB)
st, _ := util.DeterministicGenesisStateElectra(t, 10)
genesis.StoreDuringTest(t, genesis.GenesisData{State: st})
require.NoError(t, c.cfg.BeaconDB.SaveGenesisData(ctx, st))
root, err := beaconDB.GenesisBlockRoot(ctx)
require.NoError(t, err)

View File

@@ -19,6 +19,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -309,6 +310,7 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
block: wba,
}
genesis.StoreStateDuringTest(t, st)
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
a := &fcuConfig{
@@ -403,6 +405,7 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
bState, _ := util.DeterministicGenesisState(t, 10)
genesis.StoreStateDuringTest(t, bState)
require.NoError(t, beaconDB.SaveState(ctx, bState, bra))
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)

View File

@@ -6,20 +6,20 @@ import (
)
// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
func Verify(sidecars ...blocks.ROBlob) error {
if len(sidecars) == 0 {
func Verify(blobSidecars ...blocks.ROBlob) error {
if len(blobSidecars) == 0 {
return nil
}
if len(sidecars) == 1 {
if len(blobSidecars) == 1 {
return kzgContext.VerifyBlobKZGProof(
bytesToBlob(sidecars[0].Blob),
bytesToCommitment(sidecars[0].KzgCommitment),
bytesToKZGProof(sidecars[0].KzgProof))
bytesToBlob(blobSidecars[0].Blob),
bytesToCommitment(blobSidecars[0].KzgCommitment),
bytesToKZGProof(blobSidecars[0].KzgProof))
}
blobs := make([]GoKZG.Blob, len(sidecars))
cmts := make([]GoKZG.KZGCommitment, len(sidecars))
proofs := make([]GoKZG.KZGProof, len(sidecars))
for i, sidecar := range sidecars {
blobs := make([]GoKZG.Blob, len(blobSidecars))
cmts := make([]GoKZG.KZGCommitment, len(blobSidecars))
proofs := make([]GoKZG.KZGProof, len(blobSidecars))
for i, sidecar := range blobSidecars {
blobs[i] = *bytesToBlob(sidecar.Blob)
cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
proofs[i] = bytesToKZGProof(sidecar.KzgProof)

View File

@@ -22,8 +22,8 @@ func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZG
}
func TestVerify(t *testing.T) {
sidecars := make([]blocks.ROBlob, 0)
require.NoError(t, Verify(sidecars...))
blobSidecars := make([]blocks.ROBlob, 0)
require.NoError(t, Verify(blobSidecars...))
}
func TestBytesToAny(t *testing.T) {

View File

@@ -240,9 +240,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
}
}
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
return errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
}
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
JustifiedCheckpoint: jCheckpoints[i],
FinalizedCheckpoint: fCheckpoints[i]}
@@ -308,6 +309,26 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
blockVersion := roBlock.Version()
block := roBlock.Block()
slot := block.Slot()
if version.Deneb <= blockVersion && blockVersion < version.Fulu {
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), roBlock); err != nil {
return errors.Wrapf(err, "could not validate sidecar availability at slot %d", slot)
}
}
if version.Fulu <= blockVersion {
if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), block); err != nil {
return errors.Wrapf(err, "are data columns available for block %#x with slot %d", roBlock.Root(), slot)
}
}
return nil
}
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {

View File

@@ -35,6 +35,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -1980,14 +1981,15 @@ func TestNoViableHead_Reboot(t *testing.T) {
genesisState, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := genesisState.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
gb := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(gb)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
genesisRoot, err := gb.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
require.NoError(t, service.saveGenesisData(ctx, genesisState))
genesis.StoreStateDuringTest(t, genesisState)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, genesisRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, genesisRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")

View File

@@ -12,7 +12,6 @@ import (
"github.com/OffchainLabs/prysm/v6/async/event"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
@@ -207,17 +206,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
// Start a blockchain service's main event loop.
func (s *Service) Start() {
saved := s.cfg.FinalizedStateAtStartUp
defer s.removeStartupState()
if saved != nil && !saved.IsNil() {
if err := s.StartFromSavedState(saved); err != nil {
log.Fatal(err)
}
} else {
if err := s.startFromExecutionChain(); err != nil {
log.Fatal(err)
}
if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
log.Fatal(err)
}
s.spawnProcessAttestationsRoutine()
go s.runLateBlockTasks()
@@ -266,6 +257,9 @@ func (s *Service) Status() error {
// StartFromSavedState initializes the blockchain using a previously saved finalized checkpoint.
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if state.IsNil(saved) {
return errors.New("Last finalized state at startup is nil")
}
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = saved.GenesisTime()
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())
@@ -371,62 +365,6 @@ func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) erro
return nil
}
func (s *Service) startFromExecutionChain() error {
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
if s.cfg.ChainStartFetcher == nil {
return errors.New("not configured execution chain")
}
go func() {
stateChannel := make(chan *feed.Event, 1)
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
for {
select {
case e := <-stateChannel:
if e.Type == statefeed.ChainStarted {
data, ok := e.Data.(*statefeed.ChainStartedData)
if !ok {
log.Error("Event data is not type *statefeed.ChainStartedData")
return
}
log.WithField("startTime", data.StartTime).Debug("Received chain start event")
s.onExecutionChainStart(s.ctx, data.StartTime)
return
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
case err := <-stateSub.Err():
log.WithError(err).Error("Subscription to state forRoot failed")
return
}
}
}()
return nil
}
// onExecutionChainStart initializes a series of deposits from the ChainStart deposits in the eth1
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Time) {
preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
if err != nil {
log.WithError(err).Fatal("Could not initialize beacon chain")
}
// We start a counter to genesis, if needed.
gRoot, err := initializedState.HashTreeRoot(s.ctx)
if err != nil {
log.WithError(err).Fatal("Could not hash tree root genesis state")
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
log.WithError(err).Fatal("Failed to initialize blockchain service from execution start event")
}
}
// initializes the state and genesis block of the beacon chain to persistent storage
// based on a genesis timestamp value obtained from the ChainStart event emitted
// by the ETH1.0 Deposit Contract and the POWChain service of the node.

View File

@@ -31,6 +31,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/trie"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -51,6 +52,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
srv.Stop()
})
bState, _ := util.DeterministicGenesisState(t, 10)
genesis.StoreStateDuringTest(t, bState)
pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProtoUnsafe())
require.NoError(t, err)
mockTrie, err := trie.NewTrie(0)
@@ -71,20 +73,22 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
DepositContainers: []*ethpb.DepositContainer{},
})
require.NoError(t, err)
depositCache, err := depositsnapshot.New()
require.NoError(t, err)
web3Service, err = execution.NewService(
ctx,
execution.WithDatabase(beaconDB),
execution.WithHttpEndpoint(endpoint),
execution.WithDepositContractAddress(common.Address{}),
execution.WithDepositCache(depositCache),
)
require.NoError(t, err, "Unable to set up web3 service")
attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
require.NoError(t, err)
depositCache, err := depositsnapshot.New()
require.NoError(t, err)
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
// Safe a state in stategen to purposes of testing a service stop / shutdown.
@@ -396,24 +400,6 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(s.ctx, r))
}
func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
mgs := &MockClockSetter{}
service.clockSetter = mgs
gt := time.Now()
service.onExecutionChainStart(t.Context(), gt)
gs, err := beaconDB.GenesisState(ctx)
require.NoError(t, err)
require.NotEqual(t, nil, gs)
require.Equal(t, 32, len(gs.GenesisValidatorsRoot()))
var zero [32]byte
require.DeepNotEqual(t, gs.GenesisValidatorsRoot(), zero[:])
require.Equal(t, gt, mgs.G.GenesisTime())
require.Equal(t, bytesutil.ToBytes32(gs.GenesisValidatorsRoot()), mgs.G.GenesisValidatorsRoot())
}
func BenchmarkHasBlockDB(b *testing.B) {
ctx := b.Context()
s := testServiceWithDB(b)

View File

@@ -41,7 +41,6 @@ go_library(
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",

View File

@@ -11,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -101,7 +100,7 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
// via the respective epoch.
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
currentEpoch := slots.ToEpoch(blk.Block().Slot())
fork, err := forks.Fork(currentEpoch)
fork, err := params.Fork(currentEpoch)
if err != nil {
return err
}

View File

@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
}
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
params.SetupTestConfigCleanup(t)
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)

View File

@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"domain.go",
"signature.go",
"signing_root.go",
],
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing",
@@ -25,7 +24,6 @@ go_test(
name = "go_default_test",
srcs = [
"domain_test.go",
"signature_test.go",
"signing_root_test.go",
],
embed = [":go_default_library"],

View File

@@ -1,34 +0,0 @@
package signing
import (
"github.com/OffchainLabs/prysm/v6/config/params"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
var ErrNilRegistration = errors.New("nil signed registration")
// VerifyRegistrationSignature verifies the signature of a validator's registration.
func VerifyRegistrationSignature(
sr *ethpb.SignedValidatorRegistrationV1,
) error {
if sr == nil || sr.Message == nil {
return ErrNilRegistration
}
d := params.BeaconConfig().DomainApplicationBuilder
// Per spec, we want the fork version and genesis validator to be nil.
// Which is genesis value and zero by default.
sd, err := ComputeDomain(
d,
nil, /* fork version */
nil /* genesis val root */)
if err != nil {
return err
}
if err := VerifySigningRoot(sr.Message, sr.Message.Pubkey, sr.Signature, sd); err != nil {
return ErrSigFailedToVerify
}
return nil
}

View File

@@ -1,42 +0,0 @@
package signing_test
import (
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
func TestVerifyRegistrationSignature(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
reg := &ethpb.ValidatorRegistrationV1{
FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
GasLimit: 123456,
Timestamp: uint64(time.Now().Unix()),
Pubkey: sk.PublicKey().Marshal(),
}
d := params.BeaconConfig().DomainApplicationBuilder
domain, err := signing.ComputeDomain(d, nil, nil)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(reg, domain)
require.NoError(t, err)
sk.Sign(sr[:]).Marshal()
sReg := &ethpb.SignedValidatorRegistrationV1{
Message: reg,
Signature: sk.Sign(sr[:]).Marshal(),
}
require.NoError(t, signing.VerifyRegistrationSignature(sReg))
sReg.Signature = []byte("bad")
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrSigFailedToVerify)
sReg.Message = nil
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrNilRegistration)
}

View File

@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"availability_blobs.go",
"availability_columns.go",
"blob_cache.go",
"data_column_cache.go",
"iface.go",
@@ -13,7 +12,6 @@ go_library(
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/das",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
@@ -23,7 +21,6 @@ go_library(
"//runtime/logging:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
@@ -33,7 +30,6 @@ go_test(
name = "go_default_test",
srcs = [
"availability_blobs_test.go",
"availability_columns_test.go",
"blob_cache_test.go",
"data_column_cache_test.go",
],
@@ -49,7 +45,6 @@ go_test(
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -53,30 +53,25 @@ func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchV
// Persist adds blobs to the working blob cache. Blobs stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROBlob) error {
if len(sidecars) == 0 {
return nil
}
blobSidecars, err := blocks.BlobSidecarsFromSidecars(sidecars)
if err != nil {
return errors.Wrap(err, "blob sidecars from sidecars")
}
if len(blobSidecars) > 1 {
firstRoot := blobSidecars[0].BlockRoot()
for _, sidecar := range blobSidecars[1:] {
if len(sidecars) > 1 {
firstRoot := sidecars[0].BlockRoot()
for _, sidecar := range sidecars[1:] {
if sidecar.BlockRoot() != firstRoot {
return errMixedRoots
}
}
}
if !params.WithinDAPeriod(slots.ToEpoch(blobSidecars[0].Slot()), slots.ToEpoch(current)) {
if !params.WithinDAPeriod(slots.ToEpoch(sidecars[0].Slot()), slots.ToEpoch(current)) {
return nil
}
key := keyFromSidecar(blobSidecars[0])
key := keyFromSidecar(sidecars[0])
entry := s.cache.ensure(key)
for _, blobSidecar := range blobSidecars {
for _, blobSidecar := range sidecars {
if err := entry.stash(&blobSidecar); err != nil {
return err
}

View File

@@ -118,23 +118,21 @@ func TestLazilyPersistent_Missing(t *testing.T) {
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
as := NewLazilyPersistentStore(store, mbv)
// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(1, scs[2]))
require.NoError(t, as.Persist(1, blobSidecars[2]))
err := as.IsDataAvailable(ctx, 1, blk)
require.ErrorIs(t, err, errMissingSidecar)
// All but one persisted, return missing idx
require.NoError(t, as.Persist(1, scs[0]))
require.NoError(t, as.Persist(1, blobSidecars[0]))
err = as.IsDataAvailable(ctx, 1, blk)
require.ErrorIs(t, err, errMissingSidecar)
// All persisted, return nil
require.NoError(t, as.Persist(1, scs...))
require.NoError(t, as.Persist(1, blobSidecars...))
require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
}
@@ -149,10 +147,8 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
as := NewLazilyPersistentStore(store, mbv)
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(1, scs[0]))
require.NoError(t, as.Persist(1, blobSidecars[0]))
err := as.IsDataAvailable(ctx, 1, blk)
require.NotNil(t, err)
require.ErrorIs(t, err, errCommitmentMismatch)
@@ -161,29 +157,25 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
func TestLazyPersistOnceCommitted(t *testing.T) {
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
// stashes as expected
require.NoError(t, as.Persist(1, scs...))
require.NoError(t, as.Persist(1, blobSidecars...))
// ignores duplicates
require.ErrorIs(t, as.Persist(1, scs...), ErrDuplicateSidecar)
require.ErrorIs(t, as.Persist(1, blobSidecars...), ErrDuplicateSidecar)
// ignores index out of bound
blobSidecars[0].Index = 6
require.ErrorIs(t, as.Persist(1, blocks.NewSidecarFromBlobSidecar(blobSidecars[0])), errIndexOutOfBounds)
require.ErrorIs(t, as.Persist(1, blobSidecars[0]), errIndexOutOfBounds)
_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
more := blocks.NewSidecarsFromBlobSidecars(moreBlobSidecars)
// ignores sidecars before the retention period
slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)
require.NoError(t, as.Persist(32+slotOOB, more[0]))
require.NoError(t, as.Persist(32+slotOOB, moreBlobSidecars[0]))
// doesn't ignore new sidecars with a different block root
require.NoError(t, as.Persist(1, more...))
require.NoError(t, as.Persist(1, moreBlobSidecars...))
}
type mockBlobBatchVerifier struct {

View File

@@ -1,213 +0,0 @@
package das
import (
"context"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/p2p/enode"
errors "github.com/pkg/errors"
)
// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any data columns passed to Persist until the IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreColumn struct {
store *filesystem.DataColumnStorage
nodeID enode.ID
cache *dataColumnCache
newDataColumnsVerifier verification.NewDataColumnsVerifier
custodyGroupCount uint64
}
var _ AvailabilityStore = &LazilyPersistentStoreColumn{}
// DataColumnsVerifier enables LazilyPersistentStoreColumn to manage the verification process
// going from RODataColumn->VerifiedRODataColumn, while avoiding the decision of which individual verifications
// to run and in what order. Since LazilyPersistentStoreColumn always tries to verify and save data columns only when
// they are all available, the interface takes a slice of data column sidecars.
type DataColumnsVerifier interface {
VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
}
// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
func NewLazilyPersistentStoreColumn(
store *filesystem.DataColumnStorage,
nodeID enode.ID,
newDataColumnsVerifier verification.NewDataColumnsVerifier,
custodyGroupCount uint64,
) *LazilyPersistentStoreColumn {
return &LazilyPersistentStoreColumn{
store: store,
nodeID: nodeID,
cache: newDataColumnCache(),
newDataColumnsVerifier: newDataColumnsVerifier,
custodyGroupCount: custodyGroupCount,
}
}
// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
if len(sidecars) == 0 {
return nil
}
dataColumnSidecars, err := blocks.DataColumnSidecarsFromSidecars(sidecars)
if err != nil {
return errors.Wrap(err, "blob sidecars from sidecars")
}
// It is safe to retrieve the first sidecar.
firstSidecar := dataColumnSidecars[0]
if len(sidecars) > 1 {
firstRoot := firstSidecar.BlockRoot()
for _, sidecar := range dataColumnSidecars[1:] {
if sidecar.BlockRoot() != firstRoot {
return errMixedRoots
}
}
}
firstSidecarEpoch, currentEpoch := slots.ToEpoch(firstSidecar.Slot()), slots.ToEpoch(current)
if !params.WithinDAPeriod(firstSidecarEpoch, currentEpoch) {
return nil
}
key := cacheKey{slot: firstSidecar.Slot(), root: firstSidecar.BlockRoot()}
entry := s.cache.ensure(key)
for _, sidecar := range dataColumnSidecars {
if err := entry.stash(&sidecar); err != nil {
return errors.Wrap(err, "stash DataColumnSidecar")
}
}
return nil
}
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// DataColumnsSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, currentSlot primitives.Slot, block blocks.ROBlock) error {
blockCommitments, err := s.fullCommitmentsToCheck(s.nodeID, block, currentSlot)
if err != nil {
return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
}
// Return early for blocks that do not have any commitments.
if blockCommitments.count() == 0 {
return nil
}
// Get the root of the block.
blockRoot := block.Root()
// Build the cache key for the block.
key := cacheKey{slot: block.Block().Slot(), root: blockRoot}
// Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
entry := s.cache.ensure(key)
// Delete the cache entry for the block at the end.
defer s.cache.delete(key)
// Set the disk summary for the block in the cache entry.
entry.setDiskSummary(s.store.Summary(blockRoot))
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
// ignore their response and decrease their peer score.
roDataColumns, err := entry.filter(blockRoot, blockCommitments)
if err != nil {
return errors.Wrap(err, "entry filter")
}
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
verifier := s.newDataColumnsVerifier(roDataColumns, verification.ByRangeRequestDataColumnSidecarRequirements)
if err := verifier.ValidFields(); err != nil {
return errors.Wrap(err, "valid fields")
}
if err := verifier.SidecarInclusionProven(); err != nil {
return errors.Wrap(err, "sidecar inclusion proven")
}
if err := verifier.SidecarKzgProofVerified(); err != nil {
return errors.Wrap(err, "sidecar KZG proof verified")
}
verifiedRoDataColumns, err := verifier.VerifiedRODataColumns()
if err != nil {
return errors.Wrap(err, "verified RO data columns - should never happen")
}
if err := s.store.Save(verifiedRoDataColumns); err != nil {
return errors.Wrap(err, "save data column sidecars")
}
return nil
}
// fullCommitmentsToCheck returns the commitments to check for a given block.
func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
// Return early for blocks that are pre-Fulu.
if block.Version() < version.Fulu {
return &safeCommitmentsArray{}, nil
}
// Compute the block epoch.
blockSlot := block.Block().Slot()
blockEpoch := slots.ToEpoch(blockSlot)
// Compute the current epoch.
currentEpoch := slots.ToEpoch(currentSlot)
// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
return &safeCommitmentsArray{}, nil
}
// Retrieve the KZG commitments for the block.
kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "blob KZG commitments")
}
// Return early if there are no commitments in the block.
if len(kzgCommitments) == 0 {
return &safeCommitmentsArray{}, nil
}
// Retrieve peer info.
samplingSize := max(s.custodyGroupCount, samplesPerSlot)
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
if err != nil {
return nil, errors.Wrap(err, "peer info")
}
// Create a safe commitments array for the custody columns.
commitmentsArray := &safeCommitmentsArray{}
commitmentsArraySize := uint64(len(commitmentsArray))
for column := range peerInfo.CustodyColumns {
if column >= commitmentsArraySize {
return nil, errors.Errorf("custody column index %d too high (max allowed %d) - should never happen", column, commitmentsArraySize)
}
commitmentsArray[column] = kzgCommitments
}
return commitmentsArray, nil
}

View File

@@ -1,313 +0,0 @@
package das
import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/p2p/enode"
)
var commitments = [][]byte{
bytesutil.PadTo([]byte("a"), 48),
bytesutil.PadTo([]byte("b"), 48),
bytesutil.PadTo([]byte("c"), 48),
bytesutil.PadTo([]byte("d"), 48),
}
func TestPersist(t *testing.T) {
t.Run("no sidecars", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(0)
require.NoError(t, err)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
})
t.Run("mixed roots", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
dataColumnParamsByBlockRoot := []util.DataColumnParam{
{Slot: 1, Index: 1},
{Slot: 2, Index: 2},
}
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
require.ErrorIs(t, err, errMixedRoots)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
})
t.Run("outside DA period", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
dataColumnParamsByBlockRoot := []util.DataColumnParam{
{Slot: 1, Index: 1},
}
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
require.NoError(t, err)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
})
t.Run("nominal", func(t *testing.T) {
const slot = 42
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
dataColumnParamsByBlockRoot := []util.DataColumnParam{
{Slot: slot, Index: 1},
{Slot: slot, Index: 5},
}
roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(slot, roSidecars...)
require.NoError(t, err)
require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))
key := cacheKey{slot: slot, root: roDataColumns[0].BlockRoot()}
entry, ok := lazilyPersistentStoreColumns.cache.entries[key]
require.Equal(t, true, ok)
// A call to Persist does NOT save the sidecars to disk.
require.Equal(t, uint64(0), entry.diskSummary.Count())
require.DeepSSZEqual(t, roDataColumns[0], *entry.scs[1])
require.DeepSSZEqual(t, roDataColumns[1], *entry.scs[5])
for i, roDataColumn := range entry.scs {
if map[int]bool{1: true, 5: true}[i] {
continue
}
require.IsNil(t, roDataColumn)
}
})
}
func TestIsDataAvailable(t *testing.T) {
newDataColumnsVerifier := func(dataColumnSidecars []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
return &mockDataColumnsVerifier{t: t, dataColumnSidecars: dataColumnSidecars}
}
ctx := t.Context()
t.Run("without commitments", func(t *testing.T) {
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
require.NoError(t, err)
})
t.Run("with commitments", func(t *testing.T) {
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
block := signedRoBlock.Block()
slot := block.Slot()
proposerIndex := block.ProposerIndex()
parentRoot := block.ParentRoot()
stateRoot := block.StateRoot()
bodyRoot, err := block.Body().HashTreeRoot()
require.NoError(t, err)
root := signedRoBlock.Root()
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
indices := [...]uint64{1, 17, 19, 42, 75, 87, 102, 117}
dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
for _, index := range indices {
dataColumnParams := util.DataColumnParam{
Index: index,
KzgCommitments: commitments,
Slot: slot,
ProposerIndex: proposerIndex,
ParentRoot: parentRoot[:],
StateRoot: stateRoot[:],
BodyRoot: bodyRoot[:],
}
dataColumnsParams = append(dataColumnsParams, dataColumnParams)
}
_, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)
key := cacheKey{root: root}
entry := lazilyPersistentStoreColumns.cache.ensure(key)
defer lazilyPersistentStoreColumns.cache.delete(key)
for _, verifiedRoDataColumn := range verifiedRoDataColumns {
err := entry.stash(&verifiedRoDataColumn.RODataColumn)
require.NoError(t, err)
}
err = lazilyPersistentStoreColumns.IsDataAvailable(ctx, slot, signedRoBlock)
require.NoError(t, err)
actual, err := dataColumnStorage.Get(root, indices[:])
require.NoError(t, err)
summary := dataColumnStorage.Summary(root)
require.Equal(t, uint64(len(indices)), summary.Count())
require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
})
}
func TestFullCommitmentsToCheck(t *testing.T) {
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
require.NoError(t, err)
testCases := []struct {
name string
commitments [][]byte
block func(*testing.T) blocks.ROBlock
slot primitives.Slot
}{
{
name: "Pre-Fulu block",
block: func(t *testing.T) blocks.ROBlock {
return newSignedRoBlock(t, util.NewBeaconBlockElectra())
},
},
{
name: "Commitments outside data availability window",
block: func(t *testing.T) blocks.ROBlock {
beaconBlockElectra := util.NewBeaconBlockElectra()
// Block is from slot 0, "current slot" is window size +1 (so outside the window)
beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments
return newSignedRoBlock(t, beaconBlockElectra)
},
slot: windowSlots + 1,
},
{
name: "Commitments within data availability window",
block: func(t *testing.T) blocks.ROBlock {
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
signedBeaconBlockFulu.Block.Slot = 100
return newSignedRoBlock(t, signedBeaconBlockFulu)
},
commitments: commitments,
slot: 100,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
b := tc.block(t)
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, numberOfColumns)
commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
require.NoError(t, err)
for _, commitments := range commitmentsArray {
require.DeepEqual(t, tc.commitments, commitments)
}
})
}
}
func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, parameters []util.DataColumnParam) ([]blocks.ROSidecar, []blocks.RODataColumn) {
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, parameters)
roSidecars := make([]blocks.ROSidecar, 0, len(roDataColumns))
for _, roDataColumn := range roDataColumns {
roSidecars = append(roSidecars, blocks.NewSidecarFromDataColumnSidecar(roDataColumn))
}
return roSidecars, roDataColumns
}
func newSignedRoBlock(t *testing.T, signedBeaconBlock interface{}) blocks.ROBlock {
sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
}
type mockDataColumnsVerifier struct {
t *testing.T
dataColumnSidecars []blocks.RODataColumn
validCalled, SidecarInclusionProvenCalled, SidecarKzgProofVerifiedCalled bool
}
var _ verification.DataColumnsVerifier = &mockDataColumnsVerifier{}
func (m *mockDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
require.Equal(m.t, true, m.validCalled && m.SidecarInclusionProvenCalled && m.SidecarKzgProofVerifiedCalled)
verifiedDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(m.dataColumnSidecars))
for _, dataColumnSidecar := range m.dataColumnSidecars {
verifiedDataColumnSidecar := blocks.NewVerifiedRODataColumn(dataColumnSidecar)
verifiedDataColumnSidecars = append(verifiedDataColumnSidecars, verifiedDataColumnSidecar)
}
return verifiedDataColumnSidecars, nil
}
func (m *mockDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}
func (m *mockDataColumnsVerifier) ValidFields() error {
m.validCalled = true
return nil
}
func (m *mockDataColumnsVerifier) CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) error {
return nil
}
func (m *mockDataColumnsVerifier) NotFromFutureSlot() error { return nil }
func (m *mockDataColumnsVerifier) SlotAboveFinalized() error { return nil }
func (m *mockDataColumnsVerifier) ValidProposerSignature(ctx context.Context) error { return nil }
func (m *mockDataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) error {
return nil
}
func (m *mockDataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) error {
return nil
}
func (m *mockDataColumnsVerifier) SidecarParentSlotLower() error { return nil }
func (m *mockDataColumnsVerifier) SidecarDescendsFromFinalized() error { return nil }
func (m *mockDataColumnsVerifier) SidecarInclusionProven() error {
m.SidecarInclusionProvenCalled = true
return nil
}
func (m *mockDataColumnsVerifier) SidecarKzgProofVerified() error {
m.SidecarKzgProofVerifiedCalled = true
return nil
}
func (m *mockDataColumnsVerifier) SidecarProposerExpected(ctx context.Context) error { return nil }

View File

@@ -15,5 +15,5 @@ import (
// durably persisted before returning a non-error value.
type AvailabilityStore interface {
IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
Persist(current primitives.Slot, sc ...blocks.ROSidecar) error
Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
}
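A rough usage sketch of the contract above: sidecars are persisted as they arrive, and IsDataAvailable must return nil before the block is treated as importable. The surrounding service type is hypothetical; only the two interface methods come from this diff:

package main

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// service is a hypothetical wrapper around the AvailabilityStore shown above.
type service struct {
	availabilityStore das.AvailabilityStore
}

// checkBlockDA persists the blob sidecars that arrived with a block, then
// requires IsDataAvailable to pass before the block is considered importable.
func (svc *service) checkBlockDA(ctx context.Context, current primitives.Slot, blk blocks.ROBlock, sidecars []blocks.ROBlob) error {
	if err := svc.availabilityStore.Persist(current, sidecars...); err != nil {
		return err
	}
	// Returns nil only once every commitment referenced by blk is durably
	// persisted and verified, per the interface's documented contract.
	return svc.availabilityStore.IsDataAvailable(ctx, current, blk)
}

func main() {}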

View File

@@ -5,13 +5,12 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
errors "github.com/pkg/errors"
)
// MockAvailabilityStore is an implementation of AvailabilityStore that can be used by other packages in tests.
type MockAvailabilityStore struct {
VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
PersistBlobsCallback func(current primitives.Slot, sc ...blocks.ROBlob) error
PersistBlobsCallback func(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
}
var _ AvailabilityStore = &MockAvailabilityStore{}
@@ -25,13 +24,9 @@ func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current pri
}
// Persist satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROSidecar) error {
blobSidecars, err := blocks.BlobSidecarsFromSidecars(sc)
if err != nil {
return errors.Wrap(err, "blob sidecars from sidecars")
}
func (m *MockAvailabilityStore) Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error {
if m.PersistBlobsCallback != nil {
return m.PersistBlobsCallback(current, blobSidecars...)
return m.PersistBlobsCallback(current, blobSidecar...)
}
return nil
}

View File

@@ -13,6 +13,7 @@ go_library(
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/beacon-chain:__subpackages__",
"//genesis:__subpackages__",
"//testing/slasher/simulator:__pkg__",
"//tools:__subpackages__",
],

View File

@@ -100,6 +100,14 @@ type (
}
)
// DataColumnStorageReader is an interface to read data column sidecars from the filesystem.
type DataColumnStorageReader interface {
Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
Get(root [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error)
}
var _ DataColumnStorageReader = &DataColumnStorage{}
// WithDataColumnBasePath is a required option that sets the base path of data column storage.
func WithDataColumnBasePath(base string) DataColumnStorageOption {
return func(b *DataColumnStorage) error {

View File

@@ -84,12 +84,6 @@ func (s DataColumnStorageSummary) Stored() map[uint64]bool {
return stored
}
// DataColumnStorageSummarizer can be used to receive a summary of metadata about data columns on disk for a given root.
// The DataColumnStorageSummary can be used to check which indices (if any) are available for a given block by root.
type DataColumnStorageSummarizer interface {
Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
}
type dataColumnStorageSummaryCache struct {
mu sync.RWMutex
dataColumnCount float64
@@ -98,8 +92,6 @@ type dataColumnStorageSummaryCache struct {
cache map[[fieldparams.RootLength]byte]DataColumnStorageSummary
}
var _ DataColumnStorageSummarizer = &dataColumnStorageSummaryCache{}
func newDataColumnStorageSummaryCache() *dataColumnStorageSummaryCache {
return &dataColumnStorageSummaryCache{
cache: make(map[[fieldparams.RootLength]byte]DataColumnStorageSummary),

View File

@@ -144,14 +144,3 @@ func NewEphemeralDataColumnStorageWithMocker(t testing.TB) (*DataColumnMocker, *
fs, dcs := NewEphemeralDataColumnStorageAndFs(t)
return &DataColumnMocker{fs: fs, dcs: dcs}, dcs
}
func NewMockDataColumnStorageSummarizer(t *testing.T, set map[[fieldparams.RootLength]byte][]uint64) DataColumnStorageSummarizer {
c := newDataColumnStorageSummaryCache()
for root, indices := range set {
if err := c.set(DataColumnsIdent{Root: root, Epoch: 0, Indices: indices}); err != nil {
t.Fatal(err)
}
}
return c
}

View File

@@ -115,6 +115,17 @@ type NoHeadAccessDatabase interface {
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)
// Genesis operations.
LoadGenesis(ctx context.Context, stateBytes []byte) error
SaveGenesisData(ctx context.Context, state state.BeaconState) error
EnsureEmbeddedGenesis(ctx context.Context) error
// Support for checkpoint sync and backfill.
SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
// Custody operations.
UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
@@ -131,16 +142,6 @@ type HeadAccessDatabase interface {
HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
HeadBlockRoot() ([32]byte, error)
SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
// Genesis operations.
LoadGenesis(ctx context.Context, stateBytes []byte) error
SaveGenesisData(ctx context.Context, state state.BeaconState) error
EnsureEmbeddedGenesis(ctx context.Context) error
// Support for checkpoint sync and backfill.
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
}
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.

View File

@@ -40,7 +40,6 @@ go_library(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
@@ -52,6 +51,7 @@ go_library(
"//container/slice:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//genesis:go_default_library",
"//io/file:go_default_library",
"//monitoring/progress:go_default_library",
"//monitoring/tracing:go_default_library",
@@ -110,7 +110,6 @@ go_test(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
@@ -120,6 +119,7 @@ go_test(
"//consensus-types/light-client:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//genesis:go_default_library",
"//proto/dbval:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -8,6 +8,7 @@ import (
dbIface "github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/encoding/ssz/detect"
"github.com/OffchainLabs/prysm/v6/genesis"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
@@ -97,8 +98,22 @@ func (s *Store) EnsureEmbeddedGenesis(ctx context.Context) error {
if err != nil {
return err
}
if gs != nil && !gs.IsNil() {
if !state.IsNil(gs) {
return s.SaveGenesisData(ctx, gs)
}
return nil
}
type LegacyGenesisProvider struct {
store *Store
}
func NewLegacyGenesisProvider(store *Store) *LegacyGenesisProvider {
return &LegacyGenesisProvider{store: store}
}
var _ genesis.Provider = &LegacyGenesisProvider{}
func (p *LegacyGenesisProvider) Genesis(ctx context.Context) (state.BeaconState, error) {
return p.store.LegacyGenesisState(ctx)
}
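For orientation, a hypothetical sketch of how a provider such as this one is consumed, mirroring the genesis.Initialize call and provider chaining added to node.go in this changeset. The initGenesis name and the extra parameter are illustrative only; Store, NewLegacyGenesisProvider, genesis, and errors are the identifiers already used in this file.
// Sketch only: hypothetical helper showing the provider chain at startup.
func initGenesis(ctx context.Context, dataDir string, store *Store, extra ...genesis.Provider) error {
    // The database-backed provider is appended after any explicitly supplied
    // providers, matching the ordering used in node.go.
    providers := append(extra, NewLegacyGenesisProvider(store))
    if err := genesis.Initialize(ctx, dataDir, providers...); err != nil {
        return errors.Wrap(err, "could not initialize genesis state")
    }
    // Once Initialize has succeeded, genesis.State() serves the cached state.
    _, err := genesis.State()
    return err
}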


@@ -8,6 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -152,6 +153,7 @@ func TestEnsureEmbeddedGenesis(t *testing.T) {
require.NoError(t, undo())
}()
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
ctx := t.Context()
db := setupDB(t)


@@ -6,14 +6,12 @@ import (
"fmt"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/genesis"
statenative "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -65,21 +63,21 @@ func (s *Store) StateOrError(ctx context.Context, blockRoot [32]byte) (state.Bea
return st, nil
}
// GenesisState returns the genesis state in beacon chain.
func (s *Store) GenesisState(ctx context.Context) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisState")
defer span.End()
st, err := genesis.State()
if errors.Is(err, genesis.ErrGenesisStateNotInitialized) {
log.WithError(err).Error("Genesis state not initialized, returning nil state; this should only happen in tests")
return nil, nil
}
return st, err
}
// LegacyGenesisState returns the genesis state stored in the beacon chain database.
func (s *Store) LegacyGenesisState(ctx context.Context) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LegacyGenesisState")
defer span.End()
cached, err := genesis.State(params.BeaconConfig().ConfigName)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
span.SetAttributes(trace.BoolAttribute("cache_hit", cached != nil))
if cached != nil {
return cached, nil
}
var err error
var st state.BeaconState
err = s.db.View(func(tx *bolt.Tx) error {
// Retrieve genesis block's signing root from blocks bucket,


@@ -15,6 +15,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -488,7 +489,7 @@ func TestGenesisState_CanSaveRetrieve(t *testing.T) {
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), headRoot))
require.NoError(t, db.SaveState(t.Context(), st, headRoot))
genesis.StoreStateDuringTest(t, st)
savedGenesisS, err := db.GenesisState(t.Context())
require.NoError(t, err)
@@ -661,7 +662,7 @@ func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) {
require.NoError(t, err)
genesisRoot := [32]byte{'a'}
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))
require.NoError(t, db.SaveState(t.Context(), genesisState, genesisRoot))
genesis.StoreStateDuringTest(t, genesisState)
b := util.NewBeaconBlock()
b.Block.Slot = 1


@@ -3,9 +3,9 @@ package kv
import (
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/genesis"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
)
@@ -18,7 +18,11 @@ func TestSaveOrigin(t *testing.T) {
ctx := t.Context()
db := setupDB(t)
st, err := genesis.State(params.MainnetName)
// Initialize genesis with mainnet config - this will load the embedded mainnet state
require.NoError(t, genesis.Initialize(ctx, t.TempDir()))
// Get the initialized genesis state
st, err := genesis.State()
require.NoError(t, err)
sb, err := st.MarshalSSZ()


@@ -125,6 +125,7 @@ go_test(
"//contracts/deposit/mock:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//genesis:go_default_library",
"//monitoring/clientstats:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",


@@ -22,6 +22,7 @@ import (
contracts "github.com/OffchainLabs/prysm/v6/contracts/deposit"
"github.com/OffchainLabs/prysm/v6/contracts/deposit/mock"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/monitoring/clientstats"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -381,6 +382,7 @@ func TestInitDepositCache_OK(t *testing.T) {
require.NoError(t, err)
require.NoError(t, s.cfg.beaconDB.SaveGenesisBlockRoot(t.Context(), blockRootA))
require.NoError(t, s.cfg.beaconDB.SaveState(t.Context(), emptyState, blockRootA))
genesis.StoreStateDuringTest(t, emptyState)
s.chainStartData.Chainstarted = true
require.NoError(t, s.initDepositCaches(t.Context(), ctrs))
require.Equal(t, 3, len(s.cfg.depositCache.PendingContainers(t.Context(), nil)))
@@ -446,6 +448,7 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {
require.NoError(t, s.cfg.beaconDB.SaveGenesisBlockRoot(t.Context(), headRoot))
require.NoError(t, s.cfg.beaconDB.SaveState(t.Context(), emptyState, headRoot))
require.NoError(t, stateGen.SaveState(t.Context(), headRoot, emptyState))
genesis.StoreStateDuringTest(t, emptyState)
s.cfg.stateGen = stateGen
require.NoError(t, emptyState.SetEth1DepositIndex(3))
@@ -594,6 +597,7 @@ func TestService_EnsureConsistentPowchainData(t *testing.T) {
require.NoError(t, err)
assert.NoError(t, genState.SetSlot(1000))
genesis.StoreStateDuringTest(t, genState)
require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(t.Context(), genState))
_, err = s1.validPowchainData(t.Context())
require.NoError(t, err)
@@ -655,6 +659,7 @@ func TestService_EnsureValidPowchainData(t *testing.T) {
require.NoError(t, err)
assert.NoError(t, genState.SetSlot(1000))
genesis.StoreStateDuringTest(t, genState)
require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(t.Context(), genState))
err = s1.cfg.beaconDB.SaveExecutionChainData(t.Context(), &ethpb.ETH1ChainData{


@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"clear_db.go",
"config.go",
"log.go",
"node.go",
@@ -49,7 +50,6 @@ go_library(
"//beacon-chain/sync/backfill:go_default_library",
"//beacon-chain/sync/backfill/coverage:go_default_library",
"//beacon-chain/sync/checkpoint:go_default_library",
"//beacon-chain/sync/genesis:go_default_library",
"//beacon-chain/sync/initial-sync:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd:go_default_library",
@@ -59,6 +59,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//encoding/bytesutil:go_default_library",
"//genesis:go_default_library",
"//monitoring/prometheus:go_default_library",
"//monitoring/tracing:go_default_library",
"//runtime:go_default_library",


@@ -0,0 +1,101 @@
package node
import (
"context"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/slasherkv"
"github.com/OffchainLabs/prysm/v6/cmd"
"github.com/pkg/errors"
"github.com/urfave/cli/v2"
)
type dbClearer struct {
shouldClear bool
force bool
confirmed bool
}
const (
clearConfirmation = "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
clearDeclined = "Database will not be deleted. No changes have been made."
)
func (c *dbClearer) clearKV(ctx context.Context, db *kv.Store) (*kv.Store, error) {
if !c.shouldProceed() {
return db, nil
}
log.Warning("Removing database")
if err := db.ClearDB(); err != nil {
return nil, errors.Wrap(err, "could not clear database")
}
return kv.NewKVStore(ctx, db.DatabasePath())
}
func (c *dbClearer) clearBlobs(bs *filesystem.BlobStorage) error {
if !c.shouldProceed() {
return nil
}
log.Warning("Removing blob storage")
if err := bs.Clear(); err != nil {
return errors.Wrap(err, "could not clear blob storage")
}
return nil
}
func (c *dbClearer) clearColumns(cs *filesystem.DataColumnStorage) error {
if !c.shouldProceed() {
return nil
}
log.Warning("Removing data columns storage")
if err := cs.Clear(); err != nil {
return errors.Wrap(err, "could not clear data columns storage")
}
return nil
}
func (c *dbClearer) clearSlasher(ctx context.Context, db *slasherkv.Store) (*slasherkv.Store, error) {
if !c.shouldProceed() {
return db, nil
}
log.Warning("Removing slasher database")
if err := db.ClearDB(); err != nil {
return nil, errors.Wrap(err, "could not clear slasher database")
}
return slasherkv.NewKVStore(ctx, db.DatabasePath())
}
func (c *dbClearer) shouldProceed() bool {
if !c.shouldClear {
return false
}
if c.force {
return true
}
if !c.confirmed {
confirmed, err := cmd.ConfirmAction(clearConfirmation, clearDeclined)
if err != nil {
log.WithError(err).Error("Not clearing db due to confirmation error")
return false
}
c.confirmed = confirmed
}
return c.confirmed
}
func newDbClearer(cliCtx *cli.Context) *dbClearer {
force := cliCtx.Bool(cmd.ForceClearDB.Name)
return &dbClearer{
shouldClear: cliCtx.Bool(cmd.ClearDB.Name) || force,
force: force,
}
}
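As a usage note, a single dbClearer is intended to be shared across the key-value, blob, data-column, and slasher stores; a yes answer is cached on the struct, so subsequent clear calls in the same run do not re-prompt. A condensed sketch of that wiring, mirroring the node.go changes later in this changeset; openStores is a made-up helper name, while everything it calls is defined elsewhere in the node package changes.
// Sketch only: openStores is a hypothetical helper illustrating how one
// clearer instance is threaded through every store opened at startup.
func openStores(ctx context.Context, cliCtx *cli.Context, blobs *filesystem.BlobStorage, columns *filesystem.DataColumnStorage) (*kv.Store, error) {
    clearer := newDbClearer(cliCtx)
    dbPath := filepath.Join(cliCtx.String(cmd.DataDirFlag.Name), kv.BeaconNodeDbDirName)
    kvdb, err := openDB(ctx, dbPath, clearer)
    if err != nil {
        return nil, errors.Wrap(err, "could not open database")
    }
    if err := clearer.clearBlobs(blobs); err != nil {
        return nil, errors.Wrap(err, "could not clear blob storage")
    }
    if err := clearer.clearColumns(columns); err != nil {
        return nil, errors.Wrap(err, "could not clear data columns storage")
    }
    return kvdb, nil
}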


@@ -52,7 +52,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/backfill"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/backfill/coverage"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/checkpoint"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/genesis"
initialsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/cmd"
@@ -62,6 +61,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/slice"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/monitoring/prometheus"
"github.com/OffchainLabs/prysm/v6/runtime"
"github.com/OffchainLabs/prysm/v6/runtime/prereqs"
@@ -113,7 +113,7 @@ type BeaconNode struct {
slasherAttestationsFeed *event.Feed
finalizedStateAtStartUp state.BeaconState
serviceFlagOpts *serviceFlagOpts
GenesisInitializer genesis.Initializer
GenesisProviders []genesis.Provider
CheckpointInitializer checkpoint.Initializer
forkChoicer forkchoice.ForkChoicer
clockWaiter startup.ClockWaiter
@@ -127,6 +127,7 @@ type BeaconNode struct {
syncChecker *initialsync.SyncChecker
slasherEnabled bool
lcStore *lightclient.Store
ConfigOptions []params.Option
}
// New creates a new node instance, sets up configuration options, and registers
@@ -135,18 +136,13 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
if err := configureBeacon(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not set beacon configuration options")
}
// Initializes any forks here.
params.BeaconConfig().InitializeForkSchedule()
registry := runtime.NewServiceRegistry()
ctx := cliCtx.Context
beacon := &BeaconNode{
cliCtx: cliCtx,
ctx: ctx,
cancel: cancel,
services: registry,
services: runtime.NewServiceRegistry(),
stop: make(chan struct{}),
stateFeed: new(event.Feed),
blockFeed: new(event.Feed),
@@ -173,6 +169,25 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
}
}
dbClearer := newDbClearer(cliCtx)
dataDir := cliCtx.String(cmd.DataDirFlag.Name)
boltFname := filepath.Join(dataDir, kv.BeaconNodeDbDirName)
kvdb, err := openDB(ctx, boltFname, dbClearer)
if err != nil {
return nil, errors.Wrap(err, "could not open database")
}
beacon.db = kvdb
providers := append(beacon.GenesisProviders, kv.NewLegacyGenesisProvider(kvdb))
if err := genesis.Initialize(ctx, dataDir, providers...); err != nil {
return nil, errors.Wrap(err, "could not initialize genesis state")
}
beacon.ConfigOptions = append([]params.Option{params.WithGenesisValidatorsRoot(genesis.ValidatorsRoot())}, beacon.ConfigOptions...)
params.BeaconConfig().ApplyOptions(beacon.ConfigOptions...)
params.BeaconConfig().InitializeForkSchedule()
params.LogDigests(params.BeaconConfig())
synchronizer := startup.NewClockSynchronizer()
beacon.clockWaiter = synchronizer
beacon.forkChoicer = doublylinkedtree.New()
@@ -191,6 +206,9 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
}
beacon.BlobStorage = blobs
}
if err := dbClearer.clearBlobs(beacon.BlobStorage); err != nil {
return nil, errors.Wrap(err, "could not clear blob storage")
}
if beacon.DataColumnStorage == nil {
dataColumnStorage, err := filesystem.NewDataColumnStorage(cliCtx.Context, beacon.DataColumnStorageOptions...)
@@ -200,8 +218,11 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
beacon.DataColumnStorage = dataColumnStorage
}
if err := dbClearer.clearColumns(beacon.DataColumnStorage); err != nil {
return nil, errors.Wrap(err, "could not clear data column storage")
}
bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
bfs, err := startBaseServices(cliCtx, beacon, depositAddress, dbClearer)
if err != nil {
return nil, errors.Wrap(err, "could not start modules")
}
@@ -289,7 +310,7 @@ func configureBeacon(cliCtx *cli.Context) error {
return nil
}
func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string) (*backfill.Store, error) {
func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string, clearer *dbClearer) (*backfill.Store, error) {
ctx := cliCtx.Context
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
@@ -299,7 +320,7 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
beacon.BlobStorage.WarmCache()
log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
if err := beacon.startSlasherDB(cliCtx, clearer); err != nil {
return nil, errors.Wrap(err, "could not start slashing DB")
}
@@ -479,43 +500,6 @@ func (b *BeaconNode) Close() {
close(b.stop)
}
func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath string) (*kv.Store, error) {
var err error
clearDBConfirmed := false
if clearDB && !forceClearDB {
const (
actionText = "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText = "Database will not be deleted. No changes have been made."
)
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return nil, errors.Wrapf(err, "could not confirm action")
}
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return nil, errors.Wrap(err, "could not clear database")
}
if err := b.BlobStorage.Clear(); err != nil {
return nil, errors.Wrap(err, "could not clear blob storage")
}
d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return nil, errors.Wrap(err, "could not create new database")
}
}
return d, nil
}
func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
@@ -539,60 +523,36 @@ func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
return nil
}
func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
var depositCache cache.DepositCache
baseDir := cliCtx.String(cmd.DataDirFlag.Name)
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDBRequired := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDBRequired := cliCtx.Bool(cmd.ForceClearDB.Name)
func openDB(ctx context.Context, dbPath string, clearer *dbClearer) (*kv.Store, error) {
log.WithField("databasePath", dbPath).Info("Checking DB")
d, err := kv.NewKVStore(b.ctx, dbPath)
d, err := kv.NewKVStore(ctx, dbPath)
if err != nil {
return errors.Wrapf(err, "could not create database at %s", dbPath)
return nil, errors.Wrapf(err, "could not create database at %s", dbPath)
}
if clearDBRequired || forceClearDBRequired {
d, err = b.clearDB(clearDBRequired, forceClearDBRequired, d, dbPath)
if err != nil {
return errors.Wrap(err, "could not clear database")
}
d, err = clearer.clearKV(ctx, d)
if err != nil {
return nil, errors.Wrap(err, "could not clear database")
}
if err := d.RunMigrations(b.ctx); err != nil {
return err
}
return d, d.RunMigrations(ctx)
}
b.db = d
depositCache, err = depositsnapshot.New()
func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
depositCache, err := depositsnapshot.New()
if err != nil {
return errors.Wrap(err, "could not create deposit cache")
}
b.depositCache = depositCache
if b.GenesisInitializer != nil {
if err := b.GenesisInitializer.Initialize(b.ctx, d); err != nil {
if errors.Is(err, db.ErrExistingGenesisState) {
return errors.Errorf("Genesis state flag specified but a genesis state "+
"exists already. Run again with --%s and/or ensure you are using the "+
"appropriate testnet flag to load the given genesis state.", cmd.ClearDB.Name)
}
return errors.Wrap(err, "could not load genesis from file")
}
}
if err := b.db.EnsureEmbeddedGenesis(b.ctx); err != nil {
return errors.Wrap(err, "could not ensure embedded genesis")
}
if b.CheckpointInitializer != nil {
log.Info("Checkpoint sync - Downloading origin state and block")
if err := b.CheckpointInitializer.Initialize(b.ctx, d); err != nil {
if err := b.CheckpointInitializer.Initialize(b.ctx, b.db); err != nil {
return err
}
}
@@ -604,49 +564,25 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
log.WithField("address", depositAddress).Info("Deposit contract")
return nil
}
func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context, clearer *dbClearer) error {
if !b.slasherEnabled {
return nil
}
baseDir := cliCtx.String(cmd.DataDirFlag.Name)
if cliCtx.IsSet(flags.SlasherDirFlag.Name) {
baseDir = cliCtx.String(flags.SlasherDirFlag.Name)
}
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)
log.WithField("databasePath", dbPath).Info("Checking DB")
d, err := slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
return err
}
clearDBConfirmed := false
if clearDB && !forceClearDB {
actionText := "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText := "Database will not be deleted. No changes have been made."
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return err
}
d, err = clearer.clearSlasher(b.ctx, d)
if err != nil {
return errors.Wrap(err, "could not clear slasher database")
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return errors.Wrap(err, "could not clear database")
}
d, err = slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrap(err, "could not create new database")
}
}
b.slasherDB = d
return nil
}
@@ -909,6 +845,7 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
ClockWaiter: b.clockWaiter,
InitialSyncComplete: complete,
BlobStorage: b.BlobStorage,
DataColumnStorage: b.DataColumnStorage,
}, opts...)
return b.services.RegisterService(is)
}


@@ -5,6 +5,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/builder"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
"github.com/OffchainLabs/prysm/v6/config/params"
)
// Option for beacon node configuration.
@@ -51,6 +52,13 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
}
}
func WithConfigOptions(opt ...params.Option) Option {
return func(bn *BeaconNode) error {
bn.ConfigOptions = append(bn.ConfigOptions, opt...)
return nil
}
}
// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode
func WithDataColumnStorage(bs *filesystem.DataColumnStorage) Option {
return func(bn *BeaconNode) error {


@@ -72,7 +72,6 @@ go_library(
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
"//runtime:go_default_library",
@@ -169,7 +168,6 @@ go_test(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network:go_default_library",
"//network/forks:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
@@ -179,6 +177,7 @@ go_test(
"//testing/util:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",


@@ -15,7 +15,6 @@ import (
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
@@ -274,14 +273,8 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
return errors.New("attempted to broadcast nil light client optimistic update")
}
forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest")
tracing.AnnotateError(span, err)
return err
}
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(forkDigest)); err != nil {
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client optimistic update")
err := errors.Wrap(err, "could not publish message")
tracing.AnnotateError(span, err)
@@ -300,13 +293,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
return errors.New("attempted to broadcast nil light client finality update")
}
forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest")
tracing.AnnotateError(span, err)
return err
}
forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client finality update")
err := errors.Wrap(err, "could not publish message")


@@ -15,12 +15,13 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
testpb "github.com/OffchainLabs/prysm/v6/proto/testing"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -59,6 +60,7 @@ func TestService_Broadcast(t *testing.T) {
topic := "/eth2/%x/testing"
// Set a test gossip mapping for testpb.TestSimpleMessage.
GossipTypeMapping[reflect.TypeOf(msg)] = topic
p.clock = startup.NewClock(p.genesisTime, bytesutil.ToBytes32(p.genesisValidatorsRoot))
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic = fmt.Sprintf(topic, digest)
@@ -551,9 +553,7 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
require.NoError(t, err)
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientOptimisticUpdateTopicFormat
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
require.NoError(t, err)
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, digest)
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
@@ -617,9 +617,7 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
require.NoError(t, err)
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientFinalityUpdateTopicFormat
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
require.NoError(t, err)
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, digest)
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()


@@ -585,8 +585,11 @@ func (s *Service) createLocalNode(
localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)
localNode, err = addForkEntry(localNode, s.genesisTime, s.genesisValidatorsRoot)
if err != nil {
currentSlot := slots.CurrentSlot(s.genesisTime)
currentEpoch := slots.ToEpoch(currentSlot)
current := params.GetNetworkScheduleEntry(currentEpoch)
next := params.NextNetworkScheduleEntry(currentEpoch)
if err := updateENR(localNode, current, next); err != nil {
return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
}
@@ -707,7 +710,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {
// Ignore nodes that don't match our fork digest.
nodeENR := node.Record()
if s.genesisValidatorsRoot != nil {
if err := s.compareForkENR(nodeENR); err != nil {
if err := compareForkENR(s.dv5Listener.LocalNode().Node().Record(), nodeENR); err != nil {
log.WithError(err).Trace("Fork ENR mismatches between peer and local node")
return false
}


@@ -3,12 +3,9 @@ package p2p
import (
"bytes"
"fmt"
"time"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
@@ -16,6 +13,8 @@ import (
"github.com/sirupsen/logrus"
)
var errEth2ENRDigestMismatch = errors.New("fork digest of peer does not match local value")
// ENR key used for Ethereum consensus-related fork data.
var eth2ENRKey = params.BeaconNetworkConfig().ETH2Key
@@ -25,29 +24,31 @@ func (s *Service) currentForkDigest() ([4]byte, error) {
if !s.isInitialized() {
return [4]byte{}, errors.New("state is not initialized")
}
return forks.CreateForkDigest(s.genesisTime, s.genesisValidatorsRoot)
currentSlot := slots.CurrentSlot(s.genesisTime)
currentEpoch := slots.ToEpoch(currentSlot)
return params.ForkDigest(currentEpoch), nil
}
// Compares fork ENRs between an incoming peer's record and our node's
// local record values for current and next fork version/epoch.
func (s *Service) compareForkENR(record *enr.Record) error {
currentRecord := s.dv5Listener.LocalNode().Node().Record()
peerForkENR, err := forkEntry(record)
func compareForkENR(self, peer *enr.Record) error {
peerForkENR, err := forkEntry(peer)
if err != nil {
return err
}
currentForkENR, err := forkEntry(currentRecord)
currentForkENR, err := forkEntry(self)
if err != nil {
return err
}
enrString, err := SerializeENR(record)
enrString, err := SerializeENR(peer)
if err != nil {
return err
}
// Clients SHOULD connect to peers with current_fork_digest, next_fork_version,
// and next_fork_epoch that match local values.
if !bytes.Equal(peerForkENR.CurrentForkDigest, currentForkENR.CurrentForkDigest) {
return fmt.Errorf(
return errors.Wrapf(errEth2ENRDigestMismatch,
"fork digest of peer with ENR %s: %v, does not match local value: %v",
enrString,
peerForkENR.CurrentForkDigest,
@@ -74,41 +75,36 @@ func (s *Service) compareForkENR(record *enr.Record) error {
return nil
}
// Adds a fork entry as an ENR record under the Ethereum consensus EnrKey for
// the local node. The fork entry is an ssz-encoded enrForkID type
// which takes into account the current fork version from the current
// epoch to create a fork digest, the next fork version,
// and the next fork epoch.
func addForkEntry(
node *enode.LocalNode,
genesisTime time.Time,
genesisValidatorsRoot []byte,
) (*enode.LocalNode, error) {
digest, err := forks.CreateForkDigest(genesisTime, genesisValidatorsRoot)
if err != nil {
return nil, err
}
currentSlot := slots.CurrentSlot(genesisTime)
currentEpoch := slots.ToEpoch(currentSlot)
if prysmTime.Now().Before(genesisTime) {
currentEpoch = 0
}
nextForkVersion, nextForkEpoch, err := forks.NextForkData(currentEpoch)
if err != nil {
return nil, err
}
func updateENR(node *enode.LocalNode, entry, next params.NetworkScheduleEntry) error {
enrForkID := &pb.ENRForkID{
CurrentForkDigest: digest[:],
NextForkVersion: nextForkVersion[:],
NextForkEpoch: nextForkEpoch,
CurrentForkDigest: entry.ForkDigest[:],
NextForkVersion: next.ForkVersion[:],
NextForkEpoch: next.Epoch,
}
if entry.Epoch == next.Epoch {
enrForkID.NextForkEpoch = params.BeaconConfig().FarFutureEpoch
}
logFields := logrus.Fields{
"CurrentForkDigest": fmt.Sprintf("%#x", enrForkID.CurrentForkDigest),
"NextForkVersion": fmt.Sprintf("%#x", enrForkID.NextForkVersion),
"NextForkEpoch": fmt.Sprintf("%d", enrForkID.NextForkEpoch),
}
if params.BeaconConfig().FuluForkEpoch != params.BeaconConfig().FarFutureEpoch {
if entry.ForkDigest == next.ForkDigest {
node.Set(enr.WithEntry(nfdEnrKey, make([]byte, len(next.ForkDigest))))
} else {
node.Set(enr.WithEntry(nfdEnrKey, next.ForkDigest[:]))
}
logFields["NextForkDigest"] = fmt.Sprintf("%#x", next.ForkDigest)
}
log.WithFields(logFields).Info("Updating ENR Fork ID")
enc, err := enrForkID.MarshalSSZ()
if err != nil {
return nil, err
return err
}
forkEntry := enr.WithEntry(eth2ENRKey, enc)
node.Set(forkEntry)
return node, nil
return nil
}
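To make the new flow concrete, a short sketch of how updateENR is now driven, mirroring the createLocalNode and forkWatcher call sites elsewhere in this changeset; s and localNode stand in for the p2p Service and its enode.LocalNode.
// Sketch: the fork entry is derived purely from the network schedule for the
// node's current epoch, with the following schedule entry supplying the
// "next" fields.
currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.genesisTime))
current := params.GetNetworkScheduleEntry(currentEpoch)
next := params.NextNetworkScheduleEntry(currentEpoch)
if err := updateENR(localNode, current, next); err != nil {
    return errors.Wrap(err, "could not add eth2 fork version entry to enr")
}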
// Retrieves an enrForkID from an ENR record by key lookup


@@ -8,254 +8,121 @@ import (
"testing"
"time"
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
ma "github.com/multiformats/go-multiaddr"
"github.com/sirupsen/logrus"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
const port = 2000
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, fieldparams.RootLength)
db := testDB.SetupDB(t)
s := &Service{
cfg: &Config{
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
DB: db,
},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
custodyInfo: &custodyInfo{},
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()
// Allow bootnode's table to have its initial refresh. This allows
// inbound nodes to be added in.
time.Sleep(5 * time.Second)
bootNode := bootListener.Self()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
DB: db,
}
var listeners []*listenerWrapper
for i := 1; i <= 5; i++ {
port := 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)
// We give every peer a different genesis validators root, which
// will cause each peer to have a different ForkDigest, preventing
// them from connecting according to our discovery rules for Ethereum consensus.
root := make([]byte, 32)
copy(root, strconv.Itoa(port))
s = &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: root,
custodyInfo: &custodyInfo{},
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")
listeners = append(listeners, listener)
}
defer func() {
// Close down all peers.
for _, listener := range listeners {
listener.Close()
}
}()
// Wait for the nodes to have their local routing tables to be populated with the other nodes
time.Sleep(discoveryWaitTime)
lastListener := listeners[len(listeners)-1]
nodes := lastListener.Lookup(bootNode.ID())
if len(nodes) < 4 {
t.Errorf("The node's local table doesn't have the expected number of nodes. "+
"Expected more than or equal to %d but got %d", 4, len(nodes))
}
// Now, we start a new p2p service. It should have no peers aside from the
// bootnode given all nodes provided by discv5 will have different fork digests.
cfg.UDPPort = 14000
cfg.TCPPort = 14001
cfg.MaxPeers = 30
s, err = NewService(t.Context(), cfg)
require.NoError(t, err)
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
addrs := make([]ma.Multiaddr, 0)
for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
require.NoError(t, err)
addrs = append(addrs, nodeAddrs...)
}
}
// We should not have valid peers if the fork digest mismatched.
assert.Equal(t, 0, len(addrs), "Expected 0 valid peers")
require.NoError(t, s.Stop())
}
func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
const port = 2000
func TestCompareForkENR(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
params.BeaconConfig().InitializeForkSchedule()
logrus.SetLevel(logrus.TraceLevel)
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
db := testDB.SetupDB(t)
s := &Service{
cfg: &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true, DB: db},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
custodyInfo: &custodyInfo{},
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()
db, err := enode.OpenDB("")
assert.NoError(t, err)
_, k := createAddrAndPrivKey(t)
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
self := enode.NewLocalNode(db, k)
require.NoError(t, updateENR(self, current, next))
// Allow bootnode's table to have its initial refresh. This allows
// inbound nodes to be added in.
time.Sleep(5 * time.Second)
bootNode := bootListener.Self()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
PingInterval: testPingInterval,
DisableLivenessCheck: true,
DB: db,
cases := []struct {
name string
expectErr error
expectLog string
node func(t *testing.T) *enode.Node
}{
{
name: "match",
node: func(t *testing.T) *enode.Node {
// Create a peer with the same current fork digest and next fork version/epoch.
peer := enode.NewLocalNode(db, k)
require.NoError(t, updateENR(peer, current, next))
return peer.Node()
},
},
{
name: "current digest mismatch",
node: func(t *testing.T) *enode.Node {
// Create a peer with a different current fork digest.
peer := enode.NewLocalNode(db, k)
testDigest := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
require.NotEqual(t, current.ForkDigest, testDigest, "ensure test fork digest is unique")
currentCopy := current
currentCopy.ForkDigest = testDigest
require.NoError(t, updateENR(peer, currentCopy, next))
return peer.Node()
},
expectErr: errEth2ENRDigestMismatch,
},
{
name: "next fork version mismatch",
node: func(t *testing.T) *enode.Node {
// Create a peer with the same current fork digest but a different next fork version.
peer := enode.NewLocalNode(db, k)
testVersion := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
require.NotEqual(t, next.ForkVersion, testVersion, "ensure test fork version is unique")
nextCopy := next
nextCopy.ForkVersion = testVersion
require.NoError(t, updateENR(peer, current, nextCopy))
return peer.Node()
},
expectLog: "Peer matches fork digest but has different next fork version",
},
{
name: "next fork epoch mismatch",
node: func(t *testing.T) *enode.Node {
// Create a peer with the same current fork digest but a different next fork epoch.
peer := enode.NewLocalNode(db, k)
nextCopy := next
nextCopy.Epoch = nextCopy.Epoch + 1
require.NoError(t, updateENR(peer, current, nextCopy))
return peer.Node()
},
expectLog: "Peer matches fork digest but has different next fork epoch",
},
}
var listeners []*listenerWrapper
for i := 1; i <= 5; i++ {
port := 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)
c := params.BeaconConfig().Copy()
nextForkEpoch := primitives.Epoch(i)
c.ForkVersionSchedule[[4]byte{'A', 'B', 'C', 'D'}] = nextForkEpoch
params.OverrideBeaconConfig(c)
// We give every peer a different genesis validators root, which
// will cause each peer to have a different ForkDigest, preventing
// them from connecting according to our discovery rules for Ethereum consensus.
s = &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
custodyInfo: &custodyInfo{},
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")
listeners = append(listeners, listener)
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
hook := logTest.NewGlobal()
peer := c.node(t)
err := compareForkENR(self.Node().Record(), peer.Record())
if c.expectErr != nil {
require.ErrorIs(t, err, c.expectErr, "Expected error to match")
} else {
require.NoError(t, err, "Expected no error comparing fork ENRs")
}
if c.expectLog != "" {
require.LogsContain(t, hook, c.expectLog, "Expected log message not found")
}
})
}
defer func() {
// Close down all peers.
for _, listener := range listeners {
listener.Close()
}
}()
// Wait for the nodes to have their local routing tables to be populated with the other nodes
time.Sleep(discoveryWaitTime)
lastListener := listeners[len(listeners)-1]
nodes := lastListener.Lookup(bootNode.ID())
if len(nodes) < 4 {
t.Errorf("The node's local table doesn't have the expected number of nodes. "+
"Expected more than or equal to %d but got %d", 4, len(nodes))
}
// Now, we start a new p2p service. It should have no peers aside from the
// bootnode given all nodes provided by discv5 will have different fork digests.
cfg.UDPPort = 14000
cfg.TCPPort = 14001
cfg.MaxPeers = 30
cfg.StateNotifier = &mock.MockStateNotifier{}
s, err = NewService(t.Context(), cfg)
require.NoError(t, err)
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
addrs := make([]ma.Multiaddr, 0, len(nodes))
for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
require.NoError(t, err)
addrs = append(addrs, nodeAddrs...)
}
}
if len(addrs) == 0 {
t.Error("Expected to have valid peers, got 0")
}
require.LogsContain(t, hook, "Peer matches fork digest but has different next fork epoch")
require.NoError(t, s.Stop())
}
func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
c.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): 0,
{0, 0, 0, 1}: 1,
}
nextForkEpoch := primitives.Epoch(1)
nextForkVersion := []byte{0, 0, 0, 1}
params.OverrideBeaconConfig(c)
params.BeaconConfig().InitializeForkSchedule()
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
digest, err := forks.CreateForkDigest(genesisTime, make([]byte, 32))
require.NoError(t, err)
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
enrForkID := &pb.ENRForkID{
CurrentForkDigest: digest[:],
NextForkVersion: nextForkVersion,
NextForkEpoch: nextForkEpoch,
CurrentForkDigest: current.ForkDigest[:],
NextForkVersion: next.ForkVersion[:],
NextForkEpoch: next.Epoch,
}
enc, err := enrForkID.MarshalSSZ()
require.NoError(t, err)
entry := enr.WithEntry(eth2ENRKey, enc)
// In epoch 1 of current time, the fork version should be
// {0, 0, 0, 1} according to the configuration override above.
temp := t.TempDir()
randNum := rand.Int()
tempPath := path.Join(temp, strconv.Itoa(randNum))
@@ -267,18 +134,16 @@ func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
localNode := enode.NewLocalNode(db, pkey)
localNode.Set(entry)
want, err := signing.ComputeForkDigest([]byte{0, 0, 0, 0}, genesisValidatorsRoot)
require.NoError(t, err)
resp, err := forkEntry(localNode.Node().Record())
require.NoError(t, err)
assert.DeepEqual(t, want[:], resp.CurrentForkDigest)
assert.DeepEqual(t, nextForkVersion, resp.NextForkVersion)
assert.Equal(t, nextForkEpoch, resp.NextForkEpoch, "Unexpected next fork epoch")
assert.Equal(t, hexutil.Encode(current.ForkDigest[:]), hexutil.Encode(resp.CurrentForkDigest))
assert.Equal(t, hexutil.Encode(next.ForkVersion[:]), hexutil.Encode(resp.NextForkVersion))
assert.Equal(t, next.Epoch, resp.NextForkEpoch, "Unexpected next fork epoch")
}
func TestAddForkEntry_Genesis(t *testing.T) {
func TestAddForkEntry_NextForkVersion(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
temp := t.TempDir()
randNum := rand.Int()
tempPath := path.Join(temp, strconv.Itoa(randNum))
@@ -288,17 +153,135 @@ func TestAddForkEntry_Genesis(t *testing.T) {
db, err := enode.OpenDB("")
require.NoError(t, err)
bCfg := params.MainnetConfig()
bCfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{}
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)] = bCfg.GenesisEpoch
params.OverrideBeaconConfig(bCfg)
localNode := enode.NewLocalNode(db, pkey)
localNode, err = addForkEntry(localNode, time.Now().Add(10*time.Second), bytesutil.PadTo([]byte{'A', 'B', 'C', 'D'}, 32))
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
// Add the fork entry to the local node's ENR.
require.NoError(t, updateENR(localNode, current, next))
fe, err := forkEntry(localNode.Node().Record())
require.NoError(t, err)
forkEntry, err := forkEntry(localNode.Node().Record())
require.NoError(t, err)
assert.DeepEqual(t,
params.BeaconConfig().GenesisForkVersion, forkEntry.NextForkVersion,
assert.Equal(t,
hexutil.Encode(params.BeaconConfig().AltairForkVersion), hexutil.Encode(fe.NextForkVersion),
"Wanted Next Fork Version to be equal to genesis fork version")
last := params.LastForkEpoch()
current = params.GetNetworkScheduleEntry(last)
next = params.NextNetworkScheduleEntry(last)
require.NoError(t, updateENR(localNode, current, next))
entry := params.NextNetworkScheduleEntry(last)
fe, err = forkEntry(localNode.Node().Record())
require.NoError(t, err)
assert.Equal(t,
hexutil.Encode(entry.ForkVersion[:]), hexutil.Encode(fe.NextForkVersion),
"Wanted Next Fork Version to be equal to last entry in schedule")
}
func TestUpdateENR_FuluForkDigest(t *testing.T) {
setupTest := func(t *testing.T, fuluEnabled bool) (*enode.LocalNode, func()) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
if fuluEnabled {
cfg.FuluForkEpoch = 100
} else {
cfg.FuluForkEpoch = cfg.FarFutureEpoch
}
cfg.FuluForkVersion = []byte{5, 0, 0, 0}
params.OverrideBeaconConfig(cfg)
cfg.InitializeForkSchedule()
pkey, err := privKey(&Config{DataDir: t.TempDir()})
require.NoError(t, err, "Could not get private key")
db, err := enode.OpenDB("")
require.NoError(t, err)
localNode := enode.NewLocalNode(db, pkey)
cleanup := func() {
db.Close()
}
return localNode, cleanup
}
tests := []struct {
name string
fuluEnabled bool
currentEntry params.NetworkScheduleEntry
nextEntry params.NetworkScheduleEntry
validateNFD func(t *testing.T, localNode *enode.LocalNode, nextEntry params.NetworkScheduleEntry)
}{
{
name: "different digests sets nfd to next digest",
fuluEnabled: true,
currentEntry: params.NetworkScheduleEntry{
Epoch: 50,
ForkDigest: [4]byte{1, 2, 3, 4},
ForkVersion: [4]byte{1, 0, 0, 0},
},
nextEntry: params.NetworkScheduleEntry{
Epoch: 100,
ForkDigest: [4]byte{5, 6, 7, 8}, // Different from current
ForkVersion: [4]byte{2, 0, 0, 0},
},
validateNFD: func(t *testing.T, localNode *enode.LocalNode, nextEntry params.NetworkScheduleEntry) {
var nfdValue []byte
err := localNode.Node().Record().Load(enr.WithEntry(nfdEnrKey, &nfdValue))
require.NoError(t, err)
assert.DeepEqual(t, nextEntry.ForkDigest[:], nfdValue, "nfd entry should equal next fork digest")
},
},
{
name: "same digests sets nfd to empty",
fuluEnabled: true,
currentEntry: params.NetworkScheduleEntry{
Epoch: 50,
ForkDigest: [4]byte{1, 2, 3, 4},
ForkVersion: [4]byte{1, 0, 0, 0},
},
nextEntry: params.NetworkScheduleEntry{
Epoch: 100,
ForkDigest: [4]byte{1, 2, 3, 4}, // Same as current
ForkVersion: [4]byte{2, 0, 0, 0},
},
validateNFD: func(t *testing.T, localNode *enode.LocalNode, nextEntry params.NetworkScheduleEntry) {
var nfdValue []byte
err := localNode.Node().Record().Load(enr.WithEntry(nfdEnrKey, &nfdValue))
require.NoError(t, err)
assert.DeepEqual(t, make([]byte, len(nextEntry.ForkDigest)), nfdValue, "nfd entry should be empty bytes when digests are the same")
},
},
{
name: "fulu disabled does not add nfd field",
fuluEnabled: false,
currentEntry: params.NetworkScheduleEntry{
Epoch: 50,
ForkDigest: [4]byte{1, 2, 3, 4},
ForkVersion: [4]byte{1, 0, 0, 0},
},
nextEntry: params.NetworkScheduleEntry{
Epoch: 100,
ForkDigest: [4]byte{5, 6, 7, 8}, // Different from current
ForkVersion: [4]byte{2, 0, 0, 0},
},
validateNFD: func(t *testing.T, localNode *enode.LocalNode, nextEntry params.NetworkScheduleEntry) {
var nfdValue []byte
err := localNode.Node().Record().Load(enr.WithEntry(nfdEnrKey, &nfdValue))
require.ErrorContains(t, "missing ENR key", err, "nfd field should not be present when Fulu fork is disabled")
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
localNode, cleanup := setupTest(t, tt.fuluEnabled)
defer cleanup()
currentEntry := tt.currentEntry
nextEntry := tt.nextEntry
require.NoError(t, updateENR(localNode, currentEntry, nextEntry))
tt.validateNFD(t, localNode, nextEntry)
})
}
}


@@ -9,27 +9,26 @@ import (
// updates the node's discovery service to reflect any new fork version
// changes.
func (s *Service) forkWatcher() {
// Exit early if discovery is disabled - there's no ENR to update
if s.dv5Listener == nil {
log.Debug("Discovery disabled, exiting fork watcher")
return
}
slotTicker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
var scheduleEntry params.NetworkScheduleEntry
for {
select {
case currSlot := <-slotTicker.C():
currEpoch := slots.ToEpoch(currSlot)
if currEpoch == params.BeaconConfig().AltairForkEpoch ||
currEpoch == params.BeaconConfig().BellatrixForkEpoch ||
currEpoch == params.BeaconConfig().CapellaForkEpoch ||
currEpoch == params.BeaconConfig().DenebForkEpoch ||
currEpoch == params.BeaconConfig().ElectraForkEpoch ||
currEpoch == params.BeaconConfig().FuluForkEpoch {
// If we are in the fork epoch, we update our enr with
// the updated fork digest. These repeatedly does
// this over the epoch, which might be slightly wasteful
// but is fine nonetheless.
if s.dv5Listener != nil { // make sure it's not a local network
_, err := addForkEntry(s.dv5Listener.LocalNode(), s.genesisTime, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not add fork entry")
}
currentEpoch := slots.ToEpoch(currSlot)
newEntry := params.GetNetworkScheduleEntry(currentEpoch)
if newEntry.ForkDigest != scheduleEntry.ForkDigest {
nextEntry := params.NextNetworkScheduleEntry(currentEpoch)
if err := updateENR(s.dv5Listener.LocalNode(), newEntry, nextEntry); err != nil {
log.WithFields(newEntry.LogFields()).WithError(err).Error("Could not add fork entry")
continue // don't replace scheduleEntry until this succeeds
}
scheduleEntry = newEntry
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")


@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
"github.com/OffchainLabs/prysm/v6/network/forks"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
)
@@ -39,7 +38,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
copy(msg, "invalid")
return bytesutil.UnsafeCastToString(msg)
}
_, fEpoch, err := forks.RetrieveForkDataFromDigest(digest, genesisValidatorsRoot)
_, fEpoch, err := params.ForkDataFromDigest(digest)
if err != nil {
// Impossible condition that should
// never be hit.

View File

@@ -7,10 +7,10 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/golang/snappy"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
@@ -18,28 +18,27 @@ import (
func TestMsgID_HashesCorrectly(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
d, err := forks.CreateForkDigest(time.Now(), genesisValidatorsRoot)
assert.NoError(t, err)
clock := startup.NewClock(time.Now(), bytesutil.ToBytes32([]byte{'A'}))
valRoot := clock.GenesisValidatorsRoot()
d := params.ForkDigest(clock.CurrentEpoch())
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
invalidSnappy := [32]byte{'J', 'U', 'N', 'K'}
pMsg := &pubsubpb.Message{Data: invalidSnappy[:], Topic: &tpc}
hashedData := hash.Hash(append(params.BeaconConfig().MessageDomainInvalidSnappy[:], pMsg.Data...))
msgID := string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(valRoot[:], pMsg), "Got incorrect msg id")
validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
enc := snappy.Encode(nil, validObj[:])
nMsg := &pubsubpb.Message{Data: enc, Topic: &tpc}
hashedData = hash.Hash(append(params.BeaconConfig().MessageDomainValidSnappy[:], validObj[:]...))
msgID = string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(valRoot[:], nMsg), "Got incorrect msg id")
}
func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
d, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, genesisValidatorsRoot)
d, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
assert.NoError(t, err)
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
topicLen := uint64(len(tpc))
@@ -52,7 +51,7 @@ func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
combinedObj = append(combinedObj, pMsg.Data...)
hashedData := hash.Hash(combinedObj)
msgID := string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], pMsg), "Got incorrect msg id")
validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
enc := snappy.Encode(nil, validObj[:])
@@ -63,13 +62,12 @@ func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
combinedObj = append(combinedObj, validObj[:]...)
hashedData = hash.Hash(combinedObj)
msgID = string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], nMsg), "Got incorrect msg id")
}
func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
d, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, genesisValidatorsRoot)
d, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
assert.NoError(t, err)
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
topicLen := uint64(len(tpc))
@@ -82,7 +80,7 @@ func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
combinedObj = append(combinedObj, pMsg.Data...)
hashedData := hash.Hash(combinedObj)
msgID := string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], pMsg), "Got incorrect msg id")
validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
enc := snappy.Encode(nil, validObj[:])
@@ -93,7 +91,7 @@ func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
combinedObj = append(combinedObj, validObj[:]...)
hashedData = hash.Hash(combinedObj)
msgID = string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], nMsg), "Got incorrect msg id")
}
func TestMsgID_WithNilTopic(t *testing.T) {


@@ -42,7 +42,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
},
check: func(scorer *scorers.GossipScorer) {
assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
_, _, topicMap, err := scorer.GossipData("peer1")
assert.NoError(t, err)
assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")


@@ -40,7 +40,7 @@ const (
rSubD = 8 // random gossip target
)
var errInvalidTopic = errors.New("invalid topic format")
var ErrInvalidTopic = errors.New("invalid topic format")
// Specifies the fixed size context length.
const digestLength = 4
@@ -219,12 +219,12 @@ func convertTopicScores(topicMap map[string]*pubsub.TopicScoreSnapshot) map[stri
func ExtractGossipDigest(topic string) ([4]byte, error) {
// Ensure the topic prefix is correct.
if len(topic) < len(gossipTopicPrefix)+1 || topic[:len(gossipTopicPrefix)] != gossipTopicPrefix {
return [4]byte{}, errInvalidTopic
return [4]byte{}, ErrInvalidTopic
}
start := len(gossipTopicPrefix)
end := strings.Index(topic[start:], "/")
if end == -1 { // Ensure a topic suffix exists.
return [4]byte{}, errInvalidTopic
return [4]byte{}, ErrInvalidTopic
}
end += start
strDigest := topic[start:end]
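
With errInvalidTopic promoted to the exported ErrInvalidTopic, callers outside the p2p package can match it as a sentinel instead of comparing error strings. A minimal usage sketch, assuming ExtractGossipDigest and ErrInvalidTopic live in the same p2p package as the hunk above suggests and that gossip topics keep the usual /eth2/<digest>/... shape; the sample topic string is illustrative only:

package main

import (
	"errors"
	"fmt"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
)

// classify parses the fork digest out of a gossip topic and branches on the
// exported sentinel for malformed topics.
func classify(topic string) {
	digest, err := p2p.ExtractGossipDigest(topic)
	if errors.Is(err, p2p.ErrInvalidTopic) {
		fmt.Println("malformed gossip topic, ignoring:", topic)
		return
	}
	if err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Printf("fork digest %#x parsed from %s\n", digest, topic)
}

func main() {
	classify("/eth2/0a1b2c3d/beacon_block/ssz_snappy") // illustrative topic
	classify("not-a-gossip-topic")
}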


@@ -1,12 +1,12 @@
package p2p
import (
"encoding/hex"
"fmt"
"strings"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/network/forks"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
@@ -32,6 +32,14 @@ var _ pubsub.SubscriptionFilter = (*Service)(nil)
// (Note: BlobSidecar is not included in this list since it is superseded by DataColumnSidecar)
const pubsubSubscriptionRequestLimit = 500
func (s *Service) setAllForkDigests() {
entries := params.SortedNetworkScheduleEntries()
s.allForkDigests = make(map[[4]byte]struct{}, len(entries))
for _, entry := range entries {
s.allForkDigests[entry.ForkDigest] = struct{}{}
}
}
// CanSubscribe returns true if the topic is of interest and we could subscribe to it.
func (s *Service) CanSubscribe(topic string) bool {
if !s.isInitialized() {
@@ -48,50 +56,18 @@ func (s *Service) CanSubscribe(topic string) bool {
if parts[1] != "eth2" {
return false
}
phase0ForkDigest, err := s.currentForkDigest()
var digest [4]byte
dl, err := hex.Decode(digest[:], []byte(parts[2]))
if err == nil && dl != 4 {
err = fmt.Errorf("expected 4 bytes, got %d", dl)
}
if err != nil {
log.WithError(err).Error("Could not determine fork digest")
log.WithError(err).WithField("topic", topic).WithField("digest", parts[2]).Error("CanSubscribe failed to parse message")
return false
}
altairForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine altair fork digest")
return false
}
bellatrixForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Bellatrix fork digest")
return false
}
capellaForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Capella fork digest")
return false
}
denebForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Deneb fork digest")
return false
}
electraForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Electra fork digest")
return false
}
fuluForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().FuluForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Fulu fork digest")
return false
}
switch parts[2] {
case fmt.Sprintf("%x", phase0ForkDigest):
case fmt.Sprintf("%x", altairForkDigest):
case fmt.Sprintf("%x", bellatrixForkDigest):
case fmt.Sprintf("%x", capellaForkDigest):
case fmt.Sprintf("%x", denebForkDigest):
case fmt.Sprintf("%x", electraForkDigest):
case fmt.Sprintf("%x", fuluForkDigest):
default:
if _, ok := s.allForkDigests[digest]; !ok {
log.WithField("topic", topic).WithField("digest", fmt.Sprintf("%#x", digest)).Error("CanSubscribe failed to find digest in allForkDigests")
return false
}
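
The filter no longer recomputes a digest per fork on every call; it parses the digest out of the topic and checks membership in the set built by setAllForkDigests. A condensed sketch of that lookup, assuming SortedNetworkScheduleEntries exposes a ForkDigest field as shown above and that the fork schedule has been initialized first:

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/OffchainLabs/prysm/v6/config/params"
)

// knownDigest decodes the hex digest segment of a gossip topic and checks it
// against the digests of every scheduled fork, the same shape of lookup
// CanSubscribe performs against s.allForkDigests.
func knownDigest(hexDigest string) (bool, error) {
	known := make(map[[4]byte]struct{})
	for _, entry := range params.SortedNetworkScheduleEntries() {
		known[entry.ForkDigest] = struct{}{}
	}
	var digest [4]byte
	n, err := hex.Decode(digest[:], []byte(hexDigest))
	if err != nil {
		return false, err
	}
	if n != 4 {
		return false, fmt.Errorf("expected 4 bytes, got %d", n)
	}
	_, ok := known[digest]
	return ok, nil
}

func main() {
	params.BeaconConfig().InitializeForkSchedule()
	ok, err := knownDigest(fmt.Sprintf("%x", params.ForkDigest(0)))
	fmt.Println(ok, err)
}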


@@ -12,8 +12,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
@@ -22,12 +20,11 @@ import (
func TestService_CanSubscribe(t *testing.T) {
params.SetupTestConfigCleanup(t)
currentFork := [4]byte{0x01, 0x02, 0x03, 0x04}
params.BeaconConfig().InitializeForkSchedule()
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
genesisTime := time.Now()
var valRoot [32]byte
digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
assert.NoError(t, err)
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
currentFork := params.GetNetworkScheduleEntry(clock.CurrentEpoch()).ForkDigest
digest := params.ForkDigest(clock.CurrentEpoch())
type test struct {
name string
topic string
@@ -109,12 +106,14 @@ func TestService_CanSubscribe(t *testing.T) {
}
tests = append(tests, tt)
}
valRoot := clock.GenesisValidatorsRoot()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
genesisValidatorsRoot: valRoot[:],
genesisTime: genesisTime,
genesisTime: clock.GenesisTime(),
}
s.setAllForkDigests()
if got := s.CanSubscribe(tt.topic); got != tt.want {
t.Errorf("CanSubscribe(%s) = %v, want %v", tt.topic, got, tt.want)
}
@@ -220,11 +219,10 @@ func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testi
func TestService_FilterIncomingSubscriptions(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
digest := params.ForkDigest(clock.CurrentEpoch())
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
genesisTime := time.Now()
var valRoot [32]byte
digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
assert.NoError(t, err)
type args struct {
id peer.ID
subs []*pubsubpb.RPC_SubOpts
@@ -321,12 +319,14 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
},
},
}
valRoot := clock.GenesisValidatorsRoot()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
genesisValidatorsRoot: valRoot[:],
genesisTime: genesisTime,
genesisTime: clock.GenesisTime(),
}
s.setAllForkDigests()
got, err := s.FilterIncomingSubscriptions(tt.args.id, tt.args.subs)
if (err != nil) != tt.wantErr {
t.Errorf("FilterIncomingSubscriptions() error = %v, wantErr %v", err, tt.wantErr)


@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -63,42 +64,42 @@ var (
)
// Service for managing peer to peer (p2p) networking.
type (
Service struct {
started bool
isPreGenesis bool
pingMethod func(ctx context.Context, id peer.ID) error
pingMethodLock sync.RWMutex
cancel context.CancelFunc
cfg *Config
peers *peers.Status
addrFilter *multiaddr.Filters
ipLimiter *leakybucket.Collector
privKey *ecdsa.PrivateKey
metaData metadata.Metadata
pubsub *pubsub.PubSub
joinedTopics map[string]*pubsub.Topic
joinedTopicsLock sync.RWMutex
subnetsLock map[uint64]*sync.RWMutex
subnetsLockLock sync.Mutex // Lock access to subnetsLock
initializationLock sync.Mutex
dv5Listener ListenerRebooter
startupErr error
ctx context.Context
host host.Host
genesisTime time.Time
genesisValidatorsRoot []byte
activeValidatorCount uint64
peerDisconnectionTime *cache.Cache
custodyInfo *custodyInfo
custodyInfoLock sync.RWMutex // Lock access to custodyInfo
}
type Service struct {
started bool
isPreGenesis bool
pingMethod func(ctx context.Context, id peer.ID) error
pingMethodLock sync.RWMutex
cancel context.CancelFunc
cfg *Config
peers *peers.Status
addrFilter *multiaddr.Filters
ipLimiter *leakybucket.Collector
privKey *ecdsa.PrivateKey
metaData metadata.Metadata
pubsub *pubsub.PubSub
joinedTopics map[string]*pubsub.Topic
joinedTopicsLock sync.RWMutex
subnetsLock map[uint64]*sync.RWMutex
subnetsLockLock sync.Mutex // Lock access to subnetsLock
initializationLock sync.Mutex
dv5Listener ListenerRebooter
startupErr error
ctx context.Context
host host.Host
genesisTime time.Time
genesisValidatorsRoot []byte
activeValidatorCount uint64
peerDisconnectionTime *cache.Cache
custodyInfo *custodyInfo
custodyInfoLock sync.RWMutex // Lock access to custodyInfo
clock *startup.Clock
allForkDigests map[[4]byte]struct{}
}
custodyInfo struct {
earliestAvailableSlot primitives.Slot
groupCount uint64
}
)
type custodyInfo struct {
earliestAvailableSlot primitives.Slot
groupCount uint64
}
// NewService initializes a new p2p service compatible with shared.Service interface. No
// connections are made until the Start function is called during the service registry startup.
@@ -202,6 +203,7 @@ func (s *Service) Start() {
// Waits until the state is initialized via an event feed.
// Used for fork-related data when connecting peers.
s.awaitStateInitialized()
s.setAllForkDigests()
s.isPreGenesis = false
var relayNodes []string
@@ -455,7 +457,7 @@ func (s *Service) awaitStateInitialized() {
s.genesisTime = clock.GenesisTime()
gvr := clock.GenesisValidatorsRoot()
s.genesisValidatorsRoot = gvr[:]
_, err = s.currentForkDigest() // initialize fork digest cache
_, err = s.currentForkDigest()
if err != nil {
log.WithError(err).Error("Could not initialize fork digest")
}


@@ -16,8 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
@@ -346,14 +344,16 @@ func TestPeer_Disconnect(t *testing.T) {
func TestService_JoinLeaveTopic(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
defer cancel()
gs := startup.NewClockSynchronizer()
s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}, ClockWaiter: gs, DB: testDB.SetupDB(t)})
require.NoError(t, err)
go s.awaitStateInitialized()
fd := initializeStateWithForkDigest(ctx, t, gs)
s.setAllForkDigests()
s.awaitStateInitialized()
assert.Equal(t, 0, len(s.joinedTopics))
@@ -382,15 +382,13 @@ func TestService_JoinLeaveTopic(t *testing.T) {
// digest associated with that genesis event.
func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.ClockSetter) [4]byte {
gt := prysmTime.Now()
gvr := bytesutil.ToBytes32(bytesutil.PadTo([]byte("genesis validators root"), 32))
require.NoError(t, gs.SetClock(startup.NewClock(gt, gvr)))
fd, err := forks.CreateForkDigest(gt, gvr[:])
require.NoError(t, err)
gvr := params.BeaconConfig().GenesisValidatorsRoot
clock := startup.NewClock(gt, gvr)
require.NoError(t, gs.SetClock(clock))
time.Sleep(50 * time.Millisecond) // wait for pubsub filter to initialize.
return fd
return params.ForkDigest(clock.CurrentEpoch())
}
func TestService_connectWithPeer(t *testing.T) {


@@ -27,6 +27,8 @@ import (
"github.com/prysmaticlabs/go-bitfield"
)
const nfdEnrKey = "nfd" // The ENR record key for "nfd" (Next Fork Digest).
var (
attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount


@@ -3,7 +3,6 @@ package p2p
import (
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"testing"
"time"
@@ -36,17 +35,8 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
// find and connect to a node already subscribed to a specific subnet.
// In our case: The node i is subscribed to subnet i, with i = 1, 2, 3
// Define the genesis validators root, to ensure everybody is on the same network.
const (
genesisValidatorRootStr = "0xdeadbeefcafecafedeadbeefcafecafedeadbeefcafecafedeadbeefcafecafe"
subnetCount = 3
minimumPeersPerSubnet = 1
)
genesisValidatorsRoot, err := hex.DecodeString(genesisValidatorRootStr[2:])
require.NoError(t, err)
// Create a context.
const subnetCount = 3
const minimumPeersPerSubnet = 1
ctx := t.Context()
// Use shorter period for testing.
@@ -58,6 +48,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
// Create flags.
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
gFlags := new(flags.GlobalFlags)
gFlags.MinimumPeersPerSubnet = 1
flags.Init(gFlags)
@@ -74,7 +65,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
bootNodeService := &Service{
cfg: &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000, DisableLivenessCheck: true, PingInterval: testPingInterval},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
genesisValidatorsRoot: params.BeaconConfig().GenesisValidatorsRoot[:],
custodyInfo: &custodyInfo{},
}
@@ -111,7 +102,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
require.NoError(t, err)
service.genesisTime = genesisTime
service.genesisValidatorsRoot = genesisValidatorsRoot
service.genesisValidatorsRoot = params.BeaconConfig().GenesisValidatorsRoot[:]
service.custodyInfo = &custodyInfo{}
nodeForkDigest, err := service.currentForkDigest()
@@ -158,11 +149,11 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
DB: db,
}
service, err := NewService(ctx, cfg)
service, err := NewService(t.Context(), cfg)
require.NoError(t, err)
service.genesisTime = genesisTime
service.genesisValidatorsRoot = genesisValidatorsRoot
service.genesisValidatorsRoot = params.BeaconConfig().GenesisValidatorsRoot[:]
service.custodyInfo = &custodyInfo{}
service.Start()


@@ -1008,9 +1008,29 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
}
parentStateRoot := parentBlock.Block().StateRoot()
parentState, err := s.Stater.State(ctx, parentStateRoot[:])
if err != nil {
return errors.Wrap(err, "could not get parent state")
// Check if the state is already cached
parentState := transition.NextSlotState(parentBlockRoot[:], blk.Block().Slot())
if parentState == nil {
// The state is not advanced in the NSC, check first if the parent post-state is head
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get head root")
}
if bytes.Equal(headRoot, parentBlockRoot[:]) {
parentState, err = s.HeadFetcher.HeadState(ctx)
if err != nil {
return errors.Wrap(err, "could not get head state")
}
parentState, err = transition.ProcessSlots(ctx, parentState, blk.Block().Slot())
if err != nil {
return errors.Wrap(err, "could not process slots to get parent state")
}
} else {
parentState, err = s.Stater.State(ctx, parentStateRoot[:])
if err != nil {
return errors.Wrap(err, "could not get parent state")
}
}
}
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
if err != nil {


@@ -3504,9 +3504,14 @@ func TestValidateConsensus(t *testing.T) {
require.NoError(t, err)
parentRoot, err := parentSbb.Block().HashTreeRoot()
require.NoError(t, err)
mockChainService := &chainMock.ChainService{
State: parentState,
Root: parentRoot[:],
}
server := &Server{
Blocker: &testutil.MockBlocker{RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{parentRoot: parentSbb}},
Stater: &testutil.MockStater{StatesByRoot: map[[32]byte]state.BeaconState{bytesutil.ToBytes32(parentBlock.Block.StateRoot): parentState}},
Blocker: &testutil.MockBlocker{RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{parentRoot: parentSbb}},
Stater: &testutil.MockStater{StatesByRoot: map[[32]byte]state.BeaconState{bytesutil.ToBytes32(parentBlock.Block.StateRoot): parentState}},
HeadFetcher: mockChainService,
}
require.NoError(t, server.validateConsensus(ctx, &eth.GenericSignedBeaconBlock{


@@ -9,7 +9,6 @@ go_library(
"//api/server/structs:go_default_library",
"//config/params:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//network/httputil:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
@@ -24,8 +23,6 @@ go_test(
"//api/server/structs:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",


@@ -11,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/network/httputil"
"github.com/ethereum/go-ethereum/common/hexutil"
log "github.com/sirupsen/logrus"
@@ -35,34 +34,25 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
_, span := trace.StartSpan(r.Context(), "config.GetForkSchedule")
defer span.End()
schedule := params.BeaconConfig().ForkVersionSchedule
schedule := params.SortedForkSchedule()
data := make([]*structs.Fork, 0, len(schedule))
if len(schedule) == 0 {
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
Data: make([]*structs.Fork, 0),
Data: data,
})
return
}
versions := forks.SortedForkVersions(schedule)
chainForks := make([]*structs.Fork, len(schedule))
var previous, current []byte
for i, v := range versions {
if i == 0 {
previous = params.BeaconConfig().GenesisForkVersion
} else {
previous = current
}
copyV := v
current = copyV[:]
chainForks[i] = &structs.Fork{
PreviousVersion: hexutil.Encode(previous),
CurrentVersion: hexutil.Encode(current),
Epoch: fmt.Sprintf("%d", schedule[v]),
}
previous := schedule[0]
for _, entry := range schedule {
data = append(data, &structs.Fork{
PreviousVersion: hexutil.Encode(previous.ForkVersion[:]),
CurrentVersion: hexutil.Encode(entry.ForkVersion[:]),
Epoch: fmt.Sprintf("%d", entry.Epoch),
})
previous = entry
}
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
Data: chainForks,
Data: data,
})
}
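
The rewritten handler walks params.SortedForkSchedule() once, carrying the previous entry along and seeding it with the first entry so genesis pairs with itself. A self-contained sketch of that pairing pattern over a hypothetical, already-sorted schedule (the names, versions, and epochs below are illustrative):

package main

import "fmt"

type entry struct {
	Name    string
	Version [4]byte
	Epoch   uint64
}

func main() {
	// Hypothetical, already-sorted schedule standing in for params.SortedForkSchedule().
	schedule := []entry{
		{"genesis", [4]byte{0x00, 0x00, 0x00, 0x00}, 0},
		{"altair", [4]byte{0x01, 0x00, 0x00, 0x00}, 74240},
		{"bellatrix", [4]byte{0x02, 0x00, 0x00, 0x00}, 144896},
	}
	previous := schedule[0] // genesis reports itself as its own previous version
	for _, e := range schedule {
		fmt.Printf("%-9s previous=%#x current=%#x epoch=%d\n",
			e.Name, previous.Version, e.Version, e.Epoch)
		previous = e
	}
}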


@@ -13,8 +13,6 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/ethereum/go-ethereum/common"
@@ -592,43 +590,34 @@ func TestGetSpec(t *testing.T) {
func TestForkSchedule_Ok(t *testing.T) {
t.Run("ok", func(t *testing.T) {
genesisForkVersion := []byte("Genesis")
firstForkVersion, firstForkEpoch := []byte("Firs"), primitives.Epoch(100)
secondForkVersion, secondForkEpoch := []byte("Seco"), primitives.Epoch(200)
thirdForkVersion, thirdForkEpoch := []byte("Thir"), primitives.Epoch(300)
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig().Copy()
config.GenesisForkVersion = genesisForkVersion
// Create fork schedule adding keys in non-sorted order.
schedule := make(map[[4]byte]primitives.Epoch, 3)
schedule[bytesutil.ToBytes4(secondForkVersion)] = secondForkEpoch
schedule[bytesutil.ToBytes4(firstForkVersion)] = firstForkEpoch
schedule[bytesutil.ToBytes4(thirdForkVersion)] = thirdForkEpoch
config.ForkVersionSchedule = schedule
params.OverrideBeaconConfig(config)
config.InitializeForkSchedule()
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/fork_schedule", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
genesisStr, firstStr, secondStr := hexutil.Encode(config.GenesisForkVersion), hexutil.Encode(config.AltairForkVersion), hexutil.Encode(config.BellatrixForkVersion)
GetForkSchedule(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetForkScheduleResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 3, len(resp.Data))
schedule := params.SortedForkSchedule()
require.Equal(t, len(schedule), len(resp.Data))
fork := resp.Data[0]
assert.DeepEqual(t, hexutil.Encode(genesisForkVersion), fork.PreviousVersion)
assert.DeepEqual(t, hexutil.Encode(firstForkVersion), fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", firstForkEpoch), fork.Epoch)
assert.Equal(t, genesisStr, fork.PreviousVersion)
assert.Equal(t, genesisStr, fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", config.GenesisEpoch), fork.Epoch)
fork = resp.Data[1]
assert.DeepEqual(t, hexutil.Encode(firstForkVersion), fork.PreviousVersion)
assert.DeepEqual(t, hexutil.Encode(secondForkVersion), fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", secondForkEpoch), fork.Epoch)
assert.Equal(t, genesisStr, fork.PreviousVersion)
assert.Equal(t, firstStr, fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", config.AltairForkEpoch), fork.Epoch)
fork = resp.Data[2]
assert.DeepEqual(t, hexutil.Encode(secondForkVersion), fork.PreviousVersion)
assert.DeepEqual(t, hexutil.Encode(thirdForkVersion), fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", thirdForkEpoch), fork.Epoch)
assert.Equal(t, firstStr, fork.PreviousVersion)
assert.Equal(t, secondStr, fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", config.BellatrixForkEpoch), fork.Epoch)
})
t.Run("correct number of forks", func(t *testing.T) {
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/fork_schedule", nil)
@@ -639,8 +628,8 @@ func TestForkSchedule_Ok(t *testing.T) {
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetForkScheduleResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
os := forks.NewOrderedSchedule(params.BeaconConfig())
assert.Equal(t, os.Len(), len(resp.Data))
os := params.SortedForkSchedule()
assert.Equal(t, len(os), len(resp.Data))
})
}


@@ -12,12 +12,10 @@ go_library(
"//api:go_default_library",
"//api/server/structs:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//network/httputil:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",


@@ -7,12 +7,10 @@ import (
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/server/structs"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/network/httputil"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -111,17 +109,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
updateSlot := update.AttestedHeader().Beacon().Slot
updateEpoch := slots.ToEpoch(updateSlot)
updateFork, err := forks.Fork(updateEpoch)
if err != nil {
httputil.HandleError(w, "Could not get fork Version: "+err.Error(), http.StatusInternalServerError)
return
}
forkDigest, err := signing.ComputeForkDigest(updateFork.CurrentVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
if err != nil {
httputil.HandleError(w, "Could not compute fork digest: "+err.Error(), http.StatusInternalServerError)
return
}
updateEntry := params.GetNetworkScheduleEntry(updateEpoch)
updateSSZ, err := update.MarshalSSZ()
if err != nil {
httputil.HandleError(w, "Could not marshal update to SSZ: "+err.Error(), http.StatusInternalServerError)
@@ -133,7 +121,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
if _, err := w.Write(chunkLength); err != nil {
httputil.HandleError(w, "Could not write chunk length: "+err.Error(), http.StatusInternalServerError)
}
if _, err := w.Write(forkDigest[:]); err != nil {
if _, err := w.Write(updateEntry.ForkDigest[:]); err != nil {
httputil.HandleError(w, "Could not write fork digest: "+err.Error(), http.StatusInternalServerError)
}
if _, err := w.Write(updateSSZ); err != nil {


@@ -19,7 +19,10 @@ func (_ *Server) GetBeaconConfig(_ context.Context, _ *emptypb.Empty) (*ethpb.Be
numFields := val.Type().NumField()
res := make(map[string]string, numFields)
for i := 0; i < numFields; i++ {
res[val.Type().Field(i).Name] = fmt.Sprintf("%v", val.Field(i).Interface())
field := val.Type().Field(i)
if field.IsExported() {
res[field.Name] = fmt.Sprintf("%v", val.Field(i).Interface())
}
}
return &ethpb.BeaconConfig{
Config: res,
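
Skipping unexported fields matters because reflect's Value.Interface panics on them. A standalone illustration of the same filter over a hypothetical struct, using only the standard library:

package main

import (
	"fmt"
	"reflect"
)

// demoConfig is a hypothetical struct mixing exported and unexported fields.
type demoConfig struct {
	SlotsPerEpoch uint64
	GenesisDelay  uint64
	forkCache     map[string]uint64 // unexported: skipped below
}

func main() {
	cfg := demoConfig{SlotsPerEpoch: 32, GenesisDelay: 604800}
	val := reflect.ValueOf(cfg)
	res := make(map[string]string)
	for i := 0; i < val.NumField(); i++ {
		field := val.Type().Field(i)
		if !field.IsExported() {
			continue // Interface() would panic on an unexported field
		}
		res[field.Name] = fmt.Sprintf("%v", val.Field(i).Interface())
	}
	fmt.Println(res)
}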


@@ -17,10 +17,19 @@ func TestServer_GetBeaconConfig(t *testing.T) {
res, err := bs.GetBeaconConfig(ctx, &emptypb.Empty{})
require.NoError(t, err)
conf := params.BeaconConfig()
numFields := reflect.TypeOf(conf).Elem().NumField()
confType := reflect.TypeOf(conf).Elem()
numFields := confType.NumField()
// Count only exported fields, as unexported fields are not included in the config
exportedFields := 0
for i := 0; i < numFields; i++ {
if confType.Field(i).IsExported() {
exportedFields++
}
}
// Check if the result has the same number of items as our config struct.
assert.Equal(t, numFields, len(res.Config), "Unexpected number of items in config")
// Check if the result has the same number of items as exported fields in our config struct.
assert.Equal(t, exportedFields, len(res.Config), "Unexpected number of items in config")
want := fmt.Sprintf("%d", conf.Eth1FollowDistance)
// Check that an element is properly populated from the config.


@@ -81,10 +81,10 @@ go_library(
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//genesis:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -155,6 +155,7 @@ common_deps = [
"//crypto/bls/blst:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//genesis:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",


@@ -564,7 +564,7 @@ func TestBuildValidatorAssignmentMap(t *testing.T) {
start := primitives.Slot(200)
bySlot := [][][]primitives.ValidatorIndex{
{{1, 2, 3}}, // slot 200, committee 0
{{7, 8, 9}}, // slot 201, committee 0
{{7, 8, 9}}, // slot 201, committee 0
{{4, 5}, {10, 11}}, // slot 202, committee 0 & 1
}
@@ -632,7 +632,7 @@ func TestGetValidatorAssignment_WithAssignmentMap(t *testing.T) {
}
vs := &Server{}
// Test existing validator (validator 2 is at position 1 in the committee, not position 2)
assignment := vs.getValidatorAssignment(meta, primitives.ValidatorIndex(2))
require.NotNil(t, assignment)
@@ -662,7 +662,7 @@ func TestGetValidatorAssignment_WithoutAssignmentMap(t *testing.T) {
}
vs := &Server{}
// Test existing validator
assignment := vs.getValidatorAssignment(meta, primitives.ValidatorIndex(5))
require.NotNil(t, assignment)
@@ -682,24 +682,24 @@ func TestLoadMetadata_ThresholdBehavior(t *testing.T) {
epoch := primitives.Epoch(0)
tests := []struct {
name string
numValidators int
expectAssignmentMap bool
name string
numValidators int
expectAssignmentMap bool
}{
{
name: "Small request - below threshold",
numValidators: 100,
expectAssignmentMap: false,
name: "Small request - below threshold",
numValidators: 100,
expectAssignmentMap: false,
},
{
name: "Large request - at threshold",
numValidators: validatorLookupThreshold,
expectAssignmentMap: true,
name: "Large request - at threshold",
numValidators: validatorLookupThreshold,
expectAssignmentMap: true,
},
{
name: "Large request - above threshold",
numValidators: validatorLookupThreshold + 1000,
expectAssignmentMap: true,
name: "Large request - above threshold",
numValidators: validatorLookupThreshold + 1000,
expectAssignmentMap: true,
},
}


@@ -219,9 +219,11 @@ func (vs *Server) getPayloadHeaderFromBuilder(
return nil, errors.New("builder returned nil bid")
}
bidVersion := signedBid.Version()
headBlockVersion := b.Version()
if !isVersionCompatible(bidVersion, headBlockVersion) {
return nil, fmt.Errorf("builder bid response version: %d is not compatible with head block version: %d for epoch %d", bidVersion, headBlockVersion, slots.ToEpoch(slot))
epoch := slots.ToEpoch(slot)
entry := params.GetNetworkScheduleEntry(epoch)
forkVersion := entry.VersionEnum
if !isVersionCompatible(bidVersion, forkVersion) {
return nil, fmt.Errorf("builder bid response version: %d is not compatible with expected version: %d for epoch %d", bidVersion, forkVersion, epoch)
}
bid, err := signedBid.Message()


@@ -873,6 +873,7 @@ func TestServer_getPayloadHeader(t *testing.T) {
err string
returnedHeader *v1.ExecutionPayloadHeader
returnedHeaderCapella *v1.ExecutionPayloadHeaderCapella
forkVersion int
}{
{
name: "can't request before bellatrix epoch",
@@ -974,7 +975,7 @@ func TestServer_getPayloadHeader(t *testing.T) {
return wb
}(),
},
err: "builder bid response version: 3 is not compatible with head block version: 2 for epoch 1",
err: "builder bid response version: 3 is not compatible with expected version: 2 for epoch 1",
},
{
name: "different bid version during hard fork",
@@ -1085,10 +1086,21 @@ func TestServer_getPayloadHeader(t *testing.T) {
}(),
},
// Should succeed because Electra bids are compatible with Fulu head blocks
forkVersion: version.Fulu,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
if tc.forkVersion != 0 {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
if tc.forkVersion == version.Fulu {
cfg.FuluForkEpoch = 0
cfg.BellatrixForkEpoch = 0
cfg.InitializeForkSchedule()
}
params.OverrideBeaconConfig(cfg)
}
vs := &Server{BeaconDB: dbTest.SetupDB(t), BlockBuilder: tc.mock, HeadFetcher: tc.fetcher, TimeFetcher: &blockchainTest.ChainService{
Genesis: genesis,
}}
@@ -1305,104 +1317,103 @@ func Test_expectedGasLimit(t *testing.T) {
}
func TestIsVersionCompatible(t *testing.T) {
tests := []struct {
name string
bidVersion int
headBlockVersion int
want bool
name string
bidVersion int
currentForkVersion int
want bool
}{
{
name: "Exact version match - Bellatrix",
bidVersion: version.Bellatrix,
headBlockVersion: version.Bellatrix,
want: true,
name: "Exact version match - Bellatrix",
bidVersion: version.Bellatrix,
currentForkVersion: version.Bellatrix,
want: true,
},
{
name: "Exact version match - Capella",
bidVersion: version.Capella,
headBlockVersion: version.Capella,
want: true,
name: "Exact version match - Capella",
bidVersion: version.Capella,
currentForkVersion: version.Capella,
want: true,
},
{
name: "Exact version match - Deneb",
bidVersion: version.Deneb,
headBlockVersion: version.Deneb,
want: true,
name: "Exact version match - Deneb",
bidVersion: version.Deneb,
currentForkVersion: version.Deneb,
want: true,
},
{
name: "Exact version match - Electra",
bidVersion: version.Electra,
headBlockVersion: version.Electra,
want: true,
name: "Exact version match - Electra",
bidVersion: version.Electra,
currentForkVersion: version.Electra,
want: true,
},
{
name: "Exact version match - Fulu",
bidVersion: version.Fulu,
headBlockVersion: version.Fulu,
want: true,
name: "Exact version match - Fulu",
bidVersion: version.Fulu,
currentForkVersion: version.Fulu,
want: true,
},
{
name: "Electra bid with Fulu head block - Compatible",
bidVersion: version.Electra,
headBlockVersion: version.Fulu,
want: true,
name: "Electra bid with Fulu head block - Compatible",
bidVersion: version.Electra,
currentForkVersion: version.Fulu,
want: true,
},
{
name: "Fulu bid with Electra head block - Not compatible",
bidVersion: version.Fulu,
headBlockVersion: version.Electra,
want: false,
name: "Fulu bid with Electra head block - Not compatible",
bidVersion: version.Fulu,
currentForkVersion: version.Electra,
want: false,
},
{
name: "Deneb bid with Electra head block - Not compatible",
bidVersion: version.Deneb,
headBlockVersion: version.Electra,
want: false,
name: "Deneb bid with Electra head block - Not compatible",
bidVersion: version.Deneb,
currentForkVersion: version.Electra,
want: false,
},
{
name: "Electra bid with Deneb head block - Not compatible",
bidVersion: version.Electra,
headBlockVersion: version.Deneb,
want: false,
name: "Electra bid with Deneb head block - Not compatible",
bidVersion: version.Electra,
currentForkVersion: version.Deneb,
want: false,
},
{
name: "Capella bid with Deneb head block - Not compatible",
bidVersion: version.Capella,
headBlockVersion: version.Deneb,
want: false,
name: "Capella bid with Deneb head block - Not compatible",
bidVersion: version.Capella,
currentForkVersion: version.Deneb,
want: false,
},
{
name: "Bellatrix bid with Capella head block - Not compatible",
bidVersion: version.Bellatrix,
headBlockVersion: version.Capella,
want: false,
name: "Bellatrix bid with Capella head block - Not compatible",
bidVersion: version.Bellatrix,
currentForkVersion: version.Capella,
want: false,
},
{
name: "Phase0 bid with Altair head block - Not compatible",
bidVersion: version.Phase0,
headBlockVersion: version.Altair,
want: false,
name: "Phase0 bid with Altair head block - Not compatible",
bidVersion: version.Phase0,
currentForkVersion: version.Altair,
want: false,
},
{
name: "Deneb bid with Fulu head block - Not compatible",
bidVersion: version.Deneb,
headBlockVersion: version.Fulu,
want: false,
name: "Deneb bid with Fulu head block - Not compatible",
bidVersion: version.Deneb,
currentForkVersion: version.Fulu,
want: false,
},
{
name: "Capella bid with Fulu head block - Not compatible",
bidVersion: version.Capella,
headBlockVersion: version.Fulu,
want: false,
name: "Capella bid with Fulu head block - Not compatible",
bidVersion: version.Capella,
currentForkVersion: version.Fulu,
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := isVersionCompatible(tt.bidVersion, tt.headBlockVersion)
got := isVersionCompatible(tt.bidVersion, tt.currentForkVersion)
if got != tt.want {
t.Errorf("isVersionCompatible(%d, %d) = %v, want %v", tt.bidVersion, tt.headBlockVersion, got, tt.want)
t.Errorf("isVersionCompatible(%d, %d) = %v, want %v", tt.bidVersion, tt.currentForkVersion, got, tt.want)
}
})
}
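
The table above pins down the compatibility rule: a bid passes when its version matches the fork expected at the slot's epoch, with a single carve-out that keeps accepting Electra bids once Fulu is active. A minimal sketch consistent with those expectations, not the actual isVersionCompatible implementation, which this diff does not show:

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/runtime/version"
)

// compatible mirrors the behaviour documented by TestIsVersionCompatible:
// exact matches pass, and an Electra bid is still accepted while Fulu is the
// expected fork. Everything else is rejected.
func compatible(bidVersion, expectedVersion int) bool {
	if bidVersion == expectedVersion {
		return true
	}
	return bidVersion == version.Electra && expectedVersion == version.Fulu
}

func main() {
	fmt.Println(compatible(version.Electra, version.Fulu)) // true
	fmt.Println(compatible(version.Fulu, version.Electra)) // false
	fmt.Println(compatible(version.Deneb, version.Fulu))   // false
}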


@@ -4,6 +4,7 @@
package validator
import (
"bytes"
"context"
"time"
@@ -28,10 +29,11 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/genesis"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
@@ -155,32 +157,31 @@ func (vs *Server) ValidatorIndex(ctx context.Context, req *ethpb.ValidatorIndexR
//
// DomainData fetches the current domain version information from the beacon state.
func (vs *Server) DomainData(ctx context.Context, request *ethpb.DomainRequest) (*ethpb.DomainResponse, error) {
fork, err := forks.Fork(request.Epoch)
if err != nil {
return nil, err
}
headGenesisValidatorsRoot := vs.HeadFetcher.HeadGenesisValidatorsRoot()
isExitDomain := [4]byte(request.Domain) == params.BeaconConfig().DomainVoluntaryExit
if isExitDomain {
epoch := request.Epoch
rd := bytesutil.ToBytes4(request.Domain)
if bytes.Equal(request.Domain, params.BeaconConfig().DomainVoluntaryExit[:]) {
hs, err := vs.HeadFetcher.HeadStateReadOnly(ctx)
if err != nil {
return nil, err
}
if hs.Version() >= version.Deneb {
fork = &ethpb.Fork{
if slots.ToEpoch(hs.Slot()) >= params.BeaconConfig().DenebForkEpoch {
return computeDomainData(rd, epoch, &ethpb.Fork{
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
Epoch: params.BeaconConfig().CapellaForkEpoch,
}
})
}
}
dv, err := signing.Domain(fork, request.Epoch, bytesutil.ToBytes4(request.Domain), headGenesisValidatorsRoot[:])
return computeDomainData(rd, epoch, params.ForkFromConfig(params.BeaconConfig(), epoch))
}
func computeDomainData(domain [4]byte, epoch primitives.Epoch, fork *ethpb.Fork) (*ethpb.DomainResponse, error) {
gvr := genesis.ValidatorsRoot()
domainData, err := signing.Domain(fork, epoch, domain, gvr[:])
if err != nil {
return nil, err
}
return &ethpb.DomainResponse{
SignatureDomain: dv,
}, nil
return &ethpb.DomainResponse{SignatureDomain: domainData}, nil
}
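
The voluntary-exit branch keeps signing exits over the Capella fork version once the head state has reached Deneb, while every other domain follows the fork at the requested epoch. A rough sketch of that pinning rule; the helper name and the pastDeneb flag are illustrative, and the real handler derives both the current fork and the cutoff from the head state:

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v6/config/params"
)

// exitDomain sketches the rule exercised by TestServer_DomainData_Exits below:
// after Deneb, voluntary exits keep being signed over the Capella fork version.
func exitDomain(currentForkVersion []byte, pastDeneb bool, gvr [32]byte) ([]byte, error) {
	cfg := params.BeaconConfig()
	fv := currentForkVersion
	if pastDeneb {
		fv = cfg.CapellaForkVersion
	}
	return signing.ComputeDomain(cfg.DomainVoluntaryExit, fv, gvr[:])
}

func main() {
	cfg := params.BeaconConfig()
	d, err := exitDomain(cfg.DenebForkVersion, true, [32]byte{})
	fmt.Printf("%#x %v\n", d, err)
}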
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.


@@ -2,6 +2,7 @@ package validator
import (
"context"
"fmt"
"sync"
"testing"
"time"
@@ -17,11 +18,13 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/mock"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/ethereum/go-ethereum/common/hexutil"
logTest "github.com/sirupsen/logrus/hooks/test"
"go.uber.org/mock/gomock"
"google.golang.org/grpc/codes"
@@ -317,55 +320,63 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) {
require.LogsContain(t, hook, "Sending genesis time")
}
func TestServer_DomainData_Exits(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
[4]byte(cfg.GenesisForkVersion): primitives.Epoch(0),
[4]byte(cfg.AltairForkVersion): primitives.Epoch(5),
[4]byte(cfg.BellatrixForkVersion): primitives.Epoch(10),
[4]byte(cfg.CapellaForkVersion): primitives.Epoch(15),
[4]byte(cfg.DenebForkVersion): primitives.Epoch(20),
}
params.OverrideBeaconConfig(cfg)
beaconState := &ethpb.BeaconStateBellatrix{
Slot: 4000,
}
block := util.NewBeaconBlock()
genesisRoot, err := block.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
s, err := state_native.InitializeFromProtoUnsafeBellatrix(beaconState)
func testSigDomainForSlot(t *testing.T, domain [4]byte, chsrv *mockChain.ChainService, epoch primitives.Epoch) *ethpb.DomainResponse {
cfg := params.BeaconConfig()
gvr := genesis.ValidatorsRoot()
s, err := state_native.InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{
Slot: primitives.Slot(epoch) * cfg.SlotsPerEpoch,
GenesisValidatorsRoot: gvr[:],
})
require.NoError(t, err)
chsrv.State = s
vs := &Server{
Ctx: t.Context(),
ChainStartFetcher: &mockExecution.Chain{},
HeadFetcher: &mockChain.ChainService{State: s, Root: genesisRoot[:]},
HeadFetcher: chsrv,
}
reqDomain, err := vs.DomainData(t.Context(), &ethpb.DomainRequest{
Epoch: 100,
Domain: params.BeaconConfig().DomainDeposit[:],
})
assert.NoError(t, err)
wantedDomain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, params.BeaconConfig().DenebForkVersion, make([]byte, 32))
assert.NoError(t, err)
assert.DeepEqual(t, reqDomain.SignatureDomain, wantedDomain)
beaconStateNew := &ethpb.BeaconStateDeneb{
Slot: 4000,
}
s, err = state_native.InitializeFromProtoUnsafeDeneb(beaconStateNew)
require.NoError(t, err)
vs.HeadFetcher = &mockChain.ChainService{State: s, Root: genesisRoot[:]}
reqDomain, err = vs.DomainData(t.Context(), &ethpb.DomainRequest{
Epoch: 100,
Domain: params.BeaconConfig().DomainVoluntaryExit[:],
domainResp, err := vs.DomainData(t.Context(), &ethpb.DomainRequest{
Epoch: epoch,
Domain: domain[:],
})
require.NoError(t, err)
wantedDomain, err = signing.ComputeDomain(params.BeaconConfig().DomainVoluntaryExit, params.BeaconConfig().CapellaForkVersion, make([]byte, 32))
require.NoError(t, err)
assert.DeepEqual(t, reqDomain.SignatureDomain, wantedDomain)
return domainResp
}
func requireSigningEqual(t *testing.T, name string, domain [4]byte, req, want primitives.Epoch, chsrv *mockChain.ChainService) {
t.Run(fmt.Sprintf("%s_%#x", name, domain), func(t *testing.T) {
gvr := genesis.ValidatorsRoot()
resp := testSigDomainForSlot(t, domain, chsrv, req)
entry := params.GetNetworkScheduleEntry(want)
wanted, err := signing.ComputeDomain(domain, entry.ForkVersion[:], gvr[:])
assert.NoError(t, err)
assert.Equal(t, hexutil.Encode(wanted), hexutil.Encode(resp.SignatureDomain))
})
}
func TestServer_DomainData_Exits(t *testing.T) {
// This test makes 2 sets of assertions:
// - the deposit domain is always computed wrt the fork version at the given epoch
// - the exit domain is the same until deneb, at which point it is always computed wrt the capella fork version
params.SetActiveTestCleanup(t, params.MainnetConfig())
params.BeaconConfig().InitializeForkSchedule()
cfg := params.BeaconConfig()
block := util.NewBeaconBlock()
genesisRoot, err := block.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
chsrv := &mockChain.ChainService{Root: genesisRoot[:]}
last := params.LastForkEpoch()
requireSigningEqual(t, "genesis deposit", cfg.DomainDeposit, cfg.GenesisEpoch, cfg.GenesisEpoch, chsrv)
requireSigningEqual(t, "altair deposit", cfg.DomainDeposit, cfg.AltairForkEpoch, cfg.AltairForkEpoch, chsrv)
requireSigningEqual(t, "bellatrix deposit", cfg.DomainDeposit, cfg.BellatrixForkEpoch, cfg.BellatrixForkEpoch, chsrv)
requireSigningEqual(t, "capella deposit", cfg.DomainDeposit, cfg.CapellaForkEpoch, cfg.CapellaForkEpoch, chsrv)
requireSigningEqual(t, "deneb deposit", cfg.DomainDeposit, cfg.DenebForkEpoch, cfg.DenebForkEpoch, chsrv)
requireSigningEqual(t, "last epoch deposit", cfg.DomainDeposit, last, last, chsrv)
requireSigningEqual(t, "genesis exit", cfg.DomainVoluntaryExit, cfg.GenesisEpoch, cfg.GenesisEpoch, chsrv)
requireSigningEqual(t, "altair exit", cfg.DomainVoluntaryExit, cfg.AltairForkEpoch, cfg.AltairForkEpoch, chsrv)
requireSigningEqual(t, "bellatrix exit", cfg.DomainVoluntaryExit, cfg.BellatrixForkEpoch, cfg.BellatrixForkEpoch, chsrv)
requireSigningEqual(t, "capella exit", cfg.DomainVoluntaryExit, cfg.CapellaForkEpoch, cfg.CapellaForkEpoch, chsrv)
requireSigningEqual(t, "deneb exit", cfg.DomainVoluntaryExit, cfg.DenebForkEpoch, cfg.CapellaForkEpoch, chsrv)
requireSigningEqual(t, "last epoch exit", cfg.DomainVoluntaryExit, last, cfg.CapellaForkEpoch, chsrv)
}


@@ -37,7 +37,7 @@ func TestUnblinder_UnblindBlobSidecars_InvalidBundle(t *testing.T) {
func TestUnblindBlobsSidecars_WithBlobsBundler(t *testing.T) {
// Test that the function accepts BlobsBundler interface
// This test focuses on the interface change rather than full integration
t.Run("Interface compatibility with BlobsBundle", func(t *testing.T) {
// Create a simple pre-Deneb block that will return nil (no processing needed)
wBlock, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockCapella{
@@ -87,7 +87,7 @@ func TestUnblindBlobsSidecars_WithBlobsBundler(t *testing.T) {
t.Run("Function signature accepts BlobsBundler interface", func(t *testing.T) {
// This test verifies that the function signature has been updated to accept BlobsBundler
// We test this by verifying the code compiles with both types
// Create a simple pre-Deneb block for the interface test
wBlock, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockCapella{
Block: &ethpb.BeaconBlockCapella{
@@ -106,7 +106,7 @@ func TestUnblindBlobsSidecars_WithBlobsBundler(t *testing.T) {
_, err = unblindBlobsSidecars(wBlock, regularBundle)
require.NoError(t, err)
// Verify function accepts BlobsBundleV2 through the interface
// Verify function accepts BlobsBundleV2 through the interface
var bundleV2 enginev1.BlobsBundler = &enginev1.BlobsBundleV2{
KzgCommitments: [][]byte{make([]byte, 48)},
Proofs: [][]byte{make([]byte, 48)},


@@ -5,6 +5,7 @@ go_library(
srcs = [
"clock.go",
"synchronizer.go",
"testing.go",
],
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/startup",
visibility = ["//visibility:public"],


@@ -41,6 +41,11 @@ func (g *Clock) CurrentSlot() types.Slot {
return slots.Duration(g.t, now)
}
// CurrentEpoch returns the current epoch relative to the time.Time value that Clock embeds.
func (g *Clock) CurrentEpoch() types.Epoch {
return slots.ToEpoch(g.CurrentSlot())
}
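
CurrentEpoch lets callers go straight from a clock to the active fork digest, which is the pattern the updated p2p tests use. A small usage sketch, assuming the fork schedule has been initialized as those tests do:

package main

import (
	"fmt"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/config/params"
)

func main() {
	params.BeaconConfig().InitializeForkSchedule()
	clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
	digest := params.ForkDigest(clock.CurrentEpoch())
	fmt.Printf("current epoch %d, fork digest %#x\n", clock.CurrentEpoch(), digest)
}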
// SlotStart computes the time the given slot begins.
func (g *Clock) SlotStart(slot types.Slot) (time.Time, error) {
return slots.StartTime(g.t, slot)


@@ -0,0 +1,33 @@
package startup
import (
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/time/slots"
)
// MockNower is a mock implementation of the Nower interface for use in tests.
type MockNower struct {
t time.Time
}
// Now satisfies the Nower interface using a mocked time value
func (m *MockNower) Now() time.Time {
return m.t
}
// SetSlot sets the current time to the start of the given slot.
func (m *MockNower) SetSlot(t *testing.T, c *Clock, s primitives.Slot) {
now, err := slots.StartTime(c.GenesisTime(), s)
if err != nil {
t.Fatalf("failed to set slot: %v", err)
}
m.t = now
}
// Set sets the current time to the given time.
func (m *MockNower) Set(now time.Time) {
m.t = now
}
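
A short test-shaped sketch of how the helper plugs into NewClock via WithNower, mirroring the defaultMockChain wiring further down in this diff; the slot value and test name are illustrative:

package startup_test

import (
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// TestMockNowerAdvancesClock pins the clock to slot 96 without sleeping.
func TestMockNowerAdvancesClock(t *testing.T) {
	mockNow := startup.MockNower{}
	clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot,
		startup.WithNower(mockNow.Now))
	mockNow.SetSlot(t, clock, primitives.Slot(96))
	if clock.CurrentSlot() != primitives.Slot(96) {
		t.Fatalf("got slot %d, want 96", clock.CurrentSlot())
	}
}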


@@ -1,34 +0,0 @@
package genesis
import (
_ "embed"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/golang/snappy"
)
var embeddedStates = map[string]*[]byte{}
// State returns a copy of the genesis state from a hardcoded value.
func State(name string) (state.BeaconState, error) {
sb, exists := embeddedStates[name]
if exists {
return load(*sb)
}
return nil, nil
}
// load a compressed ssz state file into a beacon state struct.
func load(b []byte) (state.BeaconState, error) {
st := &ethpb.BeaconState{}
b, err := snappy.Decode(nil /*dst*/, b)
if err != nil {
return nil, err
}
if err := st.UnmarshalSSZ(b); err != nil {
return nil, err
}
return state_native.InitializeFromProtoUnsafePhase0(st)
}


@@ -351,3 +351,7 @@ type WriteOnlyDeposits interface {
type WriteOnlyProposerLookahead interface {
SetProposerLookahead([]primitives.ValidatorIndex) error
}
func IsNil(s BeaconState) bool {
return s == nil || s.IsNil()
}
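
The new IsNil helper guards against the classic typed-nil-in-interface pitfall: an interface wrapping a nil pointer is not == nil, so both checks are needed. A standalone illustration with a hypothetical stand-in for the BeaconState interface:

package main

import "fmt"

// beaconState stands in for the real state.BeaconState interface.
type beaconState interface {
	IsNil() bool
}

type stateImpl struct{ slot uint64 }

// IsNil on the concrete type reports whether the receiver is a typed nil pointer.
func (s *stateImpl) IsNil() bool { return s == nil }

// isNil mirrors the helper added above: a nil interface and an interface
// holding a typed nil pointer are both treated as "no state".
func isNil(s beaconState) bool {
	return s == nil || s.IsNil()
}

func main() {
	var typedNil *stateImpl
	var asInterface beaconState = typedNil
	fmt.Println(asInterface == nil) // false: the interface holds a typed nil
	fmt.Println(isNil(asInterface)) // true
	fmt.Println(isNil(nil))         // true
}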


@@ -34,3 +34,7 @@ var fieldMap map[types.FieldIndex]types.DataType
func errNotSupported(funcName string, ver int) error {
return fmt.Errorf("%s is not supported for %s", funcName, version.String(ver))
}
func IsNil(s state.BeaconState) bool {
return s == nil || s.IsNil()
}


@@ -8,6 +8,7 @@ go_library(
"broadcast_bls_changes.go",
"context.go",
"custody.go",
"data_column_sidecars.go",
"data_columns_reconstruct.go",
"deadlines.go",
"decode_pubsub.go",
@@ -126,7 +127,6 @@ go_library(
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
@@ -168,6 +168,7 @@ go_test(
"broadcast_bls_changes_test.go",
"context_test.go",
"custody_test.go",
"data_column_sidecars_test.go",
"data_columns_reconstruct_test.go",
"decode_pubsub_test.go",
"error_test.go",
@@ -265,7 +266,6 @@ go_test(
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/equality:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",


@@ -35,7 +35,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime:go_default_library",
@@ -80,7 +79,6 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//proto/dbval:go_default_library",
"//runtime/interop:go_default_library",
"//testing/require:go_default_library",


@@ -91,9 +91,7 @@ func (bs *blobSync) validateNext(rb blocks.ROBlob) error {
return err
}
sc := blocks.NewSidecarFromBlobSidecar(rb)
if err := bs.store.Persist(bs.current, sc); err != nil {
if err := bs.store.Persist(bs.current, rb); err != nil {
return err
}


@@ -9,7 +9,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
@@ -107,8 +106,7 @@ func (vr verifier) blockSignatureBatch(b blocks.ROBlock) (*bls.SignatureBatch, e
}
func newBackfillVerifier(vr []byte, keys [][fieldparams.BLSPubkeyLength]byte) (*verifier, error) {
dc, err := newDomainCache(vr, params.BeaconConfig().DomainBeaconProposer,
forks.NewOrderedSchedule(params.BeaconConfig()))
dc, err := newDomainCache(vr, params.BeaconConfig().DomainBeaconProposer)
if err != nil {
return nil, err
}
@@ -122,33 +120,31 @@ func newBackfillVerifier(vr []byte, keys [][fieldparams.BLSPubkeyLength]byte) (*
// domainCache provides a fast signing domain lookup by epoch.
type domainCache struct {
fsched forks.OrderedSchedule
forkDomains map[[4]byte][]byte
dType [bls.DomainByteLength]byte
}
func newDomainCache(vRoot []byte, dType [bls.DomainByteLength]byte, fsched forks.OrderedSchedule) (*domainCache, error) {
func newDomainCache(vRoot []byte, dType [bls.DomainByteLength]byte) (*domainCache, error) {
dc := &domainCache{
fsched: fsched,
forkDomains: make(map[[4]byte][]byte),
dType: dType,
}
for _, entry := range fsched {
d, err := signing.ComputeDomain(dc.dType, entry.Version[:], vRoot)
for _, entry := range params.SortedForkSchedule() {
d, err := signing.ComputeDomain(dc.dType, entry.ForkVersion[:], vRoot)
if err != nil {
return nil, errors.Wrapf(err, "failed to pre-compute signing domain for fork version=%#x", entry.Version)
return nil, errors.Wrapf(err, "failed to pre-compute signing domain for fork version=%#x", entry.ForkVersion)
}
dc.forkDomains[entry.Version] = d
dc.forkDomains[entry.ForkVersion] = d
}
return dc, nil
}
func (dc *domainCache) forEpoch(e primitives.Epoch) ([]byte, error) {
fork, err := dc.fsched.VersionForEpoch(e)
fork, err := params.Fork(e)
if err != nil {
return nil, err
}
d, ok := dc.forkDomains[fork]
d, ok := dc.forkDomains[[4]byte(fork.CurrentVersion)]
if !ok {
return nil, errors.Wrapf(errUnknownDomain, "fork version=%#x, epoch=%d", fork, e)
}


@@ -12,7 +12,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/runtime/interop"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -30,18 +29,17 @@ func TestDomainCache(t *testing.T) {
}
vRoot, err := hexutil.Decode("0x0011223344556677889900112233445566778899001122334455667788990011")
require.NoError(t, err)
dType := cfg.DomainBeaconProposer
require.NoError(t, err)
require.Equal(t, 32, len(vRoot))
fsched := forks.NewOrderedSchedule(cfg)
dc, err := newDomainCache(vRoot, dType, fsched)
dc, err := newDomainCache(vRoot, dType)
require.NoError(t, err)
require.Equal(t, len(fsched), len(dc.forkDomains))
for i := range fsched {
e := fsched[i].Epoch
ad, err := dc.forEpoch(e)
schedule := params.SortedForkSchedule()
require.Equal(t, len(schedule), len(dc.forkDomains))
for _, entry := range schedule {
ad, err := dc.forEpoch(entry.Epoch)
require.NoError(t, err)
ed, err := signing.ComputeDomain(dType, fsched[i].Version[:], vRoot)
ed, err := signing.ComputeDomain(dType, entry.ForkVersion[:], vRoot)
require.NoError(t, err)
require.DeepEqual(t, ed, ad)
}


@@ -21,7 +21,6 @@ import (
types "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -156,11 +155,7 @@ func (r *expectedBlobChunk) requireExpected(t *testing.T, s *Service, stream net
c, err := readContextFromStream(stream)
require.NoError(t, err)
valRoot := s.cfg.chain.GenesisValidatorsRoot()
ctxBytes, err := forks.ForkDigestFromEpoch(slots.ToEpoch(r.sidecar.Slot()), valRoot[:])
require.NoError(t, err)
require.Equal(t, ctxBytes, bytesutil.ToBytes4(c))
require.Equal(t, params.ForkDigest(slots.ToEpoch(r.sidecar.Slot())), bytesutil.ToBytes4(c))
sc := &ethpb.BlobSidecar{}
require.NoError(t, encoding.DecodeWithMaxLength(stream, sc))
@@ -270,27 +265,24 @@ func (c *blobsTestCase) run(t *testing.T) {
// we use max uints for future forks, but this causes overflows when computing slots
// so it is helpful in tests to temporarily reposition the epochs to give room for some math.
func repositionFutureEpochs(cfg *params.BeaconChainConfig) {
if cfg.CapellaForkEpoch == math.MaxUint64 {
cfg.CapellaForkEpoch = cfg.BellatrixForkEpoch + 100
}
if cfg.DenebForkEpoch == math.MaxUint64 {
cfg.DenebForkEpoch = cfg.CapellaForkEpoch + 100
if cfg.FuluForkEpoch == math.MaxUint64 {
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 100
}
}
func defaultMockChain(t *testing.T) (*mock.ChainService, *startup.Clock) {
de := params.BeaconConfig().DenebForkEpoch
df, err := forks.Fork(de)
df, err := params.Fork(de)
require.NoError(t, err)
denebBuffer := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1000
ce := de + denebBuffer
fe := ce - 2
cs, err := slots.EpochStart(ce)
require.NoError(t, err)
now := time.Now()
genOffset := types.Slot(params.BeaconConfig().SecondsPerSlot) * cs
genesis := now.Add(-1 * time.Second * time.Duration(int64(genOffset)))
clock := startup.NewClock(genesis, [32]byte{})
genesis := time.Now()
mockNow := startup.MockNower{}
clock := startup.NewClock(genesis, params.BeaconConfig().GenesisValidatorsRoot, startup.WithNower(mockNow.Now))
mockNow.SetSlot(t, clock, cs)
chain := &mock.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: fe},
Fork: df,


@@ -46,7 +46,6 @@ go_test(
"//consensus-types/blocks/testing:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",


@@ -12,7 +12,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
blocktest "github.com/OffchainLabs/prysm/v6/consensus-types/blocks/testing"
"github.com/OffchainLabs/prysm/v6/encoding/ssz/detect"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -22,6 +21,7 @@ import (
func TestDownloadFinalizedData(t *testing.T) {
ctx := t.Context()
cfg := params.MainnetConfig()
cfg.InitializeForkSchedule()
// avoid the altair zone because genesis tests are easier to set up
epoch := cfg.AltairForkEpoch - 1
@@ -30,7 +30,7 @@ func TestDownloadFinalizedData(t *testing.T) {
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forks.ForkForEpochFromConfig(cfg, epoch)
fork := params.ForkFromConfig(cfg, epoch)
require.NoError(t, err)
require.NoError(t, st.SetFork(fork))
require.NoError(t, st.SetSlot(slot))

View File

@@ -16,7 +16,6 @@ import (
blocktest "github.com/OffchainLabs/prysm/v6/consensus-types/blocks/testing"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/ssz/detect"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -83,7 +82,7 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
require.NoError(t, err)
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
fork, err := params.Fork(epoch)
require.NoError(t, err)
require.NoError(t, wst.SetFork(fork))
@@ -182,7 +181,7 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
require.NoError(t, err)
wst, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, cfg.GenesisEpoch)
fork, err := params.Fork(cfg.GenesisEpoch)
require.NoError(t, err)
require.NoError(t, wst.SetFork(fork))
@@ -279,33 +278,11 @@ func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
require.Equal(t, expectedEpoch, actualEpoch)
}
func forkForEpoch(cfg *params.BeaconChainConfig, epoch primitives.Epoch) (*ethpb.Fork, error) {
os := forks.NewOrderedSchedule(cfg)
currentVersion, err := os.VersionForEpoch(epoch)
if err != nil {
return nil, err
}
prevVersion, err := os.Previous(currentVersion)
if err != nil {
if !errors.Is(err, forks.ErrNoPreviousVersion) {
return nil, err
}
// use same version for both in the case of genesis
prevVersion = currentVersion
}
forkEpoch := cfg.ForkVersionSchedule[currentVersion]
return &ethpb.Fork{
PreviousVersion: prevVersion[:],
CurrentVersion: currentVersion[:],
Epoch: forkEpoch,
}, nil
}
func defaultTestHeadState(t *testing.T, cfg *params.BeaconChainConfig) (state.BeaconState, primitives.Epoch) {
st, err := util.NewBeaconStateAltair()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, cfg.AltairForkEpoch)
fork, err := params.Fork(cfg.AltairForkEpoch)
require.NoError(t, err)
require.NoError(t, st.SetFork(fork))

View File

@@ -3,7 +3,6 @@ package sync
import (
"io"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/libp2p/go-libp2p/core/network"
@@ -87,12 +86,8 @@ type ContextByteVersions map[[4]byte]int
// and the runtime/version identifier for the corresponding fork.
func ContextByteVersionsForValRoot(valRoot [32]byte) (ContextByteVersions, error) {
m := make(ContextByteVersions)
for fv, v := range params.ConfigForkVersions(params.BeaconConfig()) {
digest, err := signing.ComputeForkDigest(fv[:], valRoot[:])
if err != nil {
return nil, errors.Wrapf(err, "unable to compute fork digest for fork version %#x", fv)
}
m[digest] = v
for _, entry := range params.SortedNetworkScheduleEntries() {
m[entry.ForkDigest] = entry.VersionEnum
}
return m, nil
}
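For orientation, a small hedged sketch of how the resulting ContextByteVersions map is typically consumed; the helper name and the assumption that the four context bytes were already read from a req/resp stream are illustrative, not part of this change.
// resolveForkVersion is a hypothetical helper: given the four context bytes read from a
// req/resp stream, it resolves the runtime/version identifier of the fork that produced them.
func resolveForkVersion(valRoot [32]byte, ctxBytes [4]byte) (int, bool) {
ctxMap, err := ContextByteVersionsForValRoot(valRoot)
if err != nil {
return 0, false
}
v, ok := ctxMap[ctxBytes]
return v, ok
}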

View File

@@ -22,11 +22,11 @@ import (
)
type testSetup struct {
service *Service
p2pService *p2ptest.TestP2P
beaconDB db.Database
ctx context.Context
initialSlot primitives.Slot
service *Service
p2pService *p2ptest.TestP2P
beaconDB db.Database
ctx context.Context
initialSlot primitives.Slot
initialCount uint64
}
@@ -34,7 +34,7 @@ func setupCustodyTest(t *testing.T, withChain bool) *testSetup {
ctx := t.Context()
p2pService := p2ptest.NewTestP2P(t)
beaconDB := dbtesting.SetupDB(t)
const (
initialEarliestSlot = primitives.Slot(50)
initialCustodyCount = uint64(5)
@@ -123,7 +123,7 @@ func TestUpdateCustodyInfoIfNeeded(t *testing.T) {
t.Run("Skip update when actual custody count >= target", func(t *testing.T) {
setup := setupCustodyTest(t, false)
err := setup.service.updateCustodyInfoIfNeeded()
require.NoError(t, err)

View File

@@ -0,0 +1,864 @@
package sync
import (
"context"
"slices"
"sync"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
prysmP2P "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/crypto/rand"
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
goPeer "github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// DataColumnSidecarsParams stores the common parameters needed to
// fetch data column sidecars from peers.
type DataColumnSidecarsParams struct {
Ctx context.Context // Context
Tor blockchain.TemporalOracle // Temporal oracle, useful to get the current slot
P2P prysmP2P.P2P // P2P network interface
RateLimiter *leakybucket.Collector // Rate limiter for outgoing requests
CtxMap ContextByteVersions // Context map, useful to know if a message is mapped to the correct fork
Storage filesystem.DataColumnStorageReader // Data columns storage
NewVerifier verification.NewDataColumnsVerifier // Data columns verifier used to check the conformity of incoming data column sidecars
}
// FetchDataColumnSidecars retrieves data column sidecars from storage and peers for the given
// blocks and requested data column indices. It employs a multi-step strategy:
//
// 1. Direct retrieval: If all requested columns are available in storage, they are
// retrieved directly without reconstruction.
// 2. Reconstruction-based retrieval: If some requested columns are missing but sufficient
// stored columns exist (at least the minimum required for reconstruction), the function
// reconstructs all columns and extracts the requested indices.
// 3. Peer retrieval: If storage and reconstruction fail, missing columns are requested
// from connected peers that are expected to custody the required data.
//
// The function returns a map of block roots to their corresponding verified read-only data
// columns. It returns an error if data column storage is unavailable, if storage/reconstruction
// operations fail unexpectedly, or if not all requested columns could be retrieved from peers.
func FetchDataColumnSidecars(
params DataColumnSidecarsParams,
roBlocks []blocks.ROBlock,
indicesMap map[uint64]bool,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
if len(roBlocks) == 0 || len(indicesMap) == 0 {
return nil, nil
}
indices := sortedSliceFromMap(indicesMap)
slotsWithCommitments := make(map[primitives.Slot]bool)
indicesByRootToQuery := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
indicesByRootStored := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
for _, roBlock := range roBlocks {
// Filter out blocks without commitments.
block := roBlock.Block()
commitments, err := block.Body().BlobKzgCommitments()
if err != nil {
return nil, errors.Wrapf(err, "get blob kzg commitments for block root %#x", roBlock.Root())
}
if len(commitments) == 0 {
continue
}
slotsWithCommitments[block.Slot()] = true
root := roBlock.Root()
// Step 1: Get the requested sidecars for this root if available in storage
requestedColumns, err := tryGetDirectColumns(params.Storage, root, indices)
if err != nil {
return nil, errors.Wrap(err, "try get direct columns")
}
if requestedColumns != nil {
result[root] = requestedColumns
continue
}
// Step 2: If step 1 failed, reconstruct the requested sidecars from what is available in storage
requestedColumns, err = tryGetReconstructedColumns(params.Storage, root, indices)
if err != nil {
return nil, errors.Wrap(err, "try get reconstructed columns")
}
if requestedColumns != nil {
result[root] = requestedColumns
continue
}
// Step 3a: If steps 1 and 2 failed, keep track of the sidecars that need to be queried from peers
// and those that are already stored.
indicesToQueryMap, indicesStoredMap := categorizeIndices(params.Storage, root, indices)
if len(indicesToQueryMap) > 0 {
indicesByRootToQuery[root] = indicesToQueryMap
}
if len(indicesStoredMap) > 0 {
indicesByRootStored[root] = indicesStoredMap
}
}
// Early return if no sidecars need to be queried from peers.
if len(indicesByRootToQuery) == 0 {
return result, nil
}
// Step 3b: Request missing sidecars from peers.
start, count := time.Now(), computeTotalCount(indicesByRootToQuery)
peerResult, err := tryRequestingColumnsFromPeers(params, roBlocks, slotsWithCommitments, indicesByRootToQuery)
if err != nil {
return nil, errors.Wrap(err, "request from peers")
}
// Merge the peer-fetched columns into the result instead of overwriting it, so columns
// already retrieved from storage in steps 1 and 2 are preserved.
for root, columns := range peerResult {
result[root] = append(result[root], columns...)
}
log.WithFields(logrus.Fields{"duration": time.Since(start), "count": count}).Debug("Requested data column sidecars from peers")
return result, nil
}
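To make the three-step strategy concrete, here is a hedged sketch of the shape of a call site: a hypothetical wrapper that passes the caller's existing dependencies through DataColumnSidecarsParams. The wrapper name and the assumption that the caller already holds these dependencies are illustrative only.
// fetchRequestedColumns is a hypothetical wrapper: the caller supplies its existing
// dependencies and the set of column indices it still needs for the given blocks.
func fetchRequestedColumns(
ctx context.Context,
tor blockchain.TemporalOracle,
p2p prysmP2P.P2P,
ctxMap ContextByteVersions,
storage filesystem.DataColumnStorageReader,
newVerifier verification.NewDataColumnsVerifier,
roBlocks []blocks.ROBlock,
indices map[uint64]bool,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
p := DataColumnSidecarsParams{
Ctx: ctx,
Tor: tor,
P2P: p2p,
RateLimiter: nil, // optional: a *leakybucket.Collector may be supplied to throttle outgoing requests
CtxMap: ctxMap,
Storage: storage,
NewVerifier: newVerifier,
}
return FetchDataColumnSidecars(p, roBlocks, indices)
}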
// tryGetDirectColumns attempts to retrieve all requested columns directly from storage.
// It returns the columns if they are all available, (nil, nil) if some are missing,
// or (nil, err) if an error occurs.
func tryGetDirectColumns(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
// Check if all requested indices are present in storage
storedIndices := storage.Summary(blockRoot).Stored()
allRequestedPresent := true
for _, requestedIndex := range indices {
if !storedIndices[requestedIndex] {
allRequestedPresent = false
break
}
}
if !allRequestedPresent {
return nil, nil
}
// All requested data is present, retrieve it directly from storage
requestedColumns, err := storage.Get(blockRoot, indices)
if err != nil {
return nil, errors.Wrapf(err, "failed to get data columns for block root %#x", blockRoot)
}
return requestedColumns, nil
}
// tryGetReconstructedColumns attempts to retrieve the requested columns via reconstruction
// when enough columns are stored. It returns the columns if reconstruction succeeds,
// (nil, nil) if too few columns are stored, or (nil, err) if an error occurs.
func tryGetReconstructedColumns(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
// Check if we have enough columns for reconstruction
summary := storage.Summary(blockRoot)
if summary.Count() < peerdas.MinimumColumnsCountToReconstruct() {
return nil, nil
}
// Retrieve all stored columns for reconstruction
allStoredColumns, err := storage.Get(blockRoot, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to get all stored columns for reconstruction for block root %#x", blockRoot)
}
// Attempt reconstruction
reconstructedColumns, err := peerdas.ReconstructDataColumnSidecars(allStoredColumns)
if err != nil {
return nil, errors.Wrapf(err, "failed to reconstruct data columns for block root %#x", blockRoot)
}
// Health check: ensure we have the expected number of columns
numberOfColumns := params.BeaconConfig().NumberOfColumns
if uint64(len(reconstructedColumns)) != numberOfColumns {
return nil, errors.Errorf("reconstructed %d columns but expected %d for block root %#x", len(reconstructedColumns), numberOfColumns, blockRoot)
}
// Extract only the requested indices from reconstructed data using direct indexing
requestedColumns := make([]blocks.VerifiedRODataColumn, 0, len(indices))
for _, requestedIndex := range indices {
if requestedIndex >= numberOfColumns {
return nil, errors.Errorf("requested column index %d exceeds maximum %d for block root %#x", requestedIndex, numberOfColumns-1, blockRoot)
}
requestedColumns = append(requestedColumns, reconstructedColumns[requestedIndex])
}
return requestedColumns, nil
}
// categorizeIndices separates indices into those that need to be queried from peers
// and those that are already stored.
func categorizeIndices(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) (map[uint64]bool, map[uint64]bool) {
indicesToQuery := make(map[uint64]bool, len(indices))
indicesStored := make(map[uint64]bool, len(indices))
allStoredIndices := storage.Summary(blockRoot).Stored()
for _, index := range indices {
if allStoredIndices[index] {
indicesStored[index] = true
continue
}
indicesToQuery[index] = true
}
return indicesToQuery, indicesStored
}
// tryRequestingColumnsFromPeers attempts to request missing data column sidecars from connected peers.
// It explores the connected peers to find those that are expected to custody the requested columns,
// and it returns only once every requested column has either been retrieved or been attempted
// against all eligible peers.
// It returns a map of block roots to their verified read-only data column sidecars, or an error if
// some requested sidecars could not be retrieved from any peer.
func tryRequestingColumnsFromPeers(
p DataColumnSidecarsParams,
roBlocks []blocks.ROBlock,
slotsWithCommitments map[primitives.Slot]bool,
indicesByRootToQuery map[[fieldparams.RootLength]byte]map[uint64]bool,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
// Create a new random source for peer selection.
randomSource := rand.NewGenerator()
// Copy the requested indices to avoid modifying the original map.
missingIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(indicesByRootToQuery))
for root, indices := range indicesByRootToQuery {
missingIndicesByRoot[root] = make(map[uint64]bool, len(indices))
for index := range indices {
missingIndicesByRoot[root][index] = true
}
}
// Compute slots by block root.
slotByRoot := computeSlotByBlockRoot(roBlocks)
// Determine all the sidecars each peer is expected to custody.
connectedPeersSlice := p.P2P.Peers().Connected()
connectedPeers := make(map[goPeer.ID]bool, len(connectedPeersSlice))
for _, peer := range connectedPeersSlice {
connectedPeers[peer] = true
}
indicesByRootByPeer, err := computeIndicesByRootByPeer(p.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
if err != nil {
return nil, errors.Wrap(err, "explore peers")
}
verifiedColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
for len(missingIndicesByRoot) > 0 && len(indicesByRootByPeer) > 0 {
count := 0
for _, indices := range missingIndicesByRoot {
count += len(indices)
}
// Select peers to query the missing sidecars from.
indicesByRootByPeerToQuery, err := selectPeers(p, randomSource, len(missingIndicesByRoot), indicesByRootByPeer)
if err != nil {
return nil, errors.Wrap(err, "select peers")
}
// Remove selected peers from the maps.
for peer := range indicesByRootByPeerToQuery {
delete(connectedPeers, peer)
}
// Fetch the sidecars from the chosen peers.
roDataColumnsByPeer, err := fetchDataColumnSidecarsFromPeers(p, slotByRoot, slotsWithCommitments, indicesByRootByPeerToQuery)
if err != nil {
return nil, errors.Wrap(err, "fetch data column sidecars from peers")
}
// Verify the received data column sidecars.
verifiedRoDataColumnSidecars, err := verifyDataColumnSidecarsByPeer(p.P2P, p.NewVerifier, roDataColumnsByPeer)
if err != nil {
return nil, errors.Wrap(err, "verify data columns sidecars by peer")
}
// Remove the verified sidecars from the missing indices map and compute the new verified columns by root.
newMissingIndicesByRoot, localVerifiedColumnsByRoot := updateResults(verifiedRoDataColumnSidecars, missingIndicesByRoot)
missingIndicesByRoot = newMissingIndicesByRoot
for root, verifiedRoDataColumns := range localVerifiedColumnsByRoot {
verifiedColumnsByRoot[root] = append(verifiedColumnsByRoot[root], verifiedRoDataColumns...)
}
// Compute indices by root by peers with the updated missing indices and connected peers.
indicesByRootByPeer, err = computeIndicesByRootByPeer(p.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
if err != nil {
return nil, errors.Wrap(err, "explore peers")
}
}
if len(missingIndicesByRoot) > 0 {
return nil, errors.New("not all requested data column sidecars were retrieved from peers")
}
return verifiedColumnsByRoot, nil
}
// selectPeers selects the peers from which to query the sidecars.
func selectPeers(
p DataColumnSidecarsParams,
randomSource *rand.Rand,
count int,
origIndicesByRootByPeer map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool,
) (map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
const randomPeerTimeout = 30 * time.Second
// Select peers to query the missing sidecars from.
indicesByRootByPeer := copyIndicesByRootByPeer(origIndicesByRootByPeer)
internalIndicesByRootByPeer := copyIndicesByRootByPeer(indicesByRootByPeer)
indicesByRootByPeerToQuery := make(map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool)
for len(internalIndicesByRootByPeer) > 0 {
// Randomly select a peer with enough bandwidth.
peer, err := func() (goPeer.ID, error) {
ctx, cancel := context.WithTimeout(p.Ctx, randomPeerTimeout)
defer cancel()
peer, err := randomPeer(ctx, randomSource, p.RateLimiter, count, indicesByRootByPeer)
if err != nil {
return "", errors.Wrap(err, "select random peer")
}
return peer, err
}()
if err != nil {
return nil, err
}
// Query all the sidecars that peer can offer us.
newIndicesByRoot := indicesByRootByPeer[peer]
indicesByRootByPeerToQuery[peer] = newIndicesByRoot
// Remove this peer from the maps to avoid re-selection.
delete(indicesByRootByPeer, peer)
delete(internalIndicesByRootByPeer, peer)
// Delete the corresponding sidecars from other peers in the internal map
// to avoid re-selection during this iteration.
for peer, indicesByRoot := range internalIndicesByRootByPeer {
for root, indices := range indicesByRoot {
newIndices := newIndicesByRoot[root]
for index := range newIndices {
delete(indices, index)
}
if len(indices) == 0 {
delete(indicesByRoot, root)
}
}
if len(indicesByRoot) == 0 {
delete(internalIndicesByRootByPeer, peer)
}
}
}
return indicesByRootByPeerToQuery, nil
}
// updateResults updates the missing indices and verified sidecars maps based on the newly verified sidecars.
func updateResults(
verifiedSidecars []blocks.VerifiedRODataColumn,
origMissingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
) (map[[fieldparams.RootLength]byte]map[uint64]bool, map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn) {
// Copy the original map to avoid modifying it directly.
missingIndicesByRoot := copyIndicesByRoot(origMissingIndicesByRoot)
verifiedSidecarsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
for _, verifiedSidecar := range verifiedSidecars {
blockRoot := verifiedSidecar.BlockRoot()
index := verifiedSidecar.Index
// Add to the result map grouped by block root
verifiedSidecarsByRoot[blockRoot] = append(verifiedSidecarsByRoot[blockRoot], verifiedSidecar)
if indices, ok := missingIndicesByRoot[blockRoot]; ok {
delete(indices, index)
if len(indices) == 0 {
delete(missingIndicesByRoot, blockRoot)
}
}
}
return missingIndicesByRoot, verifiedSidecarsByRoot
}
// fetchDataColumnSidecarsFromPeers retrieves data column sidecars from peers.
func fetchDataColumnSidecarsFromPeers(
params DataColumnSidecarsParams,
slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
slotsWithCommitments map[primitives.Slot]bool,
indicesByRootByPeer map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool,
) (map[goPeer.ID][]blocks.RODataColumn, error) {
var (
wg sync.WaitGroup
mut sync.Mutex
)
roDataColumnsByPeer := make(map[goPeer.ID][]blocks.RODataColumn)
wg.Add(len(indicesByRootByPeer))
for peerID, indicesByRoot := range indicesByRootByPeer {
go func(peerID goPeer.ID, indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) {
defer wg.Done()
requestedCount := 0
for _, indices := range indicesByRoot {
requestedCount += len(indices)
}
log := log.WithFields(logrus.Fields{
"peerID": peerID,
"agent": agentString(peerID, params.P2P.Host()),
"blockCount": len(indicesByRoot),
"totalRequestedCount": requestedCount,
})
roDataColumns, err := sendDataColumnSidecarsRequest(params, slotByRoot, slotsWithCommitments, peerID, indicesByRoot)
if err != nil {
log.WithError(err).Warning("Failed to send data column sidecars request")
return
}
mut.Lock()
defer mut.Unlock()
roDataColumnsByPeer[peerID] = roDataColumns
}(peerID, indicesByRoot)
}
wg.Wait()
return roDataColumnsByPeer, nil
}
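// sendDataColumnSidecarsRequest requests the given data column sidecars from a single peer.
// It prefers by-range requests when the requested indices are identical for all blocks and
// the blocks are contiguous; otherwise it falls back to a single by-root request.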
func sendDataColumnSidecarsRequest(
params DataColumnSidecarsParams,
slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
slotsWithCommitments map[primitives.Slot]bool,
peerID goPeer.ID,
indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
) ([]blocks.RODataColumn, error) {
const batchSize = 32
rootCount := int64(len(indicesByRoot))
requestedSidecarsCount := 0
for _, indices := range indicesByRoot {
requestedSidecarsCount += len(indices)
}
log := log.WithFields(logrus.Fields{
"peerID": peerID,
"agent": agentString(peerID, params.P2P.Host()),
"requestedSidecars": requestedSidecarsCount,
})
// Try to build by-range requests first.
byRangeRequests, err := buildByRangeRequests(slotByRoot, slotsWithCommitments, indicesByRoot, batchSize)
if err != nil {
return nil, errors.Wrap(err, "craft by range request")
}
// If we have a valid by range request, send it.
if len(byRangeRequests) > 0 {
count := 0
for _, indices := range indicesByRoot {
count += len(indices)
}
start := time.Now()
roDataColumns := make([]blocks.RODataColumn, 0, count)
for _, request := range byRangeRequests {
if params.RateLimiter != nil {
params.RateLimiter.Add(peerID.String(), rootCount)
}
localRoDataColumns, err := SendDataColumnSidecarsByRangeRequest(params, peerID, request)
if err != nil {
return nil, errors.Wrapf(err, "send data column sidecars by range request to peer %s", peerID)
}
roDataColumns = append(roDataColumns, localRoDataColumns...)
}
log.WithFields(logrus.Fields{
"respondedSidecars": len(roDataColumns),
"requests": len(byRangeRequests),
"type": "byRange",
"duration": time.Since(start),
}).Debug("Received data column sidecars")
return roDataColumns, nil
}
// Build identifiers for the by root request.
byRootRequest := buildByRootRequest(indicesByRoot)
// Send the by root request.
start := time.Now()
if params.RateLimiter != nil {
params.RateLimiter.Add(peerID.String(), rootCount)
}
roDataColumns, err := SendDataColumnSidecarsByRootRequest(params, peerID, byRootRequest)
if err != nil {
return nil, errors.Wrapf(err, "send data column sidecars by root request to peer %s", peerID)
}
log.WithFields(logrus.Fields{
"respondedSidecars": len(roDataColumns),
"requests": 1,
"type": "byRoot",
"duration": time.Since(start),
}).Debug("Received data column sidecars")
return roDataColumns, nil
}
// buildByRangeRequests constructs by-range requests from the given indices,
// but only if the requested indices are identical for all blocks and the blocks are contiguous.
// (Missing blocks and blocks without commitments still count as contiguous.)
// If either condition is not met, it returns nil.
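// For example (mirroring the nominal case in the accompanying test): blocks at slots 1, 3, 4 and 7
// with identical column indices {1, 2}, no commitments at the skipped slots, and a batch size of 3
// produce three requests over the slot span 1..7: {StartSlot: 1, Count: 3}, {StartSlot: 4, Count: 3}
// and {StartSlot: 7, Count: 1}, each asking for columns 1 and 2.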
func buildByRangeRequests(
slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
slotsWithCommitments map[primitives.Slot]bool,
indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
batchSize uint64,
) ([]*ethpb.DataColumnSidecarsByRangeRequest, error) {
if len(indicesByRoot) == 0 {
return nil, nil
}
var reference map[uint64]bool
slots := make([]primitives.Slot, 0, len(slotByRoot))
for root, indices := range indicesByRoot {
if reference == nil {
reference = indices
}
if !compareIndices(reference, indices) {
return nil, nil
}
slot, ok := slotByRoot[root]
if !ok {
return nil, errors.Errorf("slot not found for block root %#x", root)
}
slots = append(slots, slot)
}
slices.Sort(slots)
for i := 1; i < len(slots); i++ {
previous, current := slots[i-1], slots[i]
if current == previous+1 {
continue
}
for j := previous + 1; j < current; j++ {
if slotsWithCommitments[j] {
return nil, nil
}
}
}
columns := sortedSliceFromMap(reference)
startSlot, endSlot := slots[0], slots[len(slots)-1]
totalCount := uint64(endSlot - startSlot + 1)
requests := make([]*ethpb.DataColumnSidecarsByRangeRequest, 0, totalCount/batchSize)
for start := startSlot; start <= endSlot; start += primitives.Slot(batchSize) {
end := min(start+primitives.Slot(batchSize)-1, endSlot)
request := &ethpb.DataColumnSidecarsByRangeRequest{
StartSlot: start,
Count: uint64(end - start + 1),
Columns: columns,
}
requests = append(requests, request)
}
return requests, nil
}
// buildByRootRequest constructs a by root request from the given indices.
func buildByRootRequest(indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) p2ptypes.DataColumnsByRootIdentifiers {
identifiers := make(p2ptypes.DataColumnsByRootIdentifiers, 0, len(indicesByRoot))
for root, indices := range indicesByRoot {
identifier := &eth.DataColumnsByRootIdentifier{
BlockRoot: root[:],
Columns: sortedSliceFromMap(indices),
}
identifiers = append(identifiers, identifier)
}
return identifiers
}
// verifyDataColumnSidecarsByPeer verifies the received data column sidecars.
// If at least one sidecar from a peer is invalid, the peer is downscored and
// all its sidecars are rejected. (Sidecars from other peers are still accepted.)
func verifyDataColumnSidecarsByPeer(
p2p prysmP2P.P2P,
newVerifier verification.NewDataColumnsVerifier,
roDataColumnsByPeer map[goPeer.ID][]blocks.RODataColumn,
) ([]blocks.VerifiedRODataColumn, error) {
// First optimistically verify all received data columns in a single batch.
count := 0
for _, columns := range roDataColumnsByPeer {
count += len(columns)
}
roDataColumnSidecars := make([]blocks.RODataColumn, 0, count)
for _, columns := range roDataColumnsByPeer {
roDataColumnSidecars = append(roDataColumnSidecars, columns...)
}
verifiedRoDataColumnSidecars, err := verifyByRootDataColumnSidecars(newVerifier, roDataColumnSidecars)
if err == nil {
// This is the happy path where all sidecars are verified.
return verifiedRoDataColumnSidecars, nil
}
// An error occurred during verification, which means that at least one sidecar is invalid.
// Re-verify peer by peer to identify the faulty peer(s), reject all of their sidecars, and downscore them.
verifiedRoDataColumnSidecars = make([]blocks.VerifiedRODataColumn, 0, count)
for peer, columns := range roDataColumnsByPeer {
peerVerifiedRoDataColumnSidecars, err := verifyByRootDataColumnSidecars(newVerifier, columns)
if err != nil {
// This peer has invalid sidecars.
log := log.WithError(err).WithField("peerID", peer)
newScore := p2p.Peers().Scorers().BadResponsesScorer().Increment(peer)
log.Warning("Peer returned invalid data column sidecars")
log.WithFields(logrus.Fields{"reason": "invalidDataColumnSidecars", "newScore": newScore}).Debug("Downscore peer")
}
verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, peerVerifiedRoDataColumnSidecars...)
}
return verifiedRoDataColumnSidecars, nil
}
// verifyByRootDataColumnSidecars verifies the provided read-only data columns against the
// requirements for data column sidecars received via the by root request.
func verifyByRootDataColumnSidecars(newVerifier verification.NewDataColumnsVerifier, roDataColumns []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
verifier := newVerifier(roDataColumns, verification.ByRootRequestDataColumnSidecarRequirements)
if err := verifier.ValidFields(); err != nil {
return nil, errors.Wrap(err, "valid fields")
}
if err := verifier.SidecarInclusionProven(); err != nil {
return nil, errors.Wrap(err, "sidecar inclusion proven")
}
if err := verifier.SidecarKzgProofVerified(); err != nil {
return nil, errors.Wrap(err, "sidecar KZG proof verified")
}
verifiedRoDataColumns, err := verifier.VerifiedRODataColumns()
if err != nil {
return nil, errors.Wrap(err, "verified RO data columns - should never happen")
}
return verifiedRoDataColumns, nil
}
// computeIndicesByRootByPeer returns a peer->root->indices map restricted to the roots and
// indices given in `indicesByBlockRoot`. A peer is selected for a given root only if its
// head slot is at least the block slot.
func computeIndicesByRootByPeer(
p2p prysmP2P.P2P,
slotByBlockRoot map[[fieldparams.RootLength]byte]primitives.Slot,
indicesByBlockRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
peers map[goPeer.ID]bool,
) (map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
// First, compute custody columns for all peers
peersByIndex := make(map[uint64]map[goPeer.ID]bool)
headSlotByPeer := make(map[goPeer.ID]primitives.Slot)
for peer := range peers {
// Computes the custody columns for each peer
nodeID, err := prysmP2P.ConvertPeerIDToNodeID(peer)
if err != nil {
return nil, errors.Wrapf(err, "convert peer ID to node ID for peer %s", peer)
}
custodyGroupCount := p2p.CustodyGroupCountFromPeer(peer)
dasInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
if err != nil {
return nil, errors.Wrapf(err, "peerdas info for peer %s", peer)
}
for column := range dasInfo.CustodyColumns {
if _, exists := peersByIndex[column]; !exists {
peersByIndex[column] = make(map[goPeer.ID]bool)
}
peersByIndex[column][peer] = true
}
// Compute the head slot for each peer
peerChainState, err := p2p.Peers().ChainState(peer)
if err != nil {
return nil, errors.Wrapf(err, "get chain state for peer %s", peer)
}
if peerChainState == nil {
return nil, errors.Errorf("chain state is nil for peer %s", peer)
}
headSlotByPeer[peer] = peerChainState.HeadSlot
}
// For each block root and its indices, find suitable peers
indicesByRootByPeer := make(map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool)
for blockRoot, indices := range indicesByBlockRoot {
blockSlot, ok := slotByBlockRoot[blockRoot]
if !ok {
return nil, errors.Errorf("slot not found for block root %#x", blockRoot)
}
for index := range indices {
peers := peersByIndex[index]
for peer := range peers {
peerHeadSlot, ok := headSlotByPeer[peer]
if !ok {
return nil, errors.Errorf("head slot not found for peer %s", peer)
}
if peerHeadSlot < blockSlot {
continue
}
// Build peers->root->indices map
if _, exists := indicesByRootByPeer[peer]; !exists {
indicesByRootByPeer[peer] = make(map[[fieldparams.RootLength]byte]map[uint64]bool)
}
if _, exists := indicesByRootByPeer[peer][blockRoot]; !exists {
indicesByRootByPeer[peer][blockRoot] = make(map[uint64]bool)
}
indicesByRootByPeer[peer][blockRoot][index] = true
}
}
}
return indicesByRootByPeer, nil
}
// randomPeer selects a random peer. If no peer has enough bandwidth, it waits and retries.
// Returns the selected peer ID and any error.
func randomPeer(
ctx context.Context,
randomSource *rand.Rand,
rateLimiter *leakybucket.Collector,
count int,
indicesByRootByPeer map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool,
) (goPeer.ID, error) {
const waitPeriod = 5 * time.Second
peerCount := len(indicesByRootByPeer)
if peerCount == 0 {
return "", errors.New("no peers available")
}
for ctx.Err() == nil {
nonRateLimitedPeers := make([]goPeer.ID, 0, len(indicesByRootByPeer))
for peer := range indicesByRootByPeer {
// If no rate limiter is provided, treat all peers as non-rate-limited
if rateLimiter == nil {
nonRateLimitedPeers = append(nonRateLimitedPeers, peer)
} else {
remaining := rateLimiter.Remaining(peer.String())
if remaining >= int64(count) {
nonRateLimitedPeers = append(nonRateLimitedPeers, peer)
}
}
}
if len(nonRateLimitedPeers) == 0 {
log.WithFields(logrus.Fields{
"peerCount": peerCount,
"delay": waitPeriod,
}).Debug("Waiting for a peer with enough bandwidth for data column sidecars")
time.Sleep(waitPeriod)
continue
}
randomIndex := randomSource.Intn(len(nonRateLimitedPeers))
return nonRateLimitedPeers[randomIndex], nil
}
return "", ctx.Err()
}
// copyIndicesByRootByPeer creates a deep copy of the given nested map.
// Returns a new map with the same structure and contents.
func copyIndicesByRootByPeer(original map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool) map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool {
copied := make(map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, len(original))
for peer, indicesByRoot := range original {
copied[peer] = copyIndicesByRoot(indicesByRoot)
}
return copied
}
// copyIndicesByRoot creates a deep copy of the given nested map.
// Returns a new map with the same structure and contents.
func copyIndicesByRoot(original map[[fieldparams.RootLength]byte]map[uint64]bool) map[[fieldparams.RootLength]byte]map[uint64]bool {
copied := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(original))
for root, indexMap := range original {
copied[root] = make(map[uint64]bool, len(indexMap))
for index, value := range indexMap {
copied[root][index] = value
}
}
return copied
}
// compareIndices compares two map[uint64]bool and returns true if they are equal.
func compareIndices(left, right map[uint64]bool) bool {
if len(left) != len(right) {
return false
}
for key, leftValue := range left {
rightValue, exists := right[key]
if !exists || leftValue != rightValue {
return false
}
}
return true
}
// sortedSliceFromMap converts a map[uint64]bool to a sorted slice of keys.
func sortedSliceFromMap(m map[uint64]bool) []uint64 {
result := make([]uint64, 0, len(m))
for k := range m {
result = append(result, k)
}
slices.Sort(result)
return result
}
// computeSlotByBlockRoot maps each block root to its corresponding slot.
func computeSlotByBlockRoot(roBlocks []blocks.ROBlock) map[[fieldparams.RootLength]byte]primitives.Slot {
slotByBlockRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, len(roBlocks))
for _, roBlock := range roBlocks {
slotByBlockRoot[roBlock.Root()] = roBlock.Block().Slot()
}
return slotByBlockRoot
}
// computeTotalCount calculates the total count of indices across all roots.
func computeTotalCount(input map[[fieldparams.RootLength]byte]map[uint64]bool) int {
totalCount := 0
for _, indices := range input {
totalCount += len(indices)
}
return totalCount
}

View File

@@ -0,0 +1,447 @@
package sync
import (
"context"
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/crypto/rand"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/libp2p/go-libp2p/core/peer"
)
func TestBuildByRangeRequests(t *testing.T) {
const nullBatchSize = 0
t.Run("empty", func(t *testing.T) {
actual, err := buildByRangeRequests(nil, nil, nil, nullBatchSize)
require.NoError(t, err)
require.Equal(t, 0, len(actual))
})
t.Run("missing Root", func(t *testing.T) {
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
{1}: {1: true, 2: true},
}
_, err := buildByRangeRequests(nil, nil, indicesByRoot, nullBatchSize)
require.NotNil(t, err)
})
t.Run("indices differ", func(t *testing.T) {
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
{1}: {1: true, 2: true},
{2}: {1: true, 2: true},
{3}: {2: true, 3: true},
}
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
{1}: 1,
{2}: 2,
{3}: 3,
}
actual, err := buildByRangeRequests(slotByRoot, nil, indicesByRoot, nullBatchSize)
require.NoError(t, err)
require.Equal(t, 0, len(actual))
})
t.Run("slots non contiguous", func(t *testing.T) {
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
{1}: {1: true, 2: true},
{2}: {1: true, 2: true},
}
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
{1}: 1,
{2}: 3,
}
slotsWithCommitments := map[primitives.Slot]bool{
1: true,
2: true,
3: true,
}
actual, err := buildByRangeRequests(slotByRoot, slotsWithCommitments, indicesByRoot, nullBatchSize)
require.NoError(t, err)
require.Equal(t, 0, len(actual))
})
t.Run("nominal", func(t *testing.T) {
const batchSize = 3
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
{1}: {1: true, 2: true},
{3}: {1: true, 2: true},
{4}: {1: true, 2: true},
{7}: {1: true, 2: true},
}
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
{1}: 1,
{3}: 3,
{4}: 4,
{7}: 7,
}
slotsWithCommitments := map[primitives.Slot]bool{
1: true,
3: true,
4: true,
7: true,
}
expected := []*ethpb.DataColumnSidecarsByRangeRequest{
{
StartSlot: 1,
Count: 3,
Columns: []uint64{1, 2},
},
{
StartSlot: 4,
Count: 3,
Columns: []uint64{1, 2},
},
{
StartSlot: 7,
Count: 1,
Columns: []uint64{1, 2},
},
}
actual, err := buildByRangeRequests(slotByRoot, slotsWithCommitments, indicesByRoot, batchSize)
require.NoError(t, err)
require.DeepEqual(t, expected, actual)
})
}
func TestBuildByRootRequest(t *testing.T) {
root1 := [fieldparams.RootLength]byte{1}
root2 := [fieldparams.RootLength]byte{2}
input := map[[fieldparams.RootLength]byte]map[uint64]bool{
root1: {1: true, 2: true},
root2: {3: true},
}
expected := p2ptypes.DataColumnsByRootIdentifiers{
{
BlockRoot: root1[:],
Columns: []uint64{1, 2},
},
{
BlockRoot: root2[:],
Columns: []uint64{3},
},
}
actual := buildByRootRequest(input)
require.DeepEqual(t, expected, actual)
}
func TestVerifyDataColumnSidecarsByPeer(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)
t.Run("nominal", func(t *testing.T) {
const (
start, stop = 0, 15
blobCount = 1
)
p2p := testp2p.NewTestP2P(t)
// Setup test data and expectations
_, roDataColumnSidecars, expected := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
roDataColumnsByPeer := map[peer.ID][]blocks.RODataColumn{
"peer1": roDataColumnSidecars[start:5],
"peer2": roDataColumnSidecars[5:9],
"peer3": roDataColumnSidecars[9:stop],
}
gs := startup.NewClockSynchronizer()
err := gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
require.NoError(t, err)
waiter := verification.NewInitializerWaiter(gs, nil, nil)
initializer, err := waiter.WaitForInitializer(t.Context())
require.NoError(t, err)
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
actual, err := verifyDataColumnSidecarsByPeer(p2p, newDataColumnsVerifier, roDataColumnsByPeer)
require.NoError(t, err)
require.Equal(t, stop-start, len(actual))
for i := range actual {
actualSidecar := actual[i]
index := actualSidecar.Index
expectedSidecar := expected[index]
require.DeepEqual(t, expectedSidecar, actualSidecar)
}
})
t.Run("one rogue peer", func(t *testing.T) {
const (
start, middle, stop = 0, 5, 15
blobCount = 1
)
p2p := testp2p.NewTestP2P(t)
// Setup test data and expectations
_, roDataColumnSidecars, expected := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
// Modify one sidecar to ensure proof verification fails.
if roDataColumnSidecars[middle].KzgProofs[0][0] == 0 {
roDataColumnSidecars[middle].KzgProofs[0][0]++
} else {
roDataColumnSidecars[middle].KzgProofs[0][0]--
}
roDataColumnsByPeer := map[peer.ID][]blocks.RODataColumn{
"peer1": roDataColumnSidecars[start:middle],
"peer2": roDataColumnSidecars[5:middle],
"peer3": roDataColumnSidecars[middle:stop],
}
gs := startup.NewClockSynchronizer()
err := gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
require.NoError(t, err)
waiter := verification.NewInitializerWaiter(gs, nil, nil)
initializer, err := waiter.WaitForInitializer(t.Context())
require.NoError(t, err)
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
actual, err := verifyDataColumnSidecarsByPeer(p2p, newDataColumnsVerifier, roDataColumnsByPeer)
require.NoError(t, err)
require.Equal(t, middle-start, len(actual))
for i := range actual {
actualSidecar := actual[i]
index := actualSidecar.Index
expectedSidecar := expected[index]
require.DeepEqual(t, expectedSidecar, actualSidecar)
}
})
}
func TestComputeIndicesByRootByPeer(t *testing.T) {
peerIdStrs := []string{
"16Uiu2HAm3k5Npu6EaYWxiEvzsdLseEkjVyoVhvbxWEuyqdBgBBbq", // Custodies 89, 94, 97 & 122
"16Uiu2HAmTwQPAwzTr6hTgBmKNecCfH6kP3Kbzxj36ZRyyQ46L6gf", // Custodies 1, 11, 37 & 86
"16Uiu2HAmMDB5uUePTpN7737m78ehePfWPtBL9qMGdH8kCygjzNA8", // Custodies 2, 37, 38 & 68
"16Uiu2HAmTAE5Vxf7Pgfk7eWpmCvVJdSba4C9xg4xkYuuvnVbgfFx", // Custodies 10, 29, 36 & 108
}
headSlotByPeer := map[string]primitives.Slot{
"16Uiu2HAm3k5Npu6EaYWxiEvzsdLseEkjVyoVhvbxWEuyqdBgBBbq": 89,
"16Uiu2HAmTwQPAwzTr6hTgBmKNecCfH6kP3Kbzxj36ZRyyQ46L6gf": 10,
"16Uiu2HAmMDB5uUePTpN7737m78ehePfWPtBL9qMGdH8kCygjzNA8": 12,
"16Uiu2HAmTAE5Vxf7Pgfk7eWpmCvVJdSba4C9xg4xkYuuvnVbgfFx": 9,
}
p2p := testp2p.NewTestP2P(t)
peers := p2p.Peers()
peerIDs := make([]peer.ID, 0, len(peerIdStrs))
for _, peerIdStr := range peerIdStrs {
peerID, err := peer.Decode(peerIdStr)
require.NoError(t, err)
peers.SetChainState(peerID, &ethpb.StatusV2{
HeadSlot: headSlotByPeer[peerIdStr],
})
peerIDs = append(peerIDs, peerID)
}
slotByBlockRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
[fieldparams.RootLength]byte{1}: 8,
[fieldparams.RootLength]byte{2}: 10,
[fieldparams.RootLength]byte{3}: 9,
[fieldparams.RootLength]byte{4}: 50,
}
indicesByBlockRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
[fieldparams.RootLength]byte{1}: {3: true, 4: true, 5: true},
[fieldparams.RootLength]byte{2}: {1: true, 10: true, 37: true, 80: true},
[fieldparams.RootLength]byte{3}: {10: true, 38: true, 39: true, 40: true},
[fieldparams.RootLength]byte{4}: {89: true, 108: true, 122: true},
}
expected := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
peerIDs[0]: {
[fieldparams.RootLength]byte{4}: {89: true, 122: true},
},
peerIDs[1]: {
[fieldparams.RootLength]byte{2}: {1: true, 37: true},
},
peerIDs[2]: {
[fieldparams.RootLength]byte{2}: {37: true},
[fieldparams.RootLength]byte{3}: {38: true},
},
peerIDs[3]: {
[fieldparams.RootLength]byte{3}: {10: true},
},
}
peerIDsMap := make(map[peer.ID]bool, len(peerIDs))
for _, id := range peerIDs {
peerIDsMap[id] = true
}
actual, err := computeIndicesByRootByPeer(p2p, slotByBlockRoot, indicesByBlockRoot, peerIDsMap)
require.NoError(t, err)
require.Equal(t, len(expected), len(actual))
for peer, indicesByRoot := range expected {
require.Equal(t, len(indicesByRoot), len(actual[peer]))
for root, indices := range indicesByRoot {
require.Equal(t, len(indices), len(actual[peer][root]))
for index := range indices {
require.Equal(t, actual[peer][root][index], true)
}
}
}
}
func TestRandomPeer(t *testing.T) {
randomSource := rand.NewGenerator()
t.Run("no peers", func(t *testing.T) {
pid, err := randomPeer(t.Context(), randomSource, leakybucket.NewCollector(4, 8, time.Second, false /* deleteEmptyBuckets */), 1, nil)
require.NotNil(t, err)
require.Equal(t, peer.ID(""), pid)
})
t.Run("context cancelled", func(t *testing.T) {
ctx, cancel := context.WithCancel(t.Context())
cancel()
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{peer.ID("peer1"): {}}
pid, err := randomPeer(ctx, randomSource, leakybucket.NewCollector(4, 8, time.Second, false /* deleteEmptyBuckets */), 1, indicesByRootByPeer)
require.NotNil(t, err)
require.Equal(t, peer.ID(""), pid)
})
t.Run("nominal", func(t *testing.T) {
const count = 1
collector := leakybucket.NewCollector(4, 8, time.Second, false /* deleteEmptyBuckets */)
peer1, peer2, peer3 := peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3")
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
peer1: {},
peer2: {},
peer3: {},
}
pid, err := randomPeer(t.Context(), randomSource, collector, count, indicesByRootByPeer)
require.NoError(t, err)
require.Equal(t, true, map[peer.ID]bool{peer1: true, peer2: true, peer3: true}[pid])
})
}
func TestCopyIndicesByRootByPeer(t *testing.T) {
original := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
peer.ID("peer1"): {
[fieldparams.RootLength]byte{1}: {1: true, 3: true},
[fieldparams.RootLength]byte{2}: {2: true},
},
peer.ID("peer2"): {
[fieldparams.RootLength]byte{1}: {1: true},
},
}
copied := copyIndicesByRootByPeer(original)
require.Equal(t, len(original), len(copied))
for peer, indicesByRoot := range original {
require.Equal(t, len(indicesByRoot), len(copied[peer]))
for root, indices := range indicesByRoot {
require.Equal(t, len(indices), len(copied[peer][root]))
for index := range indices {
require.Equal(t, copied[peer][root][index], true)
}
}
}
}
func TestCompareIndices(t *testing.T) {
left := map[uint64]bool{3: true, 5: true, 7: true}
right := map[uint64]bool{5: true}
require.Equal(t, false, compareIndices(left, right))
left = map[uint64]bool{3: true, 5: true, 7: true}
right = map[uint64]bool{3: true, 6: true, 7: true}
require.Equal(t, false, compareIndices(left, right))
left = map[uint64]bool{3: true, 5: true, 7: true}
right = map[uint64]bool{5: true, 7: true, 3: true}
require.Equal(t, true, compareIndices(left, right))
}
func TestSortedSliceFromMap(t *testing.T) {
input := map[uint64]bool{54: true, 23: true, 35: true}
expected := []uint64{23, 35, 54}
actual := sortedSliceFromMap(input)
require.DeepEqual(t, expected, actual)
}
func TestComputeSlotByBlockRoot(t *testing.T) {
const (
count = 3
multiplier = 10
)
roBlocks := make([]blocks.ROBlock, 0, count)
for i := range count {
signedBlock := util.NewBeaconBlock()
signedBlock.Block.Slot = primitives.Slot(i).Mul(multiplier)
roSignedBlock, err := blocks.NewSignedBeaconBlock(signedBlock)
require.NoError(t, err)
roBlock, err := blocks.NewROBlockWithRoot(roSignedBlock, [fieldparams.RootLength]byte{byte(i)})
require.NoError(t, err)
roBlocks = append(roBlocks, roBlock)
}
expected := map[[fieldparams.RootLength]byte]primitives.Slot{
[fieldparams.RootLength]byte{0}: primitives.Slot(0),
[fieldparams.RootLength]byte{1}: primitives.Slot(10),
[fieldparams.RootLength]byte{2}: primitives.Slot(20),
}
actual := computeSlotByBlockRoot(roBlocks)
require.Equal(t, len(expected), len(actual))
for k, v := range expected {
require.Equal(t, v, actual[k])
}
}
func TestComputeTotalCount(t *testing.T) {
input := map[[fieldparams.RootLength]byte]map[uint64]bool{
[fieldparams.RootLength]byte{1}: {1: true, 3: true},
[fieldparams.RootLength]byte{2}: {2: true},
}
const expected = 3
actual := computeTotalCount(input)
require.Equal(t, expected, actual)
}

View File

@@ -127,7 +127,7 @@ func (s *Service) scheduleMissingDataColumnSidecarsBroadcast(
})
// Get the time corresponding to the start of the slot.
genesisTime := s.cfg.chain.GenesisTime()
genesisTime := s.cfg.clock.GenesisTime()
slotStartTime, err := slots.StartTime(genesisTime, slot)
if err != nil {
return errors.Wrap(err, "failed to calculate slot start time")

View File

@@ -1,12 +1,10 @@
package sync
import (
"fmt"
"reflect"
"strings"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
@@ -19,7 +17,6 @@ import (
)
var errNilPubsubMessage = errors.New("nil pubsub message")
var errInvalidTopic = errors.New("invalid topic format")
func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, error) {
if msg == nil || msg.Topic == nil || *msg.Topic == "" {
@@ -75,7 +72,7 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
func (*Service) replaceForkDigest(topic string) (string, error) {
subStrings := strings.Split(topic, "/")
if len(subStrings) != 4 {
return "", errInvalidTopic
return "", p2p.ErrInvalidTopic
}
subStrings[2] = "%x"
return strings.Join(subStrings, "/"), nil
@@ -105,29 +102,21 @@ func extractDataTypeFromTypeMap[T any](typeMap map[[4]byte]func() (T, error), di
if len(digest) == 0 {
f, ok := typeMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
if !ok {
return zero, fmt.Errorf("no %T type exists for the genesis fork version", zero)
return zero, errors.Wrapf(errInvalidDigest, "no %T type exists for the genesis fork version", zero)
}
return f()
}
if len(digest) != forkDigestLength {
return zero, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
return zero, errors.Wrapf(errInvalidDigest, "invalid digest returned, wanted a length of %d but received %d", forkDigestLength, len(digest))
}
vRoot := tor.GenesisValidatorsRoot()
for k, f := range typeMap {
rDigest, err := signing.ComputeForkDigest(k[:], vRoot[:])
if err != nil {
return zero, err
}
if rDigest == bytesutil.ToBytes4(digest) {
return f()
}
forkVersion, _, err := params.ForkDataFromDigest([4]byte(digest))
if err != nil {
return zero, errors.Wrapf(ErrNoValidDigest, "could not extract %T data type, saw digest=%#x", zero, digest)
}
return zero, errors.Wrapf(
ErrNoValidDigest,
"could not extract %T data type, saw digest=%#x, genesis=%v, vr=%#x",
zero,
digest,
tor.GenesisTime(),
tor.GenesisValidatorsRoot(),
)
f, ok := typeMap[forkVersion]
if ok {
return f()
}
return zero, errors.Wrapf(ErrNoValidDigest, "could not extract %T data type, saw digest=%#x", zero, digest)
}

View File

@@ -4,13 +4,11 @@ import (
"bytes"
"fmt"
"reflect"
"strings"
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
p2ptesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
@@ -30,8 +28,9 @@ import (
)
func TestService_decodePubsubMessage(t *testing.T) {
digest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, make([]byte, 32))
require.NoError(t, err)
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
entry := params.GetNetworkScheduleEntry(params.BeaconConfig().GenesisEpoch)
tests := []struct {
name string
topic string
@@ -56,7 +55,7 @@ func TestService_decodePubsubMessage(t *testing.T) {
{
name: "invalid topic format",
topic: "foo",
wantErr: errInvalidTopic,
wantErr: p2p.ErrInvalidTopic,
},
{
name: "topic not mapped to any message type",
@@ -65,7 +64,7 @@ func TestService_decodePubsubMessage(t *testing.T) {
},
{
name: "valid message -- beacon block",
topic: fmt.Sprintf(p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlock{})], digest),
topic: fmt.Sprintf(p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlock{})], entry.ForkDigest),
input: &pubsub.Message{
Message: &pb.Message{
Data: func() []byte {
@@ -102,10 +101,11 @@ func TestService_decodePubsubMessage(t *testing.T) {
tt.input.Message.Topic = &topic
}
got, err := s.decodePubsubMessage(tt.input)
if err != nil && err != tt.wantErr && !strings.Contains(err.Error(), tt.wantErr.Error()) {
t.Errorf("decodePubsubMessage() error = %v, wantErr %v", err, tt.wantErr)
if tt.wantErr != nil {
require.ErrorIs(t, err, tt.wantErr, "decodePubsubMessage() error mismatch")
return
}
require.NoError(t, err, "decodePubsubMessage() unexpected error")
if !reflect.DeepEqual(got, tt.want) {
diff, _ := messagediff.PrettyDiff(got, tt.want)
t.Log(diff)
@@ -116,24 +116,11 @@ func TestService_decodePubsubMessage(t *testing.T) {
}
func TestExtractDataType(t *testing.T) {
// Precompute digests
genDigest, err := signing.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
altairDigest, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
bellatrixDigest, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
capellaDigest, err := signing.ComputeForkDigest(params.BeaconConfig().CapellaForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
denebDigest, err := signing.ComputeForkDigest(params.BeaconConfig().DenebForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
electraDigest, err := signing.ComputeForkDigest(params.BeaconConfig().ElectraForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
fuluDigest, err := signing.ComputeForkDigest(params.BeaconConfig().FuluForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
type args struct {
digest []byte
digest [4]byte
chain blockchain.ChainInfoFetcher
}
tests := []struct {
@@ -146,40 +133,10 @@ func TestExtractDataType(t *testing.T) {
wantAttSlashing ethpb.AttSlashing
wantErr bool
}{
{
name: "no digest",
args: args{
digest: []byte{},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
return wsb
}(),
wantMd: wrapper.WrappedMetadataV0(&ethpb.MetaDataV0{}),
wantAtt: &ethpb.Attestation{},
wantAggregate: &ethpb.SignedAggregateAttestationAndProof{},
wantAttSlashing: &ethpb.AttesterSlashing{},
wantErr: false,
},
{
name: "invalid digest",
args: args{
digest: []byte{0x00, 0x01},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: nil,
wantMd: nil,
wantAtt: nil,
wantAggregate: nil,
wantAttSlashing: nil,
wantErr: true,
},
{
name: "non existent digest",
args: args{
digest: []byte{0x00, 0x01, 0x02, 0x03},
digest: [4]byte{},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: nil,
@@ -192,7 +149,7 @@ func TestExtractDataType(t *testing.T) {
{
name: "genesis fork version",
args: args{
digest: genDigest[:],
digest: params.ForkDigest(params.BeaconConfig().GenesisEpoch),
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -208,7 +165,7 @@ func TestExtractDataType(t *testing.T) {
{
name: "altair fork version",
args: args{
digest: altairDigest[:],
digest: params.ForkDigest(params.BeaconConfig().AltairForkEpoch),
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -225,7 +182,7 @@ func TestExtractDataType(t *testing.T) {
{
name: "bellatrix fork version",
args: args{
digest: bellatrixDigest[:],
digest: params.ForkDigest(params.BeaconConfig().BellatrixForkEpoch),
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -242,7 +199,7 @@ func TestExtractDataType(t *testing.T) {
{
name: "capella fork version",
args: args{
digest: capellaDigest[:],
digest: params.ForkDigest(params.BeaconConfig().CapellaForkEpoch),
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -259,7 +216,7 @@ func TestExtractDataType(t *testing.T) {
{
name: "deneb fork version",
args: args{
digest: denebDigest[:],
digest: params.ForkDigest(params.BeaconConfig().DenebForkEpoch),
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -276,7 +233,7 @@ func TestExtractDataType(t *testing.T) {
{
name: "electra fork version",
args: args{
digest: electraDigest[:],
digest: params.ForkDigest(params.BeaconConfig().ElectraForkEpoch),
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -293,7 +250,7 @@ func TestExtractDataType(t *testing.T) {
{
name: "fulu fork version",
args: args{
digest: fuluDigest[:],
digest: params.ForkDigest(params.BeaconConfig().FuluForkEpoch),
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
wantBlock: func() interfaces.ReadOnlySignedBeaconBlock {
@@ -310,7 +267,7 @@ func TestExtractDataType(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotBlock, err := extractDataTypeFromTypeMap(types.BlockMap, tt.args.digest, tt.args.chain)
gotBlock, err := extractDataTypeFromTypeMap(types.BlockMap, tt.args.digest[:], tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("block: error = %v, wantErr %v", err, tt.wantErr)
return
@@ -318,7 +275,7 @@ func TestExtractDataType(t *testing.T) {
if !reflect.DeepEqual(gotBlock, tt.wantBlock) {
t.Errorf("block: got = %v, want %v", gotBlock, tt.wantBlock)
}
gotAtt, err := extractDataTypeFromTypeMap(types.AttestationMap, tt.args.digest, tt.args.chain)
gotAtt, err := extractDataTypeFromTypeMap(types.AttestationMap, tt.args.digest[:], tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("attestation: error = %v, wantErr %v", err, tt.wantErr)
return
@@ -326,7 +283,7 @@ func TestExtractDataType(t *testing.T) {
if !reflect.DeepEqual(gotAtt, tt.wantAtt) {
t.Errorf("attestation: got = %v, want %v", gotAtt, tt.wantAtt)
}
gotAggregate, err := extractDataTypeFromTypeMap(types.AggregateAttestationMap, tt.args.digest, tt.args.chain)
gotAggregate, err := extractDataTypeFromTypeMap(types.AggregateAttestationMap, tt.args.digest[:], tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("aggregate: error = %v, wantErr %v", err, tt.wantErr)
return
@@ -334,7 +291,7 @@ func TestExtractDataType(t *testing.T) {
if !reflect.DeepEqual(gotAggregate, tt.wantAggregate) {
t.Errorf("aggregate: got = %v, want %v", gotAggregate, tt.wantAggregate)
}
gotAttSlashing, err := extractDataTypeFromTypeMap(types.AttesterSlashingMap, tt.args.digest, tt.args.chain)
gotAttSlashing, err := extractDataTypeFromTypeMap(types.AttesterSlashingMap, tt.args.digest[:], tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("attester slashing: error = %v, wantErr %v", err, tt.wantErr)
return
@@ -345,3 +302,11 @@ func TestExtractDataType(t *testing.T) {
})
}
}
func TestExtractDataTypeFromTypeMapInvalid(t *testing.T) {
chain := &mock.ChainService{ValidatorsRoot: [32]byte{}}
_, err := extractDataTypeFromTypeMap(types.BlockMap, []byte{0x00, 0x01}, chain)
require.ErrorIs(t, err, errInvalidDigest)
_, err = extractDataTypeFromTypeMap(types.AttestationMap, []byte{0x00, 0x01}, chain)
require.ErrorIs(t, err, errInvalidDigest)
}
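
A minimal, self-contained sketch of the digest-keyed lookup pattern these tests exercise, assuming nothing beyond the standard library: a 4-byte fork digest indexes a map of constructors, and a slice of the wrong length yields a sentinel error, as in the invalid-digest cases above. All identifiers below (typeMap, extractFromTypeMap, errBadDigestLen) are illustrative stand-ins, not Prysm APIs.

// digestlookup_sketch.go — illustrative only; mirrors the shape of
// extractDataTypeFromTypeMap without depending on Prysm packages.
package main

import (
	"errors"
	"fmt"
)

var errBadDigestLen = errors.New("invalid fork digest: expected 4 bytes")

// typeMap maps a 4-byte fork digest to a constructor for the matching type.
type typeMap map[[4]byte]func() string

// extractFromTypeMap converts the raw digest slice into the fixed-size map key
// and returns whatever the registered constructor produces.
func extractFromTypeMap(m typeMap, digest []byte) (string, error) {
	if len(digest) != 4 {
		return "", errBadDigestLen
	}
	var key [4]byte
	copy(key[:], digest)
	ctor, ok := m[key]
	if !ok {
		return "", fmt.Errorf("no type registered for digest %#x", key)
	}
	return ctor(), nil
}

func main() {
	genesisDigest := [4]byte{} // stand-in for params.ForkDigest(GenesisEpoch)
	m := typeMap{genesisDigest: func() string { return "phase0 block" }}

	got, err := extractFromTypeMap(m, genesisDigest[:])
	fmt.Println(got, err) // phase0 block <nil>

	_, err = extractFromTypeMap(m, []byte{0x00, 0x01}) // too short, like the invalid test case
	fmt.Println(errors.Is(err, errBadDigestLen))       // true
}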

View File

@@ -4,7 +4,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/pkg/errors"
@@ -42,67 +41,46 @@ func (s *Service) forkWatcher() {
// registerForUpcomingFork registers the appropriate gossip and RPC topics if there is a fork in the next epoch.
func (s *Service) registerForUpcomingFork(currentEpoch primitives.Epoch) error {
// Get the genesis validators root.
genesisValidatorsRoot := s.cfg.clock.GenesisValidatorsRoot()
nextEntry := params.GetNetworkScheduleEntry(currentEpoch + 1)
// Check if there is a fork in the next epoch.
isForkNextEpoch, err := forks.IsForkNextEpoch(s.cfg.clock.GenesisTime(), genesisValidatorsRoot[:])
if err != nil {
return errors.Wrap(err, "Could not retrieve next fork epoch")
}
// Exit early if there is no fork in the next epoch.
if !isForkNextEpoch {
if nextEntry.ForkDigest == s.registeredNetworkEntry.ForkDigest {
return nil
}
beforeForkEpoch := currentEpoch
forkEpoch := beforeForkEpoch + 1
// Get the fork afterForkDigest for the next epoch.
afterForkDigest, err := forks.ForkDigestFromEpoch(forkEpoch, genesisValidatorsRoot[:])
if err != nil {
return errors.Wrap(err, "could not retrieve fork digest")
}
// Exit early if the topics for the next epoch are already registered.
// This is likely to be the case for all slots of the epoch except the first one.
if s.subHandler.digestExists(afterForkDigest) {
if s.subHandler.digestExists(nextEntry.ForkDigest) {
return nil
}
// Register the subscribers (gossipsub) for the next epoch.
s.registerSubscribers(forkEpoch, afterForkDigest)
s.registerSubscribers(nextEntry.Epoch, nextEntry.ForkDigest)
// Get the handlers for the current and next fork.
beforeForkHandlerByTopic, err := s.rpcHandlerByTopicFromEpoch(beforeForkEpoch)
currentHandler, err := s.rpcHandlerByTopicFromEpoch(currentEpoch)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from before fork epoch")
}
forkHandlerByTopic, err := s.rpcHandlerByTopicFromEpoch(forkEpoch)
nextHandler, err := s.rpcHandlerByTopicFromEpoch(nextEntry.Epoch)
if err != nil {
return errors.Wrap(err, "RPC handler by topic from fork epoch")
}
// Compute newly added topics.
newRPCHandlerByTopic := addedRPCHandlerByTopic(beforeForkHandlerByTopic, forkHandlerByTopic)
newHandlersByTopic := addedRPCHandlerByTopic(currentHandler, nextHandler)
// Register the new RPC handlers.
for topic, handler := range newRPCHandlerByTopic {
for topic, handler := range newHandlersByTopic {
s.registerRPC(topic, handler)
}
s.registeredNetworkEntry = nextEntry
return nil
}
// deregisterFromPastFork deregisters the appropriate gossip and RPC topics if there is a fork in the current epoch.
func (s *Service) deregisterFromPastFork(currentEpoch primitives.Epoch) error {
// Extract the genesis validators root.
genesisValidatorsRoot := s.cfg.clock.GenesisValidatorsRoot()
// Get the fork.
currentFork, err := forks.Fork(currentEpoch)
currentFork, err := params.Fork(currentEpoch)
if err != nil {
return errors.Wrap(err, "genesis validators root")
}
@@ -123,10 +101,7 @@ func (s *Service) deregisterFromPastFork(currentEpoch primitives.Epoch) error {
// Look at the previous fork's digest.
beforeForkEpoch := currentFork.Epoch - 1
beforeForkDigest, err := forks.ForkDigestFromEpoch(beforeForkEpoch, genesisValidatorsRoot[:])
if err != nil {
return errors.Wrap(err, "fork digest from epoch")
}
beforeForkDigest := params.ForkDigest(beforeForkEpoch)
// Exit early if there are no topics with that particular digest.
if !s.subHandler.digestExists(beforeForkDigest) {

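The refactor above replaces the genesis-root plumbing with a schedule lookup: registerForUpcomingFork now fetches the next epoch's entry, exits early when its fork digest matches the one already registered (or already has subscriptions), and otherwise registers gossip subscribers plus only the newly added RPC handlers. Below is a minimal sketch of that early-exit structure, using hypothetical stand-ins for the schedule entry and lookup rather than the actual params.GetNetworkScheduleEntry API.

// forkwatch_sketch.go — illustrative only; mirrors the control flow of
// registerForUpcomingFork with toy types instead of Prysm's.
package main

import "fmt"

type epoch uint64

type scheduleEntry struct {
	Epoch      epoch
	ForkDigest [4]byte
}

// nextEntryFor is a hypothetical schedule lookup keyed by epoch.
func nextEntryFor(e epoch) scheduleEntry {
	if e >= 5 {
		return scheduleEntry{Epoch: 5, ForkDigest: [4]byte{0xde, 0xad, 0xbe, 0xef}}
	}
	return scheduleEntry{Epoch: 0, ForkDigest: [4]byte{}}
}

// maybeRegister mirrors the early exits: do nothing unless the next epoch
// belongs to a different fork digest, and skip re-registration if that digest
// was already handled earlier in the epoch.
func maybeRegister(current epoch, registered scheduleEntry, seen map[[4]byte]bool) (scheduleEntry, bool) {
	next := nextEntryFor(current + 1)
	if next.ForkDigest == registered.ForkDigest {
		return registered, false // no fork boundary ahead
	}
	if seen[next.ForkDigest] {
		return registered, false // topics for this digest already registered
	}
	seen[next.ForkDigest] = true // stand-in for registerSubscribers + registerRPC
	return next, true
}

func main() {
	seen := map[[4]byte]bool{}
	reg := scheduleEntry{}
	reg2, changed := maybeRegister(4, reg, seen) // epoch 5 forks in this sketch
	fmt.Println(changed, reg2.Epoch)             // true 5
	_, changed = maybeRegister(4, reg, seen)     // same boundary, digest already seen
	fmt.Println(changed)                         // false
}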
View File

@@ -14,7 +14,6 @@ import (
mockSync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync/testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/assert"
)
@@ -91,9 +90,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(5)
assert.Equal(t, true, s.subHandler.digestExists(digest))
rpcMap := make(map[string]bool)
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
@@ -135,9 +132,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(5)
assert.Equal(t, true, s.subHandler.digestExists(digest))
rpcMap := make(map[string]bool)
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
@@ -177,9 +172,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(5)
assert.Equal(t, true, s.subHandler.digestExists(digest))
rpcMap := make(map[string]bool)
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
@@ -221,9 +214,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(5)
assert.Equal(t, true, s.subHandler.digestExists(digest))
rpcMap := make(map[string]bool)
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
@@ -266,9 +257,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(5, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(5)
assert.Equal(t, true, s.subHandler.digestExists(digest))
rpcMap := make(map[string]bool)
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
@@ -387,14 +376,12 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
r.registerRPC(topic, handler)
}
genRoot := r.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(0, genRoot[:])
digest := params.ForkDigest(0)
assert.NoError(t, err)
r.registerSubscribers(0, digest)
assert.Equal(t, true, r.subHandler.digestExists(digest))
digest, err = forks.ForkDigestFromEpoch(3, genRoot[:])
assert.NoError(t, err)
digest = params.ForkDigest(3)
r.registerSubscribers(3, digest)
assert.Equal(t, true, r.subHandler.digestExists(digest))
@@ -403,12 +390,9 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(0, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(0)
assert.Equal(t, false, s.subHandler.digestExists(digest))
digest, err = forks.ForkDigestFromEpoch(3, genRoot[:])
assert.NoError(t, err)
digest = params.ForkDigest(3)
assert.Equal(t, true, s.subHandler.digestExists(digest))
ptcls := s.cfg.p2p.Host().Mux().Protocols()
@@ -455,14 +439,11 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}
genRoot := r.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(1, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(1)
r.registerSubscribers(1, digest)
assert.Equal(t, true, r.subHandler.digestExists(digest))
digest, err = forks.ForkDigestFromEpoch(3, genRoot[:])
assert.NoError(t, err)
digest = params.ForkDigest(3)
r.registerSubscribers(3, digest)
assert.Equal(t, true, r.subHandler.digestExists(digest))
@@ -471,12 +452,9 @@ func TestService_CheckForPreviousEpochFork(t *testing.T) {
currEpoch: 4,
wantErr: false,
postSvcCheck: func(t *testing.T, s *Service) {
genRoot := s.cfg.clock.GenesisValidatorsRoot()
digest, err := forks.ForkDigestFromEpoch(1, genRoot[:])
assert.NoError(t, err)
digest := params.ForkDigest(1)
assert.Equal(t, false, s.subHandler.digestExists(digest))
digest, err = forks.ForkDigestFromEpoch(3, genRoot[:])
assert.NoError(t, err)
digest = params.ForkDigest(3)
assert.Equal(t, true, s.subHandler.digestExists(digest))
},
},

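The checks above collect the host's registered protocols into a set and assert that only the topics introduced by the fork appear after the boundary. A small sketch of the map-difference idea behind that expectation follows; the handler type, function name, and topic strings are illustrative, not the actual addedRPCHandlerByTopic signature.

// handlerdiff_sketch.go — illustrative only; computes the topic→handler pairs
// present for the next epoch but absent for the current one.
package main

import (
	"fmt"
	"sort"
)

type rpcHandler func() // stand-in for a real stream handler

// addedHandlers returns exactly the handlers that must be newly registered
// when crossing a fork boundary.
func addedHandlers(current, next map[string]rpcHandler) map[string]rpcHandler {
	added := make(map[string]rpcHandler)
	for topic, handler := range next {
		if _, ok := current[topic]; !ok {
			added[topic] = handler
		}
	}
	return added
}

func main() {
	noop := func() {}
	current := map[string]rpcHandler{"/example/req/status/1": noop}
	next := map[string]rpcHandler{
		"/example/req/status/1":       noop,
		"/example/req/new_sidecars/1": noop, // hypothetical topic added by the fork
	}

	var topics []string
	for topic := range addedHandlers(current, next) {
		topics = append(topics, topic)
	}
	sort.Strings(topics)
	fmt.Println(topics) // only the newly introduced topic needs registration
}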
View File

@@ -1,21 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"api.go",
"file.go",
"log.go",
],
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/genesis",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//api/client/beacon:go_default_library",
"//beacon-chain/db:go_default_library",
"//crypto/hash:go_default_library",
"//io/file:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

Some files were not shown because too many files have changed in this diff.