Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 22:07:59 -05:00)

Compare commits: fusaka-dev...hashtree-i (5 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 3b00cacef4 | |
| | 6f89cb5330 | |
| | e315f78b97 | |
| | 536df4b682 | |
| | 1b98d8c202 | |
BUILD.bazel (31 changed lines)

@@ -1,8 +1,9 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@bazel_gazelle//:def.bzl", "gazelle")
load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
-load("@io_bazel_rules_go//go:def.bzl", "nogo")
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "nogo")
load("@bazel_skylib//rules:common_settings.bzl", "string_setting")
load("@prysm//tools/nogo_config:def.bzl", "nogo_config_exclude")
@@ -282,3 +283,31 @@ sh_binary(
    srcs = ["prysm.sh"],
    visibility = ["//visibility:public"],
)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "compare_states.go",
+        "reproduce_bug.go",
+        "verify_fix.go",
+    ],
+    importpath = "github.com/OffchainLabs/prysm/v6",
+    visibility = ["//visibility:private"],
+    deps = [
+        "//beacon-chain/core/helpers:go_default_library",
+        "//beacon-chain/core/transition:go_default_library",
+        "//beacon-chain/state:go_default_library",
+        "//beacon-chain/state/state-native:go_default_library",
+        "//consensus-types/blocks:go_default_library",
+        "//proto/prysm/v1alpha1:go_default_library",
+        "//time/slots:go_default_library",
+        "@com_github_pkg_errors//:go_default_library",
+        "@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
+    ],
+)
+
+go_binary(
+    name = "v6",
+    embed = [":go_default_library"],
+    visibility = ["//visibility:public"],
+)
@@ -415,6 +415,10 @@ load("@prysm//third_party/herumi:herumi.bzl", "bls_dependencies")

bls_dependencies()

+load("@prysm//third_party/hashtree:hashtree.bzl", "hashtree_dependencies")
+
+hashtree_dependencies()
+
load("@prysm//testing/endtoend:deps.bzl", "e2e_deps")

e2e_deps()
@@ -16,6 +16,7 @@ go_library(
        "//api/server/structs:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
+       "//network/forks:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
@@ -9,6 +9,7 @@ import (
	"net/url"
	"path"
	"regexp"
	"sort"
	"strconv"

	"github.com/OffchainLabs/prysm/v6/api/client"
@@ -16,6 +17,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/api/server/structs"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
+	"github.com/OffchainLabs/prysm/v6/network/forks"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
@@ -135,6 +137,24 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
	return fr.ToConsensus()
}

+// GetForkSchedule retrieves all forks, past, present, and future, of which this node is aware.
+func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
+	body, err := c.Get(ctx, getForkSchedulePath)
+	if err != nil {
+		return nil, errors.Wrap(err, "error requesting fork schedule")
+	}
+	fsr := &forkScheduleResponse{}
+	err = json.Unmarshal(body, fsr)
+	if err != nil {
+		return nil, err
+	}
+	ofs, err := fsr.OrderedForkSchedule()
+	if err != nil {
+		return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
+	}
+	return ofs, nil
+}
+
// GetConfigSpec retrieves the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*structs.GetSpecResponse, error) {
	body, err := c.Get(ctx, getConfigSpecPath)
@@ -314,3 +334,31 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToEx
	}
	return poolResponse, nil
}
+
+type forkScheduleResponse struct {
+	Data []structs.Fork
+}
+
+func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
+	ofs := make(forks.OrderedSchedule, 0)
+	for _, d := range fsr.Data {
+		epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
+		}
+		vSlice, err := hexutil.Decode(d.CurrentVersion)
+		if err != nil {
+			return nil, err
+		}
+		if len(vSlice) != 4 {
+			return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
+		}
+		version := bytesutil.ToBytes4(vSlice)
+		ofs = append(ofs, forks.ForkScheduleEntry{
+			Version: version,
+			Epoch:   primitives.Epoch(epoch),
+		})
+	}
+	sort.Sort(ofs)
+	return ofs, nil
+}
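The added client method and response type above work together: `GetForkSchedule` fetches the raw JSON, and `OrderedForkSchedule` parses and sorts it. A minimal usage sketch follows; it assumes a reachable beacon node REST endpoint and that `beacon.NewClient` is the constructor this package exposes (an assumption, adapt to the actual API).

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/OffchainLabs/prysm/v6/api/client/beacon"
)

func main() {
	// Hypothetical wiring: construct a client against a local beacon node.
	c, err := beacon.NewClient("http://localhost:3500")
	if err != nil {
		log.Fatal(err)
	}
	schedule, err := c.GetForkSchedule(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// OrderedSchedule is sorted by epoch, so entries run oldest to newest.
	for _, entry := range schedule {
		fmt.Printf("version=%#x epoch=%d\n", entry.Version, entry.Epoch)
	}
}
```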
@@ -36,7 +36,6 @@ go_library(
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/validator:go_default_library",
        "//container/slice:go_default_library",
-       "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//proto/engine/v1:go_default_library",
@@ -10,7 +10,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/consensus-types/validator"
	"github.com/OffchainLabs/prysm/v6/container/slice"
-	"github.com/OffchainLabs/prysm/v6/crypto/bls"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/math"
	enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
@@ -700,11 +699,6 @@ func (m *SyncCommitteeMessage) ToConsensus() (*eth.SyncCommitteeMessage, error)
	if err != nil {
		return nil, server.NewDecodeError(err, "Signature")
	}
-	// Add validation to check if the signature is valid BLS format
-	_, err = bls.SignatureFromBytes(sig)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Signature")
-	}

	return &eth.SyncCommitteeMessage{
		Slot: primitives.Slot(slot),
@@ -73,7 +73,6 @@ go_library(
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
@@ -182,7 +181,6 @@ go_test(
        "//container/trie:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
-       "//genesis:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -196,7 +194,6 @@ go_test(
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
-       "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
@@ -14,7 +14,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
-	"github.com/OffchainLabs/prysm/v6/genesis"
	enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -625,7 +624,6 @@ func Test_hashForGenesisRoot(t *testing.T) {
	ctx := t.Context()
	c := setupBeaconChain(t, beaconDB)
	st, _ := util.DeterministicGenesisStateElectra(t, 10)
-	genesis.StoreDuringTest(t, genesis.GenesisData{State: st})
	require.NoError(t, c.cfg.BeaconDB.SaveGenesisData(ctx, st))
	root, err := beaconDB.GenesisBlockRoot(ctx)
	require.NoError(t, err)
@@ -174,7 +174,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
			"payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
		}).Info("Forkchoice updated with payload attributes for proposal")
		s.cfg.PayloadIDCache.Set(nextSlot, arg.headRoot, pId)
-		go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), arg.headBlock, arg.headRoot, nextSlot)
	} else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
		log.WithFields(logrus.Fields{
			"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
@@ -19,7 +19,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
-	"github.com/OffchainLabs/prysm/v6/genesis"
	v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -310,7 +309,6 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
		block: wba,
	}

-	genesis.StoreStateDuringTest(t, st)
	require.NoError(t, beaconDB.SaveState(ctx, st, bra))
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
	a := &fcuConfig{
@@ -405,7 +403,6 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
	require.NoError(t, err)

	bState, _ := util.DeterministicGenesisState(t, 10)
-	genesis.StoreStateDuringTest(t, bState)
	require.NoError(t, beaconDB.SaveState(ctx, bState, bra))
	require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
	state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)
@@ -102,6 +102,8 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
		log.WithError(err).Error("Could not save head")
	}

+	go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), args.headBlock, args.headRoot, s.CurrentSlot()+1)
+
	// Only need to prune attestations from pool if the head has changed.
	s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
	return nil
@@ -7,6 +7,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
@@ -234,6 +235,14 @@ func WithSyncChecker(checker Checker) Option {
	}
}

+// WithCustodyInfo sets the custody info for the blockchain service.
+func WithCustodyInfo(custodyInfo *peerdas.CustodyInfo) Option {
+	return func(s *Service) error {
+		s.cfg.CustodyInfo = custodyInfo
+		return nil
+	}
+}
+
// WithSlasherEnabled sets whether the slasher is enabled or not.
func WithSlasherEnabled(enabled bool) Option {
	return func(s *Service) error {
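The new option composes with the service's existing functional-options constructor. A minimal sketch of the call shape, mirroring how the tests later in this diff pass `WithCustodyInfo(&peerdas.CustodyInfo{})` (a real node supplies many more options, such as the database, p2p, and notifiers, which are omitted here):

```go
package main

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
)

// newChainService shows only the shape of the constructor call with the
// new option; it is a sketch, not a complete node setup.
func newChainService(ctx context.Context) (*blockchain.Service, error) {
	return blockchain.NewService(ctx, blockchain.WithCustodyInfo(&peerdas.CustodyInfo{}))
}
```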
@@ -666,9 +666,7 @@ func (s *Service) areDataColumnsAvailable(
	root [fieldparams.RootLength]byte,
	block interfaces.ReadOnlyBeaconBlock,
) error {
-	samplesPerSlot := params.BeaconConfig().SamplesPerSlot
-
-	// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
+	// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS.
	blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
	blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
	if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
@@ -694,15 +692,13 @@ func (s *Service) areDataColumnsAvailable(
	// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
	nodeID := s.cfg.P2P.NodeID()

+	// Prevent the custody group count from changing during the rest of the function.
+	s.cfg.CustodyInfo.Mut.RLock()
+	defer s.cfg.CustodyInfo.Mut.RUnlock()
+
	// Get the custody group sampling size for the node.
-	custodyGroupCount := s.cfg.P2P.CustodyGroupCount()
-
-	// Compute the sampling size.
-	// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#custody-sampling
-	samplingSize := max(samplesPerSlot, custodyGroupCount)
-
-	// Get the peer info for the node.
-	peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
+	custodyGroupSamplingSize := s.cfg.CustodyInfo.CustodyGroupSamplingSize(peerdas.Actual)
+	peerInfo, _, err := peerdas.Info(nodeID, custodyGroupSamplingSize)
	if err != nil {
		return errors.Wrap(err, "peer info")
	}
@@ -35,7 +35,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/crypto/bls"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
-	"github.com/OffchainLabs/prysm/v6/genesis"
	enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -1981,15 +1980,14 @@ func TestNoViableHead_Reboot(t *testing.T) {
	genesisState, keys := util.DeterministicGenesisState(t, 64)
	stateRoot, err := genesisState.HashTreeRoot(ctx)
	require.NoError(t, err, "Could not hash genesis state")
-	gb := blocks.NewGenesisBlock(stateRoot[:])
-	wsb, err := consensusblocks.NewSignedBeaconBlock(gb)
+	genesis := blocks.NewGenesisBlock(stateRoot[:])
+	wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
	require.NoError(t, err)
-	genesisRoot, err := gb.Block.HashTreeRoot()
+	genesisRoot, err := genesis.Block.HashTreeRoot()
	require.NoError(t, err, "Could not get signing root")
	require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
	require.NoError(t, service.saveGenesisData(ctx, genesisState))

-	genesis.StoreStateDuringTest(t, genesisState)
	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, genesisRoot), "Could not save genesis state")
	require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, genesisRoot), "Could not save genesis state")
	require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
@@ -2896,6 +2894,7 @@ func TestIsDataAvailable(t *testing.T) {
		}

		params := testIsAvailableParams{
+			options:                 []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
			columnsToSave:           indices,
			blobKzgCommitmentsCount: 3,
		}
@@ -2908,6 +2907,7 @@ func TestIsDataAvailable(t *testing.T) {

	t.Run("Fulu - no missing data columns", func(t *testing.T) {
		params := testIsAvailableParams{
+			options:                 []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
			columnsToSave:           []uint64{1, 17, 19, 42, 75, 87, 102, 117, 119}, // 119 is not needed
			blobKzgCommitmentsCount: 3,
		}
@@ -2922,7 +2922,7 @@ func TestIsDataAvailable(t *testing.T) {
		startWaiting := make(chan bool)

		testParams := testIsAvailableParams{
-			options:                 []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
+			options:                 []Option{WithCustodyInfo(&peerdas.CustodyInfo{}), WithStartWaitingDataColumnSidecars(startWaiting)},
			columnsToSave:           []uint64{1, 17, 19, 75, 102, 117, 119}, // 119 is not needed, 42 and 87 are missing
			blobKzgCommitmentsCount: 3,
@@ -2959,9 +2959,6 @@ func TestIsDataAvailable(t *testing.T) {
			require.NoError(t, err)
		}()

-		ctx, cancel := context.WithTimeout(ctx, time.Second*2)
-		defer cancel()
-
		err = service.isDataAvailable(ctx, root, signed)
		require.NoError(t, err)
	})
@@ -2974,6 +2971,10 @@ func TestIsDataAvailable(t *testing.T) {

		startWaiting := make(chan bool)

+		var custodyInfo peerdas.CustodyInfo
+		custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(cgc)
+		custodyInfo.ToAdvertiseGroupCount.Set(cgc)
+
		minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
		indices := make([]uint64, 0, minimumColumnsCountToReconstruct-missingColumns)
@@ -2982,14 +2983,12 @@ func TestIsDataAvailable(t *testing.T) {
		}

		testParams := testIsAvailableParams{
-			options:                 []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
+			options:                 []Option{WithCustodyInfo(&custodyInfo), WithStartWaitingDataColumnSidecars(startWaiting)},
			columnsToSave:           indices,
			blobKzgCommitmentsCount: 3,
		}

		ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
-		_, _, err := service.cfg.P2P.UpdateCustodyInfo(0, cgc)
-		require.NoError(t, err)
		block := signed.Block()
		slot := block.Slot()
		proposerIndex := block.ProposerIndex()
@@ -3021,9 +3020,6 @@ func TestIsDataAvailable(t *testing.T) {
			require.NoError(t, err)
		}()

-		ctx, cancel := context.WithTimeout(ctx, time.Second*2)
-		defer cancel()
-
		err = service.isDataAvailable(ctx, root, signed)
		require.NoError(t, err)
	})
@@ -3032,7 +3028,7 @@ func TestIsDataAvailable(t *testing.T) {
		startWaiting := make(chan bool)

		params := testIsAvailableParams{
-			options:                 []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
+			options:                 []Option{WithCustodyInfo(&peerdas.CustodyInfo{}), WithStartWaitingDataColumnSidecars(startWaiting)},
			blobKzgCommitmentsCount: 3,
		}
@@ -12,9 +12,11 @@ import (
	"github.com/OffchainLabs/prysm/v6/async/event"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
@@ -29,8 +31,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
-	"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
-	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -97,6 +97,7 @@ type config struct {
	FinalizedStateAtStartUp state.BeaconState
	ExecutionEngineCaller   execution.EngineCaller
	SyncChecker             Checker
+	CustodyInfo             *peerdas.CustodyInfo
}

// Checker is an interface used to determine if a node is in initial sync
@@ -110,26 +111,22 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
type blobNotifierMap struct {
	sync.RWMutex
	notifiers map[[32]byte]chan uint64
-	// TODO: Separate blobs from data columns
-	// seenIndex map[[32]byte][]bool
-	seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
+	seenIndex map[[32]byte][]bool
}

// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitives.Slot) {
-	// TODO: Separate blobs from data columns
-	// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
-	// if idx >= uint64(maxBlobsPerBlock) {
-	//	return
-	// }
+	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
+	if idx >= uint64(maxBlobsPerBlock) {
+		return
+	}

	bn.Lock()
	seen := bn.seenIndex[root]
-	// TODO: Separate blobs from data columns
-	// if seen == nil {
-	//	seen = make([]bool, maxBlobsPerBlock)
-	// }
+	if seen == nil {
+		seen = make([]bool, maxBlobsPerBlock)
+	}
	if seen[idx] {
		bn.Unlock()
		return
@@ -140,9 +137,7 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
	// Retrieve or create the notifier channel for the given root.
	c, ok := bn.notifiers[root]
	if !ok {
-		// TODO: Separate blobs from data columns
-		// c = make(chan uint64, maxBlobsPerBlock)
-		c = make(chan uint64, fieldparams.NumberOfColumns)
+		c = make(chan uint64, maxBlobsPerBlock)
		bn.notifiers[root] = c
	}
@@ -152,15 +147,12 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
}

func (bn *blobNotifierMap) forRoot(root [32]byte, slot primitives.Slot) chan uint64 {
-	// TODO: Separate blobs from data columns
-	// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
+	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
	bn.Lock()
	defer bn.Unlock()
	c, ok := bn.notifiers[root]
	if !ok {
-		// TODO: Separate blobs from data columns
-		// c = make(chan uint64, maxBlobsPerBlock)
-		c = make(chan uint64, fieldparams.NumberOfColumns)
+		c = make(chan uint64, maxBlobsPerBlock)
		bn.notifiers[root] = c
	}
	return c
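The two halves of `blobNotifierMap` pair up as a producer/consumer: verification workers call `notifyIndex` as sidecars arrive, and a waiter takes the per-root channel from `forRoot` and drains indices. A hedged sketch of that interplay, written as a hypothetical helper inside this package (not part of the diff):

```go
// waitForBlobs blocks until `want` distinct indices have been observed for
// the given root. Sketch only: real callers also respect context deadlines.
func waitForBlobs(bn *blobNotifierMap, root [32]byte, slot primitives.Slot, want int) {
	ch := bn.forRoot(root, slot)
	seen := make(map[uint64]bool, want)
	for len(seen) < want {
		idx := <-ch // notifyIndex sends each index at most once per root
		seen[idx] = true
	}
}
```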
@@ -186,9 +178,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
	ctx, cancel := context.WithCancel(ctx)
	bn := &blobNotifierMap{
		notifiers: make(map[[32]byte]chan uint64),
-		// TODO: Separate blobs from data columns
-		// seenIndex: make(map[[32]byte][]bool),
-		seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
+		seenIndex: make(map[[32]byte][]bool),
	}
	srv := &Service{
		ctx: ctx,
@@ -218,9 +208,17 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {

// Start a blockchain service's main event loop.
func (s *Service) Start() {
+	saved := s.cfg.FinalizedStateAtStartUp
	defer s.removeStartupState()
-	if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
-		log.Fatal(err)
+	if saved != nil && !saved.IsNil() {
+		if err := s.StartFromSavedState(saved); err != nil {
+			log.Fatal(err)
+		}
+	} else {
+		if err := s.startFromExecutionChain(); err != nil {
+			log.Fatal(err)
+		}
+	}
	s.spawnProcessAttestationsRoutine()
	go s.runLateBlockTasks()
@@ -269,9 +267,6 @@ func (s *Service) Status() error {

// StartFromSavedState initializes the blockchain using a previously saved finalized checkpoint.
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
-	if state.IsNil(saved) {
-		return errors.New("Last finalized state at startup is nil")
-	}
	log.Info("Blockchain data already exists in DB, initializing...")
	s.genesisTime = saved.GenesisTime()
	s.cfg.AttService.SetGenesisTime(saved.GenesisTime())
@@ -301,16 +296,6 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
	if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
		return errors.Wrap(err, "failed to initialize blockchain service")
	}

-	earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(saved.Slot())
-	if err != nil {
-		return errors.Wrap(err, "could not get and save custody group count")
-	}
-
-	if _, _, err := s.cfg.P2P.UpdateCustodyInfo(earliestAvailableSlot, custodySubnetCount); err != nil {
-		return errors.Wrap(err, "update custody info")
-	}
-
	return nil
}
@@ -373,6 +358,62 @@ func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) erro
	return nil
}

+func (s *Service) startFromExecutionChain() error {
+	log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
+	if s.cfg.ChainStartFetcher == nil {
+		return errors.New("not configured execution chain")
+	}
+	go func() {
+		stateChannel := make(chan *feed.Event, 1)
+		stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
+		defer stateSub.Unsubscribe()
+		for {
+			select {
+			case e := <-stateChannel:
+				if e.Type == statefeed.ChainStarted {
+					data, ok := e.Data.(*statefeed.ChainStartedData)
+					if !ok {
+						log.Error("Event data is not type *statefeed.ChainStartedData")
+						return
+					}
+					log.WithField("startTime", data.StartTime).Debug("Received chain start event")
+					s.onExecutionChainStart(s.ctx, data.StartTime)
+					return
+				}
+			case <-s.ctx.Done():
+				log.Debug("Context closed, exiting goroutine")
+				return
+			case err := <-stateSub.Err():
+				log.WithError(err).Error("Subscription to state notifier failed")
+				return
+			}
+		}
+	}()
+
+	return nil
+}
+
+// onExecutionChainStart initializes a series of deposits from the ChainStart deposits in the eth1
+// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
+func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Time) {
+	preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
+	initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
+	if err != nil {
+		log.WithError(err).Fatal("Could not initialize beacon chain")
+	}
+	// We start a counter to genesis, if needed.
+	gRoot, err := initializedState.HashTreeRoot(s.ctx)
+	if err != nil {
+		log.WithError(err).Fatal("Could not hash tree root genesis state")
+	}
+	go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
+
+	vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
+	if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
+		log.WithError(err).Fatal("Failed to initialize blockchain service from execution start event")
+	}
+}
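`startFromExecutionChain` is driven by a `ChainStarted` event on the state feed. For orientation, here is a hedged sketch of the producing side of that subscription (in prysm the execution/powchain service is the real sender; `n` is any implementation of `statefeed.Notifier`, which is an assumption of this sketch):

```go
import (
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
)

// fireChainStarted publishes the event shape the goroutine above waits for.
func fireChainStarted(n statefeed.Notifier, start time.Time) {
	n.StateFeed().Send(&feed.Event{
		Type: statefeed.ChainStarted,
		Data: &statefeed.ChainStartedData{StartTime: start},
	})
}
```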

// initializes the state and genesis block of the beacon chain to persistent storage
// based on a genesis timestamp value obtained from the ChainStart event emitted
// by the ETH1.0 Deposit Contract and the POWChain service of the node.
@@ -475,57 +516,6 @@ func (s *Service) removeStartupState() {
	s.cfg.FinalizedStateAtStartUp = nil
}

-// updateCustodyInfoInDB updates the custody information in the database.
-// It returns the (potentially updated) custody group count and the earliest available slot.
-func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
-	isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
-
-	beaconConfig := params.BeaconConfig()
-	custodyRequirement := beaconConfig.CustodyRequirement
-
-	// Check if the node was previously subscribed to all data subnets, and if so,
-	// store the new status accordingly.
-	wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
-	if err != nil {
-		log.WithError(err).Error("Could not update subscription status to all data subnets")
-	}
-
-	// Warn the user if the node was previously subscribed to all data subnets and is no longer.
-	if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
-		log.Warnf(
-			"Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
-			flags.SubscribeAllDataSubnets.Name,
-		)
-	}
-
-	// Compute the custody group count.
-	custodyGroupCount := custodyRequirement
-	if isSubscribedToAllDataSubnets {
-		custodyGroupCount = beaconConfig.NumberOfColumns
-	}
-
-	// Safely compute the fulu fork slot.
-	fuluForkSlot, err := fuluForkSlot()
-	if err != nil {
-		return 0, 0, errors.Wrap(err, "fulu fork slot")
-	}
-
-	// If slot is before the fulu fork slot, then use the earliest stored slot as the reference slot.
-	if slot < fuluForkSlot {
-		slot, err = s.cfg.BeaconDB.EarliestSlot(s.ctx)
-		if err != nil {
-			return 0, 0, errors.Wrap(err, "earliest slot")
-		}
-	}
-
-	custodyGroupCount, earliestAvailableSlot, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, custodyGroupCount, slot)
-	if err != nil {
-		return 0, 0, errors.Wrap(err, "update custody info")
-	}
-
-	return earliestAvailableSlot, custodyGroupCount, nil
-}

func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
	currentTime := prysmTime.Now()
	if currentTime.After(genesisTime) {
@@ -542,19 +532,3 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
	}
	go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
}

-func fuluForkSlot() (primitives.Slot, error) {
-	beaconConfig := params.BeaconConfig()
-
-	fuluForkEpoch := beaconConfig.FuluForkEpoch
-	if fuluForkEpoch == beaconConfig.FarFutureEpoch {
-		return beaconConfig.FarFutureSlot, nil
-	}
-
-	forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
-	if err != nil {
-		return 0, errors.Wrap(err, "epoch start")
-	}
-
-	return forkFuluSlot, nil
-}
@@ -31,7 +31,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/container/trie"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
-	"github.com/OffchainLabs/prysm/v6/genesis"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
	"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -52,7 +51,6 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
		srv.Stop()
	})
	bState, _ := util.DeterministicGenesisState(t, 10)
-	genesis.StoreStateDuringTest(t, bState)
	pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProtoUnsafe())
	require.NoError(t, err)
	mockTrie, err := trie.NewTrie(0)
@@ -73,22 +71,20 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
		DepositContainers: []*ethpb.DepositContainer{},
	})
	require.NoError(t, err)

-	depositCache, err := depositsnapshot.New()
-	require.NoError(t, err)
-
	web3Service, err = execution.NewService(
		ctx,
		execution.WithDatabase(beaconDB),
		execution.WithHttpEndpoint(endpoint),
		execution.WithDepositContractAddress(common.Address{}),
-		execution.WithDepositCache(depositCache),
	)
	require.NoError(t, err, "Unable to set up web3 service")

	attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
	require.NoError(t, err)

+	depositCache, err := depositsnapshot.New()
+	require.NoError(t, err)
+
	fc := doublylinkedtree.New()
	stateGen := stategen.New(beaconDB, fc)
	// Save a state in stategen for purposes of testing a service stop / shutdown.
@@ -400,6 +396,24 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
	require.Equal(t, true, s.cfg.BeaconDB.HasBlock(s.ctx, r))
}

+func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
+	ctx := t.Context()
+	beaconDB := testDB.SetupDB(t)
+	service := setupBeaconChain(t, beaconDB)
+	mgs := &MockClockSetter{}
+	service.clockSetter = mgs
+	gt := time.Now()
+	service.onExecutionChainStart(t.Context(), gt)
+	gs, err := beaconDB.GenesisState(ctx)
+	require.NoError(t, err)
+	require.NotEqual(t, nil, gs)
+	require.Equal(t, 32, len(gs.GenesisValidatorsRoot()))
+	var zero [32]byte
+	require.DeepNotEqual(t, gs.GenesisValidatorsRoot(), zero[:])
+	require.Equal(t, gt, mgs.G.GenesisTime())
+	require.Equal(t, bytesutil.ToBytes32(gs.GenesisValidatorsRoot()), mgs.G.GenesisValidatorsRoot())
+}
+
func BenchmarkHasBlockDB(b *testing.B) {
	ctx := b.Context()
	s := testServiceWithDB(b)
@@ -554,9 +568,7 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
func TestNotifyIndex(t *testing.T) {
	// Initialize a blobNotifierMap
	bn := &blobNotifierMap{
-		// TODO: Separate blobs from data columns
-		// seenIndex: make(map[[32]byte][]bool),
-		seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
+		seenIndex: make(map[[32]byte][]bool),
		notifiers: make(map[[32]byte]chan uint64),
	}
@@ -30,7 +30,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/require"
-	"github.com/libp2p/go-libp2p/core/peer"
	"google.golang.org/protobuf/proto"
)
@@ -55,7 +54,6 @@ type mockBroadcaster struct {

type mockAccessor struct {
	mockBroadcaster
-	mockDataColumnsHandler
	p2pTesting.MockPeerManager
}
@@ -99,43 +97,6 @@ func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.Sig

var _ p2p.Broadcaster = (*mockBroadcaster)(nil)

-// mockDataColumnsHandler is a mock implementation of p2p.DataColumnsHandler
-type mockDataColumnsHandler struct {
-	mut                   sync.RWMutex
-	earliestAvailableSlot primitives.Slot
-	custodyGroupCount     uint64
-}
-
-func (dch *mockDataColumnsHandler) EarliestAvailableSlot() primitives.Slot {
-	dch.mut.RLock()
-	defer dch.mut.RUnlock()
-
-	return dch.earliestAvailableSlot
-}
-
-func (dch *mockDataColumnsHandler) CustodyGroupCount() uint64 {
-	dch.mut.RLock()
-	defer dch.mut.RUnlock()
-
-	return dch.custodyGroupCount
-}
-
-func (dch *mockDataColumnsHandler) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
-	dch.mut.Lock()
-	defer dch.mut.Unlock()
-
-	dch.earliestAvailableSlot = earliestAvailableSlot
-	dch.custodyGroupCount = custodyGroupCount
-
-	return earliestAvailableSlot, custodyGroupCount, nil
-}
-
-func (dch *mockDataColumnsHandler) CustodyGroupCountFromPeer(peer.ID) uint64 {
-	return 0
-}
-
-var _ p2p.DataColumnsHandler = (*mockDataColumnsHandler)(nil)

type testServiceRequirements struct {
	ctx context.Context
	db  db.Database
@@ -41,6 +41,7 @@ go_library(
        "//encoding/ssz:go_default_library",
        "//math:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
+       "//network/forks:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/attestation:go_default_library",
@@ -11,6 +11,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/crypto/bls"
+	"github.com/OffchainLabs/prysm/v6/network/forks"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
	"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -95,30 +96,12 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
	return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

-func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
-	currentEpoch := slots.ToEpoch(header.Header.Slot)
-	fork, err := params.Fork(currentEpoch)
-	if err != nil {
-		return err
-	}
-	domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
-	if err != nil {
-		return err
-	}
-	proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
-	if err != nil {
-		return err
-	}
-	proposerPubKey := proposer.PublicKey
-	return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
-}

// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
	currentEpoch := slots.ToEpoch(blk.Block().Slot())
-	fork, err := params.Fork(currentEpoch)
+	fork, err := forks.Fork(currentEpoch)
	if err != nil {
		return err
	}
@@ -78,7 +78,6 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {

func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
	helpers.ClearCache()
	params.SetupTestConfigCleanup(t)

	validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
	syncCommittee := &ethpb.SyncCommittee{
@@ -265,7 +264,6 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
}

func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	helpers.ClearCache()

	validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
@@ -16,6 +16,7 @@ go_library(
    deps = [
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/state:go_default_library",
+       "//cmd/beacon-chain/flags:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
@@ -52,6 +53,7 @@ go_test(
        ":go_default_library",
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
+       "//cmd/beacon-chain/flags:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
@@ -176,6 +176,19 @@ func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
	return columnIndex % numberOfCustodyGroups, nil
}

+// CustodyGroupSamplingSize returns the number of custody groups the node should sample from.
+// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#custody-sampling
+func (custodyInfo *CustodyInfo) CustodyGroupSamplingSize(ct CustodyType) uint64 {
+	custodyGroupCount := custodyInfo.TargetGroupCount.Get()
+
+	if ct == Actual {
+		custodyGroupCount = custodyInfo.ActualGroupCount()
+	}
+
+	samplesPerSlot := params.BeaconConfig().SamplesPerSlot
+	return max(samplesPerSlot, custodyGroupCount)
+}
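The sampling size is simply the custody group count floored at `SAMPLES_PER_SLOT`. A worked sketch of that `max()` floor, assuming `SAMPLES_PER_SLOT = 8` (the value the test cases later in this diff rely on):

```go
// demoSamplingSize illustrates the floor behavior of CustodyGroupSamplingSize.
func demoSamplingSize() {
	samplesPerSlot := uint64(8)
	fmt.Println(max(samplesPerSlot, uint64(2)))   // 8: floored at samples per slot
	fmt.Println(max(samplesPerSlot, uint64(100))) // 100: custody demand dominates
}
```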

// CustodyColumns computes the custody columns from the custody groups.
func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {
	numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
@@ -104,6 +104,62 @@ func TestComputeCustodyGroupForColumn(t *testing.T) {
	})
}

+func TestCustodyGroupSamplingSize(t *testing.T) {
+	testCases := []struct {
+		name                         string
+		custodyType                  peerdas.CustodyType
+		validatorsCustodyRequirement uint64
+		toAdvertiseCustodyGroupCount uint64
+		expected                     uint64
+	}{
+		{
+			name:                         "target, lower than samples per slot",
+			custodyType:                  peerdas.Target,
+			validatorsCustodyRequirement: 2,
+			expected:                     8,
+		},
+		{
+			name:                         "target, higher than samples per slot",
+			custodyType:                  peerdas.Target,
+			validatorsCustodyRequirement: 100,
+			expected:                     100,
+		},
+		{
+			name:                         "actual, lower than samples per slot",
+			custodyType:                  peerdas.Actual,
+			validatorsCustodyRequirement: 3,
+			toAdvertiseCustodyGroupCount: 4,
+			expected:                     8,
+		},
+		{
+			name:                         "actual, higher than samples per slot",
+			custodyType:                  peerdas.Actual,
+			validatorsCustodyRequirement: 100,
+			toAdvertiseCustodyGroupCount: 101,
+			expected:                     100,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			// Create a custody info.
+			custodyInfo := peerdas.CustodyInfo{}
+
+			// Set the validators custody requirement for the target custody group count.
+			custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)
+
+			// Set the to-advertise custody group count.
+			custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount)
+
+			// Compute the custody group sampling size.
+			actual := custodyInfo.CustodyGroupSamplingSize(tc.custodyType)
+
+			// Check the result.
+			require.Equal(t, tc.expected, actual)
+		})
+	}
+}
+
func TestCustodyColumns(t *testing.T) {
	t.Run("group too large", func(t *testing.T) {
		_, err := peerdas.CustodyColumns([]uint64{1_000_000})
@@ -4,17 +4,45 @@ import (
	"encoding/binary"
	"sync"

+	"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/ethereum/go-ethereum/p2p/enode"
	lru "github.com/hashicorp/golang-lru"
	"github.com/pkg/errors"
)

-// CustodyInfo contains all useful peerDAS related information regarding a peer.
-type CustodyInfo struct {
-	CustodyGroups      map[uint64]bool
-	CustodyColumns     map[uint64]bool
-	DataColumnsSubnets map[uint64]bool
-}
+// info contains all useful peerDAS related information regarding a peer.
+type (
+	info struct {
+		CustodyGroups      map[uint64]bool
+		CustodyColumns     map[uint64]bool
+		DataColumnsSubnets map[uint64]bool
+	}
+
+	targetCustodyGroupCount struct {
+		mut                          sync.RWMutex
+		validatorsCustodyRequirement uint64
+	}
+
+	toAdverstiseCustodyGroupCount struct {
+		mut   sync.RWMutex
+		value uint64
+	}
+
+	CustodyInfo struct {
+		// Mut is a mutex to be used by callers to ensure that neither
+		// TargetGroupCount nor ToAdvertiseGroupCount is being modified.
+		// (Using this mutex is not necessary for data protection.)
+		Mut sync.RWMutex
+
+		// TargetGroupCount represents the target number of custody groups we should custody
+		// regarding the validators we are tracking.
+		TargetGroupCount targetCustodyGroupCount
+
+		// ToAdvertiseGroupCount represents the number of custody groups to advertise to the network.
+		ToAdvertiseGroupCount toAdverstiseCustodyGroupCount
+	}
+)

const (
	nodeInfoCacheSize = 200
@@ -28,7 +56,7 @@ var (

// Info returns the peerDAS information for a given nodeID and custodyGroupCount.
// It returns a boolean indicating if the peer info was already in the cache and an error if any.
-func Info(nodeID enode.ID, custodyGroupCount uint64) (*CustodyInfo, bool, error) {
+func Info(nodeID enode.ID, custodyGroupCount uint64) (*info, bool, error) {
	// Create a new cache if it doesn't exist.
	if err := createInfoCacheIfNeeded(); err != nil {
		return nil, false, errors.Wrap(err, "create cache if needed")
@@ -39,7 +67,7 @@ func Info(nodeID enode.ID, custodyGroupCount uint64) (*info, bool, error) {

	// If the value is already in the cache, return it.
	if value, ok := nodeInfoCache.Get(key); ok {
-		peerInfo, ok := value.(*CustodyInfo)
+		peerInfo, ok := value.(*info)
		if !ok {
			return nil, false, errors.New("failed to cast peer info (should never happen)")
		}
@@ -69,7 +97,7 @@ func Info(nodeID enode.ID, custodyGroupCount uint64) (*info, bool, error) {
		custodyGroupsMap[group] = true
	}

-	result := &CustodyInfo{
+	result := &info{
		CustodyGroups:      custodyGroupsMap,
		CustodyColumns:     custodyColumns,
		DataColumnsSubnets: dataColumnsSubnets,
@@ -81,6 +109,61 @@ func Info(nodeID enode.ID, custodyGroupCount uint64) (*info, bool, error) {
	return result, false, nil
}

+// ActualGroupCount returns the actual custody group count.
+func (custodyInfo *CustodyInfo) ActualGroupCount() uint64 {
+	return min(custodyInfo.TargetGroupCount.Get(), custodyInfo.ToAdvertiseGroupCount.Get())
+}
+
+// Get returns the number of groups we should participate in for custody.
+func (tcgc *targetCustodyGroupCount) Get() uint64 {
+	// If subscribed to all subnets, return the number of custody groups.
+	if flags.Get().SubscribeAllDataSubnets {
+		return params.BeaconConfig().NumberOfCustodyGroups
+	}
+
+	tcgc.mut.RLock()
+	defer tcgc.mut.RUnlock()
+
+	// If no validators are tracked, return the default custody requirement.
+	if tcgc.validatorsCustodyRequirement == 0 {
+		return params.BeaconConfig().CustodyRequirement
+	}
+
+	// Return the validators custody requirement.
+	return tcgc.validatorsCustodyRequirement
+}
+
+// SetValidatorsCustodyRequirement sets the validators custody requirement.
+func (tcgc *targetCustodyGroupCount) SetValidatorsCustodyRequirement(value uint64) {
+	tcgc.mut.Lock()
+	defer tcgc.mut.Unlock()
+
+	tcgc.validatorsCustodyRequirement = value
+}
+
+// Get returns the to-advertise custody group count.
+func (tacgc *toAdverstiseCustodyGroupCount) Get() uint64 {
+	// If subscribed to all subnets, return the number of custody groups.
+	if flags.Get().SubscribeAllDataSubnets {
+		return params.BeaconConfig().NumberOfCustodyGroups
+	}

+	custodyRequirement := params.BeaconConfig().CustodyRequirement
+
+	tacgc.mut.RLock()
+	defer tacgc.mut.RUnlock()
+
+	return max(tacgc.value, custodyRequirement)
+}
+
+// Set sets the to-advertise custody group count.
+func (tacgc *toAdverstiseCustodyGroupCount) Set(value uint64) {
+	tacgc.mut.Lock()
+	defer tacgc.mut.Unlock()
+
+	tacgc.value = value
+}
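The three accessors above interact: the target count floors at `CustodyRequirement`, the advertised count also floors there, and the actual count is the minimum of the two. A hedged sketch of the semantics using mainnet-style values (`CustodyRequirement = 4`, `NumberOfCustodyGroups = 128`, both assumptions of this sketch) inside this package:

```go
// demoCustodyCounts mirrors the semantics encoded above.
func demoCustodyCounts() {
	var ci CustodyInfo
	ci.TargetGroupCount.SetValidatorsCustodyRequirement(100) // many tracked validators
	ci.ToAdvertiseGroupCount.Set(8)

	_ = ci.TargetGroupCount.Get()      // 100: validators custody requirement wins
	_ = ci.ToAdvertiseGroupCount.Get() // 8: already above the CustodyRequirement floor
	_ = ci.ActualGroupCount()          // 8: min(target, to-advertise)
}
```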

// createInfoCacheIfNeeded creates a new cache if it doesn't exist.
func createInfoCacheIfNeeded() error {
	nodeInfoCacheMut.Lock()
@@ -107,29 +190,3 @@ func computeInfoCacheKey(nodeID enode.ID, custodyGroupCount uint64) [nodeInfoCac

	return key
}

-// ColumnIndices is a map of column indices where the key is the column index and the value is a boolean.
-// The boolean could indicate different things, eg whether the column is needed (in the context of satisfying custody requirements)
-// or present (in the context of a custody check on disk or in cache).
-type ColumnIndices map[uint64]bool
-
-// CopyTrueIndices allows callers to get a copy of the given ColumnIndices, filtering out any keys
-// where the value == `false`.
-func CopyTrueIndices(src ColumnIndices) ColumnIndices {
-	dst := make(ColumnIndices, len(src))
-	for k, v := range src {
-		if v {
-			dst[k] = true
-		}
-	}
-	return dst
-}
-
-// ColumnIndicesFromSlice converts a slice of uint64 indices into the ColumnIndices equivalent.
-func ColumnIndicesFromSlice(indices []uint64) ColumnIndices {
-	ci := make(ColumnIndices, len(indices))
-	for _, index := range indices {
-		ci[index] = true
-	}
-	return ci
-}
@@ -4,6 +4,7 @@ import (
	"testing"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
+	"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/ethereum/go-ethereum/p2p/enode"
)
@@ -25,3 +26,108 @@ func TestInfo(t *testing.T) {
		require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets)
	}
}

+func TestTargetCustodyGroupCount(t *testing.T) {
+	testCases := []struct {
+		name                         string
+		subscribeToAllColumns        bool
+		validatorsCustodyRequirement uint64
+		expected                     uint64
+	}{
+		{
+			name:                         "subscribed to all data subnets",
+			subscribeToAllColumns:        true,
+			validatorsCustodyRequirement: 100,
+			expected:                     128,
+		},
+		{
+			name:                         "no validators attached",
+			subscribeToAllColumns:        false,
+			validatorsCustodyRequirement: 0,
+			expected:                     4,
+		},
+		{
+			name:                         "some validators attached",
+			subscribeToAllColumns:        false,
+			validatorsCustodyRequirement: 100,
+			expected:                     100,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			// Subscribe to all subnets if needed.
+			if tc.subscribeToAllColumns {
+				resetFlags := flags.Get()
+				gFlags := new(flags.GlobalFlags)
+				gFlags.SubscribeAllDataSubnets = true
+				flags.Init(gFlags)
+				defer flags.Init(resetFlags)
+			}
+
+			var custodyInfo peerdas.CustodyInfo
+
+			// Set the validators custody requirement.
+			custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)
+
+			// Get the target custody group count.
+			actual := custodyInfo.TargetGroupCount.Get()
+
+			// Compare the expected and actual values.
+			require.Equal(t, tc.expected, actual)
+		})
+	}
+}
+
+func TestToAdvertiseCustodyGroupCount(t *testing.T) {
+	testCases := []struct {
+		name                         string
+		subscribeToAllColumns        bool
+		toAdvertiseCustodyGroupCount uint64
+		expected                     uint64
+	}{
+		{
+			name:                         "subscribed to all subnets",
+			subscribeToAllColumns:        true,
+			toAdvertiseCustodyGroupCount: 100,
+			expected:                     128,
+		},
+		{
+			name:                         "higher than custody requirement",
+			subscribeToAllColumns:        false,
+			toAdvertiseCustodyGroupCount: 100,
+			expected:                     100,
+		},
+		{
+			name:                         "lower than custody requirement",
+			subscribeToAllColumns:        false,
+			toAdvertiseCustodyGroupCount: 1,
+			expected:                     4,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			// Subscribe to all subnets if needed.
+			if tc.subscribeToAllColumns {
+				resetFlags := flags.Get()
+				gFlags := new(flags.GlobalFlags)
+				gFlags.SubscribeAllDataSubnets = true
+				flags.Init(gFlags)
+				defer flags.Init(resetFlags)
+			}
+
+			// Create a custody info.
+			var custodyInfo peerdas.CustodyInfo
+
+			// Set the to-advertise custody group count.
+			custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount)
+
+			// Get the to-advertise custody group count.
+			actual := custodyInfo.ToAdvertiseGroupCount.Get()
+
+			// Compare the expected and actual values.
+			require.Equal(t, tc.expected, actual)
+		})
+	}
+}
@@ -10,7 +10,10 @@ import (
	"github.com/pkg/errors"
)

-const kzgPosition = 11 // The index of the KZG commitment list in the Body
+const (
+	CustodyGroupCountEnrKey = "cgc"
+	kzgPosition             = 11 // The index of the KZG commitment list in the Body
+)

var (
	ErrIndexTooLarge = errors.New("column index is larger than the specified columns count")
@@ -27,7 +30,7 @@
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#custody-group-count
type Cgc uint64

-func (Cgc) ENRKey() string { return params.BeaconNetworkConfig().CustodyGroupCountKey }
+func (Cgc) ENRKey() string { return CustodyGroupCountEnrKey }
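With the key now a package constant, setting the custody group count on a node record no longer consults the network config. Since `Cgc` satisfies go-ethereum's `enr.Entry` interface via `ENRKey`, it can be written directly into a record; a sketch (the helper name is hypothetical):

```go
import "github.com/ethereum/go-ethereum/p2p/enr"

// advertiseCgc serializes the custody group count into the node record
// under the "cgc" key returned by ENRKey.
func advertiseCgc(rec *enr.Record, count uint64) {
	rec.Set(Cgc(count))
}
```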

// VerifyDataColumnSidecar verifies if the data column sidecar is valid.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar
@@ -4,6 +4,7 @@ go_library(
    name = "go_default_library",
    srcs = [
        "domain.go",
+       "signature.go",
        "signing_root.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing",
@@ -24,6 +25,7 @@ go_test(
    name = "go_default_test",
    srcs = [
        "domain_test.go",
+       "signature_test.go",
        "signing_root_test.go",
    ],
    embed = [":go_default_library"],
beacon-chain/core/signing/signature.go (new file, 34 lines)
@@ -0,0 +1,34 @@
+package signing
+
+import (
+	"github.com/OffchainLabs/prysm/v6/config/params"
+	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
+	"github.com/pkg/errors"
+)
+
+var ErrNilRegistration = errors.New("nil signed registration")
+
+// VerifyRegistrationSignature verifies the signature of a validator's registration.
+func VerifyRegistrationSignature(
+	sr *ethpb.SignedValidatorRegistrationV1,
+) error {
+	if sr == nil || sr.Message == nil {
+		return ErrNilRegistration
+	}
+
+	d := params.BeaconConfig().DomainApplicationBuilder
+	// Per the spec, we want the fork version and genesis validators root to be nil,
+	// which are the genesis value and zero by default.
+	sd, err := ComputeDomain(
+		d,
+		nil, /* fork version */
+		nil /* genesis val root */)
+	if err != nil {
+		return err
+	}
+
+	if err := VerifySigningRoot(sr.Message, sr.Message.Pubkey, sr.Signature, sd); err != nil {
+		return ErrSigFailedToVerify
+	}
+	return nil
+}
42
beacon-chain/core/signing/signature_test.go
Normal file
42
beacon-chain/core/signing/signature_test.go
Normal file
@@ -0,0 +1,42 @@
package signing_test

import (
"testing"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestVerifyRegistrationSignature(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
reg := &ethpb.ValidatorRegistrationV1{
FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
GasLimit: 123456,
Timestamp: uint64(time.Now().Unix()),
Pubkey: sk.PublicKey().Marshal(),
}
d := params.BeaconConfig().DomainApplicationBuilder
domain, err := signing.ComputeDomain(d, nil, nil)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(reg, domain)
require.NoError(t, err)

sReg := &ethpb.SignedValidatorRegistrationV1{
Message: reg,
Signature: sk.Sign(sr[:]).Marshal(),
}
require.NoError(t, signing.VerifyRegistrationSignature(sReg))

sReg.Signature = []byte("bad")
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrSigFailedToVerify)

sReg.Message = nil
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrNilRegistration)
}
@@ -53,11 +53,6 @@ func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e primitives.Epoc
return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
}

// PeerDASIsActive checks whether peerDAS is active at the provided slot.
func PeerDASIsActive(slot primitives.Slot) bool {
return params.FuluEnabled() && slots.ToEpoch(slot) >= params.BeaconConfig().FuluForkEpoch
}

// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH

@@ -39,8 +39,10 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

@@ -22,8 +22,8 @@ type LazilyPersistentStoreColumn struct {
store *filesystem.DataColumnStorage
nodeID enode.ID
cache *dataColumnCache
custodyInfo *peerdas.CustodyInfo
newDataColumnsVerifier verification.NewDataColumnsVerifier
custodyGroupCount uint64
}

var _ AvailabilityStore = &LazilyPersistentStoreColumn{}
@@ -38,18 +38,13 @@ type DataColumnsVerifier interface {

// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
func NewLazilyPersistentStoreColumn(
store *filesystem.DataColumnStorage,
nodeID enode.ID,
newDataColumnsVerifier verification.NewDataColumnsVerifier,
custodyGroupCount uint64,
) *LazilyPersistentStoreColumn {
func NewLazilyPersistentStoreColumn(store *filesystem.DataColumnStorage, nodeID enode.ID, newDataColumnsVerifier verification.NewDataColumnsVerifier, custodyInfo *peerdas.CustodyInfo) *LazilyPersistentStoreColumn {
return &LazilyPersistentStoreColumn{
store: store,
nodeID: nodeID,
cache: newDataColumnCache(),
custodyInfo: custodyInfo,
newDataColumnsVerifier: newDataColumnsVerifier,
custodyGroupCount: custodyGroupCount,
}
}

@@ -160,8 +155,6 @@ func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, curre

// fullCommitmentsToCheck returns the commitments to check for a given block.
func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot

// Return early for blocks that are pre-Fulu.
if block.Version() < version.Fulu {
return &safeCommitmentsArray{}, nil
@@ -190,9 +183,11 @@ func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, bl
return &safeCommitmentsArray{}, nil
}

// Retrieve the groups count.
custodyGroupCount := s.custodyInfo.ActualGroupCount()

// Retrieve peer info.
samplingSize := max(s.custodyGroupCount, samplesPerSlot)
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
if err != nil {
return nil, errors.Wrap(err, "peer info")
}

@@ -4,8 +4,10 @@ import (
"context"
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -27,7 +29,7 @@ var commitments = [][]byte{
func TestPersist(t *testing.T) {
t.Run("no sidecars", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
err := lazilyPersistentStoreColumns.Persist(0)
require.NoError(t, err)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
@@ -42,7 +44,7 @@ func TestPersist(t *testing.T) {
}

roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})

err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
require.ErrorIs(t, err, errMixedRoots)
@@ -57,7 +59,7 @@ func TestPersist(t *testing.T) {
}

roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})

err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
require.NoError(t, err)
@@ -74,7 +76,7 @@ func TestPersist(t *testing.T) {
}

roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})

err := lazilyPersistentStoreColumns.Persist(slot, roSidecars...)
require.NoError(t, err)
@@ -112,7 +114,7 @@ func TestIsDataAvailable(t *testing.T) {
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)

dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, &peerdas.CustodyInfo{})

err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
require.NoError(t, err)
@@ -133,9 +135,9 @@ func TestIsDataAvailable(t *testing.T) {
root := signedRoBlock.Root()

dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, &peerdas.CustodyInfo{})

indices := [...]uint64{1, 17, 19, 42, 75, 87, 102, 117}
indices := [...]uint64{1, 17, 87, 102}
dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
for _, index := range indices {
dataColumnParams := util.DataColumnParam{
@@ -219,10 +221,14 @@ func TestFullCommitmentsToCheck(t *testing.T) {

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SubscribeAllDataSubnets = true
flags.Init(gFlags)
defer flags.Init(resetFlags)

b := tc.block(t)
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, numberOfColumns)
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, &peerdas.CustodyInfo{})

commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
require.NoError(t, err)

@@ -14,12 +14,6 @@ import (
// IsDataAvailable guarantees that all blobs committed to in the block have been
// durably persisted before returning a non-error value.
type AvailabilityStore interface {
AvailabilityChecker
IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
Persist(current primitives.Slot, sc ...blocks.ROSidecar) error
}

// AvailabilityChecker is the minimum interface needed to check if data is available for a block.
// We should prefer this interface over AvailabilityStore in places where we don't need to persist blob data.
type AvailabilityChecker interface {
IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
}

@@ -13,7 +13,6 @@ go_library(
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/beacon-chain:__subpackages__",
"//genesis:__subpackages__",
"//testing/slasher/simulator:__pkg__",
"//tools:__subpackages__",
],

@@ -33,7 +33,6 @@ type ReadOnlyDatabase interface {
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
HighestRootsBelowSlot(ctx context.Context, slot primitives.Slot) (primitives.Slot, [][32]byte, error)
EarliestSlot(ctx context.Context) (primitives.Slot, error)
// State related methods.
State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
StateOrError(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
@@ -57,16 +56,14 @@
// Fee recipients operations.
FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
// Light client operations
// light client operations
LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64) (map[uint64]interfaces.LightClientUpdate, error)
LightClientUpdate(ctx context.Context, period uint64) (interfaces.LightClientUpdate, error)
LightClientBootstrap(ctx context.Context, blockRoot []byte) (interfaces.LightClientBootstrap, error)
// Origin checkpoint sync support

// origin checkpoint sync support
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
// Custody operations.
CustodyInfo(ctx context.Context) (uint64, uint64, error)
SubscribedToAllDataSubnets(ctx context.Context) (bool, error)
}

// NoHeadAccessDatabase defines a struct without access to chain head data.
@@ -105,12 +102,6 @@ type NoHeadAccessDatabase interface {

CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)

// Custody operations.
SaveCustodyGroupCount(ctx context.Context, custodyGroupCount uint64) error
SaveSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) error
UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
UpdateCustodyInfo(ctx context.Context, custodyGroupCount uint64, earliestAvailableSlot primitives.Slot) (uint64, primitives.Slot, error)
}

// HeadAccessDatabase defines a struct with access to reading chain head data.

@@ -8,7 +8,6 @@ go_library(
"backup.go",
"blocks.go",
"checkpoint.go",
"custody.go",
"deposit_contract.go",
"encoding.go",
"error.go",
@@ -39,6 +38,7 @@ go_library(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
@@ -50,7 +50,6 @@ go_library(
"//container/slice:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//genesis:go_default_library",
"//io/file:go_default_library",
"//monitoring/progress:go_default_library",
"//monitoring/tracing:go_default_library",
@@ -107,6 +106,7 @@ go_test(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
@@ -116,8 +116,6 @@ go_test(
"//consensus-types/light-client:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//genesis:go_default_library",
"//genesis/embedded:go_default_library",
"//proto/dbval:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

@@ -860,47 +860,6 @@ func (s *Store) SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primi
})
}

// EarliestSlot returns the earliest slot in the database.
func (s *Store) EarliestSlot(ctx context.Context) (primitives.Slot, error) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
_, span := trace.StartSpan(ctx, "BeaconDB.EarliestSlot")
defer span.End()

earliestAvailableSlot := primitives.Slot(0)
err := s.db.View(func(tx *bolt.Tx) error {
// Retrieve the root corresponding to the earliest available block.
c := tx.Bucket(blockSlotIndicesBucket).Cursor()
k, v := c.First()
if k == nil || v == nil {
return ErrNotFound
}
slot := bytesutil.BytesToSlotBigEndian(k)

// The genesis block may be indexed in this bucket, even if we started from a checkpoint.
// Because of this, we check the next block. If the next block is still in the genesis epoch,
// then we consider we have the whole chain.
if slot != 0 {
earliestAvailableSlot = slot
}

k, v = c.Next()
if k == nil || v == nil {
// Only the genesis block is available.
return nil
}
slot = bytesutil.BytesToSlotBigEndian(k)
if slot < slotsPerEpoch {
// We are still in the genesis epoch, so we consider we have the whole chain.
return nil
}

earliestAvailableSlot = slot
return nil
})

return earliestAvailableSlot, err
}
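The cursor logic above only works because slot keys sort numerically under bbolt's bytewise iteration, which holds for fixed-width big-endian encoding but not little-endian. A minimal standalone sketch of that assumption (the `slotKey` helper is illustrative, not from the diff):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// slotKey encodes a slot the way the index bucket is assumed to:
// fixed-width big-endian, so bytewise order equals numeric order.
func slotKey(slot uint64) []byte {
	key := make([]byte, 8)
	binary.BigEndian.PutUint64(key, slot)
	return key
}

func main() {
	// With little-endian encoding this comparison would be false,
	// and Cursor.First() could return an arbitrary slot.
	fmt.Println(bytes.Compare(slotKey(255), slotKey(256)) < 0) // true
}
```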
type slotRoot struct {
slot primitives.Slot
root [32]byte
@@ -924,7 +883,7 @@ func (s *Store) slotRootsInRange(ctx context.Context, start, end primitives.Slot
c := bkt.Cursor()
for k, v := c.Seek(key); ; /* rely on internal checks to exit */ k, v = c.Prev() {
if len(k) == 0 && len(v) == 0 {
// The `edge` variable and this `if` deal with 2 edge cases:
// The `edge`` variable and this `if` deal with 2 edge cases:
// - Seeking past the end of the bucket (the `end` param is higher than the highest slot).
// - Seeking before the beginning of the bucket (the `start` param is lower than the lowest slot).
// In both of these cases k,v will be nil and we can handle the same way using `edge` to

@@ -1,231 +0,0 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
"github.com/pkg/errors"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// CustodyInfo returns the custody group count and the earliest available slot in the database.
|
||||
func (s *Store) CustodyInfo(ctx context.Context) (uint64, uint64, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.CustodyInfo")
|
||||
defer span.End()
|
||||
|
||||
groupCount, earliestAvailableSlot := uint64(0), uint64(0)
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket := tx.Bucket(custodyBucket)
|
||||
if bucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve the group count.
|
||||
bytes := bucket.Get(groupCountKey)
|
||||
if len(bytes) != 0 {
|
||||
groupCount = bytesutil.BytesToUint64BigEndian(bytes)
|
||||
}
|
||||
|
||||
// Retrieve the earliest available slot.
|
||||
earliestSlotBytes := bucket.Get(earliestAvailableSlotKey)
|
||||
if len(earliestSlotBytes) != 0 {
|
||||
earliestAvailableSlot = bytesutil.BytesToUint64BigEndian(earliestSlotBytes)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return groupCount, earliestAvailableSlot, err
|
||||
}
|
||||
|
||||
// UpdateCustodyInfo atomically updates the custody group count only it is greater than the stored one.
|
||||
// In this case, it also updates the earliest available slot with the provided value.
|
||||
// It returns the stored custody group count and earliest available slot.
|
||||
func (s *Store) UpdateCustodyInfo(ctx context.Context, custodyGroupCount uint64, earliestAvailableSlot primitives.Slot) (uint64, primitives.Slot, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.UpdateCustodyInfo")
|
||||
defer span.End()
|
||||
|
||||
storedGroupCount, storedEarliestAvailableSlot := uint64(0), primitives.Slot(0)
|
||||
if err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "create custody bucket")
|
||||
}
|
||||
|
||||
// Retrieve the stored custody group count.
|
||||
storedGroupCountBytes := bucket.Get(groupCountKey)
|
||||
if len(storedGroupCountBytes) != 0 {
|
||||
storedGroupCount = bytesutil.BytesToUint64BigEndian(storedGroupCountBytes)
|
||||
}
|
||||
|
||||
// Retrieve the stored earliest available slot.
|
||||
storedEarliestAvailableSlotBytes := bucket.Get(earliestAvailableSlotKey)
|
||||
if len(storedEarliestAvailableSlotBytes) != 0 {
|
||||
storedEarliestAvailableSlot = primitives.Slot(bytesutil.BytesToUint64BigEndian(storedEarliestAvailableSlotBytes))
|
||||
}
|
||||
|
||||
// Exit early if the new custody group count is lower than or equal to the stored one.
|
||||
if custodyGroupCount <= storedGroupCount {
|
||||
return nil
|
||||
}
|
||||
|
||||
storedGroupCount, storedEarliestAvailableSlot = custodyGroupCount, earliestAvailableSlot
|
||||
|
||||
// Store the earliest available slot.
|
||||
bytes := bytesutil.Uint64ToBytesBigEndian(uint64(earliestAvailableSlot))
|
||||
if err := bucket.Put(earliestAvailableSlotKey, bytes); err != nil {
|
||||
return errors.Wrap(err, "put earliest available slot")
|
||||
}
|
||||
|
||||
// Store the custody group count.
|
||||
bytes = bytesutil.Uint64ToBytesBigEndian(custodyGroupCount)
|
||||
if err := bucket.Put(groupCountKey, bytes); err != nil {
|
||||
return errors.Wrap(err, "put custody group count")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
return storedGroupCount, storedEarliestAvailableSlot, nil
|
||||
}

// SaveCustodyGroupCount saves the custody group count to the database.
func (s *Store) SaveCustodyGroupCount(ctx context.Context, custodyGroupCount uint64) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SetCustodyGroupCount")
defer span.End()

return s.db.Update(func(tx *bolt.Tx) error {
// Retrieve the custody bucket.
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
if err != nil {
return errors.Wrap(err, "create custody bucket")
}

// Store the custody group count.
custodyGroupCountBytes := bytesutil.Uint64ToBytesBigEndian(custodyGroupCount)
if err := bucket.Put(groupCountKey, custodyGroupCountBytes); err != nil {
return errors.Wrap(err, "put custody group count")
}

return nil
})
}

// SubscribedToAllDataSubnets checks in the database if the node is subscribed to all data subnets.
func (s *Store) SubscribedToAllDataSubnets(ctx context.Context) (bool, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.SubscribedToAllDataSubnets")
defer span.End()

result := false
err := s.db.View(func(tx *bolt.Tx) error {
// Retrieve the custody bucket.
bucket := tx.Bucket(custodyBucket)
if bucket == nil {
return nil
}

// Retrieve the subscribe all data subnets flag.
bytes := bucket.Get(subscribeAllDataSubnetsKey)
if len(bytes) == 0 {
return nil
}

if bytes[0] == 1 {
result = true
}

return nil
})

return result, err
}

// SaveSubscribedToAllDataSubnets saves the subscription status to all data subnets in the database.
func (s *Store) SaveSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveSubscribedToAllDataSubnets")
defer span.End()

return s.db.Update(func(tx *bolt.Tx) error {
// Retrieve the custody bucket.
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
if err != nil {
return errors.Wrap(err, "create custody bucket")
}

// Store the subscription status.
value := byte(0)
if subscribed {
value = 1
}

if err := bucket.Put(subscribeAllDataSubnetsKey, []byte{value}); err != nil {
return errors.Wrap(err, "put subscribe all data subnets")
}

return nil
})
}

// UpdateSubscribedToAllDataSubnets updates the "subscribed to all data subnets" status in the database
// only if `subscribed` is `true`.
// It returns the previous subscription status.
func (s *Store) UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.UpdateSubscribedToAllDataSubnets")
defer span.End()

result := false
if !subscribed {
if err := s.db.View(func(tx *bolt.Tx) error {
// Retrieve the custody bucket.
bucket := tx.Bucket(custodyBucket)
if bucket == nil {
return nil
}

// Retrieve the subscribe all data subnets flag.
bytes := bucket.Get(subscribeAllDataSubnetsKey)
if len(bytes) == 0 {
return nil
}

if bytes[0] == 1 {
result = true
}

return nil
}); err != nil {
return false, err
}

return result, nil
}

if err := s.db.Update(func(tx *bolt.Tx) error {
// Retrieve the custody bucket.
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
if err != nil {
return errors.Wrap(err, "create custody bucket")
}

bytes := bucket.Get(subscribeAllDataSubnetsKey)
if len(bytes) != 0 && bytes[0] == 1 {
result = true
}

if err := bucket.Put(subscribeAllDataSubnetsKey, []byte{1}); err != nil {
return errors.Wrap(err, "put subscribe all data subnets")
}

return nil
}); err != nil {
return false, err
}

return result, nil
}
@@ -8,7 +8,6 @@ import (
dbIface "github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/encoding/ssz/detect"
"github.com/OffchainLabs/prysm/v6/genesis"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
@@ -98,22 +97,8 @@ func (s *Store) EnsureEmbeddedGenesis(ctx context.Context) error {
if err != nil {
return err
}
if !state.IsNil(gs) {
if gs != nil && !gs.IsNil() {
return s.SaveGenesisData(ctx, gs)
}
return nil
}

type LegacyGenesisProvider struct {
store *Store
}

func NewLegacyGenesisProvider(store *Store) *LegacyGenesisProvider {
return &LegacyGenesisProvider{store: store}
}

var _ genesis.Provider = &LegacyGenesisProvider{}

func (p *LegacyGenesisProvider) Genesis(ctx context.Context) (state.BeaconState, error) {
return p.store.LegacyGenesisState(ctx)
}

@@ -8,7 +8,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -153,7 +152,6 @@ func TestEnsureEmbeddedGenesis(t *testing.T) {
require.NoError(t, undo())
}()

genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
ctx := t.Context()
db := setupDB(t)

@@ -123,7 +123,6 @@ var Buckets = [][]byte{

feeRecipientBucket,
registrationBucket,
custodyBucket,
}

// KVStoreOption is a functional option that modifies a kv.Store.

@@ -70,10 +70,4 @@ var (

// Migrations
migrationsBucket = []byte("migrations")

// Custody
custodyBucket = []byte("custody")
groupCountKey = []byte("group-count")
earliestAvailableSlotKey = []byte("earliest-available-slot")
subscribeAllDataSubnetsKey = []byte("subscribe-all-data-subnets")
)

@@ -6,12 +6,14 @@ import (
"fmt"

"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/genesis"
statenative "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -63,21 +65,21 @@ func (s *Store) StateOrError(ctx context.Context, blockRoot [32]byte) (state.Bea
return st, nil
}

func (s *Store) GenesisState(ctx context.Context) (state.BeaconState, error) {
st, err := genesis.State()
if errors.Is(err, genesis.ErrGenesisStateNotInitialized) {
log.WithError(err).Error("genesis state not initialized, returning nil state. this should only happen in tests")
return nil, nil
}
return st, err
}

// GenesisState returns the genesis state in beacon chain.
func (s *Store) LegacyGenesisState(ctx context.Context) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LegacyGenesisState")
func (s *Store) GenesisState(ctx context.Context) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisState")
defer span.End()

var err error
cached, err := genesis.State(params.BeaconConfig().ConfigName)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
span.SetAttributes(trace.BoolAttribute("cache_hit", cached != nil))
if cached != nil {
return cached, nil
}

var st state.BeaconState
err = s.db.View(func(tx *bolt.Tx) error {
// Retrieve genesis block's signing root from blocks bucket,

@@ -15,7 +15,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -489,7 +488,7 @@ func TestGenesisState_CanSaveRetrieve(t *testing.T) {
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), headRoot))
genesis.StoreStateDuringTest(t, st)
require.NoError(t, db.SaveState(t.Context(), st, headRoot))

savedGenesisS, err := db.GenesisState(t.Context())
require.NoError(t, err)
@@ -662,7 +661,7 @@ func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) {
require.NoError(t, err)
genesisRoot := [32]byte{'a'}
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))
genesis.StoreStateDuringTest(t, genesisState)
require.NoError(t, db.SaveState(t.Context(), genesisState, genesisRoot))

b := util.NewBeaconBlock()
b.Block.Slot = 1

@@ -3,9 +3,9 @@ package kv
import (
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/state/genesis"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/genesis/embedded"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
)
@@ -18,7 +18,7 @@ func TestSaveOrigin(t *testing.T) {
ctx := t.Context()
db := setupDB(t)

st, err := embedded.ByName(params.MainnetName)
st, err := genesis.State(params.MainnetName)
require.NoError(t, err)

sb, err := st.MarshalSSZ()

@@ -125,7 +125,6 @@ go_test(
"//contracts/deposit/mock:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//genesis:go_default_library",
"//monitoring/clientstats:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

@@ -98,12 +98,7 @@ const (
// GetBlobsV2 request string for JSON-RPC.
GetBlobsV2 = "engine_getBlobsV2"
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
// TODO: Remove this temporarily needed hack: geth takes as input blob txs with blob proofs, and
// does the heavy lifting of building cell proofs, while normally this is done by the tx sender.
// This is a cool hack because it lets the CL act as if the tx sender actually computed the cell proofs.
// The only downside is that `engine_getPayloadV<x>` takes a lot of time.
// defaultEngineTimeout = time.Second
defaultEngineTimeout = 2 * time.Second
defaultEngineTimeout = time.Second
)

var (

@@ -22,7 +22,6 @@ import (
contracts "github.com/OffchainLabs/prysm/v6/contracts/deposit"
"github.com/OffchainLabs/prysm/v6/contracts/deposit/mock"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/monitoring/clientstats"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -382,7 +381,6 @@ func TestInitDepositCache_OK(t *testing.T) {
require.NoError(t, err)
require.NoError(t, s.cfg.beaconDB.SaveGenesisBlockRoot(t.Context(), blockRootA))
require.NoError(t, s.cfg.beaconDB.SaveState(t.Context(), emptyState, blockRootA))
genesis.StoreStateDuringTest(t, emptyState)
s.chainStartData.Chainstarted = true
require.NoError(t, s.initDepositCaches(t.Context(), ctrs))
require.Equal(t, 3, len(s.cfg.depositCache.PendingContainers(t.Context(), nil)))
@@ -448,7 +446,6 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {
require.NoError(t, s.cfg.beaconDB.SaveGenesisBlockRoot(t.Context(), headRoot))
require.NoError(t, s.cfg.beaconDB.SaveState(t.Context(), emptyState, headRoot))
require.NoError(t, stateGen.SaveState(t.Context(), headRoot, emptyState))
genesis.StoreStateDuringTest(t, emptyState)
s.cfg.stateGen = stateGen
require.NoError(t, emptyState.SetEth1DepositIndex(3))

@@ -597,7 +594,6 @@ func TestService_EnsureConsistentPowchainData(t *testing.T) {
require.NoError(t, err)
assert.NoError(t, genState.SetSlot(1000))

genesis.StoreStateDuringTest(t, genState)
require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(t.Context(), genState))
_, err = s1.validPowchainData(t.Context())
require.NoError(t, err)
@@ -659,7 +655,6 @@ func TestService_EnsureValidPowchainData(t *testing.T) {
require.NoError(t, err)
assert.NoError(t, genState.SetSlot(1000))

genesis.StoreStateDuringTest(t, genState)
require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(t.Context(), genState))

err = s1.cfg.beaconDB.SaveExecutionChainData(t.Context(), &ethpb.ETH1ChainData{

@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"clear_db.go",
"config.go",
"log.go",
"node.go",
@@ -24,6 +23,7 @@ go_library(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/db/kv:go_default_library",
@@ -50,6 +50,7 @@ go_library(
"//beacon-chain/sync/backfill:go_default_library",
"//beacon-chain/sync/backfill/coverage:go_default_library",
"//beacon-chain/sync/checkpoint:go_default_library",
"//beacon-chain/sync/genesis:go_default_library",
"//beacon-chain/sync/initial-sync:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd:go_default_library",
@@ -59,7 +60,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//encoding/bytesutil:go_default_library",
"//genesis:go_default_library",
"//monitoring/prometheus:go_default_library",
"//monitoring/tracing:go_default_library",
"//runtime:go_default_library",

@@ -1,101 +0,0 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/slasherkv"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
type dbClearer struct {
|
||||
shouldClear bool
|
||||
force bool
|
||||
confirmed bool
|
||||
}
|
||||
|
||||
const (
|
||||
clearConfirmation = "This will delete your beacon chain database stored in your data directory. " +
|
||||
"Your database backups will not be removed - do you want to proceed? (Y/N)"
|
||||
|
||||
clearDeclined = "Database will not be deleted. No changes have been made."
|
||||
)
|
||||
|
||||
func (c *dbClearer) clearKV(ctx context.Context, db *kv.Store) (*kv.Store, error) {
|
||||
if !c.shouldProceed() {
|
||||
return db, nil
|
||||
}
|
||||
|
||||
log.Warning("Removing database")
|
||||
if err := db.ClearDB(); err != nil {
|
||||
return nil, errors.Wrap(err, "could not clear database")
|
||||
}
|
||||
return kv.NewKVStore(ctx, db.DatabasePath())
|
||||
}
|
||||
|
||||
func (c *dbClearer) clearBlobs(bs *filesystem.BlobStorage) error {
|
||||
if !c.shouldProceed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Warning("Removing blob storage")
|
||||
if err := bs.Clear(); err != nil {
|
||||
return errors.Wrap(err, "could not clear blob storage")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *dbClearer) clearColumns(cs *filesystem.DataColumnStorage) error {
|
||||
if !c.shouldProceed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Warning("Removing data columns storage")
|
||||
if err := cs.Clear(); err != nil {
|
||||
return errors.Wrap(err, "could not clear data columns storage")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *dbClearer) clearSlasher(ctx context.Context, db *slasherkv.Store) (*slasherkv.Store, error) {
|
||||
if !c.shouldProceed() {
|
||||
return db, nil
|
||||
}
|
||||
|
||||
log.Warning("Removing slasher database")
|
||||
if err := db.ClearDB(); err != nil {
|
||||
return nil, errors.Wrap(err, "could not clear slasher database")
|
||||
}
|
||||
return slasherkv.NewKVStore(ctx, db.DatabasePath())
|
||||
}
|
||||
|
||||
func (c *dbClearer) shouldProceed() bool {
|
||||
if !c.shouldClear {
|
||||
return false
|
||||
}
|
||||
if c.force {
|
||||
return true
|
||||
}
|
||||
if !c.confirmed {
|
||||
confirmed, err := cmd.ConfirmAction(clearConfirmation, clearDeclined)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Not clearing db due to confirmation error")
|
||||
return false
|
||||
}
|
||||
c.confirmed = confirmed
|
||||
}
|
||||
return c.confirmed
|
||||
}
|
||||
|
||||
func newDbClearer(cliCtx *cli.Context) *dbClearer {
|
||||
force := cliCtx.Bool(cmd.ForceClearDB.Name)
|
||||
return &dbClearer{
|
||||
shouldClear: cliCtx.Bool(cmd.ClearDB.Name) || force,
|
||||
force: force,
|
||||
}
|
||||
}
|
||||
@@ -26,6 +26,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
@@ -52,6 +53,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/backfill"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/backfill/coverage"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/checkpoint"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/genesis"
initialsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/cmd"
@@ -61,7 +63,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/slice"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/monitoring/prometheus"
"github.com/OffchainLabs/prysm/v6/runtime"
"github.com/OffchainLabs/prysm/v6/runtime/prereqs"
@@ -113,7 +114,7 @@ type BeaconNode struct {
slasherAttestationsFeed *event.Feed
finalizedStateAtStartUp state.BeaconState
serviceFlagOpts *serviceFlagOpts
GenesisProviders []genesis.Provider
GenesisInitializer genesis.Initializer
CheckpointInitializer checkpoint.Initializer
forkChoicer forkchoice.ForkChoicer
clockWaiter startup.ClockWaiter
@@ -123,11 +124,11 @@ type BeaconNode struct {
BlobStorageOptions []filesystem.BlobStorageOption
DataColumnStorage *filesystem.DataColumnStorage
DataColumnStorageOptions []filesystem.DataColumnStorageOption
custodyInfo *peerdas.CustodyInfo
verifyInitWaiter *verification.InitializerWaiter
syncChecker *initialsync.SyncChecker
slasherEnabled bool
lcStore *lightclient.Store
ConfigOptions []params.Option
}

// New creates a new node instance, sets up configuration options, and registers
@@ -136,13 +137,18 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
if err := configureBeacon(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not set beacon configuration options")
}

// Initializes any forks here.
params.BeaconConfig().InitializeForkSchedule()

registry := runtime.NewServiceRegistry()
ctx := cliCtx.Context

beacon := &BeaconNode{
cliCtx: cliCtx,
ctx: ctx,
cancel: cancel,
services: runtime.NewServiceRegistry(),
services: registry,
stop: make(chan struct{}),
stateFeed: new(event.Feed),
blockFeed: new(event.Feed),
@@ -160,6 +166,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
serviceFlagOpts: &serviceFlagOpts{},
initialSyncComplete: make(chan struct{}),
syncChecker: &initialsync.SyncChecker{},
custodyInfo: &peerdas.CustodyInfo{},
slasherEnabled: cliCtx.Bool(flags.SlasherFlag.Name),
}

@@ -169,25 +176,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
}
}

dbClearer := newDbClearer(cliCtx)
dataDir := cliCtx.String(cmd.DataDirFlag.Name)
boltFname := filepath.Join(dataDir, kv.BeaconNodeDbDirName)
kvdb, err := openDB(ctx, boltFname, dbClearer)
if err != nil {
return nil, errors.Wrap(err, "could not open database")
}
beacon.db = kvdb

providers := append(beacon.GenesisProviders, kv.NewLegacyGenesisProvider(kvdb))
if err := genesis.Initialize(ctx, dataDir, providers...); err != nil {
return nil, errors.Wrap(err, "could not initialize genesis state")
}

beacon.ConfigOptions = append([]params.Option{params.WithGenesisValidatorsRoot(genesis.ValidatorsRoot())}, beacon.ConfigOptions...)
params.BeaconConfig().ApplyOptions(beacon.ConfigOptions...)
params.BeaconConfig().InitializeForkSchedule()
params.LogDigests(params.BeaconConfig())

synchronizer := startup.NewClockSynchronizer()
beacon.clockWaiter = synchronizer
beacon.forkChoicer = doublylinkedtree.New()
@@ -206,9 +194,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
}
beacon.BlobStorage = blobs
}
if err := dbClearer.clearBlobs(beacon.BlobStorage); err != nil {
return nil, errors.Wrap(err, "could not clear blob storage")
}

if beacon.DataColumnStorage == nil {
dataColumnStorage, err := filesystem.NewDataColumnStorage(cliCtx.Context, beacon.DataColumnStorageOptions...)
@@ -218,11 +203,8 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco

beacon.DataColumnStorage = dataColumnStorage
}
if err := dbClearer.clearColumns(beacon.DataColumnStorage); err != nil {
return nil, errors.Wrap(err, "could not clear data column storage")
}

bfs, err := startBaseServices(cliCtx, beacon, depositAddress, dbClearer)
bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
if err != nil {
return nil, errors.Wrap(err, "could not start modules")
}
@@ -310,7 +292,7 @@ func configureBeacon(cliCtx *cli.Context) error {
return nil
}

func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string, clearer *dbClearer) (*backfill.Store, error) {
func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string) (*backfill.Store, error) {
ctx := cliCtx.Context
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
@@ -318,10 +300,9 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
}

beacon.BlobStorage.WarmCache()
beacon.DataColumnStorage.WarmCache()

log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx, clearer); err != nil {
if err := beacon.startSlasherDB(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not start slashing DB")
}

@@ -501,6 +482,43 @@ func (b *BeaconNode) Close() {
close(b.stop)
}

func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath string) (*kv.Store, error) {
var err error
clearDBConfirmed := false

if clearDB && !forceClearDB {
const (
actionText = "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"

deniedText = "Database will not be deleted. No changes have been made."
)

clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return nil, errors.Wrapf(err, "could not confirm action")
}
}

if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return nil, errors.Wrap(err, "could not clear database")
}

if err := b.BlobStorage.Clear(); err != nil {
return nil, errors.Wrap(err, "could not clear blob storage")
}

d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return nil, errors.Wrap(err, "could not create new database")
}
}

return d, nil
}

func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
@@ -524,36 +542,60 @@ func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
return nil
}

func openDB(ctx context.Context, dbPath string, clearer *dbClearer) (*kv.Store, error) {
func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
var depositCache cache.DepositCache

baseDir := cliCtx.String(cmd.DataDirFlag.Name)
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDBRequired := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDBRequired := cliCtx.Bool(cmd.ForceClearDB.Name)

log.WithField("databasePath", dbPath).Info("Checking DB")

d, err := kv.NewKVStore(ctx, dbPath)
d, err := kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return nil, errors.Wrapf(err, "could not create database at %s", dbPath)
return errors.Wrapf(err, "could not create database at %s", dbPath)
}

d, err = clearer.clearKV(ctx, d)
if err != nil {
return nil, errors.Wrap(err, "could not clear database")
if clearDBRequired || forceClearDBRequired {
d, err = b.clearDB(clearDBRequired, forceClearDBRequired, d, dbPath)
if err != nil {
return errors.Wrap(err, "could not clear database")
}
}

return d, d.RunMigrations(ctx)
}
if err := d.RunMigrations(b.ctx); err != nil {
return err
}

func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
depositCache, err := depositsnapshot.New()
b.db = d

depositCache, err = depositsnapshot.New()
if err != nil {
return errors.Wrap(err, "could not create deposit cache")
}

b.depositCache = depositCache

if b.GenesisInitializer != nil {
if err := b.GenesisInitializer.Initialize(b.ctx, d); err != nil {
if errors.Is(err, db.ErrExistingGenesisState) {
return errors.Errorf("Genesis state flag specified but a genesis state "+
"exists already. Run again with --%s and/or ensure you are using the "+
"appropriate testnet flag to load the given genesis state.", cmd.ClearDB.Name)
}

return errors.Wrap(err, "could not load genesis from file")
}
}

if err := b.db.EnsureEmbeddedGenesis(b.ctx); err != nil {
return errors.Wrap(err, "could not ensure embedded genesis")
}

if b.CheckpointInitializer != nil {
log.Info("Checkpoint sync - Downloading origin state and block")
if err := b.CheckpointInitializer.Initialize(b.ctx, b.db); err != nil {
if err := b.CheckpointInitializer.Initialize(b.ctx, d); err != nil {
return err
}
}
@@ -565,25 +607,49 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
log.WithField("address", depositAddress).Info("Deposit contract")
return nil
}
func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context, clearer *dbClearer) error {

func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
if !b.slasherEnabled {
return nil
}
baseDir := cliCtx.String(cmd.DataDirFlag.Name)

if cliCtx.IsSet(flags.SlasherDirFlag.Name) {
baseDir = cliCtx.String(flags.SlasherDirFlag.Name)
}

dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)

log.WithField("databasePath", dbPath).Info("Checking DB")

d, err := slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
return err
}
d, err = clearer.clearSlasher(b.ctx, d)
if err != nil {
return errors.Wrap(err, "could not clear slasher database")
clearDBConfirmed := false
if clearDB && !forceClearDB {
actionText := "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText := "Database will not be deleted. No changes have been made."
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return err
}
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return errors.Wrap(err, "could not clear database")
}

d, err = slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrap(err, "could not create new database")
}
}

b.slasherDB = d
return nil
}
@@ -651,6 +717,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
CustodyInfo: b.custodyInfo,
})
if err != nil {
return err
@@ -733,6 +800,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
blockchain.WithPayloadIDCache(b.payloadIDCache),
blockchain.WithSyncChecker(b.syncChecker),
blockchain.WithCustodyInfo(b.custodyInfo),
blockchain.WithSlasherEnabled(b.slasherEnabled),
blockchain.WithLightClientStore(b.lcStore),
)
@@ -820,7 +888,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithDataColumnStorage(b.DataColumnStorage),
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
regularsync.WithAvailableBlocker(bFillStore),
regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
regularsync.WithCustodyInfo(b.custodyInfo),
regularsync.WithSlasherEnabled(b.slasherEnabled),
regularsync.WithLightClientStore(b.lcStore),
regularsync.WithBatchVerifierLimit(b.cliCtx.Int(flags.BatchVerifierLimit.Name)),
@@ -847,7 +915,6 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
ClockWaiter: b.clockWaiter,
InitialSyncComplete: complete,
BlobStorage: b.BlobStorage,
DataColumnStorage: b.DataColumnStorage,
}, opts...)
return b.services.RegisterService(is)
}
@@ -942,7 +1009,6 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
FinalizationFetcher: chainService,
BlockReceiver: chainService,
BlobReceiver: chainService,
DataColumnReceiver: chainService,
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
@@ -970,7 +1036,6 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
Router: router,
ClockWaiter: b.clockWaiter,
BlobStorage: b.BlobStorage,
DataColumnStorage: b.DataColumnStorage,
TrackedValidatorsCache: b.trackedValidatorsCache,
PayloadIDCache: b.payloadIDCache,
LCStore: b.lcStore,
@@ -1112,8 +1177,7 @@ func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error {

func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
// TODO: Add backfill for data column storage
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.DataColumnStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
if err != nil {
return errors.Wrap(err, "error initializing backfill service")
}

@@ -5,7 +5,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/builder"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
    "github.com/OffchainLabs/prysm/v6/config/params"
)

// Option for beacon node configuration.
@@ -52,13 +51,6 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
    }
}

func WithConfigOptions(opt ...params.Option) Option {
    return func(bn *BeaconNode) error {
        bn.ConfigOptions = append(bn.ConfigOptions, opt...)
        return nil
    }
}

// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode
func WithDataColumnStorage(bs *filesystem.DataColumnStorage) Option {
    return func(bn *BeaconNode) error {
@@ -71,6 +71,7 @@ go_library(
        "//monitoring/tracing:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//network:go_default_library",
        "//network/forks:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/metadata:go_default_library",
        "//runtime:go_default_library",
@@ -168,6 +169,7 @@ go_test(
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//network:go_default_library",
        "//network/forks:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/metadata:go_default_library",
@@ -178,7 +180,6 @@ go_test(
        "//testing/util:go_default_library",
        "//time:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_ethereum_go_ethereum//crypto:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
@@ -15,6 +15,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/crypto/hash"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
    "github.com/OffchainLabs/prysm/v6/network/forks"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/pkg/errors"
@@ -273,8 +274,14 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
        return errors.New("attempted to broadcast nil light client optimistic update")
    }

    digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
    if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
    forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
    if err != nil {
        err := errors.Wrap(err, "could not retrieve fork digest")
        tracing.AnnotateError(span, err)
        return err
    }

    if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(forkDigest)); err != nil {
        log.WithError(err).Debug("Failed to broadcast light client optimistic update")
        err := errors.Wrap(err, "could not publish message")
        tracing.AnnotateError(span, err)
@@ -293,7 +300,13 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
        return errors.New("attempted to broadcast nil light client finality update")
    }

    forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
    forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
    if err != nil {
        err := errors.Wrap(err, "could not retrieve fork digest")
        tracing.AnnotateError(span, err)
        return err
    }

    if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
        log.WithError(err).Debug("Failed to broadcast light client finality update")
        err := errors.Wrap(err, "could not publish message")
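Both hunks above swap the static params.ForkDigest lookup for forks.ForkDigestFromEpoch, which derives the digest from the fork version active at the update's attested epoch plus the genesis validators root. As a rough, self-contained sketch of the underlying consensus-spec derivation (the 4-byte digest is the truncated SSZ hash-tree-root of a two-field ForkData container; this mirrors the spec, not Prysm's exact code path):

package forkdigest

import "crypto/sha256"

// computeForkDigest returns the first 4 bytes of
// hash_tree_root(ForkData{currentVersion, genesisValidatorsRoot}).
// For a container of two 32-byte chunks, the hash tree root is simply
// sha256(chunk0 || chunk1), where the 4-byte version is zero-padded.
func computeForkDigest(version [4]byte, genesisValidatorsRoot [32]byte) [4]byte {
    var chunk [32]byte
    copy(chunk[:4], version[:]) // Bytes4 padded to a full 32-byte chunk
    root := sha256.Sum256(append(chunk[:], genesisValidatorsRoot[:]...))
    var digest [4]byte
    copy(digest[:], root[:4])
    return digest
}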
@@ -16,13 +16,12 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
    p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    "github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/network/forks"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    testpb "github.com/OffchainLabs/prysm/v6/proto/testing"
    "github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -62,7 +61,6 @@ func TestService_Broadcast(t *testing.T) {
    topic := "/eth2/%x/testing"
    // Set a test gossip mapping for testpb.TestSimpleMessage.
    GossipTypeMapping[reflect.TypeOf(msg)] = topic
    p.clock = startup.NewClock(p.genesisTime, bytesutil.ToBytes32(p.genesisValidatorsRoot))
    digest, err := p.currentForkDigest()
    require.NoError(t, err)
    topic = fmt.Sprintf(topic, digest)
@@ -553,7 +551,9 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
    require.NoError(t, err)

    GossipTypeMapping[reflect.TypeOf(msg)] = LightClientOptimisticUpdateTopicFormat
    topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
    digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
    require.NoError(t, err)
    topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, digest)

    // External peer subscribes to the topic.
    topic += p.Encoding().ProtocolSuffix()
@@ -618,7 +618,9 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
    require.NoError(t, err)

    GossipTypeMapping[reflect.TypeOf(msg)] = LightClientFinalityUpdateTopicFormat
    topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
    digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
    require.NoError(t, err)
    topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, digest)

    // External peer subscribes to the topic.
    topic += p.Encoding().ProtocolSuffix()
@@ -4,6 +4,7 @@ import (
    "time"

    statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
)
@@ -39,6 +40,7 @@ type Config struct {
    StateNotifier statefeed.Notifier
    DB            db.ReadOnlyDatabase
    ClockWaiter   startup.ClockWaiter
    CustodyInfo   *peerdas.CustodyInfo
}

// validateConfig validates whether the values provided are accurate and will set
@@ -3,68 +3,12 @@ package p2p
import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

var _ DataColumnsHandler = (*Service)(nil)

// EarliestAvailableSlot returns the earliest available slot.
func (s *Service) EarliestAvailableSlot() primitives.Slot {
    s.custodyInfoMut.RLock()
    defer s.custodyInfoMut.RUnlock()

    return s.earliestAvailableSlot
}

// CustodyGroupCount returns the custody group count.
func (s *Service) CustodyGroupCount() uint64 {
    s.custodyInfoMut.Lock()
    defer s.custodyInfoMut.Unlock()

    return s.custodyGroupCount
}

// UpdateCustodyInfo updates the custody group count and earliest available slot
// if the new custody group count is greater than the stored one.
// It returns the (potentially updated) earliest available slot and custody group count.
func (s *Service) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
    s.custodyInfoMut.Lock()
    defer s.custodyInfoMut.Unlock()

    if custodyGroupCount <= s.custodyGroupCount {
        return s.earliestAvailableSlot, s.custodyGroupCount, nil
    }

    if earliestAvailableSlot < s.earliestAvailableSlot {
        return 0, 0, errors.Errorf(
            "earliest available slot %d is less than the current one %d. (custody group count: %d, current one: %d)",
            earliestAvailableSlot, s.earliestAvailableSlot, custodyGroupCount, s.custodyGroupCount,
        )
    }

    s.custodyGroupCount = custodyGroupCount

    fuluForkSlot, err := fuluForkSlot()
    if err != nil {
        return 0, 0, errors.Wrap(err, "fulu fork slot")
    }

    if earliestAvailableSlot >= fuluForkSlot {
        s.earliestAvailableSlot = earliestAvailableSlot
    }

    log.WithFields(logrus.Fields{
        "earliestAvailableSlot": s.earliestAvailableSlot,
        "custodyGroupCount":     s.custodyGroupCount,
    }).Debug("Custody info updated")

    return s.earliestAvailableSlot, s.custodyGroupCount, nil
}

// CustodyGroupCountFromPeer retrieves custody group count from a peer.
// It first tries to get the custody group count from the peer's metadata,
// then falls back to the ENR value if the metadata is not available, then
@@ -128,19 +72,3 @@ func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {

    return custodyGroupCount
}

func fuluForkSlot() (primitives.Slot, error) {
    beaconConfig := params.BeaconConfig()

    fuluForkEpoch := beaconConfig.FuluForkEpoch
    if fuluForkEpoch == beaconConfig.FarFutureEpoch {
        return beaconConfig.FarFutureSlot, nil
    }

    forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
    if err != nil {
        return 0, errors.Wrap(err, "epoch start")
    }

    return forkFuluSlot, nil
}
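UpdateCustodyInfo above enforces a monotonic rule: the custody group count only ever grows, the earliest available slot never moves backwards, and the slot is only advanced once it is at or past the Fulu fork slot. A simplified, self-contained sketch of that invariant with stand-in types (not Prysm's Service):

package custody

import (
    "fmt"
    "sync"
)

type custodyInfo struct {
    mu                    sync.Mutex
    earliestAvailableSlot uint64
    custodyGroupCount     uint64
    fuluForkSlot          uint64
}

// update applies the same monotonic rules as UpdateCustodyInfo: ignore
// non-increasing counts, reject a regressing earliest slot, and only move
// the earliest slot forward once it reaches the Fulu fork slot.
func (c *custodyInfo) update(slot, count uint64) (uint64, uint64, error) {
    c.mu.Lock()
    defer c.mu.Unlock()

    if count <= c.custodyGroupCount {
        return c.earliestAvailableSlot, c.custodyGroupCount, nil
    }
    if slot < c.earliestAvailableSlot {
        return 0, 0, fmt.Errorf("earliest available slot %d is less than the current one %d", slot, c.earliestAvailableSlot)
    }
    c.custodyGroupCount = count
    if slot >= c.fuluForkSlot {
        c.earliestAvailableSlot = slot
    }
    return c.earliestAvailableSlot, c.custodyGroupCount, nil
}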
@@ -11,7 +11,6 @@ import (

    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    "github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
    "github.com/OffchainLabs/prysm/v6/config/features"
    "github.com/OffchainLabs/prysm/v6/config/params"
@@ -237,33 +236,22 @@ func (s *Service) RefreshPersistentSubnets() {
    // Get the sync subnet bitfield in our metadata.
    currentBitSInMetadata := s.Metadata().SyncnetsBitfield()

    // Is our sync bitvector record up to date?
    isBitSUpToDate := bytes.Equal(bitS, inRecordBitS) && bytes.Equal(bitS, currentBitSInMetadata)

    // Compare current epoch with the Fulu fork epoch.
    fuluForkEpoch := params.BeaconConfig().FuluForkEpoch

    // Get the custody group count we store in our record.
    inRecordCustodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record)
    if err != nil {
        log.WithError(err).Error("Could not retrieve custody subnet count")
        return
    }

    custodyGroupCount := s.CustodyGroupCount()

    // We add `1` to the current epoch because we want to prepare one epoch before the Fulu fork.
    if currentEpoch+1 < fuluForkEpoch {
    // Is our custody group count record up to date?
    isCustodyGroupCountUpToDate := custodyGroupCount == inRecordCustodyGroupCount

    // Altair behaviour.
    if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate && (!params.FuluEnabled() || isCustodyGroupCountUpToDate) {
    if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate {
        // Nothing to do, return early.
        return
    }

    // Some data have changed, update our record and metadata.
    s.updateSubnetRecordWithMetadataV2(bitV, bitS, custodyGroupCount)
    s.updateSubnetRecordWithMetadataV2(bitV, bitS)

    // Ping all peers to inform them of new metadata
    s.pingPeersAndLogEnr()
@@ -271,6 +259,16 @@ func (s *Service) RefreshPersistentSubnets() {
        return
    }

    // Get the current custody group count.
    custodyGroupCount := s.cfg.CustodyInfo.ActualGroupCount()

    // Get the custody group count we store in our record.
    inRecordCustodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record)
    if err != nil {
        log.WithError(err).Error("Could not retrieve custody subnet count")
        return
    }

    // Get the custody group count in our metadata.
    inMetadataCustodyGroupCount := s.Metadata().CustodyGroupCount()

@@ -567,23 +565,22 @@ func (s *Service) createLocalNode(
        localNode.Set(quicEntry)
    }

    if params.FuluEnabled() {
        custodyGroupCount := s.cfg.CustodyInfo.ActualGroupCount()
        localNode.Set(peerdas.Cgc(custodyGroupCount))
    }

    localNode.SetFallbackIP(ipAddr)
    localNode.SetFallbackUDP(udpPort)

    clock := startup.NewClock(s.genesisTime, [32]byte(s.genesisValidatorsRoot))
    current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
    next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
    if err := updateENR(localNode, current, next); err != nil {
    localNode, err = addForkEntry(localNode, s.genesisTime, s.genesisValidatorsRoot)
    if err != nil {
        return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
    }

    localNode = initializeAttSubnets(localNode)
    localNode = initializeSyncCommSubnets(localNode)

    custodyGroupCount := s.CustodyGroupCount()
    custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
    localNode.Set(custodyGroupCountEntry)

    if s.cfg != nil && s.cfg.HostAddress != "" {
        hostIP := net.ParseIP(s.cfg.HostAddress)
        if hostIP.To4() == nil && hostIP.To16() == nil {
@@ -688,7 +685,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {
    // Ignore nodes that don't match our fork digest.
    nodeENR := node.Record()
    if s.genesisValidatorsRoot != nil {
        if err := compareForkENR(s.dv5Listener.LocalNode().Node().Record(), nodeENR); err != nil {
        if err := s.compareForkENR(nodeENR); err != nil {
            log.WithError(err).Trace("Fork ENR mismatches between peer and local node")
            return false
        }
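The hunks above and the tests below show the custody group count being published and read back under the peerdas.CustodyGroupCountEnrKey ENR entry (via peerdas.Cgc) instead of the old params.BeaconNetworkConfig() key. A hedged usage sketch of the read-back side, assuming the go-ethereum enr API and the peerdas key shown in this diff:

// custodyCountFromRecord loads the cgc entry a node published via
// peerdas.Cgc(...); it returns an error when the record has no such entry.
func custodyCountFromRecord(record *enr.Record) (uint64, error) {
    var count uint64
    if err := record.Load(enr.WithEntry(peerdas.CustodyGroupCountEnrKey, &count)); err != nil {
        return 0, err
    }
    return count, nil
}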
@@ -16,6 +16,7 @@ import (

    mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
@@ -156,27 +157,27 @@ func TestCreateLocalNode(t *testing.T) {
    }{
        {
            name:          "valid config",
            cfg:           &Config{},
            cfg:           &Config{CustodyInfo: &peerdas.CustodyInfo{}},
            expectedError: false,
        },
        {
            name:          "invalid host address",
            cfg:           &Config{HostAddress: "invalid"},
            cfg:           &Config{HostAddress: "invalid", CustodyInfo: &peerdas.CustodyInfo{}},
            expectedError: true,
        },
        {
            name:          "valid host address",
            cfg:           &Config{HostAddress: "192.168.0.1"},
            cfg:           &Config{HostAddress: "192.168.0.1", CustodyInfo: &peerdas.CustodyInfo{}},
            expectedError: false,
        },
        {
            name:          "invalid host DNS",
            cfg:           &Config{HostDNS: "invalid"},
            cfg:           &Config{HostDNS: "invalid", CustodyInfo: &peerdas.CustodyInfo{}},
            expectedError: true,
        },
        {
            name:          "valid host DNS",
            cfg:           &Config{HostDNS: "www.google.com"},
            cfg:           &Config{HostDNS: "www.google.com", CustodyInfo: &peerdas.CustodyInfo{}},
            expectedError: false,
        },
    }
@@ -198,7 +199,6 @@ func TestCreateLocalNode(t *testing.T) {
                genesisTime:           time.Now(),
                genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
                cfg:                   tt.cfg,
                custodyGroupCount:     params.BeaconConfig().CustodyRequirement,
            }

            localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort, quicPort)
@@ -210,7 +210,7 @@ func TestCreateLocalNode(t *testing.T) {
            require.NoError(t, err)

            expectedAddress := address
            if tt.cfg != nil && tt.cfg.HostAddress != "" {
            if tt.cfg.HostAddress != "" {
                expectedAddress = net.ParseIP(tt.cfg.HostAddress)
            }
@@ -250,7 +250,7 @@ func TestCreateLocalNode(t *testing.T) {

            // Check cgc config.
            custodyGroupCount := new(uint64)
            require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, custodyGroupCount)))
            require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(peerdas.CustodyGroupCountEnrKey, custodyGroupCount)))
            require.Equal(t, params.BeaconConfig().CustodyRequirement, *custodyGroupCount)
        })
    }
@@ -655,7 +655,7 @@ func checkPingCountCacheMetadataRecord(
    if expected.custodyGroupCount != nil {
        // Check custody subnet count in ENR.
        var actualCustodyGroupCount uint64
        err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, &actualCustodyGroupCount))
        err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(peerdas.CustodyGroupCountEnrKey, &actualCustodyGroupCount))
        require.NoError(t, err)
        require.Equal(t, *expected.custodyGroupCount, actualCustodyGroupCount)
@@ -818,11 +818,10 @@ func TestRefreshPersistentSubnets(t *testing.T) {
                actualPingCount++
                return nil
            },
            cfg:                   &Config{UDPPort: 2000},
            cfg:                   &Config{UDPPort: 2000, CustodyInfo: &peerdas.CustodyInfo{}},
            peers:                 p2p.Peers(),
            genesisTime:           time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second),
            genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
            custodyGroupCount:     params.BeaconConfig().CustodyRequirement,
        }

        // Set the listener and the metadata.
@@ -3,18 +3,19 @@ package p2p
import (
    "bytes"
    "fmt"
    "time"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/network/forks"
    pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    prysmTime "github.com/OffchainLabs/prysm/v6/time"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/enr"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

var errEth2ENRDigestMismatch = errors.New("fork digest of peer does not match local value")

// ENR key used for Ethereum consensus-related fork data.
var eth2ENRKey = params.BeaconNetworkConfig().ETH2Key

@@ -24,30 +25,29 @@ func (s *Service) currentForkDigest() ([4]byte, error) {
    if !s.isInitialized() {
        return [4]byte{}, errors.New("state is not initialized")
    }

    clock := startup.NewClock(s.genesisTime, [32]byte(s.genesisValidatorsRoot))
    return params.ForkDigest(clock.CurrentEpoch()), nil
    return forks.CreateForkDigest(s.genesisTime, s.genesisValidatorsRoot)
}

// Compares fork ENRs between an incoming peer's record and our node's
// local record values for current and next fork version/epoch.
func compareForkENR(self, peer *enr.Record) error {
    peerForkENR, err := forkEntry(peer)
func (s *Service) compareForkENR(record *enr.Record) error {
    currentRecord := s.dv5Listener.LocalNode().Node().Record()
    peerForkENR, err := forkEntry(record)
    if err != nil {
        return err
    }
    currentForkENR, err := forkEntry(self)
    currentForkENR, err := forkEntry(currentRecord)
    if err != nil {
        return err
    }
    enrString, err := SerializeENR(peer)
    enrString, err := SerializeENR(record)
    if err != nil {
        return err
    }
    // Clients SHOULD connect to peers with current_fork_digest, next_fork_version,
    // and next_fork_epoch that match local values.
    if !bytes.Equal(peerForkENR.CurrentForkDigest, currentForkENR.CurrentForkDigest) {
        return errors.Wrapf(errEth2ENRDigestMismatch,
        return fmt.Errorf(
            "fork digest of peer with ENR %s: %v, does not match local value: %v",
            enrString,
            peerForkENR.CurrentForkDigest,
@@ -74,36 +74,41 @@ func compareForkENR(self, peer *enr.Record) error {
    return nil
}

func updateENR(node *enode.LocalNode, entry, next params.NetworkScheduleEntry) error {
// Adds a fork entry as an ENR record under the Ethereum consensus EnrKey for
// the local node. The fork entry is an ssz-encoded enrForkID type
// which takes into account the current fork version from the current
// epoch to create a fork digest, the next fork version,
// and the next fork epoch.
func addForkEntry(
    node *enode.LocalNode,
    genesisTime time.Time,
    genesisValidatorsRoot []byte,
) (*enode.LocalNode, error) {
    digest, err := forks.CreateForkDigest(genesisTime, genesisValidatorsRoot)
    if err != nil {
        return nil, err
    }
    currentSlot := slots.CurrentSlot(genesisTime)
    currentEpoch := slots.ToEpoch(currentSlot)
    if prysmTime.Now().Before(genesisTime) {
        currentEpoch = 0
    }
    nextForkVersion, nextForkEpoch, err := forks.NextForkData(currentEpoch)
    if err != nil {
        return nil, err
    }
    enrForkID := &pb.ENRForkID{
        CurrentForkDigest: entry.ForkDigest[:],
        NextForkVersion:   next.ForkVersion[:],
        NextForkEpoch:     next.Epoch,
        CurrentForkDigest: digest[:],
        NextForkVersion:   nextForkVersion[:],
        NextForkEpoch:     nextForkEpoch,
    }
    if entry.Epoch == next.Epoch {
        enrForkID.NextForkEpoch = params.BeaconConfig().FarFutureEpoch
    }
    logFields := logrus.Fields{
        "CurrentForkDigest": fmt.Sprintf("%#x", enrForkID.CurrentForkDigest),
        "NextForkVersion":   fmt.Sprintf("%#x", enrForkID.NextForkVersion),
        "NextForkEpoch":     fmt.Sprintf("%d", enrForkID.NextForkEpoch),
    }
    if params.BeaconConfig().FuluForkEpoch != params.BeaconConfig().FarFutureEpoch {
        if entry.ForkDigest == next.ForkDigest {
            node.Set(enr.WithEntry(nfdEnrKey, make([]byte, len(next.ForkDigest))))
        } else {
            node.Set(enr.WithEntry(nfdEnrKey, next.ForkDigest[:]))
        }
        logFields[nfdEnrKey] = fmt.Sprintf("%#x", next.ForkDigest)
    }
    log.WithFields(logFields).Info("Updating ENR Fork ID")
    enc, err := enrForkID.MarshalSSZ()
    if err != nil {
        return err
        return nil, err
    }
    forkEntry := enr.WithEntry(eth2ENRKey, enc)
    node.Set(forkEntry)
    return nil
    return node, nil
}

// Retrieves an enrForkID from an ENR record by key lookup
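compareForkENR above implements the discovery filter rule: a mismatched current fork digest rejects the peer, while next-fork differences are merely logged (the spec only says those values SHOULD match). A self-contained sketch of that decision rule with a stand-in ENRForkID type:

package forkenr

import (
    "bytes"
    "errors"
    "log"
)

var errDigestMismatch = errors.New("fork digest of peer does not match local value")

type enrForkID struct {
    CurrentForkDigest []byte
    NextForkVersion   []byte
    NextForkEpoch     uint64
}

// forksCompatible rejects peers only on a current-digest mismatch; next-fork
// differences are logged but tolerated.
func forksCompatible(local, peer enrForkID) error {
    if !bytes.Equal(local.CurrentForkDigest, peer.CurrentForkDigest) {
        return errDigestMismatch
    }
    if !bytes.Equal(local.NextForkVersion, peer.NextForkVersion) {
        log.Println("Peer matches fork digest but has different next fork version")
    }
    if local.NextForkEpoch != peer.NextForkEpoch {
        log.Println("Peer matches fork digest but has different next fork epoch")
    }
    return nil
}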
@@ -8,121 +8,242 @@ import (
    "testing"
    "time"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/network/forks"
    pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/enr"
    ma "github.com/multiformats/go-multiaddr"
    "github.com/sirupsen/logrus"
    logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestCompareForkENR(t *testing.T) {
func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
    const port = 2000

    ipAddr, pkey := createAddrAndPrivKey(t)
    genesisTime := time.Now()
    genesisValidatorsRoot := make([]byte, fieldparams.RootLength)
    s := &Service{
        cfg: &Config{
            UDPPort:              uint(port),
            StateNotifier:        &mock.MockStateNotifier{},
            PingInterval:         testPingInterval,
            DisableLivenessCheck: true,
        },
        genesisTime:           genesisTime,
        genesisValidatorsRoot: genesisValidatorsRoot,
    }
    bootListener, err := s.createListener(ipAddr, pkey)
    require.NoError(t, err)
    defer bootListener.Close()

    // Allow bootnode's table to have its initial refresh. This allows
    // inbound nodes to be added in.
    time.Sleep(5 * time.Second)

    bootNode := bootListener.Self()
    cfg := &Config{
        Discv5BootStrapAddrs: []string{bootNode.String()},
        UDPPort:              uint(port),
        StateNotifier:        &mock.MockStateNotifier{},
        PingInterval:         testPingInterval,
        DisableLivenessCheck: true,
    }

    var listeners []*listenerWrapper
    for i := 1; i <= 5; i++ {
        port := 3000 + i
        cfg.UDPPort = uint(port)
        ipAddr, pkey := createAddrAndPrivKey(t)

        // We give every peer a different genesis validators root, which
        // will cause each peer to have a different ForkDigest, preventing
        // them from connecting according to our discovery rules for Ethereum consensus.
        root := make([]byte, 32)
        copy(root, strconv.Itoa(port))
        s = &Service{
            cfg:                   cfg,
            genesisTime:           genesisTime,
            genesisValidatorsRoot: root,
        }
        listener, err := s.startDiscoveryV5(ipAddr, pkey)
        assert.NoError(t, err, "Could not start discovery for node")
        listeners = append(listeners, listener)
    }
    defer func() {
        // Close down all peers.
        for _, listener := range listeners {
            listener.Close()
        }
    }()

    // Wait for the nodes' local routing tables to be populated with the other nodes.
    time.Sleep(discoveryWaitTime)

    lastListener := listeners[len(listeners)-1]
    nodes := lastListener.Lookup(bootNode.ID())
    if len(nodes) < 4 {
        t.Errorf("The node's local table doesn't have the expected number of nodes. "+
            "Expected more than or equal to %d but got %d", 4, len(nodes))
    }

    // Now, we start a new p2p service. It should have no peers aside from the
    // bootnode given all nodes provided by discv5 will have different fork digests.
    cfg.UDPPort = 14000
    cfg.TCPPort = 14001
    cfg.MaxPeers = 30
    s, err = NewService(t.Context(), cfg)
    require.NoError(t, err)
    s.genesisTime = genesisTime
    s.genesisValidatorsRoot = make([]byte, 32)
    s.dv5Listener = lastListener

    addrs := make([]ma.Multiaddr, 0)

    for _, node := range nodes {
        if s.filterPeer(node) {
            nodeAddrs, err := retrieveMultiAddrsFromNode(node)
            require.NoError(t, err)
            addrs = append(addrs, nodeAddrs...)
        }
    }

    // We should not have valid peers if the fork digest mismatched.
    assert.Equal(t, 0, len(addrs), "Expected 0 valid peers")
    require.NoError(t, s.Stop())
}

func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
    const port = 2000

    params.SetupTestConfigCleanup(t)
    params.BeaconConfig().InitializeForkSchedule()
    hook := logTest.NewGlobal()
    logrus.SetLevel(logrus.TraceLevel)
    ipAddr, pkey := createAddrAndPrivKey(t)
    genesisTime := time.Now()
    genesisValidatorsRoot := make([]byte, 32)
    s := &Service{
        cfg:                   &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true},
        genesisTime:           genesisTime,
        genesisValidatorsRoot: genesisValidatorsRoot,
    }
    bootListener, err := s.createListener(ipAddr, pkey)
    require.NoError(t, err)
    defer bootListener.Close()

    db, err := enode.OpenDB("")
    assert.NoError(t, err)
    _, k := createAddrAndPrivKey(t)
    clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
    current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
    next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
    self := enode.NewLocalNode(db, k)
    require.NoError(t, updateENR(self, current, next))
    // Allow bootnode's table to have its initial refresh. This allows
    // inbound nodes to be added in.
    time.Sleep(5 * time.Second)

    cases := []struct {
        name      string
        expectErr error
        expectLog string
        node      func(t *testing.T) *enode.Node
    }{
        {
            name: "match",
            node: func(t *testing.T) *enode.Node {
                // Create a peer with the same current fork digest and next fork version/epoch.
                peer := enode.NewLocalNode(db, k)
                require.NoError(t, updateENR(peer, current, next))
                return peer.Node()
            },
        },
        {
            name: "current digest mismatch",
            node: func(t *testing.T) *enode.Node {
                // Create a peer whose current fork digest differs from ours.
                peer := enode.NewLocalNode(db, k)
                testDigest := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
                require.NotEqual(t, current.ForkDigest, testDigest, "ensure test fork digest is unique")
                current := current.Copy()
                current.ForkDigest = testDigest
                require.NoError(t, updateENR(peer, current, next))
                return peer.Node()
            },
            expectErr: errEth2ENRDigestMismatch,
        },
        {
            name: "next fork version mismatch",
            node: func(t *testing.T) *enode.Node {
                // Create a peer with the same current fork digest but a different next fork version.
                peer := enode.NewLocalNode(db, k)
                testVersion := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
                require.NotEqual(t, next.ForkVersion, testVersion, "ensure test fork version is unique")
                next := next.Copy()
                next.ForkVersion = testVersion
                require.NoError(t, updateENR(peer, current, next))
                return peer.Node()
            },
            expectLog: "Peer matches fork digest but has different next fork version",
        },
        {
            name: "next fork epoch mismatch",
            node: func(t *testing.T) *enode.Node {
                // Create a peer with the same current fork digest but a different next fork epoch.
                peer := enode.NewLocalNode(db, k)
                next := next.Copy()
                next.Epoch = next.Epoch + 1
                require.NoError(t, updateENR(peer, current, next))
                return peer.Node()
            },
            expectLog: "Peer matches fork digest but has different next fork epoch",
        },
    bootNode := bootListener.Self()
    cfg := &Config{
        Discv5BootStrapAddrs: []string{bootNode.String()},
        UDPPort:              uint(port),
        PingInterval:         testPingInterval,
        DisableLivenessCheck: true,
    }

    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            hook := logTest.NewGlobal()
            peer := c.node(t)
            err := compareForkENR(self.Node().Record(), peer.Record())
            if c.expectErr != nil {
                require.ErrorIs(t, err, c.expectErr, "Expected error to match")
            } else {
                require.NoError(t, err, "Expected no error comparing fork ENRs")
            }
            if c.expectLog != "" {
                require.LogsContain(t, hook, c.expectLog, "Expected log message not found")
            }
        })
    var listeners []*listenerWrapper
    for i := 1; i <= 5; i++ {
        port := 3000 + i
        cfg.UDPPort = uint(port)
        ipAddr, pkey := createAddrAndPrivKey(t)
        c := params.BeaconConfig().Copy()
        nextForkEpoch := primitives.Epoch(i)
        c.ForkVersionSchedule[[4]byte{'A', 'B', 'C', 'D'}] = nextForkEpoch
        params.OverrideBeaconConfig(c)

        // Every peer shares the same genesis validators root (so their
        // fork digests match) but gets a different next fork epoch in its
        // fork schedule.
        s = &Service{
            cfg:                   cfg,
            genesisTime:           genesisTime,
            genesisValidatorsRoot: genesisValidatorsRoot,
        }
        listener, err := s.startDiscoveryV5(ipAddr, pkey)
        assert.NoError(t, err, "Could not start discovery for node")
        listeners = append(listeners, listener)
    }
    defer func() {
        // Close down all peers.
        for _, listener := range listeners {
            listener.Close()
        }
    }()

    // Wait for the nodes' local routing tables to be populated with the other nodes.
    time.Sleep(discoveryWaitTime)

    lastListener := listeners[len(listeners)-1]
    nodes := lastListener.Lookup(bootNode.ID())
    if len(nodes) < 4 {
        t.Errorf("The node's local table doesn't have the expected number of nodes. "+
            "Expected more than or equal to %d but got %d", 4, len(nodes))
    }

    // Now, we start a new p2p service. Its peers all match on the current
    // fork digest, differing only in their next fork data.
    cfg.UDPPort = 14000
    cfg.TCPPort = 14001
    cfg.MaxPeers = 30
    cfg.StateNotifier = &mock.MockStateNotifier{}
    s, err = NewService(t.Context(), cfg)
    require.NoError(t, err)

    s.genesisTime = genesisTime
    s.genesisValidatorsRoot = make([]byte, 32)
    s.dv5Listener = lastListener
    addrs := make([]ma.Multiaddr, 0, len(nodes))

    for _, node := range nodes {
        if s.filterPeer(node) {
            nodeAddrs, err := retrieveMultiAddrsFromNode(node)
            require.NoError(t, err)
            addrs = append(addrs, nodeAddrs...)
        }
    }
    if len(addrs) == 0 {
        t.Error("Expected to have valid peers, got 0")
    }

    require.LogsContain(t, hook, "Peer matches fork digest but has different next fork epoch")
    require.NoError(t, s.Stop())
}

func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    params.BeaconConfig().InitializeForkSchedule()
    c := params.BeaconConfig().Copy()
    c.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
        bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): 0,
        {0, 0, 0, 1}: 1,
    }
    nextForkEpoch := primitives.Epoch(1)
    nextForkVersion := []byte{0, 0, 0, 1}
    params.OverrideBeaconConfig(c)

    clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
    current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
    next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
    genesisTime := time.Now()
    genesisValidatorsRoot := make([]byte, 32)
    digest, err := forks.CreateForkDigest(genesisTime, make([]byte, 32))
    require.NoError(t, err)
    enrForkID := &pb.ENRForkID{
        CurrentForkDigest: current.ForkDigest[:],
        NextForkVersion:   next.ForkVersion[:],
        NextForkEpoch:     next.Epoch,
        CurrentForkDigest: digest[:],
        NextForkVersion:   nextForkVersion,
        NextForkEpoch:     nextForkEpoch,
    }
    enc, err := enrForkID.MarshalSSZ()
    require.NoError(t, err)
    entry := enr.WithEntry(eth2ENRKey, enc)
    // In epoch 1 of current time, the fork version should be
    // {0, 0, 0, 1} according to the configuration override above.
    temp := t.TempDir()
    randNum := rand.Int()
    tempPath := path.Join(temp, strconv.Itoa(randNum))
@@ -134,16 +255,18 @@ func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
    localNode := enode.NewLocalNode(db, pkey)
    localNode.Set(entry)

    want, err := signing.ComputeForkDigest([]byte{0, 0, 0, 0}, genesisValidatorsRoot)
    require.NoError(t, err)

    resp, err := forkEntry(localNode.Node().Record())
    require.NoError(t, err)
    assert.Equal(t, hexutil.Encode(current.ForkDigest[:]), hexutil.Encode(resp.CurrentForkDigest))
    assert.Equal(t, hexutil.Encode(next.ForkVersion[:]), hexutil.Encode(resp.NextForkVersion))
    assert.Equal(t, next.Epoch, resp.NextForkEpoch, "Unexpected next fork epoch")
    assert.DeepEqual(t, want[:], resp.CurrentForkDigest)
    assert.DeepEqual(t, nextForkVersion, resp.NextForkVersion)
    assert.Equal(t, nextForkEpoch, resp.NextForkEpoch, "Unexpected next fork epoch")
}

func TestAddForkEntry_NextForkVersion(t *testing.T) {
func TestAddForkEntry_Genesis(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    params.BeaconConfig().InitializeForkSchedule()
    temp := t.TempDir()
    randNum := rand.Int()
    tempPath := path.Join(temp, strconv.Itoa(randNum))
@@ -153,27 +276,17 @@ func TestAddForkEntry_NextForkVersion(t *testing.T) {
    db, err := enode.OpenDB("")
    require.NoError(t, err)

    bCfg := params.MainnetConfig()
    bCfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{}
    bCfg.ForkVersionSchedule[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)] = bCfg.GenesisEpoch
    params.OverrideBeaconConfig(bCfg)

    localNode := enode.NewLocalNode(db, pkey)
    clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
    current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
    next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
    // Add the fork entry to the local node's ENR.
    require.NoError(t, updateENR(localNode, current, next))
    fe, err := forkEntry(localNode.Node().Record())
    localNode, err = addForkEntry(localNode, time.Now().Add(10*time.Second), bytesutil.PadTo([]byte{'A', 'B', 'C', 'D'}, 32))
    require.NoError(t, err)
    assert.Equal(t,
        hexutil.Encode(params.BeaconConfig().AltairForkVersion), hexutil.Encode(fe.NextForkVersion),
    forkEntry, err := forkEntry(localNode.Node().Record())
    require.NoError(t, err)
    assert.DeepEqual(t,
        params.BeaconConfig().GenesisForkVersion, forkEntry.NextForkVersion,
        "Wanted Next Fork Version to be equal to genesis fork version")

    last := params.LastForkEpoch()
    current = params.GetNetworkScheduleEntry(last)
    next = params.NextNetworkScheduleEntry(last)
    require.NoError(t, updateENR(localNode, current, next))
    entry := params.NextNetworkScheduleEntry(last)
    fe, err = forkEntry(localNode.Node().Record())
    require.NoError(t, err)
    assert.Equal(t,
        hexutil.Encode(entry.ForkVersion[:]), hexutil.Encode(fe.NextForkVersion),
        "Wanted Next Fork Version to be equal to last entry in schedule")

}
@@ -10,22 +10,26 @@ import (
// changes.
func (s *Service) forkWatcher() {
    slotTicker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
    var scheduleEntry params.NetworkScheduleEntry
    for {
        select {
        case currSlot := <-slotTicker.C():
            if s.dv5Listener == nil {
                continue // TODO: Should forkWatcher run at all if we're in "local" mode?
            }
            currentEpoch := slots.ToEpoch(currSlot)
            newEntry := params.GetNetworkScheduleEntry(currentEpoch)
            if newEntry.ForkDigest != scheduleEntry.ForkDigest {
                nextEntry := params.NextNetworkScheduleEntry(currentEpoch)
                if err := updateENR(s.dv5Listener.LocalNode(), newEntry, nextEntry); err != nil {
                    log.WithFields(newEntry.LogFields()).WithError(err).Error("Could not add fork entry")
                    continue // don't replace scheduleEntry until this succeeds
            currEpoch := slots.ToEpoch(currSlot)
            if currEpoch == params.BeaconConfig().AltairForkEpoch ||
                currEpoch == params.BeaconConfig().BellatrixForkEpoch ||
                currEpoch == params.BeaconConfig().CapellaForkEpoch ||
                currEpoch == params.BeaconConfig().DenebForkEpoch ||
                currEpoch == params.BeaconConfig().ElectraForkEpoch ||
                currEpoch == params.BeaconConfig().FuluForkEpoch {
                // If we are in the fork epoch, we update our ENR with the
                // updated fork digest. This repeats every slot of the epoch,
                // which might be slightly wasteful but is fine nonetheless.
                if s.dv5Listener != nil { // make sure it's not a local network
                    _, err := addForkEntry(s.dv5Listener.LocalNode(), s.genesisTime, s.genesisValidatorsRoot)
                    if err != nil {
                        log.WithError(err).Error("Could not add fork entry")
                    }
                }
                scheduleEntry = newEntry
            }
        case <-s.ctx.Done():
            log.Debug("Context closed, exiting goroutine")
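The rewritten forkWatcher above only touches the ENR when the computed fork digest actually changes, instead of firing on every slot of each hard-coded fork epoch. A compact sketch of that change-detection loop (the entry type and lookup function are illustrative stand-ins for params.NetworkScheduleEntry and its accessors):

package forkwatch

type scheduleEntry struct{ ForkDigest [4]byte }

// watch recomputes the schedule entry each tick and applies it only when
// the digest differs from the last successfully applied entry. A failed
// apply leaves `applied` untouched, so the update is retried next tick.
func watch(ticks <-chan uint64, slotsPerEpoch uint64, lookup func(epoch uint64) scheduleEntry, apply func(scheduleEntry) error) {
    var applied scheduleEntry
    for slot := range ticks {
        entry := lookup(slot / slotsPerEpoch)
        if entry.ForkDigest == applied.ForkDigest {
            continue
        }
        if err := apply(entry); err != nil {
            continue
        }
        applied = entry
    }
}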
@@ -7,7 +7,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
    "github.com/ethereum/go-ethereum/p2p/enode"
@@ -36,11 +35,10 @@ type (
        DataColumnsHandler
    }

    // Accessor provides access to the Broadcaster, PeerManager and DataColumnsHandler interfaces.
    // Accessor provides access to the Broadcaster and PeerManager interfaces.
    Accessor interface {
        Broadcaster
        PeerManager
        DataColumnsHandler
    }

    // Broadcaster broadcasts messages to peers over the p2p pubsub protocol.
@@ -122,9 +120,6 @@ type (

    // DataColumnsHandler abstracts some data columns related methods.
    DataColumnsHandler interface {
        EarliestAvailableSlot() primitives.Slot
        CustodyGroupCount() uint64
        UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
        CustodyGroupCountFromPeer(peer.ID) uint64
    }
)
@@ -7,6 +7,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/crypto/hash"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/math"
    "github.com/OffchainLabs/prysm/v6/network/forks"
    pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
)

@@ -38,7 +39,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
        copy(msg, "invalid")
        return bytesutil.UnsafeCastToString(msg)
    }
    _, fEpoch, err := params.ForkDataFromDigest(digest)
    _, fEpoch, err := forks.RetrieveForkDataFromDigest(digest, genesisValidatorsRoot)
    if err != nil {
        // Impossible condition that should
        // never be hit.
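For context on what MsgID feeds the recovered fork epoch into: the gossip message-id is the first 20 bytes of SHA256 over a 4-byte message domain plus the payload (with the topic mixed in from Altair onwards, as the tests below exercise). A self-contained sketch of the pre-Altair form:

package msgid

import "crypto/sha256"

// msgID hashes the domain (valid- or invalid-snappy) together with the
// payload and keeps the first 20 bytes, matching the pre-Altair rule
// exercised in TestMsgID_HashesCorrectly below.
func msgID(domain [4]byte, payload []byte) string {
    h := sha256.Sum256(append(domain[:], payload...))
    return string(h[:20])
}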
@@ -7,10 +7,10 @@ import (

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/crypto/hash"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/network/forks"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
    "github.com/golang/snappy"
    pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
@@ -18,27 +18,28 @@ import (

func TestMsgID_HashesCorrectly(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    clock := startup.NewClock(time.Now(), bytesutil.ToBytes32([]byte{'A'}))
    valRoot := clock.GenesisValidatorsRoot()
    d := params.ForkDigest(clock.CurrentEpoch())
    genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
    d, err := forks.CreateForkDigest(time.Now(), genesisValidatorsRoot)
    assert.NoError(t, err)
    tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
    invalidSnappy := [32]byte{'J', 'U', 'N', 'K'}
    pMsg := &pubsubpb.Message{Data: invalidSnappy[:], Topic: &tpc}
    hashedData := hash.Hash(append(params.BeaconConfig().MessageDomainInvalidSnappy[:], pMsg.Data...))
    msgID := string(hashedData[:20])
    assert.Equal(t, msgID, p2p.MsgID(valRoot[:], pMsg), "Got incorrect msg id")
    assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")

    validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
    enc := snappy.Encode(nil, validObj[:])
    nMsg := &pubsubpb.Message{Data: enc, Topic: &tpc}
    hashedData = hash.Hash(append(params.BeaconConfig().MessageDomainValidSnappy[:], validObj[:]...))
    msgID = string(hashedData[:20])
    assert.Equal(t, msgID, p2p.MsgID(valRoot[:], nMsg), "Got incorrect msg id")
    assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
}

func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    d, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
    genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
    d, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, genesisValidatorsRoot)
    assert.NoError(t, err)
    tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
    topicLen := uint64(len(tpc))
@@ -51,7 +52,7 @@ func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
    combinedObj = append(combinedObj, pMsg.Data...)
    hashedData := hash.Hash(combinedObj)
    msgID := string(hashedData[:20])
    assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], pMsg), "Got incorrect msg id")
    assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")

    validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
    enc := snappy.Encode(nil, validObj[:])
@@ -62,12 +63,13 @@ func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
    combinedObj = append(combinedObj, validObj[:]...)
    hashedData = hash.Hash(combinedObj)
    msgID = string(hashedData[:20])
    assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], nMsg), "Got incorrect msg id")
    assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
}

func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    d, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
    genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
    d, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, genesisValidatorsRoot)
    assert.NoError(t, err)
    tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
    topicLen := uint64(len(tpc))
@@ -80,7 +82,7 @@ func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
    combinedObj = append(combinedObj, pMsg.Data...)
    hashedData := hash.Hash(combinedObj)
    msgID := string(hashedData[:20])
    assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], pMsg), "Got incorrect msg id")
    assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")

    validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
    enc := snappy.Encode(nil, validObj[:])
@@ -91,7 +93,7 @@ func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
    combinedObj = append(combinedObj, validObj[:]...)
    hashedData = hash.Hash(combinedObj)
    msgID = string(hashedData[:20])
    assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], nMsg), "Got incorrect msg id")
    assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
}

func TestMsgID_WithNilTopic(t *testing.T) {
@@ -42,7 +42,7 @@ func (a *Assigner) freshPeers() ([]peer.ID, error) {
    if flags.Get().MinimumSyncPeers < required {
        required = flags.Get().MinimumSyncPeers
    }
    _, peers := a.ps.BestFinalized(-1, a.fc.FinalizedCheckpoint().Epoch)
    _, peers := a.ps.BestFinalized(params.BeaconConfig().MaxPeersToSync, a.fc.FinalizedCheckpoint().Epoch)
    if len(peers) < required {
        log.WithFields(logrus.Fields{
            "suitable": len(peers),
@@ -52,33 +52,27 @@ func (a *Assigner) freshPeers() ([]peer.ID, error) {
    return peers, nil
}

type AssignmentFilter func([]peer.ID) []peer.ID

// Assign uses the "BestFinalized" method to select the best peers that agree on a canonical block
// for the configured finalized epoch. At most `n` peers will be returned. The `busy` param can be used
// to filter out peers that we know we don't want to connect to, for instance if we are trying to limit
// the number of outbound requests to each peer from a given component.
func (a *Assigner) Assign(filter AssignmentFilter) ([]peer.ID, error) {
func (a *Assigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
    best, err := a.freshPeers()
    if err != nil {
        return nil, err
    }
    return filter(best), nil
    return pickBest(busy, n, best), nil
}

// NotBusy is a filter that returns a list of peer.IDs with len() <= n, which are not in the `busy` map.
// n == -1 will return all peers that are not busy.
func NotBusy(busy map[peer.ID]bool, n int) AssignmentFilter {
    return func(peers []peer.ID) []peer.ID {
        ps := make([]peer.ID, 0)
        for _, p := range peers {
            if n > 0 && len(ps) == n {
                return ps
            }
            if !busy[p] {
                ps = append(ps, p)
            }
func pickBest(busy map[peer.ID]bool, n int, best []peer.ID) []peer.ID {
    ps := make([]peer.ID, 0, n)
    for _, p := range best {
        if len(ps) == n {
            return ps
        }
        if !busy[p] {
            ps = append(ps, p)
        }
        return ps
    }
    return ps
}
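The diff above folds the NotBusy filter back into Assign(busy, n) with a private pickBest helper. A generic, self-contained version of the restored selection logic; note that with this shape n == 0 returns an empty slice immediately, while a negative n would return every non-busy peer (the capacity argument is omitted here so that a negative n stays safe):

package assign

// pickBest takes up to n entries from best, skipping any marked busy.
func pickBest[P comparable](busy map[P]bool, n int, best []P) []P {
    ps := make([]P, 0)
    for _, p := range best {
        if len(ps) == n {
            break
        }
        if !busy[p] {
            ps = append(ps, p)
        }
    }
    return ps
}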
@@ -18,9 +18,8 @@ func TestPickBest(t *testing.T) {
    expected []peer.ID
}{
    {
        name:     "don't limit",
        n:        0,
        expected: best,
        name: "",
        n:    0,
    },
    {
        name: "none busy",
@@ -89,8 +88,7 @@ func TestPickBest(t *testing.T) {
    if c.best == nil {
        c.best = best
    }
    filt := NotBusy(c.busy, c.n)
    pb := filt(c.best)
    pb := pickBest(c.busy, c.n, c.best)
    require.Equal(t, len(c.expected), len(pb))
    for i := range c.expected {
        require.Equal(t, c.expected[i], pb[i])
@@ -54,7 +54,7 @@ type PeerData struct {
    NextValidTime time.Time
    // Chain related data.
    MetaData                  metadata.Metadata
    ChainState                *ethpb.StatusV2
    ChainState                *ethpb.Status
    ChainStateLastUpdated     time.Time
    ChainStateValidationError error
    // Scorers internal data.
@@ -5,6 +5,7 @@ import (

    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/pkg/errors"
)

var _ Scorer = (*BadResponsesScorer)(nil)
@@ -131,14 +132,13 @@ func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) error {

// isBadPeerNoLock is a lock-free version of IsBadPeer.
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) error {
    // if peerData, ok := s.store.PeerData(pid); ok {
    // TODO: Remove this once out of devnet
    // if peerData.BadResponses >= s.config.Threshold {
    //     return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
    // }
    if peerData, ok := s.store.PeerData(pid); ok {
        if peerData.BadResponses >= s.config.Threshold {
            return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
        }

    // return nil
    // }
        return nil
    }

    return nil
}
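The scorer above re-enables the bad-responses threshold that had been commented out for devnet. A minimal, self-contained sketch of the same rule with stand-in types (the real scorer also takes a lock and decays counts over time):

package scorers

import "fmt"

type badResponsesScorer struct {
    threshold int
    counts    map[string]int
}

// isBadPeer reports an error once a tracked peer reaches the threshold;
// unknown peers are never bad.
func (s *badResponsesScorer) isBadPeer(pid string) error {
    if n, ok := s.counts[pid]; ok && n >= s.threshold {
        return fmt.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", n, s.threshold)
    }
    return nil
}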
@@ -1,6 +1,7 @@
package scorers_test

import (
"sort"
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
@@ -12,41 +13,39 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
)

// TODO: Uncomment when out of devnet
// func TestScorers_BadResponses_Score(t *testing.T) {
// const pid = "peer1"
func TestScorers_BadResponses_Score(t *testing.T) {
const pid = "peer1"

// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
ctx := t.Context()

// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: 4,
// },
// },
// })
// scorer := peerStatuses.Scorers().BadResponsesScorer()
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 4,
},
},
})
scorer := peerStatuses.Scorers().BadResponsesScorer()

// assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")

// scorer.Increment(pid)
// assert.NoError(t, scorer.IsBadPeer(pid))
// assert.Equal(t, -2.5, scorer.Score(pid))
scorer.Increment(pid)
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, -2.5, scorer.Score(pid))

// scorer.Increment(pid)
// assert.NoError(t, scorer.IsBadPeer(pid))
// assert.Equal(t, float64(-5), scorer.Score(pid))
scorer.Increment(pid)
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, float64(-5), scorer.Score(pid))

// scorer.Increment(pid)
// assert.NoError(t, scorer.IsBadPeer(pid))
// assert.Equal(t, float64(-7.5), scorer.Score(pid))
scorer.Increment(pid)
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, float64(-7.5), scorer.Score(pid))

// scorer.Increment(pid)
// assert.NotNil(t, scorer.IsBadPeer(pid))
// assert.Equal(t, -100.0, scorer.Score(pid))
// }
scorer.Increment(pid)
assert.NotNil(t, scorer.IsBadPeer(pid))
assert.Equal(t, -100.0, scorer.Score(pid))
}

func TestScorers_BadResponses_ParamsThreshold(t *testing.T) {
ctx := t.Context()
@@ -138,60 +137,56 @@ func TestScorers_BadResponses_Decay(t *testing.T) {
assert.Equal(t, 1, badResponses, "unexpected bad responses for pid3")
}

// TODO: Uncomment when out of devnet
// func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
ctx := t.Context()

// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{},
// })
// scorer := peerStatuses.Scorers().BadResponsesScorer()
// pid := peer.ID("peer1")
// assert.NoError(t, scorer.IsBadPeer(pid))
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{},
})
scorer := peerStatuses.Scorers().BadResponsesScorer()
pid := peer.ID("peer1")
assert.NoError(t, scorer.IsBadPeer(pid))

// peerStatuses.Add(nil, pid, nil, network.DirUnknown)
// assert.NoError(t, scorer.IsBadPeer(pid))
peerStatuses.Add(nil, pid, nil, network.DirUnknown)
assert.NoError(t, scorer.IsBadPeer(pid))

// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
// scorer.Increment(pid)
// if i == scorers.DefaultBadResponsesThreshold-1 {
// assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
// } else {
// assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
// }
// }
// }
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
scorer.Increment(pid)
if i == scorers.DefaultBadResponsesThreshold-1 {
assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
} else {
assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
}
}
}

// TODO: Uncomment when out of devnet
// func TestScorers_BadResponses_BadPeers(t *testing.T) {
// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
func TestScorers_BadResponses_BadPeers(t *testing.T) {
ctx := t.Context()

// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{},
// })
// scorer := peerStatuses.Scorers().BadResponsesScorer()
// pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
// for i := 0; i < len(pids); i++ {
// peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
// }
// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
// scorer.Increment(pids[1])
// scorer.Increment(pids[2])
// scorer.Increment(pids[4])
// }
// assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
// assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
// assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
// assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
// assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
// want := []peer.ID{pids[1], pids[2], pids[4]}
// badPeers := scorer.BadPeers()
// sort.Slice(badPeers, func(i, j int) bool {
// return badPeers[i] < badPeers[j]
// })
// assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
// }
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{},
})
scorer := peerStatuses.Scorers().BadResponsesScorer()
pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
for i := 0; i < len(pids); i++ {
peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
}
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
scorer.Increment(pids[1])
scorer.Increment(pids[2])
scorer.Increment(pids[4])
}
assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
want := []peer.ID{pids[1], pids[2], pids[4]}
badPeers := scorer.BadPeers()
sort.Slice(badPeers, func(i, j int) bool {
return badPeers[i] < badPeers[j]
})
assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
}

@@ -42,7 +42,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
},
check: func(scorer *scorers.GossipScorer) {
assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
_, _, topicMap, err := scorer.GossipData("peer1")
assert.NoError(t, err)
assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")

@@ -112,7 +112,7 @@ func (s *PeerStatusScorer) BadPeers() []peer.ID {
}

// SetPeerStatus sets chain state data for a given peer.
func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.StatusV2, validationError error) {
func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.Status, validationError error) {
s.store.Lock()
defer s.store.Unlock()

@@ -130,14 +130,14 @@ func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.StatusV2, v
// PeerStatus gets the chain state of the given remote peer.
// This can return nil if there is no known chain state for the peer.
// This will error if the peer does not exist.
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.StatusV2, error) {
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.Status, error) {
s.store.RLock()
defer s.store.RUnlock()
return s.peerStatusNoLock(pid)
}

// peerStatusNoLock lock-free version of PeerStatus.
func (s *PeerStatusScorer) peerStatusNoLock(pid peer.ID) (*pb.StatusV2, error) {
func (s *PeerStatusScorer) peerStatusNoLock(pid peer.ID) (*pb.Status, error) {
if peerData, ok := s.store.PeerData(pid); ok {
if peerData.ChainState == nil {
return nil, peerdata.ErrNoPeerStatus

@@ -35,7 +35,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
name: "existent bad peer",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(0)
scorer.SetPeerStatus("peer1", &pb.StatusV2{
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: 64,
}, p2ptypes.ErrWrongForkDigestVersion)
@@ -48,7 +48,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
name: "existent peer no head slot for the host node is known",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(0)
scorer.SetPeerStatus("peer1", &pb.StatusV2{
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: 64,
}, nil)
@@ -61,7 +61,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
name: "existent peer head is before ours",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(128)
scorer.SetPeerStatus("peer1", &pb.StatusV2{
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: 64,
}, nil)
@@ -75,12 +75,12 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
update: func(scorer *scorers.PeerStatusScorer) {
headSlot := primitives.Slot(128)
scorer.SetHeadSlot(headSlot)
scorer.SetPeerStatus("peer1", &pb.StatusV2{
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: headSlot + 64,
}, nil)
// Set another peer to a higher score.
scorer.SetPeerStatus("peer2", &pb.StatusV2{
scorer.SetPeerStatus("peer2", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: headSlot + 128,
}, nil)
@@ -95,7 +95,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
update: func(scorer *scorers.PeerStatusScorer) {
headSlot := primitives.Slot(128)
scorer.SetHeadSlot(headSlot)
scorer.SetPeerStatus("peer1", &pb.StatusV2{
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: headSlot + 64,
}, nil)
@@ -108,7 +108,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
name: "existent peer no max known slot",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(0)
scorer.SetPeerStatus("peer1", &pb.StatusV2{
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: 0,
}, nil)
@@ -141,7 +141,7 @@ func TestScorers_PeerStatus_IsBadPeer(t *testing.T) {
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))

peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.StatusV2{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid))
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
}
@@ -160,9 +160,9 @@ func TestScorers_PeerStatus_BadPeers(t *testing.T) {
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid3))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))

peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.StatusV2{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.StatusV2{}, nil)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.StatusV2{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.Status{}, nil)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid1))
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
@@ -179,12 +179,12 @@ func TestScorers_PeerStatus_PeerStatus(t *testing.T) {
})
status, err := peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1")
require.ErrorContains(t, peerdata.ErrPeerUnknown.Error(), err)
assert.Equal(t, (*pb.StatusV2)(nil), status)
assert.Equal(t, (*pb.Status)(nil), status)

peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer1", &pb.StatusV2{
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer1", &pb.Status{
HeadSlot: 128,
}, nil)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer2", &pb.StatusV2{
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer2", &pb.Status{
HeadSlot: 128,
}, p2ptypes.ErrInvalidEpoch)
status, err = peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1")

@@ -211,102 +211,99 @@ func TestScorers_Service_Score(t *testing.T) {
})
}

// TODO: Uncomment when out of devnet
// func TestScorers_Service_loop(t *testing.T) {
// ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
// defer cancel()
func TestScorers_Service_loop(t *testing.T) {
ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
defer cancel()

// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: 5,
// DecayInterval: 50 * time.Millisecond,
// },
// BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
// DecayInterval: 25 * time.Millisecond,
// Decay: 64,
// },
// },
// })
// s1 := peerStatuses.Scorers().BadResponsesScorer()
// s2 := peerStatuses.Scorers().BlockProviderScorer()
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 5,
DecayInterval: 50 * time.Millisecond,
},
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
DecayInterval: 25 * time.Millisecond,
Decay: 64,
},
},
})
s1 := peerStatuses.Scorers().BadResponsesScorer()
s2 := peerStatuses.Scorers().BlockProviderScorer()

// pid1 := peer.ID("peer1")
// peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
// for i := 0; i < s1.Params().Threshold+5; i++ {
// s1.Increment(pid1)
// }
// assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
pid1 := peer.ID("peer1")
peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
for i := 0; i < s1.Params().Threshold+5; i++ {
s1.Increment(pid1)
}
assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")

// s2.IncrementProcessedBlocks("peer1", 221)
// assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
s2.IncrementProcessedBlocks("peer1", 221)
assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))

// done := make(chan struct{}, 1)
// go func() {
// defer func() {
// done <- struct{}{}
// }()
// ticker := time.NewTicker(50 * time.Millisecond)
// defer ticker.Stop()
// for {
// select {
// case <-ticker.C:
// if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
// return
// }
// case <-ctx.Done():
// t.Error("Timed out")
// return
// }
// }
// }()
done := make(chan struct{}, 1)
go func() {
defer func() {
done <- struct{}{}
}()
ticker := time.NewTicker(50 * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
return
}
case <-ctx.Done():
t.Error("Timed out")
return
}
}
}()

// <-done
// assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
// assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
// }
<-done
assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
}

// TODO: Uncomment when out of devnet
// func TestScorers_Service_IsBadPeer(t *testing.T) {
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: 2,
// DecayInterval: 50 * time.Second,
// },
// },
// })
func TestScorers_Service_IsBadPeer(t *testing.T) {
peerStatuses := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 2,
DecayInterval: 50 * time.Second,
},
},
})

// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
// }
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
}

// TODO: Uncomment when out of devnet
// func TestScorers_Service_BadPeers(t *testing.T) {
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: 2,
// DecayInterval: 50 * time.Second,
// },
// },
// })
func TestScorers_Service_BadPeers(t *testing.T) {
peerStatuses := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 2,
DecayInterval: 50 * time.Second,
},
},
})

// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
// assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
// for _, pid := range []peer.ID{"peer1", "peer3"} {
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
// }
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
// assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
// }
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
for _, pid := range []peer.ID{"peer1", "peer3"} {
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
}
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}

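TestScorers_Service_loop, restored above, relies on the scorers' background decay: bad-response counters and block-provider stats shrink over time so that transient misbehaviour is eventually forgiven. Extending the badResponsesScorer sketch from earlier, a decay loop could look roughly like this (the interval handling and one-unit decay step are assumptions, not Prysm's actual decay rule):

import (
	"context"
	"time"
)

// startDecay decrements every peer's bad-response count once per interval
// until ctx is cancelled, mirroring the DecayInterval behaviour the test polls for.
func (s *badResponsesScorer) startDecay(ctx context.Context, interval time.Duration) {
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				s.mu.Lock()
				for pid, n := range s.responses {
					if n > 0 {
						s.responses[pid] = n - 1 // decay one unit per tick
					}
				}
				s.mu.Unlock()
			case <-ctx.Done():
				return
			}
		}
	}()
}
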
@@ -62,9 +62,7 @@ const (

const (
// CollocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
// TODO: Revert this when out of devnet.
// CollocationLimit = 5
CollocationLimit = 9999
CollocationLimit = 5

// Additional buffer beyond current peer limit, from which we can store the relevant peer statuses.
maxLimitBuffer = 150
@@ -207,14 +205,14 @@ func (p *Status) ENR(pid peer.ID) (*enr.Record, error) {
}

// SetChainState sets the chain state of the given remote peer.
func (p *Status) SetChainState(pid peer.ID, chainState *pb.StatusV2) {
func (p *Status) SetChainState(pid peer.ID, chainState *pb.Status) {
p.scorers.PeerStatusScorer().SetPeerStatus(pid, chainState, nil)
}

// ChainState gets the chain state of the given remote peer.
// This will error if the peer does not exist.
// This will error if there is no known chain state for the peer.
func (p *Status) ChainState(pid peer.ID) (*pb.StatusV2, error) {
func (p *Status) ChainState(pid peer.ID) (*pb.Status, error) {
return p.scorers.PeerStatusScorer().PeerStatus(pid)
}

@@ -772,7 +770,7 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
}

// Trim potential peers to at most maxPeers.
if maxPeers > 0 && len(potentialPIDs) > maxPeers {
if len(potentialPIDs) > maxPeers {
potentialPIDs = potentialPIDs[:maxPeers]
}

@@ -782,7 +780,6 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
// BestNonFinalized returns the highest known epoch, higher than ours,
// and is shared by at least minPeers.
func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
// Retrieve all connected peers.
connected := p.Connected()
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
ourHeadSlot := slotsPerEpoch.Mul(uint64(ourHeadEpoch))

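CollocationLimit drops back from the devnet value of 9999 to 5: at most five peer identities per IP (or IPv6 subnet) before additional peers from that address are treated as bad, which is what TestPeerIPTracker below exercises. A rough sketch of per-IP collocation counting — a simplification under assumed types, since Prysm's real tracker lives in the peer data store and also handles IPv6 subnets:

// collocationLimit mirrors the restored constant above.
const collocationLimit = 5

// ipTracker counts distinct peer IDs observed per IP.
type ipTracker struct {
	peersByIP map[string]map[string]struct{} // ip -> set of peer IDs
}

func newIPTracker() *ipTracker {
	return &ipTracker{peersByIP: make(map[string]map[string]struct{})}
}

// Add records pid at ip and reports whether that IP is now over the limit.
func (t *ipTracker) Add(ip, pid string) (overLimit bool) {
	set, ok := t.peersByIP[ip]
	if !ok {
		set = make(map[string]struct{})
		t.peersByIP[ip] = set
	}
	set[pid] = struct{}{}
	return len(set) > collocationLimit
}
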
@@ -2,6 +2,7 @@ package peers_test

import (
"crypto/rand"
"strconv"
"testing"
"time"

@@ -288,7 +289,7 @@ func TestPeerChainState(t *testing.T) {
require.NoError(t, err)

finalizedEpoch := primitives.Epoch(123)
p.SetChainState(id, &pb.StatusV2{FinalizedEpoch: finalizedEpoch})
p.SetChainState(id, &pb.Status{FinalizedEpoch: finalizedEpoch})

resChainState, err := p.ChainState(id)
require.NoError(t, err)
@@ -323,60 +324,59 @@ func TestPeerWithNilChainState(t *testing.T) {

resChainState, err := p.ChainState(id)
require.Equal(t, peerdata.ErrNoPeerStatus, err)
var nothing *pb.StatusV2
var nothing *pb.Status
require.Equal(t, resChainState, nothing)
}

// TODO: Uncomment when out of devnet
// func TestPeerBadResponses(t *testing.T) {
// maxBadResponses := 2
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: maxBadResponses,
// },
// },
// })
func TestPeerBadResponses(t *testing.T) {
maxBadResponses := 2
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
},
},
})

// id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
// require.NoError(t, err)
// {
// _, err := id.MarshalBinary()
// require.NoError(t, err)
// }
id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
require.NoError(t, err)
{
_, err := id.MarshalBinary()
require.NoError(t, err)
}

// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")

// address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
// require.NoError(t, err, "Failed to create address")
// direction := network.DirInbound
// p.Add(new(enr.Record), id, address, direction)
address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
require.NoError(t, err, "Failed to create address")
direction := network.DirInbound
p.Add(new(enr.Record), id, address, direction)

// scorer := p.Scorers().BadResponsesScorer()
// resBadResponses, err := scorer.Count(id)
// require.NoError(t, err)
// assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
scorer := p.Scorers().BadResponsesScorer()
resBadResponses, err := scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")

// scorer.Increment(id)
// resBadResponses, err = scorer.Count(id)
// require.NoError(t, err)
// assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")

// scorer.Increment(id)
// resBadResponses, err = scorer.Count(id)
// require.NoError(t, err)
// assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")

// scorer.Increment(id)
// resBadResponses, err = scorer.Count(id)
// require.NoError(t, err)
// assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
// }
scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
}

func TestAddMetaData(t *testing.T) {
maxBadResponses := 2
@@ -495,102 +495,100 @@ func TestPeerValidTime(t *testing.T) {
assert.Equal(t, numPeersConnected, len(p.Connected()), "Unexpected number of connected peers")
}

// TODO: Uncomment when out of devnet
// func TestPrune(t *testing.T) {
// maxBadResponses := 2
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: maxBadResponses,
// },
// },
// })
func TestPrune(t *testing.T) {
maxBadResponses := 2
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
},
},
})

// for i := 0; i < p.MaxPeerLimit()+100; i++ {
// if i%7 == 0 {
// // Peer added as disconnected.
// _ = addPeer(t, p, peers.PeerDisconnected)
// }
// // Peer added to peer handler.
// _ = addPeer(t, p, peers.PeerConnected)
// }
for i := 0; i < p.MaxPeerLimit()+100; i++ {
if i%7 == 0 {
// Peer added as disconnected.
_ = addPeer(t, p, peers.Disconnected)
}
// Peer added to peer handler.
_ = addPeer(t, p, peers.Connected)
}

// disPeers := p.Disconnected()
// firstPID := disPeers[0]
// secondPID := disPeers[1]
// thirdPID := disPeers[2]
disPeers := p.Disconnected()
firstPID := disPeers[0]
secondPID := disPeers[1]
thirdPID := disPeers[2]

// scorer := p.Scorers().BadResponsesScorer()
scorer := p.Scorers().BadResponsesScorer()

// // Make first peer a bad peer
// scorer.Increment(firstPID)
// scorer.Increment(firstPID)
// Make first peer a bad peer
scorer.Increment(firstPID)
scorer.Increment(firstPID)

// // Add bad response for p2.
// scorer.Increment(secondPID)
// Add bad response for p2.
scorer.Increment(secondPID)

// // Prune peers
// p.Prune()
// Prune peers
p.Prune()

// // Bad peer is expected to still be kept in handler.
// badRes, err := scorer.Count(firstPID)
// assert.NoError(t, err, "error is supposed to be nil")
// assert.Equal(t, 2, badRes, "Did not get expected amount")
// Bad peer is expected to still be kept in handler.
badRes, err := scorer.Count(firstPID)
assert.NoError(t, err, "error is supposed to be nil")
assert.Equal(t, 2, badRes, "Did not get expected amount")

// // Not so good peer is pruned away so that we can reduce the
// // total size of the handler.
// _, err = scorer.Count(secondPID)
// assert.ErrorContains(t, "peer unknown", err)
// Not so good peer is pruned away so that we can reduce the
// total size of the handler.
_, err = scorer.Count(secondPID)
assert.ErrorContains(t, "peer unknown", err)

// // Last peer has been removed.
// _, err = scorer.Count(thirdPID)
// assert.ErrorContains(t, "peer unknown", err)
// }
// Last peer has been removed.
_, err = scorer.Count(thirdPID)
assert.ErrorContains(t, "peer unknown", err)
}

// TODO: Uncomment when out of devnet
// func TestPeerIPTracker(t *testing.T) {
// resetCfg := features.InitWithReset(&features.Flags{
// EnablePeerScorer: false,
// })
// defer resetCfg()
// maxBadResponses := 2
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: maxBadResponses,
// },
// },
// })
func TestPeerIPTracker(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnablePeerScorer: false,
})
defer resetCfg()
maxBadResponses := 2
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
},
},
})

// badIP := "211.227.218.116"
// var badPeers []peer.ID
// for i := 0; i < peers.CollocationLimit+10; i++ {
// port := strconv.Itoa(3000 + i)
// addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
// if err != nil {
// t.Fatal(err)
// }
// badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.PeerConnectionState(ethpb.ConnectionState_DISCONNECTED)))
// }
// for _, pr := range badPeers {
// assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
// }
badIP := "211.227.218.116"
var badPeers []peer.ID
for i := 0; i < peers.CollocationLimit+10; i++ {
port := strconv.Itoa(3000 + i)
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
if err != nil {
t.Fatal(err)
}
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.ConnectionState(ethpb.ConnectionState_DISCONNECTED)))
}
for _, pr := range badPeers {
assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
}

// // Add in bad peers, so that our records are trimmed out
// // from the peer store.
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
// // Peer added to peer handler.
// pid := addPeer(t, p, peers.PeerDisconnected)
// p.Scorers().BadResponsesScorer().Increment(pid)
// }
// p.Prune()
// Add in bad peers, so that our records are trimmed out
// from the peer store.
for i := 0; i < p.MaxPeerLimit()+100; i++ {
// Peer added to peer handler.
pid := addPeer(t, p, peers.Disconnected)
p.Scorers().BadResponsesScorer().Increment(pid)
}
p.Prune()

// for _, pr := range badPeers {
// assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
// }
// }
for _, pr := range badPeers {
assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
}
}

func TestTrimmedOrderedPeers(t *testing.T) {
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
@@ -618,7 +616,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {

// Peer 1
pid1 := addPeer(t, p, peers.Connected)
p.SetChainState(pid1, &pb.StatusV2{
p.SetChainState(pid1, &pb.Status{
HeadSlot: 3 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 3,
FinalizedRoot: mockroot3[:],
@@ -626,7 +624,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {

// Peer 2
pid2 := addPeer(t, p, peers.Connected)
p.SetChainState(pid2, &pb.StatusV2{
p.SetChainState(pid2, &pb.Status{
HeadSlot: 4 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 4,
FinalizedRoot: mockroot4[:],
@@ -634,7 +632,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {

// Peer 3
pid3 := addPeer(t, p, peers.Connected)
p.SetChainState(pid3, &pb.StatusV2{
p.SetChainState(pid3, &pb.Status{
HeadSlot: 5 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 5,
FinalizedRoot: mockroot5[:],
@@ -642,7 +640,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {

// Peer 4
pid4 := addPeer(t, p, peers.Connected)
p.SetChainState(pid4, &pb.StatusV2{
p.SetChainState(pid4, &pb.Status{
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 2,
FinalizedRoot: mockroot2[:],
@@ -650,7 +648,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {

// Peer 5
pid5 := addPeer(t, p, peers.Connected)
p.SetChainState(pid5, &pb.StatusV2{
p.SetChainState(pid5, &pb.Status{
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 2,
FinalizedRoot: mockroot2[:],
@@ -1014,7 +1012,7 @@ func TestStatus_BestPeer(t *testing.T) {
},
})
for _, peerConfig := range tt.peers {
p.SetChainState(addPeer(t, p, peers.Connected), &pb.StatusV2{
p.SetChainState(addPeer(t, p, peers.Connected), &pb.Status{
FinalizedEpoch: peerConfig.finalizedEpoch,
HeadSlot: peerConfig.headSlot,
})
@@ -1041,7 +1039,7 @@ func TestBestFinalized_returnsMaxValue(t *testing.T) {
for i := 0; i <= maxPeers+100; i++ {
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
p.SetChainState(peer.ID(rune(i)), &pb.StatusV2{
p.SetChainState(peer.ID(rune(i)), &pb.Status{
FinalizedEpoch: 10,
})
}
@@ -1064,7 +1062,7 @@ func TestStatus_BestNonFinalized(t *testing.T) {
for i, headSlot := range peerSlots {
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
p.SetChainState(peer.ID(rune(i)), &pb.StatusV2{
p.SetChainState(peer.ID(rune(i)), &pb.Status{
HeadSlot: headSlot,
})
}
@@ -1087,17 +1085,17 @@ func TestStatus_CurrentEpoch(t *testing.T) {
})
// Peer 1
pid1 := addPeer(t, p, peers.Connected)
p.SetChainState(pid1, &pb.StatusV2{
p.SetChainState(pid1, &pb.Status{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
})
// Peer 2
pid2 := addPeer(t, p, peers.Connected)
p.SetChainState(pid2, &pb.StatusV2{
p.SetChainState(pid2, &pb.Status{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 5,
})
// Peer 3
pid3 := addPeer(t, p, peers.Connected)
p.SetChainState(pid3, &pb.StatusV2{
p.SetChainState(pid3, &pb.Status{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
})


@@ -40,7 +40,7 @@ const (
rSubD = 8 // random gossip target
)

var ErrInvalidTopic = errors.New("invalid topic format")
var errInvalidTopic = errors.New("invalid topic format")

// Specifies the fixed size context length.
const digestLength = 4
@@ -219,12 +219,12 @@ func convertTopicScores(topicMap map[string]*pubsub.TopicScoreSnapshot) map[stri
func ExtractGossipDigest(topic string) ([4]byte, error) {
// Ensure the topic prefix is correct.
if len(topic) < len(gossipTopicPrefix)+1 || topic[:len(gossipTopicPrefix)] != gossipTopicPrefix {
return [4]byte{}, ErrInvalidTopic
return [4]byte{}, errInvalidTopic
}
start := len(gossipTopicPrefix)
end := strings.Index(topic[start:], "/")
if end == -1 { // Ensure a topic suffix exists.
return [4]byte{}, ErrInvalidTopic
return [4]byte{}, errInvalidTopic
}
end += start
strDigest := topic[start:end]

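ExtractGossipDigest above pulls the 4-byte fork digest out of topic names shaped like /eth2/<8 hex chars>/<message name>/<encoding>. A standalone re-implementation of the same parsing, for illustration only (the real function returns the now-unexported errInvalidTopic and uses the package's gossipTopicPrefix constant):

import (
	"encoding/hex"
	"errors"
	"strings"
)

// digestFromTopic extracts the 4-byte digest between the first and second
// separators of an /eth2/... gossip topic, e.g.
// digestFromTopic("/eth2/deadbeef/beacon_block/ssz_snappy") -> [0xde 0xad 0xbe 0xef].
func digestFromTopic(topic string) ([4]byte, error) {
	var d [4]byte
	rest, ok := strings.CutPrefix(topic, "/eth2/")
	if !ok {
		return d, errors.New("invalid topic format")
	}
	end := strings.Index(rest, "/")
	if end == -1 {
		return d, errors.New("invalid topic format")
	}
	b, err := hex.DecodeString(rest[:end])
	if err != nil || len(b) != 4 {
		return d, errors.New("invalid topic format")
	}
	copy(d[:], b)
	return d, nil
}
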
@@ -1,12 +1,12 @@
package p2p

import (
"encoding/hex"
"fmt"
"strings"

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/network/forks"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
@@ -32,14 +32,6 @@ var _ pubsub.SubscriptionFilter = (*Service)(nil)
// (Note: BlobSidecar is not included in this list since it is superseded by DataColumnSidecar)
const pubsubSubscriptionRequestLimit = 500

func (s *Service) setAllForkDigests() {
entries := params.SortedNetworkScheduleEntries()
s.allForkDigests = make(map[[4]byte]struct{}, len(entries))
for _, entry := range entries {
s.allForkDigests[entry.ForkDigest] = struct{}{}
}
}

// CanSubscribe returns true if the topic is of interest and we could subscribe to it.
func (s *Service) CanSubscribe(topic string) bool {
if !s.isInitialized() {
@@ -56,18 +48,50 @@ func (s *Service) CanSubscribe(topic string) bool {
if parts[1] != "eth2" {
return false
}

var digest [4]byte
dl, err := hex.Decode(digest[:], []byte(parts[2]))
if err == nil && dl != 4 {
err = fmt.Errorf("expected 4 bytes, got %d", dl)
}
phase0ForkDigest, err := s.currentForkDigest()
if err != nil {
log.WithError(err).WithField("topic", topic).WithField("digest", parts[2]).Error("CanSubscribe failed to parse message")
log.WithError(err).Error("Could not determine fork digest")
return false
}
if _, ok := s.allForkDigests[digest]; !ok {
log.WithField("topic", topic).WithField("digest", fmt.Sprintf("%#x", digest)).Error("CanSubscribe failed to find digest in allForkDigests")
altairForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine altair fork digest")
return false
}
bellatrixForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Bellatrix fork digest")
return false
}
capellaForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Capella fork digest")
return false
}
denebForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Deneb fork digest")
return false
}
electraForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Electra fork digest")
return false
}
fuluForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().FuluForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Fulu fork digest")
return false
}
switch parts[2] {
case fmt.Sprintf("%x", phase0ForkDigest):
case fmt.Sprintf("%x", altairForkDigest):
case fmt.Sprintf("%x", bellatrixForkDigest):
case fmt.Sprintf("%x", capellaForkDigest):
case fmt.Sprintf("%x", denebForkDigest):
case fmt.Sprintf("%x", electraForkDigest):
case fmt.Sprintf("%x", fuluForkDigest):
default:
return false
}


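The rewritten CanSubscribe enumerates one digest per fork (phase0 through Fulu) and matches the topic's digest against each case; the deleted code instead precomputed a set of every scheduled fork digest (setAllForkDigests) and did a single map lookup. Both are membership tests over the same values. A sketch of the set-based variant, assuming the digests can be enumerated up front:

// buildDigestSet precomputes the acceptable fork digests so topic
// validation becomes one map lookup instead of a chain of cases.
func buildDigestSet(digests ...[4]byte) map[[4]byte]struct{} {
	set := make(map[[4]byte]struct{}, len(digests))
	for _, d := range digests {
		set[d] = struct{}{}
	}
	return set
}

// digestAllowed reports whether d is one of the precomputed digests.
func digestAllowed(set map[[4]byte]struct{}, d [4]byte) bool {
	_, ok := set[d]
	return ok
}

The map variant arguably ages better, since a new fork only changes the schedule that feeds the set rather than the validation code itself.
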
@@ -11,6 +11,8 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
@@ -19,11 +21,12 @@ import (

func TestService_CanSubscribe(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
currentFork := [4]byte{0x01, 0x02, 0x03, 0x04}
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
currentFork := params.GetNetworkScheduleEntry(clock.CurrentEpoch()).ForkDigest
digest := params.ForkDigest(clock.CurrentEpoch())
genesisTime := time.Now()
var valRoot [32]byte
digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
assert.NoError(t, err)
type test struct {
name string
topic string
@@ -105,14 +108,12 @@ func TestService_CanSubscribe(t *testing.T) {
}
tests = append(tests, tt)
}
valRoot := clock.GenesisValidatorsRoot()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
genesisValidatorsRoot: valRoot[:],
genesisTime: clock.GenesisTime(),
genesisTime: genesisTime,
}
s.setAllForkDigests()
if got := s.CanSubscribe(tt.topic); got != tt.want {
t.Errorf("CanSubscribe(%s) = %v, want %v", tt.topic, got, tt.want)
}
@@ -218,10 +219,11 @@ func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testi

func TestService_FilterIncomingSubscriptions(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
digest := params.ForkDigest(clock.CurrentEpoch())
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
genesisTime := time.Now()
var valRoot [32]byte
digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
assert.NoError(t, err)
type args struct {
id peer.ID
subs []*pubsubpb.RPC_SubOpts
@@ -318,14 +320,12 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
},
},
}
valRoot := clock.GenesisValidatorsRoot()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
genesisValidatorsRoot: valRoot[:],
genesisTime: clock.GenesisTime(),
genesisTime: genesisTime,
}
s.setAllForkDigests()
got, err := s.FilterIncomingSubscriptions(tt.args.id, tt.args.subs)
if (err != nil) != tt.wantErr {
t.Errorf("FilterIncomingSubscriptions() error = %v, wantErr %v", err, tt.wantErr)

@@ -108,8 +108,6 @@ const (
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1

// V2 RPC Topics
// RPCStatusTopicV2 defines the v1 topic for the status rpc method.
RPCStatusTopicV2 = protocolPrefix + StatusMessageName + SchemaVersionV2
// RPCBlocksByRangeTopicV2 defines v2 the topic for the blocks by range rpc method.
RPCBlocksByRangeTopicV2 = protocolPrefix + BeaconBlocksByRangeMessageName + SchemaVersionV2
// RPCBlocksByRootTopicV2 defines the v2 topic for the blocks by root rpc method.
@@ -132,7 +130,6 @@ var (
RPCTopicMappings = map[string]interface{}{
// RPC Status Message
RPCStatusTopicV1: new(pb.Status),
RPCStatusTopicV2: new(pb.StatusV2),

// RPC Goodbye Message
RPCGoodByeTopicV1: new(primitives.SSZUint64),
@@ -169,7 +166,7 @@ var (
RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),

// DataColumnSidecarsByRoot v1 Message
RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},
RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnsByRootIdentifiers),
}

// Maps all registered protocol prefixes.
@@ -204,7 +201,6 @@ var (

// Maps all the RPC messages which are to updated in fulu.
fuluMapping = map[string]string{
StatusMessageName: SchemaVersionV2,
MetadataMessageName: SchemaVersionV3,
}


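With the Status entry removed from fuluMapping, only Metadata changes schema version at the Fulu fork; Status stays on v1 and the dedicated StatusV2 topic disappears from RPCTopicMappings. The versioning mechanism itself is a name-to-version lookup layered over a default. A simplified model of that lookup — the function name and single-override map are assumptions for illustration, not the real TopicFromMessage:

// topicForMessage picks a schema version for an RPC message name,
// applying a per-fork override table when that fork is active.
func topicForMessage(name string, forkActive bool, overrides map[string]string) string {
	const protocolPrefix = "/eth2/beacon_chain/req"
	version := "/1" // default schema version
	if forkActive {
		if v, ok := overrides[name]; ok {
			version = v
		}
	}
	return protocolPrefix + "/" + name + version
}

For example, with overrides = map[string]string{"metadata": "/3"}, topicForMessage("metadata", true, overrides) yields "/eth2/beacon_chain/req/metadata/3", while "status" stays on "/1" — matching the test expectations above.
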
@@ -141,11 +141,6 @@ func TestTopicFromMessage_CorrectType(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/eth2/beacon_chain/req/beacon_blocks_by_range/2", topic)
|
||||
|
||||
// Modified in fulu fork.
|
||||
topic, err = TopicFromMessage(StatusMessageName, fuluForkEpoch)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/eth2/beacon_chain/req/status/2", topic)
|
||||
|
||||
// Modified both in altair and fulu fork.
|
||||
topic, err = TopicFromMessage(MetadataMessageName, fuluForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -14,10 +14,8 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
prysmnetwork "github.com/OffchainLabs/prysm/v6/network"
|
||||
@@ -90,11 +88,6 @@ type Service struct {
|
||||
genesisValidatorsRoot []byte
|
||||
activeValidatorCount uint64
|
||||
peerDisconnectionTime *cache.Cache
|
||||
custodyInfoMut sync.RWMutex // Protects custodyGroupCount and earliestAvailableSlot
|
||||
custodyGroupCount uint64
|
||||
earliestAvailableSlot primitives.Slot
|
||||
clock *startup.Clock
|
||||
allForkDigests map[[4]byte]struct{}
|
||||
}
|
||||
|
||||
// NewService initializes a new p2p service compatible with shared.Service interface. No
|
||||
@@ -199,7 +192,6 @@ func (s *Service) Start() {
|
||||
// Waits until the state is initialized via an event feed.
|
||||
// Used for fork-related data when connecting peers.
|
||||
s.awaitStateInitialized()
|
||||
s.setAllForkDigests()
|
||||
s.isPreGenesis = false
|
||||
|
||||
var relayNodes []string
|
||||
@@ -453,7 +445,7 @@ func (s *Service) awaitStateInitialized() {
|
||||
s.genesisTime = clock.GenesisTime()
|
||||
gvr := clock.GenesisValidatorsRoot()
|
||||
s.genesisValidatorsRoot = gvr[:]
|
||||
_, err = s.currentForkDigest()
|
||||
_, err = s.currentForkDigest() // initialize fork digest cache
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not initialize fork digest")
|
||||
}
|
||||
|
||||
@@ -10,9 +10,13 @@ import (
|
||||
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v6/time"
|
||||
@@ -332,16 +336,14 @@ func TestPeer_Disconnect(t *testing.T) {
|
||||
|
||||
func TestService_JoinLeaveTopic(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
|
||||
defer cancel()
|
||||
gs := startup.NewClockSynchronizer()
|
||||
s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}, ClockWaiter: gs})
|
||||
require.NoError(t, err)
|
||||
|
||||
go s.awaitStateInitialized()
|
||||
fd := initializeStateWithForkDigest(ctx, t, gs)
|
||||
s.setAllForkDigests()
|
||||
s.awaitStateInitialized()
|
||||
|
||||
assert.Equal(t, 0, len(s.joinedTopics))
|
||||
|
||||
@@ -370,58 +372,59 @@ func TestService_JoinLeaveTopic(t *testing.T) {
|
||||
// digest associated with that genesis event.
|
||||
func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.ClockSetter) [4]byte {
|
||||
gt := prysmTime.Now()
|
||||
gvr := params.BeaconConfig().GenesisValidatorsRoot
|
||||
clock := startup.NewClock(gt, gvr)
|
||||
require.NoError(t, gs.SetClock(clock))
|
||||
gvr := bytesutil.ToBytes32(bytesutil.PadTo([]byte("genesis validators root"), 32))
|
||||
require.NoError(t, gs.SetClock(startup.NewClock(gt, gvr)))
|
||||
|
||||
fd, err := forks.CreateForkDigest(gt, gvr[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(50 * time.Millisecond) // wait for pubsub filter to initialize.
|
||||
|
||||
return params.ForkDigest(clock.CurrentEpoch())
|
||||
return fd
|
||||
}
|
||||
|
||||
// TODO: Uncomment out of devnet.
|
||||
// func TestService_connectWithPeer(t *testing.T) {
|
||||
// params.SetupTestConfigCleanup(t)
|
||||
// tests := []struct {
|
||||
// name string
|
||||
// peers *peers.Status
|
||||
// info peer.AddrInfo
|
||||
// wantErr string
|
||||
// }{
|
||||
// {
|
||||
// name: "bad peer",
|
||||
// peers: func() *peers.Status {
|
||||
// ps := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
// ScorerParams: &scorers.Config{},
|
||||
// })
|
||||
// for i := 0; i < 10; i++ {
|
||||
// ps.Scorers().BadResponsesScorer().Increment("bad")
|
||||
// }
|
||||
// return ps
|
||||
// }(),
|
||||
// info: peer.AddrInfo{ID: "bad"},
|
||||
// wantErr: "bad peer",
|
||||
// },
|
||||
// }
|
||||
// for _, tt := range tests {
|
||||
// t.Run(tt.name, func(t *testing.T) {
|
||||
// h, _, _ := createHost(t, 34567)
|
||||
// defer func() {
|
||||
// if err := h.Close(); err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
// }()
|
||||
// ctx := t.Context()
|
||||
// s := &Service{
|
||||
// host: h,
|
||||
// peers: tt.peers,
|
||||
// }
|
||||
// err := s.connectWithPeer(ctx, tt.info)
|
||||
// if len(tt.wantErr) > 0 {
|
||||
// require.ErrorContains(t, tt.wantErr, err)
|
||||
// } else {
|
||||
// require.NoError(t, err)
|
||||
// }
|
||||
// })
|
||||
// }
|
||||
// }
|
||||
func TestService_connectWithPeer(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
peers *peers.Status
|
||||
info peer.AddrInfo
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "bad peer",
|
||||
peers: func() *peers.Status {
|
||||
ps := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
for i := 0; i < 10; i++ {
|
||||
ps.Scorers().BadResponsesScorer().Increment("bad")
|
||||
}
|
||||
return ps
|
||||
}(),
|
||||
info: peer.AddrInfo{ID: "bad"},
|
||||
wantErr: "bad peer",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
h, _, _ := createHost(t, 34567)
|
||||
defer func() {
|
||||
if err := h.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
ctx := t.Context()
|
||||
s := &Service{
|
||||
host: h,
|
||||
peers: tt.peers,
|
||||
}
|
||||
err := s.connectWithPeer(ctx, tt.info)
|
||||
if len(tt.wantErr) > 0 {
|
||||
require.ErrorContains(t, tt.wantErr, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,8 +27,6 @@ import (
    "github.com/prysmaticlabs/go-bitfield"
)

const nfdEnrKey = "nfd" // The ENR record key for "nfd" (Next Fork Digest).

var (
    attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
    syncCommsSubnetCount   = params.BeaconConfig().SyncCommitteeSubnetCount
@@ -392,23 +390,11 @@ func (s *Service) updateSubnetRecordWithMetadata(bitV bitfield.Bitvector64) {
// with a new value for a bitfield of subnets tracked. It also records
// the sync committee subnet in the enr. It also updates the node's
// metadata by increasing the sequence number and the subnets tracked by the node.
func (s *Service) updateSubnetRecordWithMetadataV2(
    bitVAtt bitfield.Bitvector64,
    bitVSync bitfield.Bitvector4,
    custodyGroupCount uint64,
) {
func (s *Service) updateSubnetRecordWithMetadataV2(bitVAtt bitfield.Bitvector64, bitVSync bitfield.Bitvector4) {
    entry := enr.WithEntry(attSubnetEnrKey, &bitVAtt)
    subEntry := enr.WithEntry(syncCommsSubnetEnrKey, &bitVSync)

    localNode := s.dv5Listener.LocalNode()
    localNode.Set(entry)
    localNode.Set(subEntry)

    if params.FuluEnabled() {
        custodyGroupCountEntry := enr.WithEntry(custodyGroupCountEnrKey, custodyGroupCount)
        localNode.Set(custodyGroupCountEntry)
    }

    s.dv5Listener.LocalNode().Set(entry)
    s.dv5Listener.LocalNode().Set(subEntry)
    s.metaData = wrapper.WrappedMetadataV1(&pb.MetaDataV1{
        SeqNumber: s.metaData.SequenceNumber() + 1,
        Attnets:   bitVAtt,
@@ -435,8 +421,10 @@ func (s *Service) updateSubnetRecordWithMetadataV3(
    localNode.Set(syncSubnetsEntry)
    localNode.Set(custodyGroupCountEntry)

    newSeqNumber := s.metaData.SequenceNumber() + 1

    s.metaData = wrapper.WrappedMetadataV2(&pb.MetaDataV2{
        SeqNumber: s.metaData.SequenceNumber() + 1,
        SeqNumber:         newSeqNumber,
        Attnets:           bitVAtt,
        Syncnets:          bitVSync,
        CustodyGroupCount: custodyGroupCount,
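The V2/V3 updaters above write bitfield entries into the node's discv5 record and bump the metadata sequence number. A minimal runnable sketch of the underlying go-ethereum ENR API, with an illustrative key name and an in-memory node DB (not Prysm's actual wiring):

package main

import (
    "fmt"
    "log"

    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/enr"
    "github.com/prysmaticlabs/go-bitfield"
)

func main() {
    key, err := crypto.GenerateKey()
    if err != nil {
        log.Fatal(err)
    }
    db, err := enode.OpenDB("") // in-memory node DB
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    localNode := enode.NewLocalNode(db, key)

    bitV := bitfield.NewBitvector64()
    bitV.SetBitAt(3, true) // pretend we track subnet 3

    // Setting an entry re-signs the record and raises its sequence number,
    // which is what prompts peers to re-fetch the updated record.
    localNode.Set(enr.WithEntry("attnets", &bitV)) // key name is illustrative
    fmt.Println("record seq:", localNode.Node().Seq())
}
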
@@ -3,6 +3,7 @@ package p2p

import (
    "context"
    "crypto/rand"
    "encoding/hex"
    "fmt"
    "testing"
    "time"
@@ -34,8 +35,17 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
    // find and connect to a node already subscribed to a specific subnet.
    // In our case: The node i is subscribed to subnet i, with i = 1, 2, 3

    const subnetCount = 3
    const minimumPeersPerSubnet = 1
    // Define the genesis validators root, to ensure everybody is on the same network.
    const (
        genesisValidatorRootStr = "0xdeadbeefcafecafedeadbeefcafecafedeadbeefcafecafedeadbeefcafecafe"
        subnetCount             = 3
        minimumPeersPerSubnet   = 1
    )

    genesisValidatorsRoot, err := hex.DecodeString(genesisValidatorRootStr[2:])
    require.NoError(t, err)

    // Create a context.
    ctx := t.Context()

    // Use shorter period for testing.
@@ -47,7 +57,6 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {

    // Create flags.
    params.SetupTestConfigCleanup(t)
    params.BeaconConfig().InitializeForkSchedule()
    gFlags := new(flags.GlobalFlags)
    gFlags.MinimumPeersPerSubnet = 1
    flags.Init(gFlags)
@@ -64,7 +73,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
    bootNodeService := &Service{
        cfg:                   &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000, DisableLivenessCheck: true, PingInterval: testPingInterval},
        genesisTime:           genesisTime,
        genesisValidatorsRoot: params.BeaconConfig().GenesisValidatorsRoot[:],
        genesisValidatorsRoot: genesisValidatorsRoot,
    }

    bootNodeForkDigest, err := bootNodeService.currentForkDigest()
@@ -98,7 +107,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
    require.NoError(t, err)

    service.genesisTime = genesisTime
    service.genesisValidatorsRoot = params.BeaconConfig().GenesisValidatorsRoot[:]
    service.genesisValidatorsRoot = genesisValidatorsRoot

    nodeForkDigest, err := service.currentForkDigest()
    require.NoError(t, err)
@@ -143,11 +152,11 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
        QUICPort: 3010,
    }

    service, err := NewService(t.Context(), cfg)
    service, err := NewService(ctx, cfg)
    require.NoError(t, err)

    service.genesisTime = genesisTime
    service.genesisValidatorsRoot = params.BeaconConfig().GenesisValidatorsRoot[:]
    service.genesisValidatorsRoot = genesisValidatorsRoot

    service.Start()
    defer func() {

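A side note on the decode step introduced above: hex.DecodeString needs the "0x" prefix sliced off by hand, while go-ethereum's hexutil.Decode is prefix-aware. Both yield the same 32 bytes; which to use is purely a style choice:

package main

import (
    "bytes"
    "encoding/hex"
    "fmt"
    "log"

    "github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
    const root = "0xdeadbeefcafecafedeadbeefcafecafedeadbeefcafecafedeadbeefcafecafe"
    a, err := hex.DecodeString(root[2:]) // manual prefix strip, as in the test
    if err != nil {
        log.Fatal(err)
    }
    b, err := hexutil.Decode(root) // prefix-aware alternative
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(bytes.Equal(a, b), len(a)) // true 32
}
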
@@ -15,7 +15,6 @@ go_library(
    importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing",
    visibility = [
        "//beacon-chain:__subpackages__",
        "//testing:__subpackages__",
    ],
    deps = [
        "//beacon-chain/core/peerdas:go_default_library",
@@ -25,7 +24,6 @@ go_library(
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/metadata:go_default_library",
        "//testing/require:go_default_library",
@@ -46,7 +44,6 @@ go_library(
        "@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
        "@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
        "@com_github_multiformats_go_multiaddr//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",

@@ -7,7 +7,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
    "github.com/ethereum/go-ethereum/p2p/enode"
@@ -197,22 +196,6 @@ func (*FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.Disc
    return true, 0
}

// EarliestAvailableSlot -- fake.
func (*FakeP2P) EarliestAvailableSlot() primitives.Slot {
    return 0
}

// CustodyGroupCount -- fake.
func (*FakeP2P) CustodyGroupCount() uint64 {
    return 0
}

// UpdateCustodyInfo -- fake.
func (s *FakeP2P) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
    return earliestAvailableSlot, custodyGroupCount, nil
}

// CustodyGroupCountFromPeer -- fake.
func (*FakeP2P) CustodyGroupCountFromPeer(peer.ID) uint64 {
    return 0
}

@@ -65,7 +65,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
        }
        m.peers.Add(createENR(), id0, ma0, network.DirInbound)
        m.peers.SetConnectionState(id0, peers.Connected)
        m.peers.SetChainState(id0, &pb.StatusV2{FinalizedEpoch: 10})
        m.peers.SetChainState(id0, &pb.Status{FinalizedEpoch: 10})
        id1, err := peer.Decode(MockRawPeerId1)
        if err != nil {
            log.WithError(err).Debug("Cannot decode")
@@ -76,7 +76,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
        }
        m.peers.Add(createENR(), id1, ma1, network.DirOutbound)
        m.peers.SetConnectionState(id1, peers.Connected)
        m.peers.SetChainState(id1, &pb.StatusV2{FinalizedEpoch: 11})
        m.peers.SetChainState(id1, &pb.Status{FinalizedEpoch: 11})
    }
    return m.peers
}

@@ -6,7 +6,6 @@ import (
    "bytes"
    "context"
    "fmt"
    "sync"
    "sync/atomic"
    "testing"
    "time"
@@ -18,7 +17,6 @@ import (
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
    "github.com/OffchainLabs/prysm/v6/testing/require"
@@ -35,7 +33,6 @@ import (
    "github.com/libp2p/go-libp2p/core/protocol"
    "github.com/libp2p/go-libp2p/p2p/transport/tcp"
    "github.com/multiformats/go-multiaddr"
    "github.com/pkg/errors"
    ssz "github.com/prysmaticlabs/fastssz"
    "github.com/sirupsen/logrus"
    "google.golang.org/protobuf/proto"
@@ -51,19 +48,16 @@ const (

// TestP2P represents a p2p implementation that can be used for testing.
type TestP2P struct {
    t                     *testing.T
    BHost                 host.Host
    EnodeID               enode.ID
    pubsub                *pubsub.PubSub
    joinedTopics          map[string]*pubsub.Topic
    BroadcastCalled       atomic.Bool
    DelaySend             bool
    Digest                [4]byte
    peers                 *peers.Status
    LocalMetadata         metadata.Metadata
    custodyInfoMut        sync.RWMutex
    earliestAvailableSlot primitives.Slot
    custodyGroupCount     uint64
    t               *testing.T
    BHost           host.Host
    EnodeID         enode.ID
    pubsub          *pubsub.PubSub
    joinedTopics    map[string]*pubsub.Topic
    BroadcastCalled atomic.Bool
    DelaySend       bool
    Digest          [4]byte
    peers           *peers.Status
    LocalMetadata   metadata.Metadata
}

// NewTestP2P initializes a new p2p test service.
@@ -467,48 +461,6 @@ func (*TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.Disc
    return true, 0
}

// CustodyGroupCount .
func (s *TestP2P) CustodyGroupCount() uint64 {
    s.custodyInfoMut.RLock()
    defer s.custodyInfoMut.RUnlock()

    return s.custodyGroupCount
}

// UpdateCustodyInfo updates the custody group count and earliest available slot
// if the new custody group count is greater than the stored one.
// It returns the (potentially updated) earliest available slot and custody group count.
func (s *TestP2P) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
    s.custodyInfoMut.Lock()
    defer s.custodyInfoMut.Unlock()

    if custodyGroupCount <= s.custodyGroupCount {
        return s.earliestAvailableSlot, s.custodyGroupCount, nil
    }

    if earliestAvailableSlot < s.earliestAvailableSlot {
        return 0, 0, errors.Errorf(
            "earliest available slot %d is less than the current one %d. (custody group count: %d, current one: %d)",
            earliestAvailableSlot, s.earliestAvailableSlot, custodyGroupCount, s.custodyGroupCount,
        )
    }

    s.earliestAvailableSlot = earliestAvailableSlot
    s.custodyGroupCount = custodyGroupCount

    return earliestAvailableSlot, custodyGroupCount, nil
}

// EarliestAvailableSlot .
func (s *TestP2P) EarliestAvailableSlot() primitives.Slot {
    s.custodyInfoMut.RLock()
    defer s.custodyInfoMut.RUnlock()

    return s.earliestAvailableSlot
}

// CustodyGroupCountFromPeer .
func (s *TestP2P) CustodyGroupCountFromPeer(pid peer.ID) uint64 {
    // By default, we assume the peer custodies the minimum number of groups.
    custodyRequirement := params.BeaconConfig().CustodyRequirement

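The removed UpdateCustodyInfo above encodes a monotonic rule: the custody group count may only grow, and when it grows the earliest available slot may not move backwards. A standalone sketch reproducing that rule outside the TestP2P type:

package main

import (
    "fmt"
    "sync"

    "github.com/pkg/errors"
)

// custodyInfo mirrors the update rule from the removed methods.
type custodyInfo struct {
    mu    sync.Mutex
    slot  uint64
    count uint64
}

func (c *custodyInfo) update(slot, count uint64) (uint64, uint64, error) {
    c.mu.Lock()
    defer c.mu.Unlock()
    if count <= c.count {
        // No increase: keep the stored values.
        return c.slot, c.count, nil
    }
    if slot < c.slot {
        return 0, 0, errors.Errorf("earliest available slot %d is less than the current one %d", slot, c.slot)
    }
    c.slot, c.count = slot, count
    return slot, count, nil
}

func main() {
    c := &custodyInfo{}
    fmt.Println(c.update(5, 8))  // 5 8 <nil>: count grew, accepted
    fmt.Println(c.update(2, 16)) // error: slot moved backwards
    fmt.Println(c.update(9, 4))  // 5 8 <nil>: count did not grow, ignored
}
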
@@ -206,8 +206,8 @@ func (s BlobSidecarsByRootReq) Swap(i, j int) {
}

// Len is the number of elements in the collection.
func (s *BlobSidecarsByRootReq) Len() int {
    return len(*s)
func (s BlobSidecarsByRootReq) Len() int {
    return len(s)
}

// ====================================

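The Len change above switches from a pointer receiver to a value receiver, matching Swap. That is safe for sort.Interface on slice types, since Swap mutates the shared backing array even through a copy of the slice header. A minimal illustration with a hypothetical Roots type:

package main

import (
    "fmt"
    "sort"
)

// Roots mirrors the value-receiver pattern adopted above.
type Roots [][32]byte

func (s Roots) Len() int           { return len(s) }
func (s Roots) Less(i, j int) bool { return string(s[i][:]) < string(s[j][:]) }
func (s Roots) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func main() {
    r := Roots{{2}, {1}, {3}}
    sort.Sort(r) // value receivers throughout, yet the slice is reordered
    fmt.Println(r[0][0], r[1][0], r[2][0]) // 1 2 3
}
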
@@ -130,6 +130,7 @@ func (s *Service) rewardsEndpoints(blocker lookup.Blocker, stater lookup.Stater,
        name: namespace + ".BlockRewards",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.BlockRewards,
        methods: []string{http.MethodGet},
@@ -140,6 +141,7 @@ func (s *Service) rewardsEndpoints(blocker lookup.Blocker, stater lookup.Stater,
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.AttestationRewards,
        methods: []string{http.MethodPost},
@@ -150,6 +152,7 @@ func (s *Service) rewardsEndpoints(blocker lookup.Blocker, stater lookup.Stater,
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.SyncCommitteeRewards,
        methods: []string{http.MethodPost},
@@ -172,6 +175,7 @@ func (s *Service) builderEndpoints(stater lookup.Stater) []endpoint {
        name: namespace + ".ExpectedWithdrawals",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.ExpectedWithdrawals,
        methods: []string{http.MethodGet},
@@ -194,6 +198,7 @@ func (s *Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
        name: namespace + ".Blobs",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.Blobs,
        methods: []string{http.MethodGet},
@@ -237,6 +242,7 @@ func (s *Service) validatorEndpoints(
        name: namespace + ".GetAggregateAttestation",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetAggregateAttestation,
        methods: []string{http.MethodGet},
@@ -246,6 +252,7 @@ func (s *Service) validatorEndpoints(
        name: namespace + ".GetAggregateAttestationV2",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetAggregateAttestationV2,
        methods: []string{http.MethodGet},
@@ -256,6 +263,7 @@ func (s *Service) validatorEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.SubmitContributionAndProofs,
        methods: []string{http.MethodPost},
@@ -267,6 +275,7 @@ func (s *Service) validatorEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.SubmitAggregateAndProofs,
        methods: []string{http.MethodPost},
@@ -277,6 +286,7 @@ func (s *Service) validatorEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.SubmitAggregateAndProofsV2,
        methods: []string{http.MethodPost},
@@ -286,6 +296,7 @@ func (s *Service) validatorEndpoints(
        name: namespace + ".ProduceSyncCommitteeContribution",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.ProduceSyncCommitteeContribution,
        methods: []string{http.MethodGet},
@@ -296,6 +307,7 @@ func (s *Service) validatorEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.SubmitSyncCommitteeSubscription,
        methods: []string{http.MethodPost},
@@ -306,6 +318,7 @@ func (s *Service) validatorEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.SubmitBeaconCommitteeSubscription,
        methods: []string{http.MethodPost},
@@ -315,6 +328,7 @@ func (s *Service) validatorEndpoints(
        name: namespace + ".GetAttestationData",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetAttestationData,
        methods: []string{http.MethodGet},
@@ -325,6 +339,7 @@ func (s *Service) validatorEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.RegisterValidator,
        methods: []string{http.MethodPost},
@@ -335,6 +350,7 @@ func (s *Service) validatorEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetAttesterDuties,
        methods: []string{http.MethodPost},
@@ -344,6 +360,7 @@ func (s *Service) validatorEndpoints(
        name: namespace + ".GetProposerDuties",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetProposerDuties,
        methods: []string{http.MethodGet},
@@ -354,6 +371,7 @@ func (s *Service) validatorEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetSyncCommitteeDuties,
        methods: []string{http.MethodPost},
@@ -364,6 +382,7 @@ func (s *Service) validatorEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.PrepareBeaconProposer,
        methods: []string{http.MethodPost},
@@ -374,6 +393,7 @@ func (s *Service) validatorEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetLiveness,
        methods: []string{http.MethodPost},
@@ -383,6 +403,7 @@ func (s *Service) validatorEndpoints(
        name: namespace + ".ProduceBlockV3",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.ProduceBlockV3,
        methods: []string{http.MethodGet},
@@ -429,6 +450,7 @@ func (s *Service) nodeEndpoints() []endpoint {
        name: namespace + ".GetSyncStatus",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetSyncStatus,
        methods: []string{http.MethodGet},
@@ -438,6 +460,7 @@ func (s *Service) nodeEndpoints() []endpoint {
        name: namespace + ".GetIdentity",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetIdentity,
        methods: []string{http.MethodGet},
@@ -447,6 +470,7 @@ func (s *Service) nodeEndpoints() []endpoint {
        name: namespace + ".GetPeer",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetPeer,
        methods: []string{http.MethodGet},
@@ -456,6 +480,7 @@ func (s *Service) nodeEndpoints() []endpoint {
        name: namespace + ".GetPeers",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetPeers,
        methods: []string{http.MethodGet},
@@ -465,6 +490,7 @@ func (s *Service) nodeEndpoints() []endpoint {
        name: namespace + ".GetPeerCount",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetPeerCount,
        methods: []string{http.MethodGet},
@@ -474,6 +500,7 @@ func (s *Service) nodeEndpoints() []endpoint {
        name: namespace + ".GetVersion",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetVersion,
        methods: []string{http.MethodGet},
@@ -483,6 +510,7 @@ func (s *Service) nodeEndpoints() []endpoint {
        name: namespace + ".GetHealth",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetHealth,
        methods: []string{http.MethodGet},
@@ -533,6 +561,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetCommittees",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetCommittees,
        methods: []string{http.MethodGet},
@@ -542,6 +571,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetStateFork",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetStateFork,
        methods: []string{http.MethodGet},
@@ -551,6 +581,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetStateRoot",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetStateRoot,
        methods: []string{http.MethodGet},
@@ -560,6 +591,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetSyncCommittees",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetSyncCommittees,
        methods: []string{http.MethodGet},
@@ -569,6 +601,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetRandao",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetRandao,
        methods: []string{http.MethodGet},
@@ -580,6 +613,7 @@ func (s *Service) beaconEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.PublishBlock,
        methods: []string{http.MethodPost},
@@ -591,6 +625,7 @@ func (s *Service) beaconEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.PublishBlindedBlock,
        methods: []string{http.MethodPost},
@@ -601,6 +636,7 @@ func (s *Service) beaconEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.PublishBlockV2,
        methods: []string{http.MethodPost},
@@ -611,6 +647,7 @@ func (s *Service) beaconEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.PublishBlindedBlockV2,
        methods: []string{http.MethodPost},
@@ -620,6 +657,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetBlockV2",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetBlockV2,
        methods: []string{http.MethodGet},
@@ -630,6 +668,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetBlockAttestations",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetBlockAttestations,
        methods: []string{http.MethodGet},
@@ -639,6 +678,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetBlockAttestationsV2",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetBlockAttestationsV2,
        methods: []string{http.MethodGet},
@@ -648,6 +688,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetBlindedBlock",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetBlindedBlock,
        methods: []string{http.MethodGet},
@@ -657,6 +698,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetBlockRoot",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetBlockRoot,
        methods: []string{http.MethodGet},
@@ -667,6 +709,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".ListAttestations",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.ListAttestations,
        methods: []string{http.MethodGet},
@@ -676,6 +719,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".ListAttestationsV2",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.ListAttestationsV2,
        methods: []string{http.MethodGet},
@@ -686,6 +730,7 @@ func (s *Service) beaconEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.SubmitAttestations,
        methods: []string{http.MethodPost},
@@ -696,6 +741,7 @@ func (s *Service) beaconEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.SubmitAttestationsV2,
        methods: []string{http.MethodPost},
@@ -705,6 +751,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".ListVoluntaryExits",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.ListVoluntaryExits,
        methods: []string{http.MethodGet},
@@ -715,6 +762,7 @@ func (s *Service) beaconEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.SubmitVoluntaryExit,
        methods: []string{http.MethodPost},
@@ -725,6 +773,7 @@ func (s *Service) beaconEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.SubmitSyncCommitteeSignatures,
        methods: []string{http.MethodPost},
@@ -734,6 +783,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".ListBLSToExecutionChanges",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.ListBLSToExecutionChanges,
        methods: []string{http.MethodGet},
@@ -744,6 +794,7 @@ func (s *Service) beaconEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.SubmitBLSToExecutionChanges,
        methods: []string{http.MethodPost},
@@ -754,6 +805,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetAttesterSlashings",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetAttesterSlashings,
        methods: []string{http.MethodGet},
@@ -763,6 +815,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetAttesterSlashingsV2",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetAttesterSlashingsV2,
        methods: []string{http.MethodGet},
@@ -773,6 +826,7 @@ func (s *Service) beaconEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.SubmitAttesterSlashings,
        methods: []string{http.MethodPost},
@@ -783,6 +837,7 @@ func (s *Service) beaconEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.SubmitAttesterSlashingsV2,
        methods: []string{http.MethodPost},
@@ -792,6 +847,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetProposerSlashings",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetProposerSlashings,
        methods: []string{http.MethodGet},
@@ -802,6 +858,7 @@ func (s *Service) beaconEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.SubmitProposerSlashing,
        methods: []string{http.MethodPost},
@@ -811,6 +868,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetBlockHeaders",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetBlockHeaders,
        methods: []string{http.MethodGet},
@@ -820,6 +878,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetBlockHeader",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetBlockHeader,
        methods: []string{http.MethodGet},
@@ -829,6 +888,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetGenesis",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetGenesis,
        methods: []string{http.MethodGet},
@@ -838,6 +898,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetFinalityCheckpoints",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetFinalityCheckpoints,
        methods: []string{http.MethodGet},
@@ -848,6 +909,7 @@ func (s *Service) beaconEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetValidators,
        methods: []string{http.MethodGet, http.MethodPost},
@@ -857,6 +919,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetValidator",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetValidator,
        methods: []string{http.MethodGet},
@@ -867,6 +930,7 @@ func (s *Service) beaconEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetValidatorBalances,
        methods: []string{http.MethodGet, http.MethodPost},
@@ -887,6 +951,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetDepositSnapshot",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetDepositSnapshot,
        methods: []string{http.MethodGet},
@@ -896,6 +961,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetPendingDeposits",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetPendingDeposits,
        methods: []string{http.MethodGet},
@@ -914,6 +980,7 @@ func (s *Service) beaconEndpoints(
        name: namespace + ".GetPendingPartialWithdrawals",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetPendingPartialWithdrawals,
        methods: []string{http.MethodGet},
@@ -939,6 +1006,7 @@ func (*Service) configEndpoints() []endpoint {
        name: namespace + ".GetDepositContract",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: config.GetDepositContract,
        methods: []string{http.MethodGet},
@@ -948,6 +1016,7 @@ func (*Service) configEndpoints() []endpoint {
        name: namespace + ".GetForkSchedule",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: config.GetForkSchedule,
        methods: []string{http.MethodGet},
@@ -957,6 +1026,7 @@ func (*Service) configEndpoints() []endpoint {
        name: namespace + ".GetSpec",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: config.GetSpec,
        methods: []string{http.MethodGet},
@@ -976,6 +1046,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
        name: namespace + ".GetLightClientBootstrap",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetLightClientBootstrap,
        methods: []string{http.MethodGet},
@@ -985,6 +1056,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
        name: namespace + ".GetLightClientUpdatesByRange",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetLightClientUpdatesByRange,
        methods: []string{http.MethodGet},
@@ -994,6 +1066,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
        name: namespace + ".GetLightClientFinalityUpdate",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetLightClientFinalityUpdate,
        methods: []string{http.MethodGet},
@@ -1003,6 +1076,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
        name: namespace + ".GetLightClientOptimisticUpdate",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetLightClientOptimisticUpdate,
        methods: []string{http.MethodGet},
@@ -1029,6 +1103,7 @@ func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
        name: namespace + ".GetBeaconStateV2",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetBeaconStateV2,
        methods: []string{http.MethodGet},
@@ -1038,6 +1113,7 @@ func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
        name: namespace + ".GetForkChoiceHeadsV2",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetForkChoiceHeadsV2,
        methods: []string{http.MethodGet},
@@ -1047,6 +1123,7 @@ func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
        name: namespace + ".GetForkChoice",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetForkChoice,
        methods: []string{http.MethodGet},
@@ -1106,6 +1183,7 @@ func (s *Service) prysmBeaconEndpoints(
        name: namespace + ".GetWeakSubjectivity",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetWeakSubjectivity,
        methods: []string{http.MethodGet},
@@ -1115,6 +1193,7 @@ func (s *Service) prysmBeaconEndpoints(
        name: namespace + ".GetValidatorCount",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetValidatorCount,
        methods: []string{http.MethodGet},
@@ -1124,6 +1203,7 @@ func (s *Service) prysmBeaconEndpoints(
        name: namespace + ".GetValidatorCount",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetValidatorCount,
        methods: []string{http.MethodGet},
@@ -1134,6 +1214,7 @@ func (s *Service) prysmBeaconEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetIndividualVotes,
        methods: []string{http.MethodPost},
@@ -1143,6 +1224,7 @@ func (s *Service) prysmBeaconEndpoints(
        name: namespace + ".GetChainHead",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetChainHead,
        methods: []string{http.MethodGet},
@@ -1153,6 +1235,7 @@ func (s *Service) prysmBeaconEndpoints(
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.PublishBlobs,
        methods: []string{http.MethodPost},
@@ -1180,6 +1263,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
        name: namespace + ".ListTrustedPeer",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.ListTrustedPeer,
        methods: []string{http.MethodGet},
@@ -1189,6 +1273,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
        name: namespace + ".ListTrustedPeer",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.ListTrustedPeer,
        methods: []string{http.MethodGet},
@@ -1199,6 +1284,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.AddTrustedPeer,
        methods: []string{http.MethodPost},
@@ -1209,6 +1295,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.AddTrustedPeer,
        methods: []string{http.MethodPost},
@@ -1218,6 +1305,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
        name: namespace + ".RemoveTrustedPeer",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.RemoveTrustedPeer,
        methods: []string{http.MethodDelete},
@@ -1227,6 +1315,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
        name: namespace + ".RemoveTrustedPeer",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.RemoveTrustedPeer,
        methods: []string{http.MethodDelete},
@@ -1249,6 +1338,7 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetPerformance,
        methods: []string{http.MethodPost},
@@ -1259,6 +1349,7 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
        middleware: []middleware.Middleware{
            middleware.ContentTypeHandler([]string{api.JsonMediaType}),
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetPerformance,
        methods: []string{http.MethodPost},
@@ -1268,6 +1359,7 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
        name: namespace + ".GetParticipation",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetParticipation,
        methods: []string{http.MethodGet},
@@ -1277,6 +1369,7 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
        name: namespace + ".GetActiveSetChanges",
        middleware: []middleware.Middleware{
            middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
            middleware.AcceptEncodingHeaderHandler(),
        },
        handler: server.GetActiveSetChanges,
        methods: []string{http.MethodGet},

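Every hunk above threads one more middleware (AcceptEncodingHeaderHandler) into the per-endpoint chains. A generic sketch of the wrapping pattern these tables compose; the type and function names here are illustrative, not Prysm's exact middleware API:

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
)

// Middleware matches the general shape the endpoint tables compose.
type Middleware func(http.Handler) http.Handler

// chain wraps h so the first-listed middleware runs first, mirroring the
// top-to-bottom reading order of the middleware slices in the diff.
func chain(h http.Handler, mws ...Middleware) http.Handler {
    for i := len(mws) - 1; i >= 0; i-- {
        h = mws[i](h)
    }
    return h
}

// acceptEncoding stands in for AcceptEncodingHeaderHandler: it inspects the
// Accept-Encoding header before the wrapped handler runs.
func acceptEncoding(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        fmt.Println("Accept-Encoding:", r.Header.Get("Accept-Encoding"))
        next.ServeHTTP(w, r)
    })
}

func main() {
    h := chain(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
        w.WriteHeader(http.StatusOK)
    }), acceptEncoding)

    req := httptest.NewRequest(http.MethodGet, "/eth/v1/node/health", nil)
    req.Header.Set("Accept-Encoding", "gzip")
    rec := httptest.NewRecorder()
    h.ServeHTTP(rec, req)
    fmt.Println("status:", rec.Code) // 200
}
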
@@ -1103,9 +1103,9 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
        require.NoError(t, err)
        require.Equal(t, 1, len(msgsInPool))
        assert.Equal(t, primitives.Slot(1), msgsInPool[0].Slot)
        assert.Equal(t, "0xbacd20f09da907734434f052bd4c9503aa16bab1960e89ea20610d08d064481c", hexutil.Encode(msgsInPool[0].BlockRoot))
        assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(msgsInPool[0].BlockRoot))
        assert.Equal(t, primitives.ValidatorIndex(1), msgsInPool[0].ValidatorIndex)
        assert.Equal(t, "0xb591bd4ca7d745b6e027879645d7c014fecb8c58631af070f7607acc0c1c948a5102a33267f0e4ba41a85b254b07df91185274375b2e6436e37e81d2fd46cb3751f5a6c86efb7499c1796c0c17e122a54ac067bb0f5ff41f3241659cceb0c21c", hexutil.Encode(msgsInPool[0].Signature))
        assert.Equal(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505", hexutil.Encode(msgsInPool[0].Signature))
        assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
    })
    t.Run("multiple", func(t *testing.T) {
@@ -2497,23 +2497,23 @@ var (
    singleSyncCommitteeMsg = `[
      {
        "slot": "1",
        "beacon_block_root": "0xbacd20f09da907734434f052bd4c9503aa16bab1960e89ea20610d08d064481c",
        "beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
        "validator_index": "1",
        "signature": "0xb591bd4ca7d745b6e027879645d7c014fecb8c58631af070f7607acc0c1c948a5102a33267f0e4ba41a85b254b07df91185274375b2e6436e37e81d2fd46cb3751f5a6c86efb7499c1796c0c17e122a54ac067bb0f5ff41f3241659cceb0c21c"
        "signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
      }
    ]`
    multipleSyncCommitteeMsg = `[
      {
        "slot": "1",
        "beacon_block_root": "0xbacd20f09da907734434f052bd4c9503aa16bab1960e89ea20610d08d064481c",
        "beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
        "validator_index": "1",
        "signature": "0xb591bd4ca7d745b6e027879645d7c014fecb8c58631af070f7607acc0c1c948a5102a33267f0e4ba41a85b254b07df91185274375b2e6436e37e81d2fd46cb3751f5a6c86efb7499c1796c0c17e122a54ac067bb0f5ff41f3241659cceb0c21c"
        "signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
      },
      {
        "slot": "2",
        "beacon_block_root": "0x2757f6fd8590925cd000a86a3e543f98a93eae23781783a33e34504729a8ad0c",
        "beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
        "validator_index": "1",
        "signature": "0x99dfe11b6c8b306d2c72eb891926d37922d226ea8e1e7484d6c30fab746494f192b0daa3e40c13f1e335b35238f3362c113455a329b1fab0bc500bc47f643786f49e151d5b5052afb51af57ba5aa34a6051dc90ee4de83a26eb54a895061d89a"
        "signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
      }
    ]`
    // signature is invalid
@@ -2523,18 +2523,6 @@ var (
        "beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
        "validator_index": "1",
        "signature": "foo"
      },
      {
        "slot": "1121",
        "beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
        "validator_index": "1",
        "signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
      },
      {
        "slot": "1121",
        "beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
        "validator_index": "2",
        "signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
      }
    ]`
    // signatures are invalid

@@ -9,6 +9,7 @@ go_library(
        "//api/server/structs:go_default_library",
        "//config/params:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//network/forks:go_default_library",
        "//network/httputil:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
@@ -23,6 +24,8 @@ go_test(
        "//api/server/structs:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//network/forks:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",

@@ -11,6 +11,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/api/server/structs"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
    "github.com/OffchainLabs/prysm/v6/network/forks"
    "github.com/OffchainLabs/prysm/v6/network/httputil"
    "github.com/ethereum/go-ethereum/common/hexutil"
    log "github.com/sirupsen/logrus"
@@ -34,25 +35,34 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
    _, span := trace.StartSpan(r.Context(), "config.GetForkSchedule")
    defer span.End()

    schedule := params.SortedForkSchedule()
    data := make([]*structs.Fork, 0, len(schedule))
    schedule := params.BeaconConfig().ForkVersionSchedule
    if len(schedule) == 0 {
        httputil.WriteJson(w, &structs.GetForkScheduleResponse{
            Data: data,
            Data: make([]*structs.Fork, 0),
        })
        return
    }
    previous := schedule[0]
    for _, entry := range schedule {
        data = append(data, &structs.Fork{
            PreviousVersion: hexutil.Encode(previous.ForkVersion[:]),
            CurrentVersion:  hexutil.Encode(entry.ForkVersion[:]),
            Epoch:           fmt.Sprintf("%d", entry.Epoch),
        })
        previous = entry

    versions := forks.SortedForkVersions(schedule)
    chainForks := make([]*structs.Fork, len(schedule))
    var previous, current []byte
    for i, v := range versions {
        if i == 0 {
            previous = params.BeaconConfig().GenesisForkVersion
        } else {
            previous = current
        }
        copyV := v
        current = copyV[:]
        chainForks[i] = &structs.Fork{
            PreviousVersion: hexutil.Encode(previous),
            CurrentVersion:  hexutil.Encode(current),
            Epoch:           fmt.Sprintf("%d", schedule[v]),
        }
    }

    httputil.WriteJson(w, &structs.GetForkScheduleResponse{
        Data: data,
        Data: chainForks,
    })
}

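The rewritten handler above orders the fork-version map by activation epoch before pairing previous/current versions. A standalone sketch of what forks.SortedForkVersions does, with illustrative map values:

package main

import (
    "fmt"
    "sort"
)

func sortedForkVersions(schedule map[[4]byte]uint64) [][4]byte {
    versions := make([][4]byte, 0, len(schedule))
    for v := range schedule {
        versions = append(versions, v)
    }
    // Order by activation epoch so PreviousVersion/CurrentVersion pairs line up.
    sort.Slice(versions, func(i, j int) bool {
        return schedule[versions[i]] < schedule[versions[j]]
    })
    return versions
}

func main() {
    schedule := map[[4]byte]uint64{
        {2, 0, 0, 0}: 200,
        {1, 0, 0, 0}: 100,
        {3, 0, 0, 0}: 300,
    }
    for _, v := range sortedForkVersions(schedule) {
        fmt.Printf("version %#x at epoch %d\n", v, schedule[v])
    }
}
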
@@ -13,6 +13,8 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@@ -590,34 +592,43 @@ func TestGetSpec(t *testing.T) {

func TestForkSchedule_Ok(t *testing.T) {
	t.Run("ok", func(t *testing.T) {
		genesisForkVersion := []byte("Genesis")
		firstForkVersion, firstForkEpoch := []byte("Firs"), primitives.Epoch(100)
		secondForkVersion, secondForkEpoch := []byte("Seco"), primitives.Epoch(200)
		thirdForkVersion, thirdForkEpoch := []byte("Thir"), primitives.Epoch(300)

		params.SetupTestConfigCleanup(t)
		config := params.BeaconConfig().Copy()
		config.InitializeForkSchedule()
		config.GenesisForkVersion = genesisForkVersion
		// Create fork schedule adding keys in non-sorted order.
		schedule := make(map[[4]byte]primitives.Epoch, 3)
		schedule[bytesutil.ToBytes4(secondForkVersion)] = secondForkEpoch
		schedule[bytesutil.ToBytes4(firstForkVersion)] = firstForkEpoch
		schedule[bytesutil.ToBytes4(thirdForkVersion)] = thirdForkEpoch
		config.ForkVersionSchedule = schedule
		params.OverrideBeaconConfig(config)

		request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/fork_schedule", nil)
		writer := httptest.NewRecorder()
		writer.Body = &bytes.Buffer{}

		genesisStr, firstStr, secondStr := hexutil.Encode(config.GenesisForkVersion), hexutil.Encode(config.AltairForkVersion), hexutil.Encode(config.BellatrixForkVersion)
		GetForkSchedule(writer, request)
		require.Equal(t, http.StatusOK, writer.Code)
		resp := &structs.GetForkScheduleResponse{}
		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
		schedule := params.SortedForkSchedule()
		require.Equal(t, len(schedule), len(resp.Data))
		require.Equal(t, 3, len(resp.Data))
		fork := resp.Data[0]
		assert.Equal(t, genesisStr, fork.PreviousVersion)
		assert.Equal(t, genesisStr, fork.CurrentVersion)
		assert.Equal(t, fmt.Sprintf("%d", config.GenesisEpoch), fork.Epoch)
		assert.DeepEqual(t, hexutil.Encode(genesisForkVersion), fork.PreviousVersion)
		assert.DeepEqual(t, hexutil.Encode(firstForkVersion), fork.CurrentVersion)
		assert.Equal(t, fmt.Sprintf("%d", firstForkEpoch), fork.Epoch)
		fork = resp.Data[1]
		assert.Equal(t, genesisStr, fork.PreviousVersion)
		assert.Equal(t, firstStr, fork.CurrentVersion)
		assert.Equal(t, fmt.Sprintf("%d", config.AltairForkEpoch), fork.Epoch)
		assert.DeepEqual(t, hexutil.Encode(firstForkVersion), fork.PreviousVersion)
		assert.DeepEqual(t, hexutil.Encode(secondForkVersion), fork.CurrentVersion)
		assert.Equal(t, fmt.Sprintf("%d", secondForkEpoch), fork.Epoch)
		fork = resp.Data[2]
		assert.Equal(t, firstStr, fork.PreviousVersion)
		assert.Equal(t, secondStr, fork.CurrentVersion)
		assert.Equal(t, fmt.Sprintf("%d", config.BellatrixForkEpoch), fork.Epoch)
		assert.DeepEqual(t, hexutil.Encode(secondForkVersion), fork.PreviousVersion)
		assert.DeepEqual(t, hexutil.Encode(thirdForkVersion), fork.CurrentVersion)
		assert.Equal(t, fmt.Sprintf("%d", thirdForkEpoch), fork.Epoch)
	})
	t.Run("correct number of forks", func(t *testing.T) {
		request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/fork_schedule", nil)
@@ -628,8 +639,8 @@ func TestForkSchedule_Ok(t *testing.T) {
		require.Equal(t, http.StatusOK, writer.Code)
		resp := &structs.GetForkScheduleResponse{}
		require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
		os := params.SortedForkSchedule()
		assert.Equal(t, len(os), len(resp.Data))
		os := forks.NewOrderedSchedule(params.BeaconConfig())
		assert.Equal(t, os.Len(), len(resp.Data))
	})
}
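
The "ok" subtest inserts the schedule keys out of order on purpose: Go map iteration order is unspecified, so the handler's sort by epoch is what the assertions actually exercise. For reference, a minimal stand-in for the four-byte conversion the test relies on; bytesutil.ToBytes4 is assumed here to truncate or zero-pad to exactly four bytes, which is why the fork version literals are four characters long.

// toBytes4 is an illustrative stand-in for bytesutil.ToBytes4: it copies at
// most four bytes into a fixed-size array, zero-padding shorter inputs.
func toBytes4(b []byte) [4]byte {
	var out [4]byte
	copy(out[:], b) // copy stops at min(len(b), 4), so longer inputs are truncated
	return out
}
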

@@ -12,10 +12,12 @@ go_library(
        "//api:go_default_library",
        "//api/server/structs:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/rpc/eth/shared:go_default_library",
        "//config/params:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//network/forks:go_default_library",
        "//network/httputil:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",

@@ -7,10 +7,12 @@ import (
	"github.com/OffchainLabs/prysm/v6/api"
	"github.com/OffchainLabs/prysm/v6/api/server/structs"
	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	"github.com/OffchainLabs/prysm/v6/network/httputil"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"

@@ -109,7 +111,17 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R

		updateSlot := update.AttestedHeader().Beacon().Slot
		updateEpoch := slots.ToEpoch(updateSlot)
		updateEntry := params.GetNetworkScheduleEntry(updateEpoch)
		updateFork, err := forks.Fork(updateEpoch)
		if err != nil {
			httputil.HandleError(w, "Could not get fork Version: "+err.Error(), http.StatusInternalServerError)
			return
		}

		forkDigest, err := signing.ComputeForkDigest(updateFork.CurrentVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
		if err != nil {
			httputil.HandleError(w, "Could not compute fork digest: "+err.Error(), http.StatusInternalServerError)
			return
		}
		updateSSZ, err := update.MarshalSSZ()
		if err != nil {
			httputil.HandleError(w, "Could not marshal update to SSZ: "+err.Error(), http.StatusInternalServerError)
@@ -121,7 +133,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
		if _, err := w.Write(chunkLength); err != nil {
			httputil.HandleError(w, "Could not write chunk length: "+err.Error(), http.StatusInternalServerError)
		}
		if _, err := w.Write(updateEntry.ForkDigest[:]); err != nil {
		if _, err := w.Write(forkDigest[:]); err != nil {
			httputil.HandleError(w, "Could not write fork digest: "+err.Error(), http.StatusInternalServerError)
		}
		if _, err := w.Write(updateSSZ); err != nil {
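
Each update in the range response is framed as a length prefix, a 4-byte fork digest for the update's attested epoch, and the SSZ-encoded update. A hedged sketch of the digest computation as defined in the consensus specs (the first four bytes of hash_tree_root(ForkData{current_version, genesis_validators_root})); the function below is illustrative and does not mirror signing.ComputeForkDigest's exact signature.

package main

import (
	"crypto/sha256"
	"fmt"
)

// computeForkDigest follows the consensus-spec definition. ForkData is two
// 32-byte leaf chunks (the 4-byte version zero-padded to 32 bytes, then the
// genesis validators root), so its hash tree root reduces to a single sha256
// over both chunks; the digest is the first four bytes of that root.
func computeForkDigest(currentVersion [4]byte, genesisValidatorsRoot [32]byte) [4]byte {
	var versionChunk [32]byte
	copy(versionChunk[:], currentVersion[:])
	root := sha256.Sum256(append(versionChunk[:], genesisValidatorsRoot[:]...))
	var digest [4]byte
	copy(digest[:], root[:4])
	return digest
}

func main() {
	var gvr [32]byte // zero root, for illustration only
	fmt.Printf("digest: %x\n", computeForkDigest([4]byte{0x01, 0x00, 0x00, 0x00}, gvr))
}
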

@@ -12,7 +12,6 @@ import (
)

func TestServer_GetBeaconConfig(t *testing.T) {
	t.Skip("this is a weird test")
	ctx := t.Context()
	bs := &Server{}
	res, err := bs.GetBeaconConfig(ctx, &emptypb.Empty{})

@@ -109,8 +109,6 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
			peerInfo.MetadataV0 = metadata.MetadataObjV0()
		case metadata.MetadataObjV1() != nil:
			peerInfo.MetadataV1 = metadata.MetadataObjV1()
		case metadata.MetadataObjV2() != nil:
			peerInfo.MetadataV2 = metadata.MetadataObjV2()
		}
	}
	addresses := peerStore.Addrs(pid)
@@ -129,7 +127,7 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
	if err != nil {
		// In the event chain state is non existent, we
		// initialize with the zero value.
		pStatus = new(ethpb.StatusV2)
		pStatus = new(ethpb.Status)
	}
	lastUpdated, err := peers.ChainStateLastUpdated(pid)
	if err != nil {
@@ -152,17 +150,6 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
		BehaviourPenalty: float32(bPenalty),
		ValidationError:  errorToString(peers.Scorers().ValidationError(pid)),
	}

	// Convert statusV2 into status
	// TODO: Should we do it this way or the other way around?
	peerStatus := &ethpb.Status{
		ForkDigest:     pStatus.ForkDigest,
		FinalizedRoot:  pStatus.FinalizedRoot,
		FinalizedEpoch: pStatus.FinalizedEpoch,
		HeadRoot:       pStatus.HeadRoot,
		HeadSlot:       pStatus.HeadSlot,
	}

	return &ethpb.DebugPeerResponse{
		ListeningAddresses: stringAddrs,
		Direction:          pbDirection,
@@ -170,7 +157,7 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
		PeerId:   pid.String(),
		Enr:      enr,
		PeerInfo: peerInfo,
		PeerStatus: peerStatus,
		PeerStatus: pStatus,
		LastUpdated: unixTime,
		ScoreInfo:   scoreInfo,
	}, nil

Some files were not shown because too many files have changed in this diff.