mirror of https://github.com/OffchainLabs/prysm.git
synced 2026-01-11 06:18:05 -05:00

Compare commits: peerDASHac...fixE2E (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | aa1327701e |  |

.bazelrc (1 change)
@@ -22,7 +22,6 @@ coverage --define=coverage_enabled=1
 build --workspace_status_command=./hack/workspace_status.sh
 
 build --define blst_disabled=false
-build --compilation_mode=opt
 run --define blst_disabled=false
 
 build:blst_disabled --define blst_disabled=true
WORKSPACE (28 changes)
@@ -29,7 +29,23 @@ http_archive(
 
 load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")
 
-zig_toolchains()
+# Temporarily use a nightly build until 0.12.0 is released.
+# See: https://github.com/prysmaticlabs/prysm/issues/13130
+zig_toolchains(
+    host_platform_sha256 = {
+        "linux-aarch64": "45afb8e32adde825165f4f293fcea9ecea503f7f9ec0e9bf4435afe70e67fb70",
+        "linux-x86_64": "f136c6a8a0f6adcb057d73615fbcd6f88281b3593f7008d5f7ed514ff925c02e",
+        "macos-aarch64": "05d995853c05243151deff47b60bdc2674f1e794a939eaeca0f42312da031cee",
+        "macos-x86_64": "721754ba5a50f31e8a1f0e1a74cace26f8246576878ac4a8591b0ee7b6db1fc1",
+        "windows-x86_64": "93f5248b2ea8c5ee8175e15b1384e133edc1cd49870b3ea259062a2e04164343",
+    },
+    url_formats = [
+        "https://ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
+        "https://mirror.bazel.build/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
+        "https://prysmaticlabs.com/mirror/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
+    ],
+    version = "0.12.0-dev.1349+fa022d1ec",
+)
 
 # Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April, 2025.
 # For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc
@@ -227,7 +243,7 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )
 
-consensus_spec_version = "v1.5.0-alpha.0"
+consensus_spec_version = "v1.4.0"
 
 bls_test_version = "v0.1.1"
 
@@ -243,7 +259,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "33c5547772b6d8d6f041dff7e7d26b0358c2392daed34394a3aa81147812a81c",
+    sha256 = "c282c0f86f23f3d2e0f71f5975769a4077e62a7e3c7382a16bd26a7e589811a0",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
 )
 
@@ -259,7 +275,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "06f286199cf2fedd4700487fb8feb0904e0ae18daaa4b3f70ea430ca9c388167",
+    sha256 = "4649c35aa3b8eb0cfdc81bee7c05649f90ef36bede5b0513e1f2e8baf37d6033",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
 )
 
@@ -275,7 +291,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "5f2a4452b323075eba6bf950003f7d91fd04ebcbde5bd087beafb5d6f6325ad4",
+    sha256 = "c5a03f724f757456ffaabd2a899992a71d2baf45ee4db65ca3518f2b7ee928c8",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
 )
 
@@ -290,7 +306,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "fd7e83e8cbeb3e297f2aeb93776305f7d606272c97834d8d9be673984501ed36",
+    sha256 = "cd1c9d97baccbdde1d2454a7dceb8c6c61192a3b581eee12ffc94969f2db8453",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
 
@@ -1,24 +1,11 @@
-load("@prysm//tools/go:def.bzl", "go_library", "go_test")
+load("@prysm//tools/go:def.bzl", "go_library")
 
 go_library(
     name = "go_default_library",
     srcs = [
         "constants.go",
         "headers.go",
-        "jwt.go",
     ],
     importpath = "github.com/prysmaticlabs/prysm/v5/api",
     visibility = ["//visibility:public"],
-    deps = [
-        "//crypto/rand:go_default_library",
-        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
-        "@com_github_pkg_errors//:go_default_library",
-    ],
 )
-
-go_test(
-    name = "go_default_test",
-    srcs = ["jwt_test.go"],
-    embed = [":go_default_library"],
-    deps = ["//testing/require:go_default_library"],
-)
@@ -4,6 +4,4 @@ const (
 	WebUrlPrefix = "/v2/validator/"
 	WebApiUrlPrefix = "/api/v2/validator/"
 	KeymanagerApiPrefix = "/eth/v1"
-
-	AuthTokenFileName = "auth-token"
 )
api/jwt.go (32 changes)
@@ -1,32 +0,0 @@
-package api
-
-import (
-	"github.com/ethereum/go-ethereum/common/hexutil"
-	"github.com/pkg/errors"
-	"github.com/prysmaticlabs/prysm/v5/crypto/rand"
-)
-
-// GenerateRandomHexString generates a random hex string that follows the standards for jwt token
-// used for beacon node -> execution client
-// used for web client -> validator client
-func GenerateRandomHexString() (string, error) {
-	secret := make([]byte, 32)
-	randGen := rand.NewGenerator()
-	n, err := randGen.Read(secret)
-	if err != nil {
-		return "", err
-	} else if n != 32 {
-		return "", errors.New("rand: unexpected length")
-	}
-	return hexutil.Encode(secret), nil
-}
-
-// ValidateAuthToken validating auth token for web
-func ValidateAuthToken(token string) error {
-	b, err := hexutil.Decode(token)
-	// token should be hex-encoded and at least 256 bits
-	if err != nil || len(b) < 32 {
-		return errors.New("invalid auth token: token should be hex-encoded and at least 256 bits")
-	}
-	return nil
-}
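Note: the two helpers deleted above formed a simple generate-then-validate round trip. A minimal standalone sketch of the same pattern, using only the standard library (crypto/rand and encoding/hex stand in for Prysm's wrappers; the names here are illustrative, not Prysm APIs):

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"errors"
	"fmt"
)

// generateToken mirrors GenerateRandomHexString: 32 random bytes, hex-encoded with a 0x prefix.
func generateToken() (string, error) {
	secret := make([]byte, 32)
	if _, err := rand.Read(secret); err != nil {
		return "", err
	}
	return "0x" + hex.EncodeToString(secret), nil
}

// validateToken mirrors ValidateAuthToken: the token must decode to at least 256 bits.
func validateToken(token string) error {
	if len(token) < 2 || token[:2] != "0x" {
		return errors.New("missing 0x prefix")
	}
	b, err := hex.DecodeString(token[2:])
	if err != nil || len(b) < 32 {
		return errors.New("token should be hex-encoded and at least 256 bits")
	}
	return nil
}

func main() {
	token, err := generateToken()
	if err != nil {
		panic(err)
	}
	fmt.Println(token, validateToken(token)) // prints the token and <nil>
}
```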
@@ -1,13 +0,0 @@
-package api
-
-import (
-	"testing"
-
-	"github.com/prysmaticlabs/prysm/v5/testing/require"
-)
-
-func TestGenerateRandomHexString(t *testing.T) {
-	token, err := GenerateRandomHexString()
-	require.NoError(t, err)
-	require.NoError(t, ValidateAuthToken(token))
-}
@@ -26,7 +26,6 @@ go_library(
         "receive_attestation.go",
         "receive_blob.go",
         "receive_block.go",
-        "receive_data_column.go",
         "service.go",
         "tracked_proposer.go",
         "weak_subjectivity_checks.go",
@@ -49,7 +48,6 @@ go_library(
         "//beacon-chain/core/feed:go_default_library",
         "//beacon-chain/core/feed/state:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
-        "//beacon-chain/core/peerdas:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
         "//beacon-chain/core/time:go_default_library",
         "//beacon-chain/core/transition:go_default_library",
@@ -159,7 +157,6 @@ go_test(
         "//beacon-chain/operations/slashings:go_default_library",
         "//beacon-chain/operations/voluntaryexits:go_default_library",
         "//beacon-chain/p2p:go_default_library",
-        "//beacon-chain/p2p/testing:go_default_library",
         "//beacon-chain/startup:go_default_library",
         "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
@@ -11,6 +11,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
 	f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
 	doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
+	forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
 	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -398,6 +399,14 @@ func (s *Service) InForkchoice(root [32]byte) bool {
 	return s.cfg.ForkChoiceStore.HasNode(root)
 }
 
+// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
+// chain known to forkchoice
+func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
+}
+
 // IsOptimisticForRoot takes the root as argument instead of the current head
 // and returns true if it is optimistic.
 func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
@@ -509,6 +518,13 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
 	return ar[:], nil
 }
 
+// SetOptimisticToInvalid wraps the corresponding method in forkchoice
+func (s *Service) SetOptimisticToInvalid(ctx context.Context, root, parent, lvh [32]byte) ([][32]byte, error) {
+	s.cfg.ForkChoiceStore.Lock()
+	defer s.cfg.ForkChoiceStore.Unlock()
+	return s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parent, lvh)
+}
+
 // SetGenesisTime sets the genesis time of beacon chain.
 func (s *Service) SetGenesisTime(t time.Time) {
 	s.genesisTime = t
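Note: both methods added above follow one pattern — the Service owns the forkchoice store's lock, takes it (read or write as appropriate), and delegates, so callers never lock the store themselves; the call-site change two hunks below relies on this. A minimal sketch of that pattern, with hypothetical stand-in types:

```go
package main

import (
	"fmt"
	"sync"
)

// store is a hypothetical stand-in for the forkchoice store: state guarded by
// an embedded RWMutex, like ForkChoiceStore's Lock/RLock in the diff.
type store struct {
	sync.RWMutex
	nodes map[[32]byte]bool
}

// service mirrors the new wrappers: it owns the store and centralizes locking.
type service struct{ store *store }

// hasNode takes the read lock before delegating, so callers stay lock-free —
// the same shape as the added IsViableForCheckpoint wrapper.
func (s *service) hasNode(root [32]byte) bool {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.store.nodes[root]
}

func main() {
	s := &service{store: &store{nodes: map[[32]byte]bool{{1}: true}}}
	fmt.Println(s.hasNode([32]byte{1})) // true
}
```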
@@ -33,7 +33,6 @@ var (
 )
 
 var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
-var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")
 
 // An invalid block is the block that fails state transition based on the core protocol rules.
 // The beacon node shall not be accepting nor building blocks that branch off from an invalid block.
@@ -256,7 +256,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
 // reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
 func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
 	newPayloadInvalidNodeCount.Inc()
-	invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
+	invalidRoots, err := s.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
 	if err != nil {
 		return err
 	}
@@ -118,9 +118,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
 }
 
 // WithP2PBroadcaster to broadcast messages after appropriate processing.
-func WithP2PBroadcaster(p p2p.Acceser) Option {
+func WithP2PBroadcaster(p p2p.Broadcaster) Option {
 	return func(s *Service) error {
-		s.cfg.P2P = p
+		s.cfg.P2p = p
 		return nil
 	}
 }
@@ -6,7 +6,6 @@ import (
 	"time"
 
 	"github.com/pkg/errors"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
 	"github.com/sirupsen/logrus"
 	"go.opencensus.io/trace"
 
@@ -515,35 +514,12 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
 	return missing, nil
 }
 
-func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
-	if len(expected) == 0 {
-		return nil, nil
-	}
-	if len(expected) > int(params.BeaconConfig().NumberOfColumns) {
-		return nil, errMaxDataColumnsExceeded
-	}
-	indices, err := bs.ColumnIndices(root)
-	if err != nil {
-		return nil, err
-	}
-	missing := make(map[uint64]bool, len(expected))
-	for col := range expected {
-		if !indices[col] {
-			missing[col] = true
-		}
-	}
-	return missing, nil
-}
-
 // isDataAvailable blocks until all BlobSidecars committed to in the block are available,
 // or an error or context cancellation occurs. A nil result means that the data availability check is successful.
 // The function will first check the database to see if all sidecars have been persisted. If any
 // sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
 // closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
 func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
-	if features.Get().EnablePeerDAS {
-		return s.isDataAvailableDataColumns(ctx, root, signed)
-	}
 	if signed.Version() < version.Deneb {
 		return nil
 	}
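Note: the deleted missingDataColumns helper is a plain set difference over map keys — expected custody columns minus the column indices already in storage. The same logic as a standalone sketch (illustrative names):

```go
package main

import "fmt"

// missingKeys returns the keys of expected that are not present (true) in stored,
// mirroring the deleted missingDataColumns set-difference.
func missingKeys(expected, stored map[uint64]bool) map[uint64]bool {
	missing := make(map[uint64]bool, len(expected))
	for k := range expected {
		if !stored[k] {
			missing[k] = true
		}
	}
	return missing
}

func main() {
	expected := map[uint64]bool{0: true, 7: true, 12: true}
	stored := map[uint64]bool{7: true}
	fmt.Println(missingKeys(expected, stored)) // map[0:true 12:true]
}
```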
@@ -615,86 +591,6 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
 	}
 }
 
-func (s *Service) isDataAvailableDataColumns(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
-	if signed.Version() < version.Deneb {
-		return nil
-	}
-
-	block := signed.Block()
-	if block == nil {
-		return errors.New("invalid nil beacon block")
-	}
-	// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
-	if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
-		return nil
-	}
-	body := block.Body()
-	if body == nil {
-		return errors.New("invalid nil beacon block body")
-	}
-	kzgCommitments, err := body.BlobKzgCommitments()
-	if err != nil {
-		return errors.Wrap(err, "could not get KZG commitments")
-	}
-	// If block has not commitments there is nothing to wait for.
-	if len(kzgCommitments) == 0 {
-		return nil
-	}
-
-	colMap, err := peerdas.CustodyColumns(s.cfg.P2P.NodeID(), params.BeaconConfig().CustodyRequirement)
-	if err != nil {
-		return err
-	}
-	// expected is the number of custodied data columnns a node is expected to have.
-	expected := len(colMap)
-	if expected == 0 {
-		return nil
-	}
-	// get a map of data column indices that are not currently available.
-	missing, err := missingDataColumns(s.blobStorage, root, colMap)
-	if err != nil {
-		return err
-	}
-	// If there are no missing indices, all data column sidecars are available.
-	if len(missing) == 0 {
-		return nil
-	}
-
-	// The gossip handler for data columns writes the index of each verified data column referencing the given
-	// root to the channel returned by blobNotifiers.forRoot.
-	nc := s.blobNotifiers.forRoot(root)
-
-	// Log for DA checks that cross over into the next slot; helpful for debugging.
-	nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
-	// Avoid logging if DA check is called after next slot start.
-	if nextSlot.After(time.Now()) {
-		nst := time.AfterFunc(time.Until(nextSlot), func() {
-			if len(missing) == 0 {
-				return
-			}
-			log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
-				Error("Still waiting for DA check at slot end.")
-		})
-		defer nst.Stop()
-	}
-	for {
-		select {
-		case idx := <-nc:
-			// Delete each index seen in the notification channel.
-			delete(missing, idx)
-			// Read from the channel until there are no more missing sidecars.
-			if len(missing) > 0 {
-				continue
-			}
-			// Once all sidecars have been observed, clean up the notification channel.
-			s.blobNotifiers.delete(root)
-			return nil
-		case <-ctx.Done():
-			return errors.Wrapf(ctx.Err(), "context deadline waiting for blob sidecars slot: %d, BlockRoot: %#x", block.Slot(), root)
-		}
-	}
-}
-
 func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
 	return logrus.Fields{
 		"slot": slot,
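Note: the heart of the deleted function — and of the surviving blob DA check — is a wait loop that drains an index-notification channel until nothing is missing or the context is cancelled. A self-contained sketch of that select pattern:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// waitForAll blocks until every key in missing has been seen on nc,
// or ctx is cancelled — the same select loop shape the DA check uses.
func waitForAll(ctx context.Context, nc <-chan uint64, missing map[uint64]bool) error {
	for {
		select {
		case idx := <-nc:
			delete(missing, idx)
			if len(missing) == 0 {
				return nil
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func main() {
	nc := make(chan uint64, 3)
	for _, idx := range []uint64{0, 7, 12} {
		nc <- idx
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(waitForAll(ctx, nc, map[uint64]bool{0: true, 7: true, 12: true})) // <nil>
}
```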
@@ -50,12 +50,6 @@ type BlobReceiver interface {
 	ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
 }
 
-// DataColumnReceiver interface defines the methods of chain service for receiving new
-// data columns
-type DataColumnReceiver interface {
-	ReceiveDataColumn(context.Context, blocks.VerifiedRODataColumn) error
-}
-
 // SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
 type SlashingReceiver interface {
 	ReceiveAttesterSlashing(ctx context.Context, slashings *ethpb.AttesterSlashing)
@@ -503,10 +497,7 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
 func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
 	isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
 	if err != nil {
-		s.cfg.ForkChoiceStore.Lock()
-		err = s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
-		s.cfg.ForkChoiceStore.Unlock()
-		return false, err
+		return false, s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
 	}
 	if signed.Version() < version.Capella && isValidPayload {
 		if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
@@ -1,18 +0,0 @@
-package blockchain
-
-import (
-	"context"
-
-	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
-)
-
-func (s *Service) ReceiveDataColumn(ctx context.Context, ds blocks.VerifiedRODataColumn) error {
-	if err := s.blobStorage.SaveDataColumn(ds); err != nil {
-		return err
-	}
-
-	// TODO use a custom event or new method of for data columns. For speed
-	// we are simply reusing blob paths here.
-	s.sendNewBlobEvent(ds.BlockRoot(), uint64(ds.SignedBlockHeader.Header.Slot))
-	return nil
-}
@@ -82,7 +82,7 @@ type config struct {
 	ExitPool        voluntaryexits.PoolManager
 	SlashingPool    slashings.PoolManager
 	BLSToExecPool   blstoexec.PoolManager
-	P2P             p2p.Acceser
+	P2p             p2p.Broadcaster
 	MaxRoutines     int
 	StateNotifier   statefeed.Notifier
 	ForkChoiceStore f.ForkChoicer
@@ -95,7 +95,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
 		WithAttestationPool(attestations.NewPool()),
 		WithSlashingPool(slashings.NewPool()),
 		WithExitPool(voluntaryexits.NewPool()),
-		WithP2PBroadcaster(&mockAccesser{}),
+		WithP2PBroadcaster(&mockBroadcaster{}),
 		WithStateNotifier(&mockBeaconNode{}),
 		WithForkChoiceStore(fc),
 		WithAttestationService(attService),
@@ -18,7 +18,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
-	p2pTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -45,11 +44,6 @@ type mockBroadcaster struct {
 	broadcastCalled bool
 }
 
-type mockAccesser struct {
-	mockBroadcaster
-	p2pTesting.MockPeerManager
-}
-
 func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
 	mb.broadcastCalled = true
 	return nil
@@ -70,11 +64,6 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
 	return nil
 }
 
-func (mb *mockBroadcaster) BroadcastDataColumn(_ context.Context, _ uint64, _ *ethpb.DataColumnSidecar) error {
-	mb.broadcastCalled = true
-	return nil
-}
-
 func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
 }
 
@@ -628,11 +628,6 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
 	return nil
 }
 
-// ReceiveDataColumn implements the same method in chain service
-func (c *ChainService) ReceiveDataColumn(_ context.Context, _ blocks.VerifiedRODataColumn) error {
-	return nil
-}
-
 // TargetRootForEpoch mocks the same method in the chain service
 func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
 	return c.TargetRoot, nil
beacon-chain/cache/BUILD.bazel (vendored, 1 change)
@@ -7,7 +7,6 @@ go_library(
         "active_balance_disabled.go",  # keep
         "attestation_data.go",
         "checkpoint_state.go",
-        "column_subnet_ids.go",
         "committee.go",
         "committee_disabled.go",  # keep
         "committees.go",
beacon-chain/cache/column_subnet_ids.go (vendored, 65 changes)
@@ -1,65 +0,0 @@
-package cache
-
-import (
-	"sync"
-	"time"
-
-	"github.com/patrickmn/go-cache"
-	"github.com/prysmaticlabs/prysm/v5/config/params"
-)
-
-type columnSubnetIDs struct {
-	colSubCache *cache.Cache
-	colSubLock  sync.RWMutex
-}
-
-// ColumnSubnetIDs for column subnet participants
-var ColumnSubnetIDs = newColumnSubnetIDs()
-
-const columnKey = "columns"
-
-func newColumnSubnetIDs() *columnSubnetIDs {
-	epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
-	// Set the default duration of a column subnet subscription as the column expiry period.
-	subLength := epochDuration * time.Duration(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
-	persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
-	return &columnSubnetIDs{colSubCache: persistentCache}
-}
-
-// GetColumnSubnets retrieves the data column subnets.
-func (s *columnSubnetIDs) GetColumnSubnets() ([]uint64, bool, time.Time) {
-	s.colSubLock.RLock()
-	defer s.colSubLock.RUnlock()
-
-	id, duration, ok := s.colSubCache.GetWithExpiration(columnKey)
-	if !ok {
-		return nil, false, time.Time{}
-	}
-	// Retrieve indices from the cache.
-	idxs, ok := id.([]uint64)
-	if !ok {
-		return nil, false, time.Time{}
-	}
-
-	return idxs, ok, duration
-}
-
-// AddColumnSubnets adds the relevant data column subnets.
-func (s *columnSubnetIDs) AddColumnSubnets(colIdx []uint64) {
-	s.colSubLock.Lock()
-	defer s.colSubLock.Unlock()
-
-	s.colSubCache.Set(columnKey, colIdx, 0)
-}
-
-// EmptyAllCaches empties out all the related caches and flushes any stored
-// entries on them. This should only ever be used for testing, in normal
-// production, handling of the relevant subnets for each role is done
-// separately.
-func (s *columnSubnetIDs) EmptyAllCaches() {
-	// Clear the cache.
-	s.colSubLock.Lock()
-	defer s.colSubLock.Unlock()
-
-	s.colSubCache.Flush()
-}
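Note: the deleted file wraps patrickmn/go-cache around a single well-known key, with the default TTL derived from the epoch duration and the column expiry period. A minimal sketch of the same TTL pattern (the numbers are hypothetical stand-ins for the BeaconConfig values):

```go
package main

import (
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

func main() {
	// Hypothetical stand-ins for SlotsPerEpoch*SecondsPerSlot and
	// MinEpochsForDataColumnSidecarsRequest from BeaconConfig.
	epochDuration := time.Duration(32*12) * time.Second
	subLength := 4096 * epochDuration // default TTL: the column expiry period

	// TTL plus cleanup interval, as in the deleted newColumnSubnetIDs.
	c := cache.New(subLength, epochDuration)

	c.Set("columns", []uint64{3, 17, 42}, 0) // 0 = use the cache's default TTL
	if v, expiry, ok := c.GetWithExpiration("columns"); ok {
		fmt.Println(v.([]uint64), "expires at", expiry.Format(time.RFC3339))
	}
}
```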
@@ -96,24 +96,6 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
 	return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
 }
 
-func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
-	currentEpoch := slots.ToEpoch(header.Header.Slot)
-	fork, err := forks.Fork(currentEpoch)
-	if err != nil {
-		return err
-	}
-	domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
-	if err != nil {
-		return err
-	}
-	proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
-	if err != nil {
-		return err
-	}
-	proposerPubKey := proposer.PublicKey
-	return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
-}
-
 // VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
 // from the above method by not using fork data from the state and instead retrieving it
 // via the respective epoch.
@@ -32,9 +32,6 @@ const (
 
 	// AttesterSlashingReceived is sent after an attester slashing is received from gossip or rpc
 	AttesterSlashingReceived = 8
-
-	// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
-	DataColumnSidecarReceived = 9
 )
 
 // UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -80,7 +77,3 @@ type ProposerSlashingReceivedData struct {
 type AttesterSlashingReceivedData struct {
 	AttesterSlashing *ethpb.AttesterSlashing
 }
-
-type DataColumnSidecarReceivedData struct {
-	DataColumn *blocks.VerifiedRODataColumn
-}
@@ -50,8 +50,6 @@ go_test(
         "attestation_test.go",
         "beacon_committee_test.go",
         "block_test.go",
-        "private_access_fuzz_noop_test.go",  # keep
-        "private_access_test.go",
         "randao_test.go",
         "rewards_penalties_test.go",
         "shuffle_test.go",
@@ -140,7 +140,7 @@ func BeaconCommittee(
 	}
 	count := committeesPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch)
 
-	return ComputeCommittee(validatorIndices, seed, indexOffset, count)
+	return computeCommittee(validatorIndices, seed, indexOffset, count)
 }
 
 // CommitteeAssignmentContainer represents a committee list, committee index, and to be attested slot for a given epoch.
@@ -359,7 +359,7 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
 	if err != nil {
 		return err
 	}
-	proposerIndices, err := PrecomputeProposerIndices(state, indices, epoch)
+	proposerIndices, err := precomputeProposerIndices(state, indices, epoch)
 	if err != nil {
 		return err
 	}
@@ -409,7 +409,7 @@ func ClearCache() {
 	balanceCache.Clear()
 }
 
-// ComputeCommittee returns the requested shuffled committee out of the total committees using
+// computeCommittee returns the requested shuffled committee out of the total committees using
 // validator indices and seed.
 //
 // Spec pseudocode definition:
@@ -424,7 +424,7 @@ func ClearCache() {
 //	start = (len(indices) * index) // count
 //	end = (len(indices) * uint64(index + 1)) // count
 //	return [indices[compute_shuffled_index(uint64(i), uint64(len(indices)), seed)] for i in range(start, end)]
-func ComputeCommittee(
+func computeCommittee(
 	indices []primitives.ValidatorIndex,
 	seed [32]byte,
 	index, count uint64,
@@ -451,9 +451,9 @@ func ComputeCommittee(
 	return shuffledList[start:end], nil
 }
 
-// PrecomputeProposerIndices computes proposer indices of the current epoch and returns a list of proposer indices,
+// This computes proposer indices of the current epoch and returns a list of proposer indices,
 // the index of the list represents the slot number.
-func PrecomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
+func precomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
 	hashFunc := hash.CustomSHA256Hasher()
 	proposerIndices := make([]primitives.ValidatorIndex, params.BeaconConfig().SlotsPerEpoch)
 
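Note: the spec pseudocode quoted above reduces to integer slice boundaries over the shuffled index list: start = len*index/count and end = len*(index+1)/count. A quick standalone check of that arithmetic (illustrative only):

```go
package main

import "fmt"

// committeeBounds computes the slice boundaries from the spec pseudocode:
// start = (len(indices) * index) // count, end = (len(indices) * (index + 1)) // count.
func committeeBounds(validatorCount, index, count uint64) (start, end uint64) {
	return validatorCount * index / count, validatorCount * (index + 1) / count
}

func main() {
	// 65 validators split into 32 committees: sizes come out as 2 or 3,
	// and the half-open ranges tile [0, 65) without gaps or overlap.
	for _, idx := range []uint64{0, 1, 31} {
		s, e := committeeBounds(65, idx, 32)
		fmt.Printf("committee %d: indices [%d, %d) size %d\n", idx, s, e, e-s)
	}
}
```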
@@ -1,4 +1,4 @@
-package helpers_test
+package helpers
 
 import (
 	"context"
@@ -7,7 +7,6 @@ import (
 	"testing"
 
 	"github.com/prysmaticlabs/go-bitfield"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
 	state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -22,7 +21,7 @@ import (
 )
 
 func TestComputeCommittee_WithoutCache(t *testing.T) {
-	helpers.ClearCache()
+	ClearCache()
 
 	// Create 10 committees
 	committeeCount := uint64(10)
@@ -49,16 +48,16 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
 	require.NoError(t, err)
 
 	epoch := time.CurrentEpoch(state)
-	indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
+	indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
 	require.NoError(t, err)
-	seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
+	seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
 	require.NoError(t, err)
-	committees, err := helpers.ComputeCommittee(indices, seed, 0, 1 /* Total committee*/)
+	committees, err := computeCommittee(indices, seed, 0, 1 /* Total committee*/)
 	assert.NoError(t, err, "Could not compute committee")
 
 	// Test shuffled indices are correct for index 5 committee
 	index := uint64(5)
-	committee5, err := helpers.ComputeCommittee(indices, seed, index, committeeCount)
+	committee5, err := computeCommittee(indices, seed, index, committeeCount)
 	assert.NoError(t, err, "Could not compute committee")
 	start := slice.SplitOffset(validatorCount, committeeCount, index)
 	end := slice.SplitOffset(validatorCount, committeeCount, index+1)
@@ -66,7 +65,7 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
 
 	// Test shuffled indices are correct for index 9 committee
 	index = uint64(9)
-	committee9, err := helpers.ComputeCommittee(indices, seed, index, committeeCount)
+	committee9, err := computeCommittee(indices, seed, index, committeeCount)
 	assert.NoError(t, err, "Could not compute committee")
 	start = slice.SplitOffset(validatorCount, committeeCount, index)
 	end = slice.SplitOffset(validatorCount, committeeCount, index+1)
@@ -74,42 +73,42 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
 }
 
 func TestComputeCommittee_RegressionTest(t *testing.T) {
-	helpers.ClearCache()
+	ClearCache()
 
 	indices := []primitives.ValidatorIndex{1, 3, 8, 16, 18, 19, 20, 23, 30, 35, 43, 46, 47, 54, 56, 58, 69, 70, 71, 83, 84, 85, 91, 96, 100, 103, 105, 106, 112, 121, 127, 128, 129, 140, 142, 144, 146, 147, 149, 152, 153, 154, 157, 160, 173, 175, 180, 182, 188, 189, 191, 194, 201, 204, 217, 221, 226, 228, 230, 231, 239, 241, 249, 250, 255}
 	seed := [32]byte{68, 110, 161, 250, 98, 230, 161, 172, 227, 226, 99, 11, 138, 124, 201, 134, 38, 197, 0, 120, 6, 165, 122, 34, 19, 216, 43, 226, 210, 114, 165, 183}
 	index := uint64(215)
 	count := uint64(32)
-	_, err := helpers.ComputeCommittee(indices, seed, index, count)
+	_, err := computeCommittee(indices, seed, index, count)
 	require.ErrorContains(t, "index out of range", err)
 }
 
 func TestVerifyBitfieldLength_OK(t *testing.T) {
-	helpers.ClearCache()
+	ClearCache()
 
 	bf := bitfield.Bitlist{0xFF, 0x01}
 	committeeSize := uint64(8)
-	assert.NoError(t, helpers.VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
+	assert.NoError(t, VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
 
 	bf = bitfield.Bitlist{0xFF, 0x07}
 	committeeSize = 10
-	assert.NoError(t, helpers.VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
+	assert.NoError(t, VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
 }
 
 func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
-	helpers.ClearCache()
+	ClearCache()
 
 	epoch := primitives.Epoch(1)
 	state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
 		Slot: 0, // Epoch 0.
 	})
 	require.NoError(t, err)
-	_, _, err = helpers.CommitteeAssignments(context.Background(), state, epoch+1)
+	_, _, err = CommitteeAssignments(context.Background(), state, epoch+1)
 	assert.ErrorContains(t, "can't be greater than next epoch", err)
 }
 
 func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
-	helpers.ClearCache()
+	ClearCache()
 
 	validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
 	for i := 0; i < len(validators); i++ {
@@ -128,7 +127,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
 		RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
 	})
 	require.NoError(t, err)
-	_, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, 0)
+	_, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, 0)
 	require.NoError(t, err, "Failed to determine CommitteeAssignments")
 	for _, ss := range proposerIndexToSlots {
 		for _, s := range ss {
@@ -199,9 +198,9 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
 
 	for i, tt := range tests {
 		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
-			helpers.ClearCache()
+			ClearCache()
 
-			validatorIndexToCommittee, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
+			validatorIndexToCommittee, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
 			require.NoError(t, err, "Failed to determine CommitteeAssignments")
 			cac := validatorIndexToCommittee[tt.index]
 			assert.Equal(t, tt.committeeIndex, cac.CommitteeIndex, "Unexpected committeeIndex for validator index %d", tt.index)
@@ -216,7 +215,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
 }
 
 func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
-	helpers.ClearCache()
+	ClearCache()
 
 	// Initialize test with 256 validators, each slot and each index gets 4 validators.
 	validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -238,17 +237,17 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
 		RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
 	})
 	require.NoError(t, err)
-	_, proposerIndxs, err := helpers.CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state))
+	_, proposerIndxs, err := CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state))
 	require.NoError(t, err)
 	require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
 
-	_, proposerIndxs, err = helpers.CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
+	_, proposerIndxs, err = CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
 	require.NoError(t, err)
 	require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
 }
 
 func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *testing.T) {
-	helpers.ClearCache()
+	ClearCache()
 
 	// Initialize test with 256 validators, each slot and each index gets 4 validators.
 	validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -264,12 +263,12 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
 		RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
 	})
 	require.NoError(t, err)
-	_, _, err = helpers.CommitteeAssignments(context.Background(), state, 0)
+	_, _, err = CommitteeAssignments(context.Background(), state, 0)
 	require.ErrorContains(t, "start slot 0 is smaller than the minimum valid start slot 1", err)
 }
 
 func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
-	helpers.ClearCache()
+	ClearCache()
 
 	// Initialize test with 256 validators, each slot and each index gets 4 validators.
 	validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -286,7 +285,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
 	})
 	require.NoError(t, err)
 	epoch := primitives.Epoch(1)
-	_, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, epoch)
+	_, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, epoch)
 	require.NoError(t, err, "Failed to determine CommitteeAssignments")
 
 	slotsWithProposers := make(map[primitives.Slot]bool)
@@ -392,10 +391,10 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
 	}
 
 	for i, tt := range tests {
-		helpers.ClearCache()
+		ClearCache()
 
 		require.NoError(t, state.SetSlot(tt.stateSlot))
-		err := helpers.VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
+		err := VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
 		if tt.verificationFailure {
 			assert.NotNil(t, err, "Verification succeeded when it was supposed to fail")
 		} else {
@@ -405,7 +404,7 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
 }
 
 func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
-	helpers.ClearCache()
+	ClearCache()
 
 	validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
 	validators := make([]*ethpb.Validator, validatorCount)
@@ -422,20 +421,20 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
 		RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
 	})
 	require.NoError(t, err)
-	require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
+	require.NoError(t, UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
 
 	epoch := primitives.Epoch(0)
 	idx := primitives.CommitteeIndex(1)
-	seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
+	seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
 	require.NoError(t, err)
 
-	indices, err = helpers.CommitteeCache().Committee(context.Background(), params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)), seed, idx)
+	indices, err = committeeCache.Committee(context.Background(), params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)), seed, idx)
 	require.NoError(t, err)
 	assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(indices)), "Did not save correct indices lengths")
 }
 
 func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
-	helpers.ClearCache()
+	ClearCache()
 
 	validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
 	validators := make([]*ethpb.Validator, validatorCount)
@@ -453,19 +452,19 @@ func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
 	})
 	require.NoError(t, err)
 	e := time.CurrentEpoch(state)
-	require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, e))
+	require.NoError(t, UpdateCommitteeCache(context.Background(), state, e))
 
-	seed, err := helpers.Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
+	seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
 	require.NoError(t, err)
-	require.Equal(t, true, helpers.CommitteeCache().HasEntry(string(seed[:])))
+	require.Equal(t, true, committeeCache.HasEntry(string(seed[:])))
 
-	nextSeed, err := helpers.Seed(state, e+1, params.BeaconConfig().DomainBeaconAttester)
+	nextSeed, err := Seed(state, e+1, params.BeaconConfig().DomainBeaconAttester)
 	require.NoError(t, err)
-	require.Equal(t, false, helpers.CommitteeCache().HasEntry(string(nextSeed[:])))
+	require.Equal(t, false, committeeCache.HasEntry(string(nextSeed[:])))
 
-	require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, e+1))
+	require.NoError(t, UpdateCommitteeCache(context.Background(), state, e+1))
 
-	require.Equal(t, true, helpers.CommitteeCache().HasEntry(string(nextSeed[:])))
+	require.Equal(t, true, committeeCache.HasEntry(string(nextSeed[:])))
 }
 
 func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
@@ -482,20 +481,20 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
 	require.NoError(b, err)
 
 	epoch := time.CurrentEpoch(state)
-	indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
+	indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
 	require.NoError(b, err)
-	seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
+	seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
 	require.NoError(b, err)
 
 	index := uint64(3)
-	_, err = helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
+	_, err = computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
 	if err != nil {
 		panic(err)
 	}
 
 	b.ResetTimer()
 	for n := 0; n < b.N; n++ {
-		_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
+		_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
 		if err != nil {
 			panic(err)
 		}
@@ -516,20 +515,20 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
 	require.NoError(b, err)
 
 	epoch := time.CurrentEpoch(state)
-	indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
+	indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
 	require.NoError(b, err)
-	seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
+	seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
 	require.NoError(b, err)
 
 	index := uint64(3)
-	_, err = helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
+	_, err = computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
 	if err != nil {
 		panic(err)
 	}
 
 	b.ResetTimer()
 	for n := 0; n < b.N; n++ {
-		_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
+		_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
 		if err != nil {
 			panic(err)
 		}
@@ -550,9 +549,9 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
 	require.NoError(b, err)
 
 	epoch := time.CurrentEpoch(state)
-	indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
+	indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
 	require.NoError(b, err)
-	seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
+	seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
 	require.NoError(b, err)
 
 	i := uint64(0)
@@ -560,7 +559,7 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
 	b.ResetTimer()
 	for n := 0; n < b.N; n++ {
 		i++
-		_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
+		_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
 		if err != nil {
 			panic(err)
 		}
@@ -585,9 +584,9 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
 	require.NoError(b, err)
 
 	epoch := time.CurrentEpoch(state)
-	indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
+	indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
 	require.NoError(b, err)
-	seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
+	seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
 	require.NoError(b, err)
 
 	i := uint64(0)
@@ -595,7 +594,7 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
 	b.ResetTimer()
 	for n := 0; n < b.N; n++ {
 		i++
-		_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
+		_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
 		if err != nil {
 			panic(err)
 		}
@@ -620,9 +619,9 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
 	require.NoError(b, err)
 
 	epoch := time.CurrentEpoch(state)
-	indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
+	indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
 	require.NoError(b, err)
-	seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
+	seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
 	require.NoError(b, err)
 
 	i := uint64(0)
@@ -630,7 +629,7 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
 	b.ResetTimer()
 	for n := 0; n < b.N; n++ {
 		i++
-		_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
+		_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
 		if err != nil {
 			panic(err)
 		}
@@ -656,13 +655,13 @@ func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
 		RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
 	})
 	require.NoError(t, err)
-	_, err = helpers.BeaconCommitteeFromState(context.Background(), state, 1 /* previous epoch */, 0)
+	_, err = BeaconCommitteeFromState(context.Background(), state, 1 /* previous epoch */, 0)
 	require.NoError(t, err)
 
 	// Verify previous epoch is cached
-	seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconAttester)
+	seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconAttester)
 	require.NoError(t, err)
-	activeIndices, err := helpers.CommitteeCache().ActiveIndices(context.Background(), seed)
+	activeIndices, err := committeeCache.ActiveIndices(context.Background(), seed)
 	require.NoError(t, err)
 	assert.NotNil(t, activeIndices, "Did not cache active indices")
 }
@@ -681,19 +680,19 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
 	})
 	require.NoError(t, err)
 
-	indices, err := helpers.ActiveValidatorIndices(context.Background(), state, 0)
+	indices, err := ActiveValidatorIndices(context.Background(), state, 0)
 	require.NoError(t, err)
 
-	proposerIndices, err := helpers.PrecomputeProposerIndices(state, indices, time.CurrentEpoch(state))
+	proposerIndices, err := precomputeProposerIndices(state, indices, time.CurrentEpoch(state))
 	require.NoError(t, err)
 
 	var wantedProposerIndices []primitives.ValidatorIndex
-	seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
+	seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
 	require.NoError(t, err)
 	for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
 		seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
 		seedWithSlotHash := hash.Hash(seedWithSlot)
-		index, err := helpers.ComputeProposerIndex(state, indices, seedWithSlotHash)
+		index, err := ComputeProposerIndex(state, indices, seedWithSlotHash)
 		require.NoError(t, err)
 		wantedProposerIndices = append(wantedProposerIndices, index)
 	}
@@ -1,17 +0,0 @@
-//go:build fuzz
-
-package helpers
-
-import "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
-
-func CommitteeCache() *cache.FakeCommitteeCache {
-	return committeeCache
-}
-
-func SyncCommitteeCache() *cache.FakeSyncCommitteeCache {
-	return syncCommitteeCache
-}
-
-func ProposerIndicesCache() *cache.FakeProposerIndicesCache {
-	return proposerIndicesCache
-}
@@ -1,17 +0,0 @@
-//go:build !fuzz
-
-package helpers
-
-import "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
-
-func CommitteeCache() *cache.CommitteeCache {
-	return committeeCache
-}
-
-func SyncCommitteeCache() *cache.SyncCommitteeCache {
-	return syncCommitteeCache
-}
-
-func ProposerIndicesCache() *cache.ProposerIndicesCache {
-	return proposerIndicesCache
-}
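Note: the two files deleted above use mutually exclusive build tags (fuzz / !fuzz) to expose the same accessor names with different concrete cache types; the tests now access the unexported caches directly instead. A compile-only sketch of that pattern with stand-in types (one file shown; a sibling file under the opposite tag would define the same functions):

```go
//go:build !fuzz

// Package cachetags sketches the build-tag accessor pattern from the two
// deleted files: the function name and signature stay stable while the
// returned type depends on the build tag. Types here are illustrative.
package cachetags

type committeeCacheType struct{} // stand-in for cache.CommitteeCache

var committeeCache = &committeeCacheType{}

// CommitteeCache exposes the package-private cache. A sibling file guarded by
// //go:build fuzz would define the same accessor returning a fake cache type.
func CommitteeCache() *committeeCacheType { return committeeCache }
```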
@@ -1,10 +1,9 @@
-package helpers_test
+package helpers
 
 import (
 	"encoding/binary"
 	"testing"
 
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
 	state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -41,10 +40,10 @@ func TestRandaoMix_OK(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		helpers.ClearCache()
+		ClearCache()
 
 		require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
-		mix, err := helpers.RandaoMix(state, test.epoch)
+		mix, err := RandaoMix(state, test.epoch)
 		require.NoError(t, err)
 		assert.DeepEqual(t, test.randaoMix, mix, "Incorrect randao mix")
 	}
@@ -77,10 +76,10 @@ func TestRandaoMix_CopyOK(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		helpers.ClearCache()
+		ClearCache()
 
 		require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
-		mix, err := helpers.RandaoMix(state, test.epoch)
+		mix, err := RandaoMix(state, test.epoch)
 		require.NoError(t, err)
 		uniqueNumber := uint64(params.BeaconConfig().EpochsPerHistoricalVector.Add(1000))
 		binary.LittleEndian.PutUint64(mix, uniqueNumber)
@@ -93,7 +92,7 @@ func TestRandaoMix_CopyOK(t *testing.T) {
 }
 
 func TestGenerateSeed_OK(t *testing.T) {
-	helpers.ClearCache()
+	ClearCache()
 
 	randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
 	for i := 0; i < len(randaoMixes); i++ {
@@ -108,7 +107,7 @@ func TestGenerateSeed_OK(t *testing.T) {
 	})
 	require.NoError(t, err)
 
-	got, err := helpers.Seed(state, 10, params.BeaconConfig().DomainBeaconAttester)
+	got, err := Seed(state, 10, params.BeaconConfig().DomainBeaconAttester)
 	require.NoError(t, err)
 
 	wanted := [32]byte{102, 82, 23, 40, 226, 79, 171, 11, 203, 23, 175, 7, 88, 202, 80,
@@ -1,10 +1,9 @@
-package helpers_test
+package helpers
 
 import (
 	"math"
 	"testing"
 
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
 	state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -15,7 +14,7 @@ import (
 )
 
 func TestTotalBalance_OK(t *testing.T) {
-	helpers.ClearCache()
+	ClearCache()
 
 	state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: []*ethpb.Validator{
 		{EffectiveBalance: 27 * 1e9}, {EffectiveBalance: 28 * 1e9},
@@ -23,19 +22,19 @@ func TestTotalBalance_OK(t *testing.T) {
 	}})
 	require.NoError(t, err)
 
-	balance := helpers.TotalBalance(state, []primitives.ValidatorIndex{0, 1, 2, 3})
+	balance := TotalBalance(state, []primitives.ValidatorIndex{0, 1, 2, 3})
 	wanted := state.Validators()[0].EffectiveBalance + state.Validators()[1].EffectiveBalance +
 		state.Validators()[2].EffectiveBalance + state.Validators()[3].EffectiveBalance
 	assert.Equal(t, wanted, balance, "Incorrect TotalBalance")
 }
 
 func TestTotalBalance_ReturnsEffectiveBalanceIncrement(t *testing.T) {
-	helpers.ClearCache()
+	ClearCache()
 
 	state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: []*ethpb.Validator{}})
 	require.NoError(t, err)
 
-	balance := helpers.TotalBalance(state, []primitives.ValidatorIndex{})
+	balance := TotalBalance(state, []primitives.ValidatorIndex{})
 	wanted := params.BeaconConfig().EffectiveBalanceIncrement
 	assert.Equal(t, wanted, balance, "Incorrect TotalBalance")
 }
@@ -52,7 +51,7 @@ func TestGetBalance_OK(t *testing.T) {
 		{i: 2, b: []uint64{0, 0, 0}},
 	}
 	for _, test := range tests {
-		helpers.ClearCache()
+		ClearCache()
 
 		state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Balances: test.b})
 		require.NoError(t, err)
@@ -69,7 +68,7 @@ func TestTotalActiveBalance(t *testing.T) {
 		{10000},
 	}
 	for _, test := range tests {
-		helpers.ClearCache()
+		ClearCache()
 
 		validators := make([]*ethpb.Validator, 0)
 		for i := 0; i < test.vCount; i++ {
@@ -77,7 +76,7 @@ func TestTotalActiveBalance(t *testing.T) {
 		}
 		state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
 		require.NoError(t, err)
-		bal, err := helpers.TotalActiveBalance(state)
+		bal, err := TotalActiveBalance(state)
 		require.NoError(t, err)
 		require.Equal(t, uint64(test.vCount)*params.BeaconConfig().MaxEffectiveBalance, bal)
 	}
@@ -92,7 +91,7 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
 		{10000},
 	}
 	for _, test := range tests {
-		helpers.ClearCache()
+		ClearCache()
 
 		validators := make([]*ethpb.Validator, 0)
 		for i := 0; i < test.vCount; i++ {
@@ -100,7 +99,7 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
 		}
 		state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
 		require.NoError(t, err)
-		bal, err := helpers.TotalActiveBalance(state)
+		bal, err := TotalActiveBalance(state)
 		require.NoError(t, err)
 		require.Equal(t, params.BeaconConfig().EffectiveBalanceIncrement, bal)
 	}
@@ -116,7 +115,7 @@ func TestTotalActiveBalance_WithCache(t *testing.T) {
 		{vCount: 10000, wantCount: 10000},
 	}
 	for _, test := range tests {
-		helpers.ClearCache()
+		ClearCache()
 
 		validators := make([]*ethpb.Validator, 0)
 		for i := 0; i < test.vCount; i++ {
@@ -124,7 +123,7 @@ func TestTotalActiveBalance_WithCache(t *testing.T) {
 		}
 		state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
 		require.NoError(t, err)
-		bal, err := helpers.TotalActiveBalance(state)
+		bal, err := TotalActiveBalance(state)
 		require.NoError(t, err)
 		require.Equal(t, uint64(test.wantCount)*params.BeaconConfig().MaxEffectiveBalance, bal)
 	}
@@ -142,7 +141,7 @@ func TestIncreaseBalance_OK(t *testing.T) {
 		{i: 2, b: []uint64{27 * 1e9, 28 * 1e9, 32 * 1e9}, nb: 33 * 1e9, eb: 65 * 1e9},
 	}
 	for _, test := range tests {
-		helpers.ClearCache()
+		ClearCache()
 
 		state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
 			Validators: []*ethpb.Validator{
@@ -150,7 +149,7 @@ func TestIncreaseBalance_OK(t *testing.T) {
 			Balances: test.b,
 		})
 		require.NoError(t, err)
-		require.NoError(t, helpers.IncreaseBalance(state, test.i, test.nb))
+		require.NoError(t, IncreaseBalance(state, test.i, test.nb))
 		assert.Equal(t, test.eb, state.Balances()[test.i], "Incorrect Validator balance")
 	}
 }
@@ -168,7 +167,7 @@ func TestDecreaseBalance_OK(t *testing.T) {
 		{i: 3, b: []uint64{27 * 1e9, 28 * 1e9, 1, 28 * 1e9}, nb: 28 * 1e9, eb: 0},
 	}
 	for _, test := range tests {
-		helpers.ClearCache()
+		ClearCache()
 
 		state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
 			Validators: []*ethpb.Validator{
@@ -176,13 +175,13 @@ func TestDecreaseBalance_OK(t *testing.T) {
 			Balances: test.b,
 		})
 		require.NoError(t, err)
-		require.NoError(t, helpers.DecreaseBalance(state, test.i, test.nb))
+		require.NoError(t, DecreaseBalance(state, test.i, test.nb))
 		assert.Equal(t, test.eb, state.Balances()[test.i], "Incorrect Validator balance")
 	}
 }
 
 func TestFinalityDelay(t *testing.T) {
-	helpers.ClearCache()
+	ClearCache()
 
 	base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
 	base.FinalizedCheckpoint = &ethpb.Checkpoint{Epoch: 3}
@@ -196,25 +195,25 @@ func TestFinalityDelay(t *testing.T) {
 		finalizedEpoch = beaconState.FinalizedCheckpointEpoch()
 	}
 	setVal()
-	d := helpers.FinalityDelay(prevEpoch, finalizedEpoch)
+	d := FinalityDelay(prevEpoch, finalizedEpoch)
 	w := time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
 	assert.Equal(t, w, d, "Did not get wanted finality delay")
 
 	require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 4}))
 	setVal()
-	d = helpers.FinalityDelay(prevEpoch, finalizedEpoch)
+	d = FinalityDelay(prevEpoch, finalizedEpoch)
 	w = time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
 	assert.Equal(t, w, d, "Did not get wanted finality delay")
 
 	require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 5}))
 	setVal()
-	d = helpers.FinalityDelay(prevEpoch, finalizedEpoch)
+	d = FinalityDelay(prevEpoch, finalizedEpoch)
 	w = time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
 	assert.Equal(t, w, d, "Did not get wanted finality delay")
 }
 
 func TestIsInInactivityLeak(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
ClearCache()
|
||||
|
||||
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
|
||||
base.FinalizedCheckpoint = ðpb.Checkpoint{Epoch: 3}
|
||||
@@ -228,13 +227,13 @@ func TestIsInInactivityLeak(t *testing.T) {
|
||||
finalizedEpoch = beaconState.FinalizedCheckpointEpoch()
|
||||
}
|
||||
setVal()
|
||||
assert.Equal(t, true, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
|
||||
assert.Equal(t, true, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
|
||||
require.NoError(t, beaconState.SetFinalizedCheckpoint(ðpb.Checkpoint{Epoch: 4}))
|
||||
setVal()
|
||||
assert.Equal(t, true, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
|
||||
assert.Equal(t, true, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
|
||||
require.NoError(t, beaconState.SetFinalizedCheckpoint(ðpb.Checkpoint{Epoch: 5}))
|
||||
setVal()
|
||||
assert.Equal(t, false, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak false")
|
||||
assert.Equal(t, false, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak false")
|
||||
}
|
||||
|
||||
func buildState(slot primitives.Slot, validatorCount uint64) *ethpb.BeaconState {
|
||||
@@ -286,7 +285,7 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
|
||||
{i: 2, b: []uint64{math.MaxUint64, math.MaxUint64, math.MaxUint64}, nb: 33 * 1e9},
|
||||
}
|
||||
for _, test := range tests {
|
||||
helpers.ClearCache()
|
||||
ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Validators: []*ethpb.Validator{
|
||||
@@ -294,6 +293,6 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
|
||||
Balances: test.b,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "addition overflows", helpers.IncreaseBalance(state, test.i, test.nb))
|
||||
require.ErrorContains(t, "addition overflows", IncreaseBalance(state, test.i, test.nb))
|
||||
}
|
||||
}
|
||||
|
||||
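// --- Illustrative sketch (not part of the diff) ---
// The two TotalBalance tests above pin down the spec rule behind the helper
// (get_total_balance in the consensus specs): the sum of effective balances is
// floored at EFFECTIVE_BALANCE_INCREMENT so later divisions by the total can
// never divide by zero. A minimal, self-contained sketch of that rule; the
// constant and helper name here are illustrative, not Prysm's actual API.
package main

import "fmt"

const effectiveBalanceIncrement uint64 = 1_000_000_000 // 1 ETH in Gwei, illustrative

// totalBalance mirrors max(EFFECTIVE_BALANCE_INCREMENT, sum(effective balances)).
func totalBalance(effectiveBalances []uint64) uint64 {
    var sum uint64
    for _, b := range effectiveBalances {
        sum += b
    }
    if sum < effectiveBalanceIncrement {
        return effectiveBalanceIncrement
    }
    return sum
}

func main() {
    fmt.Println(totalBalance(nil))                        // 1000000000: the floor applies
    fmt.Println(totalBalance([]uint64{27e9, 28e9, 32e9})) // 87000000000
}
// --- End sketch ---
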
@@ -26,7 +26,7 @@ var (
// 1. Checks if the public key exists in the sync committee cache
// 2. If 1 fails, checks if the public key exists in the input current sync committee object
func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.ValidatorIndex) (bool, error) {
    root, err := SyncPeriodBoundaryRoot(st)
    root, err := syncPeriodBoundaryRoot(st)
    if err != nil {
        return false, err
    }
@@ -63,7 +63,7 @@ func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.Valida
func IsNextPeriodSyncCommittee(
    st state.BeaconState, valIdx primitives.ValidatorIndex,
) (bool, error) {
    root, err := SyncPeriodBoundaryRoot(st)
    root, err := syncPeriodBoundaryRoot(st)
    if err != nil {
        return false, err
    }
@@ -90,7 +90,7 @@ func IsNextPeriodSyncCommittee(
func CurrentPeriodSyncSubcommitteeIndices(
    st state.BeaconState, valIdx primitives.ValidatorIndex,
) ([]primitives.CommitteeIndex, error) {
    root, err := SyncPeriodBoundaryRoot(st)
    root, err := syncPeriodBoundaryRoot(st)
    if err != nil {
        return nil, err
    }
@@ -124,7 +124,7 @@ func CurrentPeriodSyncSubcommitteeIndices(
func NextPeriodSyncSubcommitteeIndices(
    st state.BeaconState, valIdx primitives.ValidatorIndex,
) ([]primitives.CommitteeIndex, error) {
    root, err := SyncPeriodBoundaryRoot(st)
    root, err := syncPeriodBoundaryRoot(st)
    if err != nil {
        return nil, err
    }
@@ -182,10 +182,10 @@ func findSubCommitteeIndices(pubKey []byte, pubKeys [][]byte) []primitives.Commi
    return indices
}

// SyncPeriodBoundaryRoot computes the current sync period boundary root by calculating sync period start epoch
// Retrieve the current sync period boundary root by calculating sync period start epoch
// and calling `BlockRoot`.
// It uses the boundary slot - 1 for block root. (Ex: SlotsPerEpoch * EpochsPerSyncCommitteePeriod - 1)
func SyncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([32]byte, error) {
func syncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([32]byte, error) {
    // Can't call `BlockRoot` until the first slot.
    if st.Slot() == params.BeaconConfig().GenesisSlot {
        return params.BeaconConfig().ZeroHash, nil

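// --- Illustrative sketch (not part of the diff) ---
// syncPeriodBoundaryRoot above looks up the block root at the slot just before
// the current sync committee period started. A self-contained sketch of that
// slot arithmetic, assuming the mainnet values of 32 slots per epoch and 256
// epochs per sync committee period; Prysm reads both from params.BeaconConfig().
package main

import "fmt"

const (
    slotsPerEpoch                = 32  // illustrative mainnet value
    epochsPerSyncCommitteePeriod = 256 // illustrative mainnet value
)

// boundarySlot returns the first slot of the sync committee period containing
// `slot`; the root is then taken at boundarySlot-1, matching the
// "SlotsPerEpoch * EpochsPerSyncCommitteePeriod - 1" example in the comment above.
func boundarySlot(slot uint64) uint64 {
    epoch := slot / slotsPerEpoch
    periodStartEpoch := epoch - epoch%epochsPerSyncCommitteePeriod
    return periodStartEpoch * slotsPerEpoch
}

func main() {
    fmt.Println(boundarySlot(8192))  // 8192: exactly on a period boundary
    fmt.Println(boundarySlot(10000)) // 8192: later in the same period
}
// --- End sketch ---
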
@@ -1,4 +1,4 @@
package helpers_test
package helpers

import (
    "math/rand"
@@ -7,7 +7,6 @@ import (
    "time"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -18,7 +17,7 @@ import (
)

func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -41,15 +40,15 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    r := [32]byte{'a'}
    require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
    require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))

    ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 0)
    ok, err := IsCurrentPeriodSyncCommittee(state, 0)
    require.NoError(t, err)
    require.Equal(t, true, ok)
}

func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -71,13 +70,13 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 0)
    ok, err := IsCurrentPeriodSyncCommittee(state, 0)
    require.NoError(t, err)
    require.Equal(t, true, ok)
}

func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -99,13 +98,13 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 12390192)
    ok, err := IsCurrentPeriodSyncCommittee(state, 12390192)
    require.ErrorContains(t, "validator index 12390192 does not exist", err)
    require.Equal(t, false, ok)
}

func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -128,15 +127,15 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    r := [32]byte{'a'}
    require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
    require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))

    ok, err := helpers.IsNextPeriodSyncCommittee(state, 0)
    ok, err := IsNextPeriodSyncCommittee(state, 0)
    require.NoError(t, err)
    require.Equal(t, true, ok)
}

func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -158,13 +157,13 @@ func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    ok, err := helpers.IsNextPeriodSyncCommittee(state, 0)
    ok, err := IsNextPeriodSyncCommittee(state, 0)
    require.NoError(t, err)
    require.Equal(t, true, ok)
}

func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -186,13 +185,13 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    ok, err := helpers.IsNextPeriodSyncCommittee(state, 120391029)
    ok, err := IsNextPeriodSyncCommittee(state, 120391029)
    require.ErrorContains(t, "validator index 120391029 does not exist", err)
    require.Equal(t, false, ok)
}

func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -215,15 +214,15 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    r := [32]byte{'a'}
    require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
    require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))

    index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 0)
    index, err := CurrentPeriodSyncSubcommitteeIndices(state, 0)
    require.NoError(t, err)
    require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}

func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -244,27 +243,27 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
    require.NoError(t, err)
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
    root, err := helpers.SyncPeriodBoundaryRoot(state)
    root, err := syncPeriodBoundaryRoot(state)
    require.NoError(t, err)

    // Test that cache was empty.
    _, err = helpers.SyncCommitteeCache().CurrentPeriodIndexPosition(root, 0)
    _, err = syncCommitteeCache.CurrentPeriodIndexPosition(root, 0)
    require.Equal(t, cache.ErrNonExistingSyncCommitteeKey, err)

    // Test that helper can retrieve the index given empty cache.
    index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 0)
    index, err := CurrentPeriodSyncSubcommitteeIndices(state, 0)
    require.NoError(t, err)
    require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)

    // Test that cache was able to fill on miss.
    time.Sleep(100 * time.Millisecond)
    index, err = helpers.SyncCommitteeCache().CurrentPeriodIndexPosition(root, 0)
    index, err = syncCommitteeCache.CurrentPeriodIndexPosition(root, 0)
    require.NoError(t, err)
    require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}

func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -286,13 +285,13 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
    index, err := CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
    require.ErrorContains(t, "validator index 129301923 does not exist", err)
    require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}

func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -315,15 +314,15 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    r := [32]byte{'a'}
    require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
    require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))

    index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 0)
    index, err := NextPeriodSyncSubcommitteeIndices(state, 0)
    require.NoError(t, err)
    require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}

func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -345,13 +344,13 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 0)
    index, err := NextPeriodSyncSubcommitteeIndices(state, 0)
    require.NoError(t, err)
    require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}

func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -373,43 +372,43 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 21093019)
    index, err := NextPeriodSyncSubcommitteeIndices(state, 21093019)
    require.ErrorContains(t, "validator index 21093019 does not exist", err)
    require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}

func TestUpdateSyncCommitteeCache_BadSlot(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
        Slot: 1,
    })
    require.NoError(t, err)
    err = helpers.UpdateSyncCommitteeCache(state)
    err = UpdateSyncCommitteeCache(state)
    require.ErrorContains(t, "not at the end of the epoch to update cache", err)

    state, err = state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
        Slot: params.BeaconConfig().SlotsPerEpoch - 1,
    })
    require.NoError(t, err)
    err = helpers.UpdateSyncCommitteeCache(state)
    err = UpdateSyncCommitteeCache(state)
    require.ErrorContains(t, "not at sync committee period boundary to update cache", err)
}

func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
        Slot:              primitives.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch - 1,
        LatestBlockHeader: &ethpb.BeaconBlockHeader{StateRoot: params.BeaconConfig().ZeroHash[:]},
    })
    require.NoError(t, err)
    err = helpers.UpdateSyncCommitteeCache(state)
    err = UpdateSyncCommitteeCache(state)
    require.ErrorContains(t, "zero hash state root can't be used to update cache", err)
}

func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -436,7 +435,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

    comIdxs, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 200)
    comIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
    require.NoError(t, err)

    wantedSlot := params.BeaconConfig().EpochsPerSyncCommitteePeriod.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))
@@ -447,7 +446,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
        syncCommittee.Pubkeys[i], syncCommittee.Pubkeys[j] = syncCommittee.Pubkeys[j], syncCommittee.Pubkeys[i]
    })
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    newIdxs, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 200)
    newIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
    require.NoError(t, err)
    require.DeepNotEqual(t, comIdxs, newIdxs)
}

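// --- Illustrative sketch (not part of the diff) ---
// The subcommittee tests above rely on the Altair position-to-subnet mapping:
// a sync committee of SYNC_COMMITTEE_SIZE members splits into
// SYNC_COMMITTEE_SUBNET_COUNT contiguous subcommittees, so a member's subnet
// is its committee position divided by the subcommittee size. The sketch below
// assumes the mainnet constants (512 and 4); both are illustrative here.
package main

import "fmt"

const (
    syncCommitteeSize        = 512 // illustrative mainnet value
    syncCommitteeSubnetCount = 4   // illustrative mainnet value
)

// subcommitteeIndex maps a position in the full committee to its subnet.
func subcommitteeIndex(position uint64) uint64 {
    return position / (syncCommitteeSize / syncCommitteeSubnetCount)
}

func main() {
    fmt.Println(subcommitteeIndex(0))   // 0
    fmt.Println(subcommitteeIndex(127)) // 0: last member of the first subcommittee
    fmt.Println(subcommitteeIndex(128)) // 1
    fmt.Println(subcommitteeIndex(511)) // 3
}
// --- End sketch ---
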
@@ -1,4 +1,4 @@
package helpers_test
package helpers

import (
    "context"
@@ -6,7 +6,6 @@ import (
    "testing"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
    forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
    state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
@@ -33,7 +32,7 @@ func TestIsActiveValidator_OK(t *testing.T) {
    }
    for _, test := range tests {
        validator := &ethpb.Validator{ActivationEpoch: 10, ExitEpoch: 100}
        assert.Equal(t, test.b, helpers.IsActiveValidator(validator, test.a), "IsActiveValidator(%d)", test.a)
        assert.Equal(t, test.b, IsActiveValidator(validator, test.a), "IsActiveValidator(%d)", test.a)
    }
}

@@ -54,7 +53,7 @@ func TestIsActiveValidatorUsingTrie_OK(t *testing.T) {
    for _, test := range tests {
        readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
        require.NoError(t, err)
        assert.Equal(t, test.b, helpers.IsActiveValidatorUsingTrie(readOnlyVal, test.a), "IsActiveValidatorUsingTrie(%d)", test.a)
        assert.Equal(t, test.b, IsActiveValidatorUsingTrie(readOnlyVal, test.a), "IsActiveValidatorUsingTrie(%d)", test.a)
    }
}

@@ -82,7 +81,7 @@ func TestIsActiveNonSlashedValidatorUsingTrie_OK(t *testing.T) {
        require.NoError(t, err)
        readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
        require.NoError(t, err)
        assert.Equal(t, test.b, helpers.IsActiveNonSlashedValidatorUsingTrie(readOnlyVal, test.a), "IsActiveNonSlashedValidatorUsingTrie(%d)", test.a)
        assert.Equal(t, test.b, IsActiveNonSlashedValidatorUsingTrie(readOnlyVal, test.a), "IsActiveNonSlashedValidatorUsingTrie(%d)", test.a)
    }
}

@@ -162,7 +161,7 @@ func TestIsSlashableValidator_OK(t *testing.T) {
    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            t.Run("without trie", func(t *testing.T) {
                slashableValidator := helpers.IsSlashableValidator(test.validator.ActivationEpoch,
                slashableValidator := IsSlashableValidator(test.validator.ActivationEpoch,
                    test.validator.WithdrawableEpoch, test.validator.Slashed, test.epoch)
                assert.Equal(t, test.slashable, slashableValidator, "Expected active validator slashable to be %t", test.slashable)
            })
@@ -171,7 +170,7 @@
                require.NoError(t, err)
                readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
                require.NoError(t, err)
                slashableValidator := helpers.IsSlashableValidatorUsingTrie(readOnlyVal, test.epoch)
                slashableValidator := IsSlashableValidatorUsingTrie(readOnlyVal, test.epoch)
                assert.Equal(t, test.slashable, slashableValidator, "Expected active validator slashable to be %t", test.slashable)
            })
        })
@@ -224,17 +223,17 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
    }

    for _, tt := range tests {
        helpers.ClearCache()
        ClearCache()

        require.NoError(t, state.SetSlot(tt.slot))
        result, err := helpers.BeaconProposerIndex(context.Background(), state)
        result, err := BeaconProposerIndex(context.Background(), state)
        require.NoError(t, err, "Failed to get shard and committees at slot")
        assert.Equal(t, tt.index, result, "Result index was an unexpected value")
    }
}

func TestBeaconProposerIndex_BadState(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    params.SetupTestConfigCleanup(t)
    c := params.BeaconConfig()
@@ -262,12 +261,12 @@
    // Set a very high slot, so that retrieved block root will be
    // non existent for the proposer cache.
    require.NoError(t, state.SetSlot(100))
    _, err = helpers.BeaconProposerIndex(context.Background(), state)
    _, err = BeaconProposerIndex(context.Background(), state)
    require.NoError(t, err)
}

func TestComputeProposerIndex_Compatibility(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
    for i := 0; i < len(validators); i++ {
@@ -282,22 +281,22 @@
    })
    require.NoError(t, err)

    indices, err := helpers.ActiveValidatorIndices(context.Background(), state, 0)
    indices, err := ActiveValidatorIndices(context.Background(), state, 0)
    require.NoError(t, err)

    var proposerIndices []primitives.ValidatorIndex
    seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
    seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
    require.NoError(t, err)
    for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
        seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
        seedWithSlotHash := hash.Hash(seedWithSlot)
        index, err := helpers.ComputeProposerIndex(state, indices, seedWithSlotHash)
        index, err := ComputeProposerIndex(state, indices, seedWithSlotHash)
        require.NoError(t, err)
        proposerIndices = append(proposerIndices, index)
    }

    var wantedProposerIndices []primitives.ValidatorIndex
    seed, err = helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
    seed, err = Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
    require.NoError(t, err)
    for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
        seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
@@ -310,15 +309,15 @@
}

func TestDelayedActivationExitEpoch_OK(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    epoch := primitives.Epoch(9999)
    wanted := epoch + 1 + params.BeaconConfig().MaxSeedLookahead
    assert.Equal(t, wanted, helpers.ActivationExitEpoch(epoch))
    assert.Equal(t, wanted, ActivationExitEpoch(epoch))
}

func TestActiveValidatorCount_Genesis(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    c := 1000
    validators := make([]*ethpb.Validator, c)
@@ -335,10 +334,10 @@
    require.NoError(t, err)

    // Preset cache to a bad count.
    seed, err := helpers.Seed(beaconState, 0, params.BeaconConfig().DomainBeaconAttester)
    seed, err := Seed(beaconState, 0, params.BeaconConfig().DomainBeaconAttester)
    require.NoError(t, err)
    require.NoError(t, helpers.CommitteeCache().AddCommitteeShuffledList(context.Background(), &cache.Committees{Seed: seed, ShuffledIndices: []primitives.ValidatorIndex{1, 2, 3}}))
    validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
    require.NoError(t, committeeCache.AddCommitteeShuffledList(context.Background(), &cache.Committees{Seed: seed, ShuffledIndices: []primitives.ValidatorIndex{1, 2, 3}}))
    validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
    require.NoError(t, err)
    assert.Equal(t, uint64(c), validatorCount, "Did not get the correct validator count")
}
@@ -354,7 +353,7 @@ func TestChurnLimit_OK(t *testing.T) {
        {validatorCount: 2000000, wantedChurn: 30 /* validatorCount/churnLimitQuotient */},
    }
    for _, test := range tests {
        helpers.ClearCache()
        ClearCache()

        validators := make([]*ethpb.Validator, test.validatorCount)
        for i := 0; i < len(validators); i++ {
@@ -369,9 +368,9 @@
            RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
        })
        require.NoError(t, err)
        validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
        validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
        require.NoError(t, err)
        resultChurn := helpers.ValidatorActivationChurnLimit(validatorCount)
        resultChurn := ValidatorActivationChurnLimit(validatorCount)
        assert.Equal(t, test.wantedChurn, resultChurn, "ValidatorActivationChurnLimit(%d)", test.validatorCount)
    }
}
@@ -387,7 +386,7 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
        {2000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
    }
    for _, test := range tests {
        helpers.ClearCache()
        ClearCache()

        // Create validators
        validators := make([]*ethpb.Validator, test.validatorCount)
@@ -406,11 +405,11 @@
        require.NoError(t, err)

        // Get active validator count
        validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
        validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
        require.NoError(t, err)

        // Test churn limit calculation
        resultChurn := helpers.ValidatorActivationChurnLimitDeneb(validatorCount)
        resultChurn := ValidatorActivationChurnLimitDeneb(validatorCount)
        assert.Equal(t, test.wantedChurn, resultChurn)
    }
}
@@ -575,11 +574,11 @@ func TestActiveValidatorIndices(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            helpers.ClearCache()
            ClearCache()

            s, err := state_native.InitializeFromProtoPhase0(tt.args.state)
            require.NoError(t, err)
            got, err := helpers.ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
            got, err := ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
            if tt.wantedErr != "" {
                assert.ErrorContains(t, tt.wantedErr, err)
                return
@@ -685,12 +684,12 @@ func TestComputeProposerIndex(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            helpers.ClearCache()
            ClearCache()

            bState := &ethpb.BeaconState{Validators: tt.args.validators}
            stTrie, err := state_native.InitializeFromProtoUnsafePhase0(bState)
            require.NoError(t, err)
            got, err := helpers.ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
            got, err := ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
            if tt.wantedErr != "" {
                assert.ErrorContains(t, tt.wantedErr, err)
                return
@@ -719,9 +718,9 @@ func TestIsEligibleForActivationQueue(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            helpers.ClearCache()
            ClearCache()

            assert.Equal(t, tt.want, helpers.IsEligibleForActivationQueue(tt.validator), "IsEligibleForActivationQueue()")
            assert.Equal(t, tt.want, IsEligibleForActivationQueue(tt.validator), "IsEligibleForActivationQueue()")
        })
    }
}
@@ -748,11 +747,11 @@ func TestIsIsEligibleForActivation(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            helpers.ClearCache()
            ClearCache()

            s, err := state_native.InitializeFromProtoPhase0(tt.state)
            require.NoError(t, err)
            assert.Equal(t, tt.want, helpers.IsEligibleForActivation(s, tt.validator), "IsEligibleForActivation()")
            assert.Equal(t, tt.want, IsEligibleForActivation(s, tt.validator), "IsEligibleForActivation()")
        })
    }
}
@@ -766,7 +765,7 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
    hashFunc := hash.CustomSHA256Hasher()

    for i := uint64(0); ; i++ {
        candidateIndex, err := helpers.ComputeShuffledIndex(primitives.ValidatorIndex(i%length), length, seed, true /* shuffle */)
        candidateIndex, err := ComputeShuffledIndex(primitives.ValidatorIndex(i%length), length, seed, true /* shuffle */)
        if err != nil {
            return 0, err
        }
@@ -788,7 +787,7 @@
}

func TestLastActivatedValidatorIndex_OK(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
    require.NoError(t, err)
@@ -807,13 +806,13 @@
    require.NoError(t, beaconState.SetValidators(validators))
    require.NoError(t, beaconState.SetBalances(balances))

    index, err := helpers.LastActivatedValidatorIndex(context.Background(), beaconState)
    index, err := LastActivatedValidatorIndex(context.Background(), beaconState)
    require.NoError(t, err)
    require.Equal(t, index, primitives.ValidatorIndex(3))
}

func TestProposerIndexFromCheckpoint(t *testing.T) {
    helpers.ClearCache()
    ClearCache()

    e := primitives.Epoch(2)
    r := [32]byte{'a'}
@@ -821,10 +820,10 @@
    ids := [32]primitives.ValidatorIndex{}
    slot := primitives.Slot(69) // slot 5 in the Epoch
    ids[5] = primitives.ValidatorIndex(19)
    helpers.ProposerIndicesCache().Set(e, r, ids)
    proposerIndicesCache.Set(e, r, ids)
    c := &forkchoicetypes.Checkpoint{Root: root, Epoch: e - 1}
    helpers.ProposerIndicesCache().SetCheckpoint(*c, r)
    id, err := helpers.ProposerIndexAtSlotFromCheckpoint(c, slot)
    proposerIndicesCache.SetCheckpoint(*c, r)
    id, err := ProposerIndexAtSlotFromCheckpoint(c, slot)
    require.NoError(t, err)
    require.Equal(t, ids[5], id)
}

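// --- Illustrative sketch (not part of the diff) ---
// computeProposerIndexWithValidators above re-implements the spec's
// compute_proposer_index to cross-check ComputeProposerIndex: walk shuffled
// candidate indices and accept a candidate with probability proportional to
// its effective balance, drawing one hash byte per attempt. The acceptance
// rule in isolation, with shuffling stubbed out and an illustrative 32 ETH
// maximum effective balance:
package main

import (
    "crypto/sha256"
    "encoding/binary"
    "fmt"
)

const maxEffectiveBalance uint64 = 32_000_000_000 // illustrative, in Gwei

// acceptCandidate applies the balance-weighted coin flip from the spec: draw
// byte i%32 of hash(seed ++ uint64le(i/32)) and accept the candidate when
// effectiveBalance * 255 >= maxEffectiveBalance * randomByte.
func acceptCandidate(seed [32]byte, i uint64, effectiveBalance uint64) bool {
    buf := make([]byte, 40)
    copy(buf, seed[:])
    binary.LittleEndian.PutUint64(buf[32:], i/32)
    h := sha256.Sum256(buf)
    randomByte := h[i%32]
    return effectiveBalance*255 >= maxEffectiveBalance*uint64(randomByte)
}

func main() {
    seed := sha256.Sum256([]byte("example seed"))
    fmt.Println(acceptCandidate(seed, 0, maxEffectiveBalance)) // always true at full balance
    fmt.Println(acceptCandidate(seed, 0, 1_000_000_000))       // true only for small hash bytes
}
// --- End sketch ---
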
@@ -1,21 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["helpers.go"],
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
    visibility = ["//visibility:public"],
    deps = [
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)
@@ -1,240 +0,0 @@
package peerdas

import (
    "encoding/binary"

    cKzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
    "github.com/holiman/uint256"

    "github.com/ethereum/go-ethereum/p2p/enode"
    errors "github.com/pkg/errors"
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/crypto/hash"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

const (
    // Bytes per cell
    bytesPerCell = cKzg4844.FieldElementsPerCell * cKzg4844.BytesPerFieldElement

    // Number of cells in the extended matrix
    extendedMatrixSize = fieldparams.MaxBlobsPerBlock * cKzg4844.CellsPerExtBlob
)

type (
    extendedMatrix []cKzg4844.Cell

    cellCoordinate struct {
        blobIndex uint64
        cellID    uint64
    }
)

var (
    errCustodySubnetCountTooLarge = errors.New("custody subnet count larger than data column sidecar subnet count")
    errCellNotFound               = errors.New("cell not found (should never happen)")
)

// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#helper-functions
func CustodyColumns(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
    dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount

    // Compute the custodied subnets.
    subnetIds, err := CustodyColumnSubnets(nodeId, custodySubnetCount)
    if err != nil {
        return nil, errors.Wrap(err, "custody subnets")
    }

    columnsPerSubnet := cKzg4844.CellsPerExtBlob / dataColumnSidecarSubnetCount

    // Knowing the subnet ID and the number of columns per subnet, select all the columns the node should custody.
    // Columns belonging to the same subnet are contiguous.
    columnIndices := make(map[uint64]bool, custodySubnetCount*columnsPerSubnet)
    for i := uint64(0); i < columnsPerSubnet; i++ {
        for subnetId := range subnetIds {
            columnIndex := dataColumnSidecarSubnetCount*i + subnetId
            columnIndices[columnIndex] = true
        }
    }

    return columnIndices, nil
}

func CustodyColumnSubnets(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
    dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount

    // Check if the custody subnet count is larger than the data column sidecar subnet count.
    if custodySubnetCount > dataColumnSidecarSubnetCount {
        return nil, errCustodySubnetCountTooLarge
    }

    // First, compute the subnet IDs that the node should participate in.
    subnetIds := make(map[uint64]bool, custodySubnetCount)

    for i := uint64(0); uint64(len(subnetIds)) < custodySubnetCount; i++ {
        nodeIdUInt256, nextNodeIdUInt256 := new(uint256.Int), new(uint256.Int)
        nodeIdUInt256.SetBytes(nodeId.Bytes())
        nextNodeIdUInt256.Add(nodeIdUInt256, uint256.NewInt(i))
        nextNodeIdUInt64 := nextNodeIdUInt256.Uint64()
        nextNodeId := bytesutil.Uint64ToBytesLittleEndian(nextNodeIdUInt64)

        hashedNextNodeId := hash.Hash(nextNodeId)
        subnetId := binary.LittleEndian.Uint64(hashedNextNodeId[:8]) % dataColumnSidecarSubnetCount

        if _, exists := subnetIds[subnetId]; !exists {
            subnetIds[subnetId] = true
        }
    }

    return subnetIds, nil
}

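// --- Illustrative sketch (not part of the diff) ---
// CustodyColumnSubnets above (removed in this commit) derives a node's custody
// subnets deterministically: hash successive offsets of the node ID and reduce
// each digest modulo the subnet count until enough distinct subnets are found.
// A simplified sketch using a plain uint64 node ID instead of enode.ID and
// uint256; the subnet count is illustrative.
package main

import (
    "crypto/sha256"
    "encoding/binary"
    "fmt"
)

const dataColumnSidecarSubnetCount uint64 = 32 // illustrative

// custodySubnets hashes nodeID+i for i = 0, 1, ... until `count` distinct
// subnet IDs have been collected.
func custodySubnets(nodeID, count uint64) map[uint64]bool {
    if count > dataColumnSidecarSubnetCount {
        count = dataColumnSidecarSubnetCount // avoid an unsatisfiable loop
    }
    subnets := make(map[uint64]bool, count)
    for i := uint64(0); uint64(len(subnets)) < count; i++ {
        var buf [8]byte
        binary.LittleEndian.PutUint64(buf[:], nodeID+i)
        digest := sha256.Sum256(buf[:])
        subnets[binary.LittleEndian.Uint64(digest[:8])%dataColumnSidecarSubnetCount] = true
    }
    return subnets
}

func main() {
    fmt.Println(custodySubnets(42, 4)) // four distinct subnet IDs in [0, 32)
}
// --- End sketch ---
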
// ComputeExtendedMatrix computes the extended matrix from the blobs.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#compute_extended_matrix
func ComputeExtendedMatrix(blobs []cKzg4844.Blob) (extendedMatrix, error) {
    matrix := make(extendedMatrix, 0, extendedMatrixSize)

    for i := range blobs {
        // Chunk a non-extended blob into cells representing the corresponding extended blob.
        blob := &blobs[i]
        cells, err := cKzg4844.ComputeCells(blob)
        if err != nil {
            return nil, errors.Wrap(err, "compute cells for blob")
        }

        matrix = append(matrix, cells[:]...)
    }

    return matrix, nil
}

// RecoverMatrix recovers the extended matrix from some cells.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
func RecoverMatrix(cellFromCoordinate map[cellCoordinate]cKzg4844.Cell, blobCount uint64) (extendedMatrix, error) {
    matrix := make(extendedMatrix, 0, extendedMatrixSize)

    for blobIndex := uint64(0); blobIndex < blobCount; blobIndex++ {
        // Filter all cells that belong to the current blob.
        cellIds := make([]uint64, 0, cKzg4844.CellsPerExtBlob)
        for coordinate := range cellFromCoordinate {
            if coordinate.blobIndex == blobIndex {
                cellIds = append(cellIds, coordinate.cellID)
            }
        }

        // Retrieve cells corresponding to all `cellIds`.
        cellIdsCount := len(cellIds)

        cells := make([]cKzg4844.Cell, 0, cellIdsCount)
        for _, cellId := range cellIds {
            coordinate := cellCoordinate{blobIndex: blobIndex, cellID: cellId}
            cell, ok := cellFromCoordinate[coordinate]
            if !ok {
                return matrix, errCellNotFound
            }

            cells = append(cells, cell)
        }

        // Recover all cells.
        allCellsForRow, err := cKzg4844.RecoverAllCells(cellIds, cells)
        if err != nil {
            return matrix, errors.Wrap(err, "recover all cells")
        }

        matrix = append(matrix, allCellsForRow[:]...)
    }

    return matrix, nil
}

// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
func DataColumnSidecars(signedBlock interfaces.SignedBeaconBlock, blobs []cKzg4844.Blob) ([]*ethpb.DataColumnSidecar, error) {
    blobsCount := len(blobs)

    // Get the signed block header.
    signedBlockHeader, err := signedBlock.Header()
    if err != nil {
        return nil, errors.Wrap(err, "signed block header")
    }

    // Get the block body.
    block := signedBlock.Block()
    blockBody := block.Body()

    // Get the blob KZG commitments.
    blobKzgCommitments, err := blockBody.BlobKzgCommitments()
    if err != nil {
        return nil, errors.Wrap(err, "blob KZG commitments")
    }

    // Compute the KZG commitments inclusion proof.
    kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
    if err != nil {
        return nil, errors.Wrap(err, "merkle proof ZKG commitments")
    }

    // Compute cells and proofs.
    cells := make([][cKzg4844.CellsPerExtBlob]cKzg4844.Cell, 0, blobsCount)
    proofs := make([][cKzg4844.CellsPerExtBlob]cKzg4844.KZGProof, 0, blobsCount)

    for i := range blobs {
        blob := &blobs[i]
        blobCells, blobProofs, err := cKzg4844.ComputeCellsAndProofs(blob)
        if err != nil {
            return nil, errors.Wrap(err, "compute cells and proofs")
        }

        cells = append(cells, blobCells)
        proofs = append(proofs, blobProofs)
    }

    // Get the column sidecars.
    sidecars := make([]*ethpb.DataColumnSidecar, cKzg4844.CellsPerExtBlob)
    for columnIndex := uint64(0); columnIndex < cKzg4844.CellsPerExtBlob; columnIndex++ {
        column := make([]cKzg4844.Cell, 0, blobsCount)
        kzgProofOfColumn := make([]cKzg4844.KZGProof, 0, blobsCount)

        for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
            cell := cells[rowIndex][columnIndex]
            column = append(column, cell)

            kzgProof := proofs[rowIndex][columnIndex]
            kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
        }

        columnBytes := make([][]byte, 0, blobsCount)
        for i := range column {
            cell := column[i]

            cellBytes := make([]byte, 0, bytesPerCell)
            for _, fieldElement := range cell {
                cellBytes = append(cellBytes, fieldElement[:]...)
            }

            columnBytes = append(columnBytes, cellBytes)
        }

        kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
        for _, kzgProof := range kzgProofOfColumn {
            kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, kzgProof[:])
        }

        sidecar := &ethpb.DataColumnSidecar{
            ColumnIndex:                  columnIndex,
            DataColumn:                   columnBytes,
            KzgCommitments:               blobKzgCommitments,
            KzgProof:                     kzgProofOfColumnBytes,
            SignedBlockHeader:            signedBlockHeader,
            KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
        }

        sidecars = append(sidecars, sidecar)
    }

    return sidecars, nil
}
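
// --- Illustrative sketch (not part of the diff) ---
// DataColumnSidecars above (removed in this commit) builds one sidecar per
// column by transposing the per-blob cell rows: cells[row][col] is regrouped
// so that column `col` collects that cell from every blob. The transposition
// in isolation, with strings standing in for KZG cells (illustrative only):
package main

import "fmt"

// columns transposes rows[blob][column] into out[column][blob].
func columns(rows [][]string) [][]string {
    if len(rows) == 0 {
        return nil
    }
    out := make([][]string, len(rows[0]))
    for col := range out {
        out[col] = make([]string, len(rows))
        for row := range rows {
            out[col][row] = rows[row][col]
        }
    }
    return out
}

func main() {
    rows := [][]string{{"b0c0", "b0c1"}, {"b1c0", "b1c1"}}
    fmt.Println(columns(rows)) // [[b0c0 b1c0] [b0c1 b1c1]]
}
// --- End sketch ---
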
@@ -92,7 +92,7 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
    if e.diskSummary.AllAvailable(kc.count()) {
        return nil, nil
    }
    scs := make([]blocks.ROBlob, 0, kc.count())
    scs := make([]blocks.ROBlob, kc.count())
    for i := uint64(0); i < fieldparams.MaxBlobsPerBlock; i++ {
        // We already have this blob, we don't need to write it or validate it.
        if e.diskSummary.HasIndex(i) {
@@ -111,7 +111,7 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
        if !bytes.Equal(kc[i], e.scs[i].KzgCommitment) {
            return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitment, kc[i])
        }
        scs = append(scs, *e.scs[i])
        scs[i] = *e.scs[i]
    }

    return scs, nil

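// --- Illustrative sketch (not part of the diff) ---
// The filter change above swaps an append-compacted slice for an index-aligned
// one: with make(n) plus indexed writes, entry i always corresponds to
// commitment i even when earlier indices are skipped. Mixing the two styles,
// appending onto a slice made with non-zero length, is the classic pitfall
// this kind of change has to avoid. A tiny demonstration:
package main

import "fmt"

func main() {
    n := 3

    a := make([]int, 0, n) // length 0, capacity n: pair with append
    a = append(a, 7)       // [7]

    b := make([]int, n) // length n, zero-filled: pair with indexed writes
    b[1] = 7            // [0 7 0]

    c := make([]int, n) // the pitfall: append onto a length-n slice
    c = append(c, 7)    // [0 0 0 7] -- grows to length 4, zeros kept

    fmt.Println(a, b, c)
}
// --- End sketch ---
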
@@ -3,14 +3,9 @@ package das
import (
    "testing"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
    "github.com/prysmaticlabs/prysm/v5/testing/util"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
)

func TestCacheEnsureDelete(t *testing.T) {
@@ -28,145 +23,3 @@ func TestCacheEnsureDelete(t *testing.T) {
    var nilEntry *cacheEntry
    require.Equal(t, nilEntry, c.entries[k])
}

type filterTestCaseSetupFunc func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)

func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpected int) filterTestCaseSetupFunc {
    return func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
        blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, nBlobs)
        commits, err := commitmentsToCheck(blk, blk.Block().Slot())
        require.NoError(t, err)
        entry := &cacheEntry{}
        if len(onDisk) > 0 {
            od := map[[32]byte][]int{blk.Root(): onDisk}
            sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
            sum := sumz.Summary(blk.Root())
            entry.setDiskSummary(sum)
        }
        expected := make([]blocks.ROBlob, 0, nBlobs)
        for i := 0; i < commits.count(); i++ {
            if entry.diskSummary.HasIndex(uint64(i)) {
                continue
            }
            // If we aren't telling the cache a blob is on disk, add it to the expected list and stash.
            expected = append(expected, blobs[i])
            require.NoError(t, entry.stash(&blobs[i]))
        }
        require.Equal(t, numExpected, len(expected))
        return entry, commits, expected
    }
}

func TestFilterDiskSummary(t *testing.T) {
    denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
    require.NoError(t, err)
    cases := []struct {
        name  string
        setup filterTestCaseSetupFunc
    }{
        {
            name:  "full blobs, all on disk",
            setup: filterTestCaseSetup(denebSlot, 6, []int{0, 1, 2, 3, 4, 5}, 0),
        },
        {
            name:  "full blobs, first on disk",
            setup: filterTestCaseSetup(denebSlot, 6, []int{0}, 5),
        },
        {
            name:  "full blobs, middle on disk",
            setup: filterTestCaseSetup(denebSlot, 6, []int{2}, 5),
        },
        {
            name:  "full blobs, last on disk",
            setup: filterTestCaseSetup(denebSlot, 6, []int{5}, 5),
        },
        {
            name:  "full blobs, none on disk",
            setup: filterTestCaseSetup(denebSlot, 6, []int{}, 6),
        },
        {
            name:  "one commitment, on disk",
            setup: filterTestCaseSetup(denebSlot, 1, []int{0}, 0),
        },
        {
            name:  "one commitment, not on disk",
            setup: filterTestCaseSetup(denebSlot, 1, []int{}, 1),
        },
        {
            name:  "two commitments, first on disk",
            setup: filterTestCaseSetup(denebSlot, 2, []int{0}, 1),
        },
        {
            name:  "two commitments, last on disk",
            setup: filterTestCaseSetup(denebSlot, 2, []int{1}, 1),
        },
        {
            name:  "two commitments, none on disk",
            setup: filterTestCaseSetup(denebSlot, 2, []int{}, 2),
        },
        {
            name:  "two commitments, all on disk",
            setup: filterTestCaseSetup(denebSlot, 2, []int{0, 1}, 0),
        },
    }
    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            entry, commits, expected := c.setup(t)
            // first (root) argument doesn't matter, it is just for logs
            got, err := entry.filter([32]byte{}, commits)
            require.NoError(t, err)
            require.Equal(t, len(expected), len(got))
        })
    }
}

func TestFilter(t *testing.T) {
    denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
    require.NoError(t, err)
    cases := []struct {
        name  string
        setup func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)
        err   error
    }{
        {
            name: "commitments mismatch - extra sidecar",
            setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
                entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
                commits[5] = nil
                return entry, commits, expected
            },
            err: errCommitmentMismatch,
        },
        {
            name: "sidecar missing",
            setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
                entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
                entry.scs[5] = nil
                return entry, commits, expected
            },
            err: errMissingSidecar,
        },
        {
            name: "commitments mismatch - different bytes",
            setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
                entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
                entry.scs[5].KzgCommitment = []byte("nope")
                return entry, commits, expected
            },
            err: errCommitmentMismatch,
        },
    }
    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            entry, commits, expected := c.setup(t)
            // first (root) argument doesn't matter, it is just for logs
            got, err := entry.filter([32]byte{}, commits)
            if c.err != nil {
                require.ErrorIs(t, err, c.err)
                return
            }
            require.NoError(t, err)
            require.Equal(t, len(expected), len(got))
        })
    }
}

@@ -3,7 +3,6 @@ package filesystem
import (
    "context"
    "fmt"
    "math"
    "os"
    "path"
    "strconv"
@@ -218,101 +217,6 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
    return nil
}

// SaveDataColumn saves a data column to our local filesystem.
func (bs *BlobStorage) SaveDataColumn(column blocks.VerifiedRODataColumn) error {
    startTime := time.Now()
    fname := namerForDataColumn(column)
    sszPath := fname.path()
    exists, err := afero.Exists(bs.fs, sszPath)
    if err != nil {
        return err
    }
    if exists {
        log.Debug("Ignoring a duplicate data column sidecar save attempt")
        return nil
    }
    if bs.pruner != nil {
        hRoot, err := column.SignedBlockHeader.Header.HashTreeRoot()
        if err != nil {
            return err
        }
        if err := bs.pruner.notify(hRoot, column.SignedBlockHeader.Header.Slot, column.ColumnIndex); err != nil {
            return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", hRoot)
        }
    }

    // Serialize the ethpb.DataColumnSidecar to binary data using SSZ.
    sidecarData, err := column.MarshalSSZ()
    if err != nil {
        return errors.Wrap(err, "failed to serialize sidecar data")
    } else if len(sidecarData) == 0 {
        return errSidecarEmptySSZData
    }

    if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
        return err
    }
    partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))

    partialMoved := false
    // Ensure the partial file is deleted.
    defer func() {
        if partialMoved {
            return
        }
        // It's expected to error if the save is successful.
        err = bs.fs.Remove(partPath)
        if err == nil {
            log.WithFields(logrus.Fields{
                "partPath": partPath,
            }).Debugf("Removed partial file")
        }
    }()

    // Create a partial file and write the serialized data to it.
    partialFile, err := bs.fs.Create(partPath)
    if err != nil {
        return errors.Wrap(err, "failed to create partial file")
    }

    n, err := partialFile.Write(sidecarData)
    if err != nil {
        closeErr := partialFile.Close()
        if closeErr != nil {
            return closeErr
        }
        return errors.Wrap(err, "failed to write to partial file")
    }
    if bs.fsync {
        if err := partialFile.Sync(); err != nil {
            return err
        }
    }

    if err := partialFile.Close(); err != nil {
        return err
    }

    if n != len(sidecarData) {
        return fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
    }

    if n == 0 {
        return errEmptyBlobWritten
    }

    // Atomically rename the partial file to its final name.
    err = bs.fs.Rename(partPath, sszPath)
    if err != nil {
        return errors.Wrap(err, "failed to rename partial file to final name")
    }
    partialMoved = true
    // TODO: Use new metrics for data columns
    blobsWrittenCounter.Inc()
    blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
    return nil
}

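// --- Illustrative sketch (not part of the diff) ---
// SaveDataColumn above (removed in this commit) follows the usual crash-safe
// write recipe: write to a temporary "part" file, optionally fsync, then
// rename onto the final path so readers never observe a partially written
// file. The same pattern with only the standard library (paths illustrative;
// Prysm goes through an afero filesystem abstraction instead):
package main

import (
    "os"
    "path/filepath"
)

// atomicWrite writes data to path via a temp file in the same directory, so
// the final rename stays on one filesystem and is atomic on POSIX systems.
func atomicWrite(path string, data []byte, fsync bool) error {
    tmp, err := os.CreateTemp(filepath.Dir(path), ".part-*")
    if err != nil {
        return err
    }
    defer os.Remove(tmp.Name()) // best effort; a no-op after a successful rename

    if _, err := tmp.Write(data); err != nil {
        tmp.Close()
        return err
    }
    if fsync {
        if err := tmp.Sync(); err != nil {
            tmp.Close()
            return err
        }
    }
    if err := tmp.Close(); err != nil {
        return err
    }
    return os.Rename(tmp.Name(), path)
}

func main() {
    _ = atomicWrite(filepath.Join(os.TempDir(), "example.ssz"), []byte("payload"), true)
}
// --- End sketch ---
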
// Get retrieves a single BlobSidecar by its root and index.
// Since BlobStorage only writes blobs that have undergone full verification, the return
// value is always a VerifiedROBlob.
@@ -338,20 +242,6 @@ func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, er
    return verification.BlobSidecarNoop(ro)
}

// GetColumn retrieves a single DataColumnSidecar by its root and index.
func (bs *BlobStorage) GetColumn(root [32]byte, idx uint64) (*ethpb.DataColumnSidecar, error) {
    expected := blobNamer{root: root, index: idx}
    encoded, err := afero.ReadFile(bs.fs, expected.path())
    if err != nil {
        return nil, err
    }
    s := &ethpb.DataColumnSidecar{}
    if err := s.UnmarshalSSZ(encoded); err != nil {
        return nil, err
    }
    return s, nil
}

// Remove removes all blobs for a given root.
func (bs *BlobStorage) Remove(root [32]byte) error {
    rootDir := blobNamer{root: root}.dir()
@@ -395,41 +285,6 @@ func (bs *BlobStorage) Indices(root [32]byte) ([fieldparams.MaxBlobsPerBlock]boo
    return mask, nil
}

// ColumnIndices retrieves the stored column indices from our filesystem.
func (bs *BlobStorage) ColumnIndices(root [32]byte) ([fieldparams.NumberOfColumns]bool, error) {
    var mask [fieldparams.NumberOfColumns]bool
    rootDir := blobNamer{root: root}.dir()
    entries, err := afero.ReadDir(bs.fs, rootDir)
    if err != nil {
        if os.IsNotExist(err) {
            return mask, nil
        }
        return mask, err
    }
    for i := range entries {
        if entries[i].IsDir() {
            continue
        }
        name := entries[i].Name()
        if !strings.HasSuffix(name, sszExt) {
            continue
        }
        parts := strings.Split(name, ".")
        if len(parts) != 2 {
            continue
        }
        u, err := strconv.ParseUint(parts[0], 10, 64)
        if err != nil {
            return mask, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
        }
        if u >= fieldparams.NumberOfColumns {
            return mask, errIndexOutOfBounds
        }
        mask[u] = true
    }
    return mask, nil
}

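A self-contained sketch of the directory-scan idea behind ColumnIndices above: column files are named "<index>.ssz", so the set of stored indices can be recovered from filenames alone. numberOfColumns below is an assumed stand-in for fieldparams.NumberOfColumns:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

const numberOfColumns = 128

// indicesFromNames rebuilds the presence mask from "<idx>.ssz" filenames,
// skipping anything that is not an SSZ column file.
func indicesFromNames(names []string) ([numberOfColumns]bool, error) {
    var mask [numberOfColumns]bool
    for _, name := range names {
        if !strings.HasSuffix(name, ".ssz") {
            continue
        }
        u, err := strconv.ParseUint(strings.TrimSuffix(name, ".ssz"), 10, 64)
        if err != nil {
            return mask, err
        }
        if u >= numberOfColumns {
            return mask, fmt.Errorf("index %d out of bounds", u)
        }
        mask[u] = true
    }
    return mask, nil
}

func main() {
    mask, err := indicesFromNames([]string{"0.ssz", "17.ssz", "notes.txt"})
    fmt.Println(mask[0], mask[17], mask[1], err) // true true false <nil>
}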
// Clear deletes all files on the filesystem.
func (bs *BlobStorage) Clear() error {
    dirs, err := listDir(bs.fs, ".")
@@ -444,15 +299,6 @@ func (bs *BlobStorage) Clear() error {
    return nil
}

// WithinRetentionPeriod checks if the requested epoch is within the blob retention period.
func (bs *BlobStorage) WithinRetentionPeriod(requested, current primitives.Epoch) bool {
    if requested > math.MaxUint64-bs.retentionEpochs {
        // If there is an overflow, then the retention period was set to an extremely large number.
        return true
    }
    return requested+bs.retentionEpochs >= current
}

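The overflow guard above deserves a concrete illustration. A minimal sketch with plain uint64 values (the retention of 16 matches the test below; the overflow case mirrors its "overflow" subtest):

package main

import (
    "fmt"
    "math"
)

func withinRetention(requested, current, retention uint64) bool {
    if requested > math.MaxUint64-retention {
        return true // requested+retention would wrap around uint64
    }
    return requested + retention >= current
}

func main() {
    const retention = 16
    fmt.Println(withinRetention(0, 16, retention))                  // true: boundary epoch is kept
    fmt.Println(withinRetention(0, 17, retention))                  // false: past the window
    fmt.Println(withinRetention(math.MaxUint64, 1, math.MaxUint64)) // true: overflow guard fires
}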
type blobNamer struct {
    root  [32]byte
    index uint64
@@ -462,10 +308,6 @@ func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
    return blobNamer{root: sc.BlockRoot(), index: sc.Index}
}

func namerForDataColumn(col blocks.VerifiedRODataColumn) blobNamer {
    return blobNamer{root: col.BlockRoot(), index: col.ColumnIndex}
}

func (p blobNamer) dir() string {
    return rootString(p.root)
}

@@ -2,7 +2,6 @@ package filesystem

import (
    "bytes"
    "math"
    "os"
    "path"
    "sync"
@@ -25,7 +24,8 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
    require.NoError(t, err)

    t.Run("no error for duplicate", func(t *testing.T) {
        fs, bs := NewEphemeralBlobStorageWithFs(t)
        fs, bs, err := NewEphemeralBlobStorageWithFs(t)
        require.NoError(t, err)
        existingSidecar := testSidecars[0]

        blobPath := namerForSidecar(existingSidecar).path()
@@ -129,7 +129,8 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
// pollUntil polls a condition function until it returns true or a timeout is reached.

func TestBlobIndicesBounds(t *testing.T) {
    fs, bs := NewEphemeralBlobStorageWithFs(t)
    fs, bs, err := NewEphemeralBlobStorageWithFs(t)
    require.NoError(t, err)
    root := [32]byte{}

    okIdx := uint64(fieldparams.MaxBlobsPerBlock - 1)
@@ -160,7 +161,8 @@ func writeFakeSSZ(t *testing.T, fs afero.Fs, root [32]byte, idx uint64) {

func TestBlobStoragePrune(t *testing.T) {
    currentSlot := primitives.Slot(200000)
    fs, bs := NewEphemeralBlobStorageWithFs(t)
    fs, bs, err := NewEphemeralBlobStorageWithFs(t)
    require.NoError(t, err)

    t.Run("PruneOne", func(t *testing.T) {
        _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 300, fieldparams.MaxBlobsPerBlock)
@@ -216,7 +218,8 @@ func TestBlobStoragePrune(t *testing.T) {

func BenchmarkPruning(b *testing.B) {
    var t *testing.T
    _, bs := NewEphemeralBlobStorageWithFs(t)
    _, bs, err := NewEphemeralBlobStorageWithFs(t)
    require.NoError(t, err)

    blockQty := 10000
    currentSlot := primitives.Slot(150000)
@@ -245,50 +248,3 @@ func TestNewBlobStorage(t *testing.T) {
    _, err = NewBlobStorage(WithBasePath(path.Join(t.TempDir(), "good")))
    require.NoError(t, err)
}

func TestConfig_WithinRetentionPeriod(t *testing.T) {
    retention := primitives.Epoch(16)
    storage := &BlobStorage{retentionEpochs: retention}

    cases := []struct {
        name      string
        requested primitives.Epoch
        current   primitives.Epoch
        within    bool
    }{
        {
            name:      "before",
            requested: 0,
            current:   retention + 1,
            within:    false,
        },
        {
            name:      "same",
            requested: 0,
            current:   0,
            within:    true,
        },
        {
            name:      "boundary",
            requested: 0,
            current:   retention,
            within:    true,
        },
        {
            name:      "one less",
            requested: retention - 1,
            current:   retention,
            within:    true,
        },
    }
    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            require.Equal(t, c.within, storage.WithinRetentionPeriod(c.requested, c.current))
        })
    }

    t.Run("overflow", func(t *testing.T) {
        storage := &BlobStorage{retentionEpochs: math.MaxUint64}
        require.Equal(t, true, storage.WithinRetentionPeriod(1, 1))
    })
}

@@ -21,13 +21,13 @@ func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {

// NewEphemeralBlobStorageWithFs can be used by tests that want access to the virtual filesystem
// in order to interact with it outside the parameters of the BlobStorage api.
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage) {
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage, error) {
    fs := afero.NewMemMapFs()
    pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
    if err != nil {
        t.Fatal("test setup issue", err)
    }
    return fs, &BlobStorage{fs: fs, pruner: pruner}
    return fs, &BlobStorage{fs: fs, pruner: pruner}, nil
}

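A small sketch of the shape of this refactor, with hypothetical names: the helper now surfaces its setup error to the caller instead of calling t.Fatal itself, which is what lets callers such as BenchmarkPruning above (where t is nil) decide the failure policy:

package main

import (
    "errors"
    "fmt"
)

// newEphemeralStorage returns its setup error rather than failing the
// test inside the helper; the caller chooses how to react.
func newEphemeralStorage(failSetup bool) (string, error) {
    if failSetup {
        return "", errors.New("pruner setup failed")
    }
    return "in-memory blob storage", nil
}

func main() {
    s, err := newEphemeralStorage(false)
    fmt.Println(s, err) // in-memory blob storage <nil>
}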
type BlobMocker struct {

@@ -51,7 +51,8 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
        require.Equal(t, 0, pruned)
    })
    t.Run("blobs to delete", func(t *testing.T) {
        fs, bs := NewEphemeralBlobStorageWithFs(t)
        fs, bs, err := NewEphemeralBlobStorageWithFs(t)
        require.NoError(t, err)
        var slot primitives.Slot = 0
        _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
        scs, err := verification.BlobSidecarSliceNoop(sidecars)
@@ -82,7 +83,8 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {

func TestTryPruneDir_SlotFromFile(t *testing.T) {
    t.Run("expired blobs deleted", func(t *testing.T) {
        fs, bs := NewEphemeralBlobStorageWithFs(t)
        fs, bs, err := NewEphemeralBlobStorageWithFs(t)
        require.NoError(t, err)
        var slot primitives.Slot = 0
        _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
        scs, err := verification.BlobSidecarSliceNoop(sidecars)
@@ -114,7 +116,8 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
        require.Equal(t, 0, len(files))
    })
    t.Run("not expired, intact", func(t *testing.T) {
        fs, bs := NewEphemeralBlobStorageWithFs(t)
        fs, bs, err := NewEphemeralBlobStorageWithFs(t)
        require.NoError(t, err)
        // Set slot equal to the window size, so it should be retained.
        slot := bs.pruner.windowSize
        _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
@@ -181,7 +184,8 @@ func TestSlotFromFile(t *testing.T) {
    }
    for _, c := range cases {
        t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
            fs, bs := NewEphemeralBlobStorageWithFs(t)
            fs, bs, err := NewEphemeralBlobStorageWithFs(t)
            require.NoError(t, err)
            _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
            sc, err := verification.BlobSidecarNoop(sidecars[0])
            require.NoError(t, err)

@@ -643,7 +643,9 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
    if err := d.ClearDB(); err != nil {
        return errors.Wrap(err, "could not clear database")
    }

    if err := b.BlobStorage.Clear(); err != nil {
        return errors.Wrap(err, "could not clear blob storage")
    }
    d, err = slasherkv.NewKVStore(b.ctx, dbPath)
    if err != nil {
        return errors.Wrap(err, "could not create new database")

@@ -46,7 +46,6 @@ go_library(
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/p2p/encoder:go_default_library",
@@ -57,7 +56,6 @@ go_library(
        "//beacon-chain/startup:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/wrapper:go_default_library",

@@ -11,7 +11,6 @@ import (
    ssz "github.com/prysmaticlabs/fastssz"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/crypto/hash"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
@@ -97,12 +96,7 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
    return nil
}

func (s *Service) internalBroadcastAttestation(
    ctx context.Context,
    subnet uint64,
    att *ethpb.Attestation,
    forkDigest [fieldparams.VersionLength]byte,
) {
func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, forkDigest [4]byte) {
    _, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
    defer span.End()
    ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -158,7 +152,7 @@ func (s *Service) internalBroadcastAttestation(
    }
}

func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [fieldparams.VersionLength]byte) {
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
    _, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
    defer span.End()
    ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -234,12 +228,7 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
    return nil
}

func (s *Service) internalBroadcastBlob(
    ctx context.Context,
    subnet uint64,
    blobSidecar *ethpb.BlobSidecar,
    forkDigest [fieldparams.VersionLength]byte,
) {
func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
    _, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
    defer span.End()
    ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -254,7 +243,7 @@ func (s *Service) internalBroadcastBlob(
    s.subnetLocker(wrappedSubIdx).RUnlock()

    if !hasPeer {
        blobSidecarBroadcastAttempts.Inc()
        blobSidecarCommitteeBroadcastAttempts.Inc()
        if err := func() error {
            s.subnetLocker(wrappedSubIdx).Lock()
            defer s.subnetLocker(wrappedSubIdx).Unlock()
@@ -263,7 +252,7 @@ func (s *Service) internalBroadcastBlob(
                return err
            }
            if ok {
                blobSidecarBroadcasts.Inc()
                blobSidecarCommitteeBroadcasts.Inc()
                return nil
            }
            return errors.New("failed to find peers for subnet")
@@ -279,99 +268,6 @@ func (s *Service) internalBroadcastBlob(
    }
}

// BroadcastDataColumn broadcasts a data column to the p2p network, the message is assumed to be
// broadcasted to the current fork and to the input column subnet.
// TODO: Add tests
func (s *Service) BroadcastDataColumn(ctx context.Context, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error {
    // Add tracing to the function.
    ctx, span := trace.StartSpan(ctx, "p2p.BroadcastDataColumn")
    defer span.End()

    // Ensure the data column sidecar is not nil.
    if dataColumnSidecar == nil {
        return errors.New("attempted to broadcast nil data column sidecar")
    }

    // Retrieve the current fork digest.
    forkDigest, err := s.currentForkDigest()
    if err != nil {
        err := errors.Wrap(err, "current fork digest")
        tracing.AnnotateError(span, err)
        return err
    }

    // Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
    go s.internalBroadcastDataColumn(ctx, columnSubnet, dataColumnSidecar, forkDigest)

    return nil
}

func (s *Service) internalBroadcastDataColumn(
    ctx context.Context,
    columnSubnet uint64,
    dataColumnSidecar *ethpb.DataColumnSidecar,
    forkDigest [fieldparams.VersionLength]byte,
) {
    // Add tracing to the function.
    _, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
    defer span.End()

    // Increase the number of broadcast attempts.
    dataColumnSidecarBroadcastAttempts.Inc()

    // Clear parent context / deadline.
    ctx = trace.NewContext(context.Background(), span)

    // Define a one-slot length context timeout.
    oneSlot := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
    ctx, cancel := context.WithTimeout(ctx, oneSlot)
    defer cancel()

    // Build the topic corresponding to this column subnet and this fork digest.
    topic := dataColumnSubnetToTopic(columnSubnet, forkDigest)

    // Compute the wrapped subnet index.
    wrappedSubIdx := columnSubnet + dataColumnSubnetVal

    // Check if we have peers with this subnet.
    hasPeer := func() bool {
        s.subnetLocker(wrappedSubIdx).RLock()
        defer s.subnetLocker(wrappedSubIdx).RUnlock()

        return s.hasPeerWithSubnet(topic)
    }()

    // If no peers are found, attempt to find peers with this subnet.
    if !hasPeer {
        if err := func() error {
            s.subnetLocker(wrappedSubIdx).Lock()
            defer s.subnetLocker(wrappedSubIdx).Unlock()

            ok, err := s.FindPeersWithSubnet(ctx, topic, columnSubnet, 1 /*threshold*/)
            if err != nil {
                return errors.Wrap(err, "find peers for subnet")
            }

            if ok {
                return nil
            }
            return errors.New("failed to find peers for subnet")
        }(); err != nil {
            log.WithError(err).Error("Failed to find peers")
            tracing.AnnotateError(span, err)
        }
    }

    // Broadcast the data column sidecar to the network.
    if err := s.broadcastObject(ctx, dataColumnSidecar, topic); err != nil {
        log.WithError(err).Error("Failed to broadcast data column sidecar")
        tracing.AnnotateError(span, err)
    }

    // Increase the number of successful broadcasts.
    blobSidecarBroadcasts.Inc()
}

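The broadcast flow above detaches from the caller's context and then caps peer discovery plus publish at one slot. A minimal runnable sketch of that deadline handling, where secondsPerSlot stands in for params.BeaconConfig().SecondsPerSlot and publish stands in for the discovery-plus-gossip work:

package main

import (
    "context"
    "fmt"
    "time"
)

const secondsPerSlot = 12

// broadcastWithSlotDeadline bounds the whole broadcast attempt to a
// single slot, independent of the caller's (already returned) request.
func broadcastWithSlotDeadline(publish func(context.Context) error) {
    ctx, cancel := context.WithTimeout(context.Background(), secondsPerSlot*time.Second)
    defer cancel()
    if err := publish(ctx); err != nil {
        fmt.Println("broadcast failed:", err)
    }
}

func main() {
    broadcastWithSlotDeadline(func(ctx context.Context) error {
        deadline, _ := ctx.Deadline()
        fmt.Println("time budget:", time.Until(deadline).Round(time.Second))
        return nil
    })
}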
// method to broadcast messages to other peers in our gossip mesh.
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
    ctx, span := trace.StartSpan(ctx, "p2p.broadcastObject")
@@ -401,18 +297,14 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
    return nil
}

func attestationToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
    return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
}

func syncCommitteeToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
    return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}

func blobSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
    return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
}

func dataColumnSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
    return fmt.Sprintf(DataColumnSubnetTopicFormat, forkDigest, subnet)
}

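All of the topic helpers above share one shape: a format string with a fork-digest placeholder and a subnet suffix. A tiny sketch of what that expansion looks like; the literal format string below is an assumed stand-in for DataColumnSubnetTopicFormat, not copied from the source:

package main

import "fmt"

func main() {
    forkDigest := [4]byte{0x01, 0x02, 0x03, 0x04}
    topic := fmt.Sprintf("/eth2/%x/data_column_sidecar_%d", forkDigest, 42)
    fmt.Println(topic) // /eth2/01020304/data_column_sidecar_42
}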
@@ -40,21 +40,15 @@ const (
    udp6
)

type (
    quicProtocol       uint16
    custodySubnetCount uint64
)
type quicProtocol uint16

// quicProtocol is the "quic" key, which holds the QUIC port of the node.
func (quicProtocol) ENRKey() string { return "quic" }

// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5
func (custodySubnetCount) ENRKey() string { return "custody_subnet_count" }

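A minimal sketch of the ENR-entry pattern above: any type with an ENRKey() string method can be stored on the local node record, so the custody subnet count travels with the node's discovery identity. The value here is purely illustrative:

package main

import "fmt"

type custodySubnetCount uint64

func (custodySubnetCount) ENRKey() string { return "custody_subnet_count" }

func main() {
    entry := custodySubnetCount(1) // assumed custody requirement, illustrative
    fmt.Printf("%s = %d\n", entry.ENRKey(), uint64(entry))
}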
// RefreshPersistentSubnets checks that we are tracking our local persistent subnets for a variety of gossip topics.
// This routine checks for our attestation, sync committee and data column subnets and updates them if they have
// been rotated.
func (s *Service) RefreshPersistentSubnets() {
// RefreshENR uses an epoch to refresh the enr entry for our node
// with the tracked committee ids for the epoch, allowing our node
// to be dynamically discoverable by others given our tracked committee ids.
func (s *Service) RefreshENR() {
    // Return early if discv5 isn't running.
    if s.dv5Listener == nil || !s.isInitialized() {
        return
@@ -64,10 +58,6 @@ func (s *Service) RefreshPersistentSubnets() {
        log.WithError(err).Error("Could not initialize persistent subnets")
        return
    }
    if err := initializePersistentColumnSubnets(s.dv5Listener.LocalNode().ID()); err != nil {
        log.WithError(err).Error("Could not initialize persistent column subnets")
        return
    }

    bitV := bitfield.NewBitvector64()
    committees := cache.SubnetIDs.GetAllSubnets()
@@ -254,11 +244,6 @@ func (s *Service) createLocalNode(
        localNode.Set(quicEntry)
    }

    if features.Get().EnablePeerDAS {
        custodySubnetEntry := custodySubnetCount(params.BeaconConfig().CustodyRequirement)
        localNode.Set(custodySubnetEntry)
    }

    localNode.SetFallbackIP(ipAddr)
    localNode.SetFallbackUDP(udpPort)

@@ -317,7 +317,7 @@ func TestHostIsResolved(t *testing.T) {
    // As defined in RFC 2606, example.org is a
    // reserved example domain name.
    exampleHost := "example.org"
    exampleIP := "93.184.215.14"
    exampleIP := "93.184.216.34"

    s := &Service{
        cfg: &Config{
@@ -601,7 +601,7 @@ func TestRefreshENR_ForkBoundaries(t *testing.T) {
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            s := tt.svcBuilder(t)
            s.RefreshPersistentSubnets()
            s.RefreshENR()
            tt.postValidation(t, s)
            s.dv5Listener.Close()
            cache.SubnetIDs.EmptyAllCaches()

@@ -22,7 +22,6 @@ var gossipTopicMappings = map[string]proto.Message{
    SyncCommitteeSubnetTopicFormat:        &ethpb.SyncCommitteeMessage{},
    BlsToExecutionChangeSubnetTopicFormat: &ethpb.SignedBLSToExecutionChange{},
    BlobSubnetTopicFormat:                 &ethpb.BlobSidecar{},
    DataColumnSubnetTopicFormat:           &ethpb.DataColumnSidecar{},
}

// GossipTopicMappings is a function to return the assigned data type

@@ -3,7 +3,6 @@ package p2p
import (
    "context"

    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/enr"
    pubsub "github.com/libp2p/go-libp2p-pubsub"
    "github.com/libp2p/go-libp2p/core/connmgr"
@@ -31,18 +30,12 @@ type P2P interface {
    MetadataProvider
}

type Acceser interface {
    Broadcaster
    PeerManager
}

// Broadcaster broadcasts messages to peers over the p2p pubsub protocol.
type Broadcaster interface {
    Broadcast(context.Context, proto.Message) error
    BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error
    BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
    BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
    BroadcastDataColumn(ctx context.Context, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
}

// SetStreamHandler configures p2p to handle streams of a certain topic ID.
@@ -88,9 +81,8 @@ type PeerManager interface {
    PeerID() peer.ID
    Host() host.Host
    ENR() *enr.Record
    NodeID() enode.ID
    DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
    RefreshPersistentSubnets()
    RefreshENR()
    FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
    AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
}

@@ -60,21 +60,17 @@ var (
            "the subnet. The beacon node increments this counter when the broadcast is blocked " +
            "until a subnet peer can be found.",
    })
    blobSidecarBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
    blobSidecarCommitteeBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
        Name: "p2p_blob_sidecar_committee_broadcasts",
        Help: "The number of blob sidecar messages that were broadcast with no peer on.",
        Help: "The number of blob sidecar committee messages that were broadcast with no peer on.",
    })
    syncCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
        Name: "p2p_sync_committee_subnet_attempted_broadcasts",
        Help: "The number of sync committee messages that were attempted to be broadcast.",
    })
    blobSidecarBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
    blobSidecarCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
        Name: "p2p_blob_sidecar_committee_attempted_broadcasts",
        Help: "The number of blob sidecar messages that were attempted to be broadcast.",
    })
    dataColumnSidecarBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
        Name: "p2p_data_column_sidecar_attempted_broadcasts",
        Help: "The number of data column sidecar messages that were attempted to be broadcast.",
        Help: "The number of blob sidecar committee messages that were attempted to be broadcast.",
    })

// Gossip Tracer Metrics

@@ -86,7 +86,7 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
        return nil, errors.Wrapf(err, "cannot get ID from public key: %s", ifaceKey.GetPublic().Type().String())
    }

    log.Infof("Running node with peer id of %s ", id.String())
    log.WithField("peerId", id).Info("Running node with id")

    options := []libp2p.Option{
        privKeyOption(priKey),

@@ -165,14 +165,14 @@ func (s *Service) pubsubOptions() []pubsub.Option {
func parsePeersEnr(peers []string) ([]peer.AddrInfo, error) {
    addrs, err := PeersFromStringAddrs(peers)
    if err != nil {
        return nil, fmt.Errorf("cannot convert peers raw ENRs into multiaddresses: %v", err)
        return nil, fmt.Errorf("Cannot convert peers raw ENRs into multiaddresses: %v", err)
    }
    if len(addrs) == 0 {
        return nil, fmt.Errorf("converting peers raw ENRs into multiaddresses resulted in an empty list")
        return nil, fmt.Errorf("Converting peers raw ENRs into multiaddresses resulted in an empty list")
    }
    directAddrInfos, err := peer.AddrInfosFromP2pAddrs(addrs...)
    if err != nil {
        return nil, fmt.Errorf("cannot convert peers multiaddresses into AddrInfos: %v", err)
        return nil, fmt.Errorf("Cannot convert peers multiaddresses into AddrInfos: %v", err)
    }
    return directAddrInfos, nil
}

@@ -90,7 +90,7 @@ func TestService_CanSubscribe(t *testing.T) {
    formatting := []interface{}{digest}

    // Special case for attestation subnets which have a second formatting placeholder.
    if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat || topic == DataColumnSubnetTopicFormat {
    if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat {
        formatting = append(formatting, 0 /* some subnet ID */)
    }

@@ -43,9 +43,6 @@ const BlobSidecarsByRangeName = "/blob_sidecars_by_range"
|
||||
// BlobSidecarsByRootName is the name for the BlobSidecarsByRoot v1 message topic.
|
||||
const BlobSidecarsByRootName = "/blob_sidecars_by_root"
|
||||
|
||||
// DataColumnSidecarsByRootName is the name for the DataColumnSidecarsByRoot v1 message topic.
|
||||
const DataColumnSidecarsByRootName = "/data_column_sidecars_by_root"
|
||||
|
||||
const (
|
||||
// V1 RPC Topics
|
||||
// RPCStatusTopicV1 defines the v1 topic for the status rpc method.
|
||||
@@ -68,9 +65,6 @@ const (
|
||||
// RPCBlobSidecarsByRootTopicV1 is a topic for requesting blob sidecars by their block root. New in deneb.
|
||||
// /eth2/beacon_chain/req/blob_sidecars_by_root/1/
|
||||
RPCBlobSidecarsByRootTopicV1 = protocolPrefix + BlobSidecarsByRootName + SchemaVersionV1
|
||||
// RPCDataColumnSidecarsByRootTopicV1 is a topic for requesting data column sidecars by their block root. New in PeerDAS.
|
||||
// /eth2/beacon_chain/req/data_column_sidecars_by_root/1
|
||||
RPCDataColumnSidecarsByRootTopicV1 = protocolPrefix + DataColumnSidecarsByRootName + SchemaVersionV1
|
||||
|
||||
// V2 RPC Topics
|
||||
// RPCBlocksByRangeTopicV2 defines v2 the topic for the blocks by range rpc method.
|
||||
|
||||
@@ -226,7 +226,7 @@ func (s *Service) Start() {
    }
    // Initialize metadata according to the
    // current epoch.
    s.RefreshPersistentSubnets()
    s.RefreshENR()

    // Periodic functions.
    async.RunEvery(s.ctx, params.BeaconConfig().TtfbTimeoutDuration(), func() {
@@ -234,7 +234,7 @@ func (s *Service) Start() {
    })
    async.RunEvery(s.ctx, 30*time.Minute, s.Peers().Prune)
    async.RunEvery(s.ctx, time.Duration(params.BeaconConfig().RespTimeout)*time.Second, s.updateMetrics)
    async.RunEvery(s.ctx, refreshRate, s.RefreshPersistentSubnets)
    async.RunEvery(s.ctx, refreshRate, s.RefreshENR)
    async.RunEvery(s.ctx, 1*time.Minute, func() {
        inboundQUICCount := len(s.peers.InboundConnectedWithProtocol(peers.QUIC))
        inboundTCPCount := len(s.peers.InboundConnectedWithProtocol(peers.TCP))
@@ -358,15 +358,6 @@ func (s *Service) ENR() *enr.Record {
    return s.dv5Listener.Self().Record()
}

// NodeID returns the local node's node ID
// for discovery.
func (s *Service) NodeID() enode.ID {
    if s.dv5Listener == nil {
        return [32]byte{}
    }
    return s.dv5Listener.Self().ID()
}

// DiscoveryAddresses represents our enr addresses as multiaddresses.
func (s *Service) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
    if s.dv5Listener == nil {

@@ -13,7 +13,6 @@ import (
    "github.com/prysmaticlabs/go-bitfield"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
    "github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
@@ -35,8 +34,8 @@ var syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
// The value used with the subnet, in order
// to create an appropriate key to retrieve
// the relevant lock. This is used to differentiate
// sync subnets from others. This is deliberately
// chosen as more than 64 (attestation subnet count).
// sync subnets from attestation subnets. This is deliberately
// chosen as more than 64 (attestation subnet count).
const syncLockerVal = 100

// The value used with the blob sidecar subnet, in order
@@ -46,13 +45,6 @@ const syncLockerVal = 100
// chosen more than sync and attestation subnet combined.
const blobSubnetLockerVal = 110

// The value used with the data column sidecar subnet, in order
// to create an appropriate key to retrieve
// the relevant lock. This is used to differentiate
// data column subnets from others. This is deliberately
// chosen more than sync, attestation and blob subnet (6) combined.
const dataColumnSubnetVal = 150

// FindPeersWithSubnet performs a network search for peers
// subscribed to a particular subnet. Then it tries to connect
// with those peers. This method will block until either:
@@ -210,25 +202,6 @@ func initializePersistentSubnets(id enode.ID, epoch primitives.Epoch) error {
    return nil
}

func initializePersistentColumnSubnets(id enode.ID) error {
    _, ok, expTime := cache.ColumnSubnetIDs.GetColumnSubnets()
    if ok && expTime.After(time.Now()) {
        return nil
    }
    subsMap, err := peerdas.CustodyColumnSubnets(id, params.BeaconConfig().CustodyRequirement)
    if err != nil {
        return err
    }

    subs := make([]uint64, 0, len(subsMap))
    for sub := range subsMap {
        subs = append(subs, sub)
    }

    cache.ColumnSubnetIDs.AddColumnSubnets(subs)
    return nil
}

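initializePersistentColumnSubnets above follows a simple cache-or-recompute pattern: reuse the cached subnet set until its expiry passes, otherwise recompute the node's custody subnets and store them again. A sketch under assumed types:

package main

import (
    "fmt"
    "time"
)

type subnetCache struct {
    subs []uint64
    exp  time.Time
}

// get returns the cached set only while it has not expired.
func (c *subnetCache) get() ([]uint64, bool) {
    if c.subs != nil && c.exp.After(time.Now()) {
        return c.subs, true
    }
    return nil, false
}

func (c *subnetCache) put(subs []uint64, ttl time.Duration) {
    c.subs, c.exp = subs, time.Now().Add(ttl)
}

func main() {
    c := &subnetCache{}
    if _, ok := c.get(); !ok {
        c.put([]uint64{3, 17}, time.Minute) // recompute (stubbed here) and cache
    }
    subs, ok := c.get()
    fmt.Println(subs, ok) // [3 17] true
}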
// Spec pseudocode definition:
//
// def compute_subscribed_subnets(node_id: NodeID, epoch: Epoch) -> Sequence[SubnetID]:
@@ -378,11 +351,10 @@ func syncBitvector(record *enr.Record) (bitfield.Bitvector4, error) {

// The subnet locker is a map which keeps track of all
// mutexes stored per subnet. This locker is re-used
// between both the attestation, sync and blob subnets.
// Sync subnets are stored by (subnet+syncLockerVal).
// Blob subnets are stored by (subnet+blobSubnetLockerVal).
// Data column subnets are stored by (subnet+dataColumnSubnetVal).
// This is to prevent conflicts while allowing subnets
// between both the attestation and sync subnets. In
// order to differentiate between attestation and sync
// subnets. Sync subnets are stored by (subnet+syncLockerVal). This
// is to prevent conflicts while allowing both subnets
// to use a single locker.
func (s *Service) subnetLocker(i uint64) *sync.RWMutex {
    s.subnetsLockLock.Lock()

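To make the offset scheme in the locker comment concrete: one shared map of mutexes serves every subnet family, and each family is shifted into a disjoint key range so attestation subnet 3 and data column subnet 3 never collide. A minimal sketch, reusing the constants from the diff:

package main

import (
    "fmt"
    "sync"
)

const (
    syncLockerVal       = 100
    blobSubnetLockerVal = 110
    dataColumnSubnetVal = 150
)

type locker struct {
    mu    sync.Mutex
    locks map[uint64]*sync.RWMutex
}

// subnetLocker lazily creates one RWMutex per (possibly offset) subnet key.
func (l *locker) subnetLocker(i uint64) *sync.RWMutex {
    l.mu.Lock()
    defer l.mu.Unlock()
    if l.locks == nil {
        l.locks = make(map[uint64]*sync.RWMutex)
    }
    if _, ok := l.locks[i]; !ok {
        l.locks[i] = new(sync.RWMutex)
    }
    return l.locks[i]
}

func main() {
    l := &locker{}
    att := l.subnetLocker(3)                       // attestation subnet 3
    col := l.subnetLocker(3 + dataColumnSubnetVal) // data column subnet 3
    fmt.Println(att != col)                        // true: distinct locks, no collision
}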
@@ -3,7 +3,6 @@ package testing
import (
    "context"

    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/enr"
    pubsub "github.com/libp2p/go-libp2p-pubsub"
    "github.com/libp2p/go-libp2p/core/control"
@@ -56,11 +55,6 @@ func (_ *FakeP2P) ENR() *enr.Record {
    return new(enr.Record)
}

// NodeID returns the node id of the local peer.
func (_ *FakeP2P) NodeID() enode.ID {
    return [32]byte{}
}

// DiscoveryAddresses -- fake
func (_ *FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
    return nil, nil
@@ -72,7 +66,7 @@ func (_ *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ i
}

// RefreshENR mocks the p2p func.
func (_ *FakeP2P) RefreshPersistentSubnets() {}
func (_ *FakeP2P) RefreshENR() {}

// LeaveTopic -- fake.
func (_ *FakeP2P) LeaveTopic(_ string) error {
@@ -154,11 +148,6 @@ func (_ *FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSideca
    return nil
}

// BroadcastDataColumn -- fake.
func (_ *FakeP2P) BroadcastDataColumn(_ context.Context, _ uint64, _ *ethpb.DataColumnSidecar) error {
    return nil
}

// InterceptPeerDial -- fake.
func (_ *FakeP2P) InterceptPeerDial(peer.ID) (allow bool) {
    return true

@@ -48,12 +48,6 @@ func (m *MockBroadcaster) BroadcastBlob(context.Context, uint64, *ethpb.BlobSide
    return nil
}

// BroadcastDataColumn broadcasts a data column for mock.
func (m *MockBroadcaster) BroadcastDataColumn(context.Context, uint64, *ethpb.DataColumnSidecar) error {
    m.BroadcastCalled.Store(true)
    return nil
}

// NumMessages returns the number of messages broadcasted.
func (m *MockBroadcaster) NumMessages() int {
    m.msgLock.Lock()

@@ -4,7 +4,6 @@ import (
    "context"
    "errors"

    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/enr"
    "github.com/libp2p/go-libp2p/core/host"
    "github.com/libp2p/go-libp2p/core/peer"
@@ -40,11 +39,6 @@ func (m MockPeerManager) ENR() *enr.Record {
    return m.Enr
}

// NodeID .
func (m MockPeerManager) NodeID() enode.ID {
    return [32]byte{}
}

// DiscoveryAddresses .
func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
    if m.FailDiscoveryAddr {
@@ -54,7 +48,7 @@ func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
}

// RefreshENR .
func (_ MockPeerManager) RefreshPersistentSubnets() {}
func (_ MockPeerManager) RefreshENR() {}

// FindPeersWithSubnet .
func (_ MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {

@@ -10,7 +10,6 @@ import (
    "testing"
    "time"

    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/enr"
    pubsub "github.com/libp2p/go-libp2p-pubsub"
    core "github.com/libp2p/go-libp2p/core"
@@ -184,12 +183,6 @@ func (p *TestP2P) BroadcastBlob(context.Context, uint64, *ethpb.BlobSidecar) err
    return nil
}

// BroadcastDataColumn broadcasts a data column for mock.
func (p *TestP2P) BroadcastDataColumn(context.Context, uint64, *ethpb.DataColumnSidecar) error {
    p.BroadcastCalled.Store(true)
    return nil
}

// SetStreamHandler for RPC.
func (p *TestP2P) SetStreamHandler(topic string, handler network.StreamHandler) {
    p.BHost.SetStreamHandler(protocol.ID(topic), handler)
@@ -270,11 +263,6 @@ func (_ *TestP2P) ENR() *enr.Record {
    return new(enr.Record)
}

// NodeID returns the node id of the local peer.
func (_ *TestP2P) NodeID() enode.ID {
    return [32]byte{}
}

// DiscoveryAddresses --
func (_ *TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
    return nil, nil
@@ -373,7 +361,7 @@ func (_ *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ i
}

// RefreshENR mocks the p2p func.
func (_ *TestP2P) RefreshPersistentSubnets() {}
func (_ *TestP2P) RefreshENR() {}

// ForkDigest mocks the p2p func.
func (p *TestP2P) ForkDigest() ([4]byte, error) {

@@ -30,9 +30,6 @@ const (
    GossipBlsToExecutionChangeMessage = "bls_to_execution_change"
    // GossipBlobSidecarMessage is the name for the blob sidecar message type.
    GossipBlobSidecarMessage = "blob_sidecar"
    // GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
    GossipDataColumnSidecarMessage = "data_column_sidecar"

    // Topic Formats
    //
    // AttestationSubnetTopicFormat is the topic format for the attestation subnet.
@@ -55,6 +52,4 @@ const (
    BlsToExecutionChangeSubnetTopicFormat = GossipProtocolAndDigest + GossipBlsToExecutionChangeMessage
    // BlobSubnetTopicFormat is the topic format for the blob subnet.
    BlobSubnetTopicFormat = GossipProtocolAndDigest + GossipBlobSidecarMessage + "_%d"
    // DataColumnSubnetTopicFormat is the topic format for the data column subnet.
    DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
)

@@ -9,15 +9,10 @@ var (
    ErrInvalidSequenceNum = errors.New("invalid sequence number provided")
    ErrGeneric            = errors.New("internal service error")

    ErrRateLimited      = errors.New("rate limited")
    ErrIODeadline       = errors.New("i/o deadline exceeded")
    ErrInvalidRequest   = errors.New("invalid range, step or count")
    ErrBlobLTMinRequest = errors.New("blob epoch < minimum_request_epoch")

    ErrDataColumnLTMinRequest   = errors.New("data column epoch < minimum_request_epoch")
    ErrMaxBlobReqExceeded       = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
    ErrMaxDataColumnReqExceeded = errors.New("requested more than MAX_REQUEST_DATA_COLUMN_SIDECARS")

    ErrRateLimited         = errors.New("rate limited")
    ErrIODeadline          = errors.New("i/o deadline exceeded")
    ErrInvalidRequest      = errors.New("invalid range, step or count")
    ErrBlobLTMinRequest    = errors.New("blob slot < minimum_request_epoch")
    ErrMaxBlobReqExceeded  = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
    ErrResourceUnavailable = errors.New("resource requested unavailable")
    ErrInvalidColumnIndex  = errors.New("invalid column index requested")
)

@@ -369,7 +369,16 @@ func decodeIds(w http.ResponseWriter, st state.BeaconState, rawIds []string, ign
func valsFromIds(w http.ResponseWriter, st state.BeaconState, ids []primitives.ValidatorIndex) ([]state.ReadOnlyValidator, bool) {
    var vals []state.ReadOnlyValidator
    if len(ids) == 0 {
        vals = st.ValidatorsReadOnly()
        allVals := st.Validators()
        vals = make([]state.ReadOnlyValidator, len(allVals))
        for i, val := range allVals {
            readOnlyVal, err := statenative.NewValidator(val)
            if err != nil {
                httputil.HandleError(w, "Could not convert validator: "+err.Error(), http.StatusInternalServerError)
                return nil, false
            }
            vals[i] = readOnlyVal
        }
    } else {
        vals = make([]state.ReadOnlyValidator, 0, len(ids))
        for _, id := range ids {

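The valsFromIds change above replaces a single bulk accessor with an explicit convert-all loop that aborts the whole request on the first conversion failure. A sketch of that shape with hypothetical stand-in types (not the Prysm state API):

package main

import (
    "fmt"
    "strings"
)

type validator struct{ name string }
type readOnlyValidator struct{ name string }

func newReadOnly(v validator) (readOnlyValidator, error) {
    if v.name == "" {
        return readOnlyValidator{}, fmt.Errorf("empty validator")
    }
    return readOnlyValidator{name: strings.ToUpper(v.name)}, nil
}

// convertAll converts every validator, propagating the first error so the
// caller can fail the request as a whole.
func convertAll(all []validator) ([]readOnlyValidator, error) {
    out := make([]readOnlyValidator, len(all))
    for i, v := range all {
        ro, err := newReadOnly(v)
        if err != nil {
            return nil, err
        }
        out[i] = ro
    }
    return out, nil
}

func main() {
    vals, err := convertAll([]validator{{"a"}, {"b"}})
    fmt.Println(vals, err) // [{A} {B}] <nil>
}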
@@ -78,8 +78,6 @@ func TestGetSpec(t *testing.T) {
    config.CapellaForkEpoch = 103
    config.DenebForkVersion = []byte("DenebForkVersion")
    config.DenebForkEpoch = 105
    config.ElectraForkVersion = []byte("ElectraForkVersion")
    config.ElectraForkEpoch = 107
    config.BLSWithdrawalPrefixByte = byte('b')
    config.ETH1AddressWithdrawalPrefixByte = byte('c')
    config.GenesisDelay = 24
@@ -172,309 +170,293 @@ func TestGetSpec(t *testing.T) {
    data, ok := resp.Data.(map[string]interface{})
    require.Equal(t, true, ok)

    assert.Equal(t, 136, len(data))
    assert.Equal(t, 129, len(data))
    for k, v := range data {
        t.Run(k, func(t *testing.T) {
            switch k {
            case "CONFIG_NAME":
                assert.Equal(t, "ConfigName", v)
            case "PRESET_BASE":
                assert.Equal(t, "PresetBase", v)
            case "MAX_COMMITTEES_PER_SLOT":
                assert.Equal(t, "1", v)
            case "TARGET_COMMITTEE_SIZE":
                assert.Equal(t, "2", v)
            case "MAX_VALIDATORS_PER_COMMITTEE":
                assert.Equal(t, "3", v)
            case "MIN_PER_EPOCH_CHURN_LIMIT":
                assert.Equal(t, "4", v)
            case "CHURN_LIMIT_QUOTIENT":
                assert.Equal(t, "5", v)
            case "SHUFFLE_ROUND_COUNT":
                assert.Equal(t, "6", v)
            case "MIN_GENESIS_ACTIVE_VALIDATOR_COUNT":
                assert.Equal(t, "7", v)
            case "MIN_GENESIS_TIME":
                assert.Equal(t, "8", v)
            case "HYSTERESIS_QUOTIENT":
                assert.Equal(t, "9", v)
            case "HYSTERESIS_DOWNWARD_MULTIPLIER":
                assert.Equal(t, "10", v)
            case "HYSTERESIS_UPWARD_MULTIPLIER":
                assert.Equal(t, "11", v)
            case "SAFE_SLOTS_TO_UPDATE_JUSTIFIED":
                assert.Equal(t, "0", v)
            case "ETH1_FOLLOW_DISTANCE":
                assert.Equal(t, "13", v)
            case "TARGET_AGGREGATORS_PER_COMMITTEE":
                assert.Equal(t, "14", v)
            case "RANDOM_SUBNETS_PER_VALIDATOR":
                assert.Equal(t, "15", v)
            case "EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION":
                assert.Equal(t, "16", v)
            case "SECONDS_PER_ETH1_BLOCK":
                assert.Equal(t, "17", v)
            case "DEPOSIT_CHAIN_ID":
                assert.Equal(t, "18", v)
            case "DEPOSIT_NETWORK_ID":
                assert.Equal(t, "19", v)
            case "DEPOSIT_CONTRACT_ADDRESS":
                assert.Equal(t, "DepositContractAddress", v)
            case "MIN_DEPOSIT_AMOUNT":
                assert.Equal(t, "20", v)
            case "MAX_EFFECTIVE_BALANCE":
                assert.Equal(t, "21", v)
            case "EJECTION_BALANCE":
                assert.Equal(t, "22", v)
            case "EFFECTIVE_BALANCE_INCREMENT":
                assert.Equal(t, "23", v)
            case "GENESIS_FORK_VERSION":
                assert.Equal(t, "0x"+hex.EncodeToString([]byte("GenesisForkVersion")), v)
            case "ALTAIR_FORK_VERSION":
                assert.Equal(t, "0x"+hex.EncodeToString([]byte("AltairForkVersion")), v)
            case "ALTAIR_FORK_EPOCH":
                assert.Equal(t, "100", v)
            case "BELLATRIX_FORK_VERSION":
                assert.Equal(t, "0x"+hex.EncodeToString([]byte("BellatrixForkVersion")), v)
            case "BELLATRIX_FORK_EPOCH":
                assert.Equal(t, "101", v)
            case "CAPELLA_FORK_VERSION":
                assert.Equal(t, "0x"+hex.EncodeToString([]byte("CapellaForkVersion")), v)
            case "CAPELLA_FORK_EPOCH":
                assert.Equal(t, "103", v)
            case "DENEB_FORK_VERSION":
                assert.Equal(t, "0x"+hex.EncodeToString([]byte("DenebForkVersion")), v)
            case "DENEB_FORK_EPOCH":
                assert.Equal(t, "105", v)
            case "ELECTRA_FORK_VERSION":
                assert.Equal(t, "0x"+hex.EncodeToString([]byte("ElectraForkVersion")), v)
            case "ELECTRA_FORK_EPOCH":
                assert.Equal(t, "107", v)
            case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
                assert.Equal(t, "1000", v)
            case "BLS_WITHDRAWAL_PREFIX":
                assert.Equal(t, "0x62", v)
            case "ETH1_ADDRESS_WITHDRAWAL_PREFIX":
                assert.Equal(t, "0x63", v)
            case "GENESIS_DELAY":
                assert.Equal(t, "24", v)
            case "SECONDS_PER_SLOT":
                assert.Equal(t, "25", v)
            case "MIN_ATTESTATION_INCLUSION_DELAY":
                assert.Equal(t, "26", v)
            case "SLOTS_PER_EPOCH":
                assert.Equal(t, "27", v)
            case "MIN_SEED_LOOKAHEAD":
                assert.Equal(t, "28", v)
            case "MAX_SEED_LOOKAHEAD":
                assert.Equal(t, "29", v)
            case "EPOCHS_PER_ETH1_VOTING_PERIOD":
                assert.Equal(t, "30", v)
            case "SLOTS_PER_HISTORICAL_ROOT":
                assert.Equal(t, "31", v)
            case "MIN_VALIDATOR_WITHDRAWABILITY_DELAY":
                assert.Equal(t, "32", v)
            case "SHARD_COMMITTEE_PERIOD":
                assert.Equal(t, "33", v)
            case "MIN_EPOCHS_TO_INACTIVITY_PENALTY":
                assert.Equal(t, "34", v)
            case "EPOCHS_PER_HISTORICAL_VECTOR":
                assert.Equal(t, "35", v)
            case "EPOCHS_PER_SLASHINGS_VECTOR":
                assert.Equal(t, "36", v)
            case "HISTORICAL_ROOTS_LIMIT":
                assert.Equal(t, "37", v)
            case "VALIDATOR_REGISTRY_LIMIT":
                assert.Equal(t, "38", v)
            case "BASE_REWARD_FACTOR":
                assert.Equal(t, "39", v)
            case "WHISTLEBLOWER_REWARD_QUOTIENT":
                assert.Equal(t, "40", v)
            case "PROPOSER_REWARD_QUOTIENT":
                assert.Equal(t, "41", v)
            case "INACTIVITY_PENALTY_QUOTIENT":
                assert.Equal(t, "42", v)
            case "HF1_INACTIVITY_PENALTY_QUOTIENT":
                assert.Equal(t, "43", v)
            case "MIN_SLASHING_PENALTY_QUOTIENT":
                assert.Equal(t, "44", v)
            case "HF1_MIN_SLASHING_PENALTY_QUOTIENT":
                assert.Equal(t, "45", v)
            case "PROPORTIONAL_SLASHING_MULTIPLIER":
                assert.Equal(t, "46", v)
            case "HF1_PROPORTIONAL_SLASHING_MULTIPLIER":
                assert.Equal(t, "47", v)
            case "MAX_PROPOSER_SLASHINGS":
                assert.Equal(t, "48", v)
            case "MAX_ATTESTER_SLASHINGS":
                assert.Equal(t, "49", v)
            case "MAX_ATTESTATIONS":
                assert.Equal(t, "50", v)
            case "MAX_DEPOSITS":
                assert.Equal(t, "51", v)
            case "MAX_VOLUNTARY_EXITS":
                assert.Equal(t, "52", v)
            case "MAX_BLOBS_PER_BLOCK":
                assert.Equal(t, "4", v)
            case "TIMELY_HEAD_FLAG_INDEX":
                assert.Equal(t, "0x35", v)
            case "TIMELY_SOURCE_FLAG_INDEX":
                assert.Equal(t, "0x36", v)
            case "TIMELY_TARGET_FLAG_INDEX":
                assert.Equal(t, "0x37", v)
            case "TIMELY_HEAD_WEIGHT":
                assert.Equal(t, "56", v)
            case "TIMELY_SOURCE_WEIGHT":
                assert.Equal(t, "57", v)
            case "TIMELY_TARGET_WEIGHT":
                assert.Equal(t, "58", v)
            case "SYNC_REWARD_WEIGHT":
                assert.Equal(t, "59", v)
            case "WEIGHT_DENOMINATOR":
                assert.Equal(t, "60", v)
            case "TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE":
                assert.Equal(t, "61", v)
            case "SYNC_COMMITTEE_SUBNET_COUNT":
                assert.Equal(t, "62", v)
            case "SYNC_COMMITTEE_SIZE":
                assert.Equal(t, "63", v)
            case "SYNC_PUBKEYS_PER_AGGREGATE":
                assert.Equal(t, "64", v)
            case "INACTIVITY_SCORE_BIAS":
                assert.Equal(t, "65", v)
            case "EPOCHS_PER_SYNC_COMMITTEE_PERIOD":
                assert.Equal(t, "66", v)
            case "INACTIVITY_PENALTY_QUOTIENT_ALTAIR":
                assert.Equal(t, "67", v)
            case "MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR":
                assert.Equal(t, "68", v)
            case "PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR":
                assert.Equal(t, "69", v)
            case "INACTIVITY_SCORE_RECOVERY_RATE":
                assert.Equal(t, "70", v)
            case "MIN_SYNC_COMMITTEE_PARTICIPANTS":
                assert.Equal(t, "71", v)
            case "PROPOSER_WEIGHT":
                assert.Equal(t, "8", v)
            case "DOMAIN_BEACON_PROPOSER":
                assert.Equal(t, "0x30303031", v)
            case "DOMAIN_BEACON_ATTESTER":
                assert.Equal(t, "0x30303032", v)
            case "DOMAIN_RANDAO":
                assert.Equal(t, "0x30303033", v)
            case "DOMAIN_DEPOSIT":
                assert.Equal(t, "0x30303034", v)
            case "DOMAIN_VOLUNTARY_EXIT":
                assert.Equal(t, "0x30303035", v)
            case "DOMAIN_SELECTION_PROOF":
                assert.Equal(t, "0x30303036", v)
            case "DOMAIN_AGGREGATE_AND_PROOF":
                assert.Equal(t, "0x30303037", v)
            case "DOMAIN_APPLICATION_MASK":
                assert.Equal(t, "0x31303030", v)
            case "DOMAIN_SYNC_COMMITTEE":
                assert.Equal(t, "0x07000000", v)
            case "DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF":
                assert.Equal(t, "0x08000000", v)
            case "DOMAIN_CONTRIBUTION_AND_PROOF":
                assert.Equal(t, "0x09000000", v)
            case "DOMAIN_BLS_TO_EXECUTION_CHANGE":
                assert.Equal(t, "0x0a000000", v)
            case "DOMAIN_APPLICATION_BUILDER":
                assert.Equal(t, "0x00000001", v)
            case "DOMAIN_BLOB_SIDECAR":
                assert.Equal(t, "0x00000000", v)
            case "TRANSITION_TOTAL_DIFFICULTY":
                assert.Equal(t, "0", v)
            case "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":
                assert.Equal(t, "72", v)
            case "TERMINAL_BLOCK_HASH":
                s, ok := v.(string)
                require.Equal(t, true, ok)
                assert.Equal(t, common.HexToHash("TerminalBlockHash"), common.HexToHash(s))
            case "TERMINAL_TOTAL_DIFFICULTY":
                assert.Equal(t, "73", v)
            case "DefaultFeeRecipient":
                assert.Equal(t, common.HexToAddress("DefaultFeeRecipient"), v)
            case "PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":
                assert.Equal(t, "3", v)
            case "MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":
                assert.Equal(t, "32", v)
            case "INACTIVITY_PENALTY_QUOTIENT_BELLATRIX":
                assert.Equal(t, "16777216", v)
            case "PROPOSER_SCORE_BOOST":
                assert.Equal(t, "40", v)
            case "INTERVALS_PER_SLOT":
                assert.Equal(t, "3", v)
            case "MAX_WITHDRAWALS_PER_PAYLOAD":
                assert.Equal(t, "74", v)
            case "MAX_BLS_TO_EXECUTION_CHANGES":
                assert.Equal(t, "75", v)
            case "MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP":
                assert.Equal(t, "76", v)
            case "REORG_MAX_EPOCHS_SINCE_FINALIZATION":
                assert.Equal(t, "2", v)
            case "REORG_WEIGHT_THRESHOLD":
                assert.Equal(t, "20", v)
            case "REORG_PARENT_WEIGHT_THRESHOLD":
                assert.Equal(t, "160", v)
            case "MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":
                assert.Equal(t, "8", v)
            case "MAX_REQUEST_LIGHT_CLIENT_UPDATES":
                assert.Equal(t, "128", v)
            case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
            case "NODE_ID_BITS":
                assert.Equal(t, "256", v)
            case "ATTESTATION_SUBNET_EXTRA_BITS":
                assert.Equal(t, "0", v)
            case "ATTESTATION_SUBNET_PREFIX_BITS":
                assert.Equal(t, "6", v)
            case "SUBNETS_PER_NODE":
                assert.Equal(t, "2", v)
            case "EPOCHS_PER_SUBNET_SUBSCRIPTION":
                assert.Equal(t, "256", v)
            case "MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS":
                assert.Equal(t, "4096", v)
            case "MAX_REQUEST_BLOB_SIDECARS":
                assert.Equal(t, "768", v)
            case "MESSAGE_DOMAIN_INVALID_SNAPPY":
                assert.Equal(t, "0x00000000", v)
            case "MESSAGE_DOMAIN_VALID_SNAPPY":
                assert.Equal(t, "0x01000000", v)
            case "ATTESTATION_PROPAGATION_SLOT_RANGE":
                assert.Equal(t, "32", v)
            case "RESP_TIMEOUT":
                assert.Equal(t, "10", v)
            case "TTFB_TIMEOUT":
                assert.Equal(t, "5", v)
            case "MIN_EPOCHS_FOR_BLOCK_REQUESTS":
                assert.Equal(t, "33024", v)
            case "GOSSIP_MAX_SIZE":
                assert.Equal(t, "10485760", v)
            case "MAX_CHUNK_SIZE":
                assert.Equal(t, "10485760", v)
            case "ATTESTATION_SUBNET_COUNT":
                assert.Equal(t, "64", v)
            case "MAXIMUM_GOSSIP_CLOCK_DISPARITY":
                assert.Equal(t, "500", v)
            case "MAX_REQUEST_BLOCKS":
                assert.Equal(t, "1024", v)
            case "MAX_REQUEST_BLOCKS_DENEB":
                assert.Equal(t, "128", v)
            case "NUMBER_OF_COLUMNS":
                assert.Equal(t, "128", v)
            case "MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA":
                assert.Equal(t, "128000000000", v)
            case "MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT":
                assert.Equal(t, "256000000000", v)
            case "DATA_COLUMN_SIDECAR_SUBNET_COUNT":
                assert.Equal(t, "32", v)
            case "MAX_REQUEST_DATA_COLUMN_SIDECARS":
                assert.Equal(t, "16384", v)
            default:
                t.Errorf("Incorrect key: %s", k)
            }
        })
        switch k {
        case "CONFIG_NAME":
            assert.Equal(t, "ConfigName", v)
        case "PRESET_BASE":
            assert.Equal(t, "PresetBase", v)
        case "MAX_COMMITTEES_PER_SLOT":
            assert.Equal(t, "1", v)
        case "TARGET_COMMITTEE_SIZE":
            assert.Equal(t, "2", v)
        case "MAX_VALIDATORS_PER_COMMITTEE":
            assert.Equal(t, "3", v)
        case "MIN_PER_EPOCH_CHURN_LIMIT":
            assert.Equal(t, "4", v)
        case "CHURN_LIMIT_QUOTIENT":
            assert.Equal(t, "5", v)
        case "SHUFFLE_ROUND_COUNT":
            assert.Equal(t, "6", v)
        case "MIN_GENESIS_ACTIVE_VALIDATOR_COUNT":
            assert.Equal(t, "7", v)
        case "MIN_GENESIS_TIME":
            assert.Equal(t, "8", v)
        case "HYSTERESIS_QUOTIENT":
            assert.Equal(t, "9", v)
        case "HYSTERESIS_DOWNWARD_MULTIPLIER":
            assert.Equal(t, "10", v)
        case "HYSTERESIS_UPWARD_MULTIPLIER":
            assert.Equal(t, "11", v)
        case "SAFE_SLOTS_TO_UPDATE_JUSTIFIED":
            assert.Equal(t, "0", v)
        case "ETH1_FOLLOW_DISTANCE":
            assert.Equal(t, "13", v)
        case "TARGET_AGGREGATORS_PER_COMMITTEE":
            assert.Equal(t, "14", v)
        case "RANDOM_SUBNETS_PER_VALIDATOR":
            assert.Equal(t, "15", v)
        case "EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION":
            assert.Equal(t, "16", v)
        case "SECONDS_PER_ETH1_BLOCK":
            assert.Equal(t, "17", v)
        case "DEPOSIT_CHAIN_ID":
            assert.Equal(t, "18", v)
        case "DEPOSIT_NETWORK_ID":
            assert.Equal(t, "19", v)
        case "DEPOSIT_CONTRACT_ADDRESS":
            assert.Equal(t, "DepositContractAddress", v)
        case "MIN_DEPOSIT_AMOUNT":
            assert.Equal(t, "20", v)
        case "MAX_EFFECTIVE_BALANCE":
            assert.Equal(t, "21", v)
        case "EJECTION_BALANCE":
            assert.Equal(t, "22", v)
        case "EFFECTIVE_BALANCE_INCREMENT":
            assert.Equal(t, "23", v)
        case "GENESIS_FORK_VERSION":
            assert.Equal(t, "0x"+hex.EncodeToString([]byte("GenesisForkVersion")), v)
        case "ALTAIR_FORK_VERSION":
            assert.Equal(t, "0x"+hex.EncodeToString([]byte("AltairForkVersion")), v)
        case "ALTAIR_FORK_EPOCH":
            assert.Equal(t, "100", v)
        case "BELLATRIX_FORK_VERSION":
            assert.Equal(t, "0x"+hex.EncodeToString([]byte("BellatrixForkVersion")), v)
        case "BELLATRIX_FORK_EPOCH":
            assert.Equal(t, "101", v)
        case "CAPELLA_FORK_VERSION":
            assert.Equal(t, "0x"+hex.EncodeToString([]byte("CapellaForkVersion")), v)
        case "CAPELLA_FORK_EPOCH":
            assert.Equal(t, "103", v)
        case "DENEB_FORK_VERSION":
            assert.Equal(t, "0x"+hex.EncodeToString([]byte("DenebForkVersion")), v)
        case "DENEB_FORK_EPOCH":
            assert.Equal(t, "105", v)
        case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
            assert.Equal(t, "1000", v)
        case "BLS_WITHDRAWAL_PREFIX":
            assert.Equal(t, "0x62", v)
        case "ETH1_ADDRESS_WITHDRAWAL_PREFIX":
            assert.Equal(t, "0x63", v)
        case "GENESIS_DELAY":
            assert.Equal(t, "24", v)
        case "SECONDS_PER_SLOT":
            assert.Equal(t, "25", v)
        case "MIN_ATTESTATION_INCLUSION_DELAY":
            assert.Equal(t, "26", v)
        case "SLOTS_PER_EPOCH":
            assert.Equal(t, "27", v)
        case "MIN_SEED_LOOKAHEAD":
            assert.Equal(t, "28", v)
        case "MAX_SEED_LOOKAHEAD":
            assert.Equal(t, "29", v)
        case "EPOCHS_PER_ETH1_VOTING_PERIOD":
            assert.Equal(t, "30", v)
        case "SLOTS_PER_HISTORICAL_ROOT":
            assert.Equal(t, "31", v)
        case "MIN_VALIDATOR_WITHDRAWABILITY_DELAY":
            assert.Equal(t, "32", v)
        case "SHARD_COMMITTEE_PERIOD":
            assert.Equal(t, "33", v)
        case "MIN_EPOCHS_TO_INACTIVITY_PENALTY":
            assert.Equal(t, "34", v)
        case "EPOCHS_PER_HISTORICAL_VECTOR":
            assert.Equal(t, "35", v)
        case "EPOCHS_PER_SLASHINGS_VECTOR":
            assert.Equal(t, "36", v)
        case "HISTORICAL_ROOTS_LIMIT":
            assert.Equal(t, "37", v)
        case "VALIDATOR_REGISTRY_LIMIT":
            assert.Equal(t, "38", v)
        case "BASE_REWARD_FACTOR":
            assert.Equal(t, "39", v)
        case "WHISTLEBLOWER_REWARD_QUOTIENT":
            assert.Equal(t, "40", v)
        case "PROPOSER_REWARD_QUOTIENT":
            assert.Equal(t, "41", v)
        case "INACTIVITY_PENALTY_QUOTIENT":
            assert.Equal(t, "42", v)
        case "HF1_INACTIVITY_PENALTY_QUOTIENT":
            assert.Equal(t, "43", v)
        case "MIN_SLASHING_PENALTY_QUOTIENT":
            assert.Equal(t, "44", v)
        case "HF1_MIN_SLASHING_PENALTY_QUOTIENT":
            assert.Equal(t, "45", v)
        case "PROPORTIONAL_SLASHING_MULTIPLIER":
            assert.Equal(t, "46", v)
        case "HF1_PROPORTIONAL_SLASHING_MULTIPLIER":
            assert.Equal(t, "47", v)
        case "MAX_PROPOSER_SLASHINGS":
            assert.Equal(t, "48", v)
        case "MAX_ATTESTER_SLASHINGS":
            assert.Equal(t, "49", v)
        case "MAX_ATTESTATIONS":
            assert.Equal(t, "50", v)
        case "MAX_DEPOSITS":
            assert.Equal(t, "51", v)
        case "MAX_VOLUNTARY_EXITS":
            assert.Equal(t, "52", v)
        case "MAX_BLOBS_PER_BLOCK":
            assert.Equal(t, "4", v)
        case "TIMELY_HEAD_FLAG_INDEX":
            assert.Equal(t, "0x35", v)
        case "TIMELY_SOURCE_FLAG_INDEX":
            assert.Equal(t, "0x36", v)
        case "TIMELY_TARGET_FLAG_INDEX":
            assert.Equal(t, "0x37", v)
        case "TIMELY_HEAD_WEIGHT":
            assert.Equal(t, "56", v)
        case "TIMELY_SOURCE_WEIGHT":
            assert.Equal(t, "57", v)
        case "TIMELY_TARGET_WEIGHT":
            assert.Equal(t, "58", v)
        case "SYNC_REWARD_WEIGHT":
            assert.Equal(t, "59", v)
        case "WEIGHT_DENOMINATOR":
            assert.Equal(t, "60", v)
        case "TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE":
            assert.Equal(t, "61", v)
        case "SYNC_COMMITTEE_SUBNET_COUNT":
            assert.Equal(t, "62", v)
        case "SYNC_COMMITTEE_SIZE":
            assert.Equal(t, "63", v)
        case "SYNC_PUBKEYS_PER_AGGREGATE":
            assert.Equal(t, "64", v)
        case "INACTIVITY_SCORE_BIAS":
            assert.Equal(t, "65", v)
        case "EPOCHS_PER_SYNC_COMMITTEE_PERIOD":
            assert.Equal(t, "66", v)
        case "INACTIVITY_PENALTY_QUOTIENT_ALTAIR":
            assert.Equal(t, "67", v)
        case "MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR":
            assert.Equal(t, "68", v)
        case "PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR":
            assert.Equal(t, "69", v)
        case "INACTIVITY_SCORE_RECOVERY_RATE":
            assert.Equal(t, "70", v)
        case "MIN_SYNC_COMMITTEE_PARTICIPANTS":
            assert.Equal(t, "71", v)
        case "PROPOSER_WEIGHT":
            assert.Equal(t, "8", v)
        case "DOMAIN_BEACON_PROPOSER":
            assert.Equal(t, "0x30303031", v)
        case "DOMAIN_BEACON_ATTESTER":
            assert.Equal(t, "0x30303032", v)
        case "DOMAIN_RANDAO":
            assert.Equal(t, "0x30303033", v)
        case "DOMAIN_DEPOSIT":
            assert.Equal(t, "0x30303034", v)
        case "DOMAIN_VOLUNTARY_EXIT":
            assert.Equal(t, "0x30303035", v)
|
||||
case "DOMAIN_SELECTION_PROOF":
|
||||
assert.Equal(t, "0x30303036", v)
|
||||
case "DOMAIN_AGGREGATE_AND_PROOF":
|
||||
assert.Equal(t, "0x30303037", v)
|
||||
case "DOMAIN_APPLICATION_MASK":
|
||||
assert.Equal(t, "0x31303030", v)
|
||||
case "DOMAIN_SYNC_COMMITTEE":
|
||||
assert.Equal(t, "0x07000000", v)
|
||||
case "DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF":
|
||||
assert.Equal(t, "0x08000000", v)
|
||||
case "DOMAIN_CONTRIBUTION_AND_PROOF":
|
||||
assert.Equal(t, "0x09000000", v)
|
||||
case "DOMAIN_BLS_TO_EXECUTION_CHANGE":
|
||||
assert.Equal(t, "0x0a000000", v)
|
||||
case "DOMAIN_APPLICATION_BUILDER":
|
||||
assert.Equal(t, "0x00000001", v)
|
||||
case "DOMAIN_BLOB_SIDECAR":
|
||||
assert.Equal(t, "0x00000000", v)
|
||||
case "TRANSITION_TOTAL_DIFFICULTY":
|
||||
assert.Equal(t, "0", v)
|
||||
case "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":
|
||||
assert.Equal(t, "72", v)
|
||||
case "TERMINAL_BLOCK_HASH":
|
||||
s, ok := v.(string)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, common.HexToHash("TerminalBlockHash"), common.HexToHash(s))
|
||||
case "TERMINAL_TOTAL_DIFFICULTY":
|
||||
assert.Equal(t, "73", v)
|
||||
case "DefaultFeeRecipient":
|
||||
assert.Equal(t, common.HexToAddress("DefaultFeeRecipient"), v)
|
||||
case "PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":
|
||||
assert.Equal(t, "3", v)
|
||||
case "MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":
|
||||
assert.Equal(t, "32", v)
|
||||
case "INACTIVITY_PENALTY_QUOTIENT_BELLATRIX":
|
||||
assert.Equal(t, "16777216", v)
|
||||
case "PROPOSER_SCORE_BOOST":
|
||||
assert.Equal(t, "40", v)
|
||||
case "INTERVALS_PER_SLOT":
|
||||
assert.Equal(t, "3", v)
|
||||
case "MAX_WITHDRAWALS_PER_PAYLOAD":
|
||||
assert.Equal(t, "74", v)
|
||||
case "MAX_BLS_TO_EXECUTION_CHANGES":
|
||||
assert.Equal(t, "75", v)
|
||||
case "MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP":
|
||||
assert.Equal(t, "76", v)
|
||||
case "REORG_MAX_EPOCHS_SINCE_FINALIZATION":
|
||||
assert.Equal(t, "2", v)
|
||||
case "REORG_WEIGHT_THRESHOLD":
|
||||
assert.Equal(t, "20", v)
|
||||
case "REORG_PARENT_WEIGHT_THRESHOLD":
|
||||
assert.Equal(t, "160", v)
|
||||
case "MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":
|
||||
assert.Equal(t, "8", v)
|
||||
case "MAX_REQUEST_LIGHT_CLIENT_UPDATES":
|
||||
assert.Equal(t, "128", v)
|
||||
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
|
||||
case "NODE_ID_BITS":
|
||||
assert.Equal(t, "256", v)
|
||||
case "ATTESTATION_SUBNET_EXTRA_BITS":
|
||||
assert.Equal(t, "0", v)
|
||||
case "ATTESTATION_SUBNET_PREFIX_BITS":
|
||||
assert.Equal(t, "6", v)
|
||||
case "SUBNETS_PER_NODE":
|
||||
assert.Equal(t, "2", v)
|
||||
case "EPOCHS_PER_SUBNET_SUBSCRIPTION":
|
||||
assert.Equal(t, "256", v)
|
||||
case "MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS":
|
||||
assert.Equal(t, "4096", v)
|
||||
case "MAX_REQUEST_BLOB_SIDECARS":
|
||||
assert.Equal(t, "768", v)
|
||||
case "MESSAGE_DOMAIN_INVALID_SNAPPY":
|
||||
assert.Equal(t, "0x00000000", v)
|
||||
case "MESSAGE_DOMAIN_VALID_SNAPPY":
|
||||
assert.Equal(t, "0x01000000", v)
|
||||
case "ATTESTATION_PROPAGATION_SLOT_RANGE":
|
||||
assert.Equal(t, "32", v)
|
||||
case "RESP_TIMEOUT":
|
||||
assert.Equal(t, "10", v)
|
||||
case "TTFB_TIMEOUT":
|
||||
assert.Equal(t, "5", v)
|
||||
case "MIN_EPOCHS_FOR_BLOCK_REQUESTS":
|
||||
assert.Equal(t, "33024", v)
|
||||
case "GOSSIP_MAX_SIZE":
|
||||
assert.Equal(t, "10485760", v)
|
||||
case "MAX_CHUNK_SIZE":
|
||||
assert.Equal(t, "10485760", v)
|
||||
case "ATTESTATION_SUBNET_COUNT":
|
||||
assert.Equal(t, "64", v)
|
||||
case "MAXIMUM_GOSSIP_CLOCK_DISPARITY":
|
||||
assert.Equal(t, "500", v)
|
||||
case "MAX_REQUEST_BLOCKS":
|
||||
assert.Equal(t, "1024", v)
|
||||
case "MAX_REQUEST_BLOCKS_DENEB":
|
||||
assert.Equal(t, "128", v)
|
||||
default:
|
||||
t.Errorf("Incorrect key: %s", k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
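The test above relies on the config struct's YAML tags: every field is filled with a recognizable placeholder, marshaled, and each emitted key is matched back to its placeholder in a `switch`. A minimal sketch of the same pattern, assuming a hypothetical two-field config (the struct and test names here are illustrative, not Prysm's API):

```go
package params_test

import (
	"testing"

	"gopkg.in/yaml.v2"
)

// smallConfig stands in for the full BeaconConfig; only the yaml tags matter.
type smallConfig struct {
	SlotsPerEpoch  uint64 `yaml:"SLOTS_PER_EPOCH"`
	SecondsPerSlot uint64 `yaml:"SECONDS_PER_SLOT"`
}

func TestSmallConfigYamlRoundTrip(t *testing.T) {
	// Placeholder values 27 and 25 mirror the test above.
	out, err := yaml.Marshal(&smallConfig{SlotsPerEpoch: 27, SecondsPerSlot: 25})
	if err != nil {
		t.Fatal(err)
	}
	m := map[string]uint64{}
	if err := yaml.Unmarshal(out, &m); err != nil {
		t.Fatal(err)
	}
	// Switch over the emitted keys, exactly like the full test's switch over k.
	for k, v := range m {
		switch k {
		case "SLOTS_PER_EPOCH":
			if v != 27 {
				t.Errorf("SLOTS_PER_EPOCH = %d, want 27", v)
			}
		case "SECONDS_PER_SLOT":
			if v != 25 {
				t.Errorf("SECONDS_PER_SLOT = %d, want 25", v)
			}
		default:
			t.Errorf("Incorrect key: %s", k)
		}
	}
}
```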
@@ -108,7 +108,7 @@ func (*Server) GetVersion(w http.ResponseWriter, r *http.Request) {

// GetHealth returns node health status in http status codes. Useful for load balancers.
func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "node.GetHealth")
_, span := trace.StartSpan(r.Context(), "node.GetHealth")
defer span.End()

rawSyncingStatus, syncingStatus, ok := shared.UintFromQuery(w, r, "syncing_status", false)
@@ -119,14 +119,10 @@ func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
return
}

optimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
}
if s.SyncChecker.Synced() && !optimistic {
if s.SyncChecker.Synced() {
return
}
if s.SyncChecker.Syncing() || optimistic {
if s.SyncChecker.Syncing() || s.SyncChecker.Initialized() {
if rawSyncingStatus != "" {
w.WriteHeader(intSyncingStatus)
} else {
@@ -134,6 +130,5 @@ func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
}
return
}

w.WriteHeader(http.StatusServiceUnavailable)
}
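As the handler and the tests below show, the endpoint maps sync state onto status codes: 200 when synced, 206 (or a caller-supplied `syncing_status` code) while syncing, and 503 otherwise. A hedged sketch of a load-balancer-style probe consuming those codes; the endpoint path is the one exercised in the tests, the probe function itself is illustrative:

```go
package main

import (
	"fmt"
	"net/http"
)

// nodeHealth classifies a beacon node by its /eth/v1/node/health status code.
func nodeHealth(baseURL string) (string, error) {
	resp, err := http.Get(baseURL + "/eth/v1/node/health")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusOK: // 200: node is synced
		return "ready", nil
	case http.StatusPartialContent: // 206: syncing (or optimistic, pre-change)
		return "syncing", nil
	case http.StatusServiceUnavailable: // 503: not initialized / unhealthy
		return "unavailable", nil
	default: // a custom code requested via ?syncing_status=..., e.g. 402
		return fmt.Sprintf("custom status %d", resp.StatusCode), nil
	}
}
```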
@@ -91,10 +91,8 @@ func TestGetVersion(t *testing.T) {

func TestGetHealth(t *testing.T) {
checker := &syncmock.Sync{}
optimisticFetcher := &mock.ChainService{Optimistic: false}
s := &Server{
SyncChecker: checker,
OptimisticModeFetcher: optimisticFetcher,
SyncChecker: checker,
}

request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/health", nil)
@@ -103,30 +101,25 @@ func TestGetHealth(t *testing.T) {
s.GetHealth(writer, request)
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)

checker.IsSyncing = true
checker.IsSynced = false
checker.IsInitialized = true
request = httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/health", nil)
writer = httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetHealth(writer, request)
assert.Equal(t, http.StatusPartialContent, writer.Code)

request = httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://example.com/eth/v1/node/health?syncing_status=%d", http.StatusPaymentRequired), nil)
writer = httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetHealth(writer, request)
assert.Equal(t, http.StatusPaymentRequired, writer.Code)

checker.IsSyncing = false
checker.IsSynced = true
request = httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/health", nil)
writer = httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetHealth(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)

checker.IsSyncing = false
checker.IsSynced = true
optimisticFetcher.Optimistic = true
request = httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/health", nil)
writer = httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetHealth(writer, request)
assert.Equal(t, http.StatusPartialContent, writer.Code)
}

func TestGetIdentity(t *testing.T) {

@@ -223,7 +223,7 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
return nil, &core.RpcError{Err: errors.Wrap(err, "failed to retrieve block from db"), Reason: core.Internal}
}
// if block is not in the retention window return 200 w/ empty list
if !p.BlobStorage.WithinRetentionPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
return make([]*blocks.VerifiedROBlob, 0), nil
}
commitments, err := b.Block().Body().BlobKzgCommitments()
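Both `BlobStorage.WithinRetentionPeriod` and `params.WithinDAPeriod` express the same data-availability window: a block's blobs are servable only while its epoch is within `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` of the current epoch (4096 per the config test above). A minimal sketch of that check, with the Prysm epoch types replaced by plain uint64s:

```go
package main

import "fmt"

// Value matches MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS in the config test above.
const minEpochsForBlobSidecarsRequests = 4096

// withinDAPeriod reports whether blobs for a block at blockEpoch are still
// inside the retention window as seen from currentEpoch.
func withinDAPeriod(blockEpoch, currentEpoch uint64) bool {
	if currentEpoch < minEpochsForBlobSidecarsRequests {
		return true // chain is younger than the window; everything is retained
	}
	return blockEpoch >= currentEpoch-minEpochsForBlobSidecarsRequests
}

func main() {
	fmt.Println(withinDAPeriod(100, 5000))  // false: 100 < 5000-4096 = 904
	fmt.Println(withinDAPeriod(1000, 5000)) // true: 1000 >= 904
}
```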
@@ -164,7 +164,8 @@ func TestGetBlob(t *testing.T) {
db := testDB.SetupDB(t)
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
require.NoError(t, db.SaveBlock(context.Background(), denebBlock))
_, bs := filesystem.NewEphemeralBlobStorageWithFs(t)
_, bs, err := filesystem.NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
testSidecars, err := verification.BlobSidecarSliceNoop(blobs)
require.NoError(t, err)
for i := range testSidecars {

@@ -1,5 +1,3 @@
# gazelle:ignore

load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
@@ -46,7 +44,6 @@ go_library(
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -91,7 +88,6 @@ go_library(
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_protobuf//ptypes/empty",
@@ -177,6 +173,7 @@ common_deps = [
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
]

# gazelle:ignore
go_test(
name = "go_default_test",
timeout = "moderate",

@@ -7,7 +7,6 @@ import (
"sync"
"time"

cKzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
emptypb "github.com/golang/protobuf/ptypes/empty"
@@ -19,12 +18,9 @@ import (
blockfeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -255,15 +251,7 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
}

// ProposeBeaconBlock handles the proposal of beacon blocks.
// TODO: Add tests
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
var (
blobSidecars []*ethpb.BlobSidecar
dataColumnSideCars []*ethpb.DataColumnSidecar
)

isPeerDASEnabled := features.Get().EnablePeerDAS

ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
defer span.End()

@@ -276,16 +264,14 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
return nil, status.Errorf(codes.InvalidArgument, "%s: %v", "decode block failed", err)
}

var sidecars []*ethpb.BlobSidecar
if block.IsBlinded() {
block, blobSidecars, dataColumnSideCars, err = vs.handleBlindedBlock(ctx, block, isPeerDASEnabled)
if err != nil {
return nil, status.Errorf(codes.Internal, "%s: %v", "handle blinded block", err)
}
block, sidecars, err = vs.handleBlindedBlock(ctx, block)
} else {
blobSidecars, dataColumnSideCars, err = handleUnblidedBlock(block, req, isPeerDASEnabled)
if err != nil {
return nil, status.Errorf(codes.Internal, "%s: %v", "handle unblided block", err)
}
sidecars, err = vs.handleUnblindedBlock(block, req)
}
if err != nil {
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
}

root, err := block.Block().HashTreeRoot()
@@ -306,14 +292,8 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
errChan <- nil
}()

if isPeerDASEnabled {
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSideCars, root); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive data columns: %v", err)
}
} else {
if err := vs.broadcastAndReceiveBlobs(ctx, blobSidecars, root); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
}
if err := vs.broadcastAndReceiveBlobs(ctx, sidecars, root); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
}

wg.Wait()
@@ -325,83 +305,47 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
}

// handleBlindedBlock processes blinded beacon blocks.
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock, isPeerDASEnabled bool) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, error) {
if block.Version() < version.Bellatrix {
return nil, nil, nil, errors.New("pre-Bellatrix blinded block")
return nil, nil, errors.New("pre-Bellatrix blinded block")
}

if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
return nil, nil, nil, errors.New("unconfigured block builder")
return nil, nil, errors.New("unconfigured block builder")
}

copiedBlock, err := block.Copy()
if err != nil {
return nil, nil, nil, errors.Wrap(err, "block copy")
return nil, nil, err
}

payload, bundle, err := vs.BlockBuilder.SubmitBlindedBlock(ctx, block)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "submit blinded block")
return nil, nil, errors.Wrap(err, "submit blinded block failed")
}

if err := copiedBlock.Unblind(payload); err != nil {
return nil, nil, nil, errors.Wrap(err, "unblind")
return nil, nil, errors.Wrap(err, "unblind failed")
}

if isPeerDASEnabled {
dataColumnSideCars, err := unblindDataColumnsSidecars(copiedBlock, bundle)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "unblind data columns sidecars")
}

return copiedBlock, nil, dataColumnSideCars, nil
}

blobSidecars, err := unblindBlobsSidecars(copiedBlock, bundle)
sidecars, err := unblindBlobsSidecars(copiedBlock, bundle)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "unblind blobs sidecars")
return nil, nil, errors.Wrap(err, "unblind sidecars failed")
}

return copiedBlock, blobSidecars, nil, nil
return copiedBlock, sidecars, nil
}

// handleUnblindedBlock processes unblinded beacon blocks.
func handleUnblidedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock, isPeerDASEnabled bool) ([]*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
func (vs *Server) handleUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock) ([]*ethpb.BlobSidecar, error) {
dbBlockContents := req.GetDeneb()

if dbBlockContents == nil {
return nil, nil, nil
return nil, nil
}

if isPeerDASEnabled {
// Convert blobs from slices to array.
blobs := make([]cKzg4844.Blob, 0, len(dbBlockContents.Blobs))
for _, blob := range dbBlockContents.Blobs {
if len(blob) != cKzg4844.BytesPerBlob {
return nil, nil, errors.Errorf("invalid blob size. expected %d bytes, got %d bytes", cKzg4844.BytesPerBlob, len(blob))
}

blobs = append(blobs, cKzg4844.Blob(blob))
}

dataColumnSideCars, err := peerdas.DataColumnSidecars(block, blobs)
if err != nil {
return nil, nil, errors.Wrap(err, "data column sidecars")
}

return nil, dataColumnSideCars, nil
}

blobSidecars, err := BuildBlobSidecars(block, dbBlockContents.Blobs, dbBlockContents.KzgProofs)
if err != nil {
return nil, nil, errors.Wrap(err, "build blob sidecars")
}

return blobSidecars, nil, nil
return BuildBlobSidecars(block, dbBlockContents.Blobs, dbBlockContents.KzgProofs)
}
// broadcastReceiveBlock broadcasts a block and handles its reception.
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [32]byte) error {
protoBlock, err := block.Proto()
if err != nil {
return errors.Wrap(err, "protobuf conversion failed")
@@ -417,7 +361,7 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.Si
}

// broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars.
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [fieldparams.RootLength]byte) error {
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [32]byte) error {
for i, sc := range sidecars {
if err := vs.P2P.BroadcastBlob(ctx, uint64(i), sc); err != nil {
return errors.Wrap(err, "broadcast blob failed")
@@ -427,12 +371,10 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
if err != nil {
return errors.Wrap(err, "ROBlob creation failed")
}

verifiedBlob := blocks.NewVerifiedROBlob(readOnlySc)
if err := vs.BlobReceiver.ReceiveBlob(ctx, verifiedBlob); err != nil {
return errors.Wrap(err, "receive blob failed")
}

vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.BlobSidecarReceived,
Data: &operation.BlobSidecarReceivedData{Blob: &verifiedBlob},
@@ -441,31 +383,6 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
return nil
}

// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, sidecars []*ethpb.DataColumnSidecar, root [fieldparams.RootLength]byte) error {
for i, sidecar := range sidecars {
if err := vs.P2P.BroadcastDataColumn(ctx, uint64(i), sidecar); err != nil {
return errors.Wrap(err, "broadcast data column")
}

roDataColumn, err := blocks.NewRODataColumnWithRoot(sidecar, root)
if err != nil {
return errors.Wrap(err, "new read-only data column with root")
}

verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
if err := vs.DataColumnReceiver.ReceiveDataColumn(ctx, verifiedRODataColumn); err != nil {
return errors.Wrap(err, "receive data column")
}

vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.DataColumnSidecarReceived,
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn},
})
}
return nil
}

// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
func (vs *Server) PrepareBeaconProposer(
_ context.Context, request *ethpb.PrepareBeaconProposerRequest,

@@ -58,13 +58,6 @@ func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBe
slot := blk.Slot()
vIdx := blk.ProposerIndex()
headRoot := blk.ParentRoot()
var err error
if headRoot == [32]byte{} {
headRoot, err = vs.BeaconDB.GenesisBlockRoot(ctx)
if err != nil {
return nil, false, err
}
}
logFields := logrus.Fields{
"validatorIndex": vIdx,
"slot": slot,
@@ -78,6 +71,7 @@ func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBe
}
setFeeRecipientIfBurnAddress(&val)

var err error
if ok && payloadId != [8]byte{} {
// Payload ID is cache hit. Return the cached payload ID.
var pid primitives.PayloadID

@@ -778,7 +778,7 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
return &ethpb.GenericSignedBeaconBlock{Block: blk}
},
useBuilder: true,
err: "unblind blobs sidecars: commitment value doesn't match block",
err: "unblind sidecars failed: commitment value doesn't match block",
},
}

@@ -67,7 +67,6 @@ type Server struct {
SyncCommitteePool synccommittee.Pool
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
DataColumnReceiver blockchain.DataColumnReceiver
MockEth1Votes bool
Eth1BlockFetcher execution.POWBlockFetcher
PendingDepositsFetcher depositcache.PendingDepositsFetcher

@@ -3,10 +3,7 @@ package validator
import (
"bytes"

cKzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -71,29 +68,3 @@ func unblindBlobsSidecars(block interfaces.SignedBeaconBlock, bundle *enginev1.B
}
return sidecars, nil
}

// TODO: Add tests
func unblindDataColumnsSidecars(block interfaces.SignedBeaconBlock, bundle *enginev1.BlobsBundle) ([]*ethpb.DataColumnSidecar, error) {
// Check if the block is at least a Deneb block.
if block.Version() < version.Deneb {
return nil, nil
}

// Convert blobs from slices to array.
blobs := make([]cKzg4844.Blob, 0, len(bundle.Blobs))
for _, blob := range bundle.Blobs {
if len(blob) != cKzg4844.BytesPerBlob {
return nil, errors.Errorf("invalid blob size. expected %d bytes, got %d bytes", cKzg4844.BytesPerBlob, len(blob))
}

blobs = append(blobs, cKzg4844.Blob(blob))
}

// Retrieve data columns from blobs.
dataColumnSidecars, err := peerdas.DataColumnSidecars(block, blobs)
if err != nil {
return nil, errors.Wrap(err, "data column sidecars")
}

return dataColumnSidecars, nil
}

@@ -118,7 +118,6 @@ type ReadOnlyValidator interface {
// ReadOnlyValidators defines a struct which only has read access to validators methods.
type ReadOnlyValidators interface {
Validators() []*ethpb.Validator
ValidatorsReadOnly() []ReadOnlyValidator
ValidatorAtIndex(idx primitives.ValidatorIndex) (*ethpb.Validator, error)
ValidatorAtIndexReadOnly(idx primitives.ValidatorIndex) (ReadOnlyValidator, error)
ValidatorIndexByPubkey(key [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool)

@@ -21,14 +21,6 @@ func (b *BeaconState) Validators() []*ethpb.Validator {
return b.validatorsVal()
}

// ValidatorsReadOnly participating in consensus on the beacon chain.
func (b *BeaconState) ValidatorsReadOnly() []state.ReadOnlyValidator {
b.lock.RLock()
defer b.lock.RUnlock()

return b.validatorsReadOnlyVal()
}

func (b *BeaconState) validatorsVal() []*ethpb.Validator {
var v []*ethpb.Validator
if features.Get().EnableExperimentalState {
@@ -54,35 +46,6 @@ func (b *BeaconState) validatorsVal() []*ethpb.Validator {
return res
}

func (b *BeaconState) validatorsReadOnlyVal() []state.ReadOnlyValidator {
var v []*ethpb.Validator
if features.Get().EnableExperimentalState {
if b.validatorsMultiValue == nil {
return nil
}
v = b.validatorsMultiValue.Value(b)
} else {
if b.validators == nil {
return nil
}
v = b.validators
}

res := make([]state.ReadOnlyValidator, len(v))
var err error
for i := 0; i < len(res); i++ {
val := v[i]
if val == nil {
continue
}
res[i], err = NewValidator(val)
if err != nil {
continue
}
}
return res
}

// references of validators participating in consensus on the beacon chain.
// This assumes that a lock is already held on BeaconState. This does not
// copy fully and instead just copies the reference.

@@ -25,7 +25,6 @@ go_library(
"rpc_blob_sidecars_by_range.go",
"rpc_blob_sidecars_by_root.go",
"rpc_chunked_response.go",
"rpc_data_column_sidecars_by_root.go",
"rpc_goodbye.go",
"rpc_metadata.go",
"rpc_ping.go",
@@ -38,7 +37,6 @@ go_library(
"subscriber_beacon_blocks.go",
"subscriber_blob_sidecar.go",
"subscriber_bls_to_execution_change.go",
"subscriber_data_column_sidecar.go",
"subscriber_handlers.go",
"subscriber_sync_committee_message.go",
"subscriber_sync_contribution_proof.go",
@@ -49,7 +47,6 @@ go_library(
"validate_beacon_blocks.go",
"validate_blob.go",
"validate_bls_to_execution_change.go",
"validate_data_column.go",
"validate_proposer_slashing.go",
"validate_sync_committee_message.go",
"validate_sync_contribution_proof.go",
@@ -74,7 +71,6 @@ go_library(
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/core/transition/interop:go_default_library",

@@ -1,7 +1,6 @@
package backfill

import (
"context"
"fmt"
"sort"
"time"
@@ -56,8 +55,6 @@ const (
batchEndSequence
)

var retryDelay = time.Second

type batchId string

type batch struct {
@@ -65,7 +62,6 @@ type batch struct {
scheduled time.Time
seq int // sequence identifier, ie how many times has the sequence() method served this batch
retries int
retryAfter time.Time
begin primitives.Slot
end primitives.Slot // half-open interval, [begin, end), ie >= start, < end.
results verifiedROBlocks
@@ -78,7 +74,7 @@ type batch struct {
}

func (b batch) logFields() logrus.Fields {
f := map[string]interface{}{
return map[string]interface{}{
"batchId": b.id(),
"state": b.state.String(),
"scheduled": b.scheduled.String(),
@@ -90,10 +86,6 @@ func (b batch) logFields() logrus.Fields {
"blockPid": b.blockPid,
"blobPid": b.blobPid,
}
if b.retries > 0 {
f["retryAfter"] = b.retryAfter.String()
}
return f
}

func (b batch) replaces(r batch) bool {
@@ -161,8 +153,7 @@ func (b batch) withState(s batchState) batch {
switch b.state {
case batchErrRetryable:
b.retries += 1
b.retryAfter = time.Now().Add(retryDelay)
log.WithFields(b.logFields()).Info("Sequencing batch for retry after delay")
log.WithFields(b.logFields()).Info("Sequencing batch for retry")
case batchInit, batchNil:
b.firstScheduled = b.scheduled
}
@@ -199,32 +190,8 @@ func (b batch) availabilityStore() das.AvailabilityStore {
return b.bs.store
}

var batchBlockUntil = func(ctx context.Context, untilRetry time.Duration, b batch) error {
log.WithFields(b.logFields()).WithField("untilRetry", untilRetry.String()).
Debug("Sleeping for retry backoff delay")
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(untilRetry):
return nil
}
}

func (b batch) waitUntilReady(ctx context.Context) error {
// Wait to retry a failed batch to avoid hammering peers
// if we've hit a state where batches will consistently fail.
// Avoids spamming requests and logs.
if b.retries > 0 {
untilRetry := time.Until(b.retryAfter)
if untilRetry > time.Millisecond {
return batchBlockUntil(ctx, untilRetry, b)
}
}
return nil
}

func sortBatchDesc(bb []batch) {
sort.Slice(bb, func(i, j int) bool {
return bb[i].end > bb[j].end
return bb[j].end < bb[i].end
})
}
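The two comparator bodies in `sortBatchDesc` above are equivalent: `bb[i].end > bb[j].end` holds exactly when `bb[j].end < bb[i].end`, so both order batches by end slot, descending. A self-contained demonstration of that equivalence with plain uint64s:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	ends := []uint64{10, 40, 20, 30}
	a := append([]uint64(nil), ends...)
	b := append([]uint64(nil), ends...)
	// Old comparator form.
	sort.Slice(a, func(i, j int) bool { return a[i] > a[j] })
	// New comparator form; same ordering.
	sort.Slice(b, func(i, j int) bool { return b[j] < b[i] })
	fmt.Println(a) // [40 30 20 10]
	fmt.Println(b) // [40 30 20 10]
}
```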
@@ -1,11 +1,8 @@
package backfill

import (
"context"
"testing"
"time"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
@@ -22,22 +19,3 @@ func TestSortBatchDesc(t *testing.T) {
require.Equal(t, orderOut[i], batches[i].end)
}
}

func TestWaitUntilReady(t *testing.T) {
b := batch{}.withState(batchErrRetryable)
require.Equal(t, time.Time{}, b.retryAfter)
var got time.Duration
wur := batchBlockUntil
var errDerp = errors.New("derp")
batchBlockUntil = func(_ context.Context, ur time.Duration, _ batch) error {
got = ur
return errDerp
}
// retries counter and timestamp are set when we mark the batch for sequencing, if it is in the retry state
b = b.withState(batchSequenced)
require.ErrorIs(t, b.waitUntilReady(context.Background()), errDerp)
require.Equal(t, true, retryDelay-time.Until(b.retryAfter) < time.Millisecond)
require.Equal(t, true, got < retryDelay && got > retryDelay-time.Millisecond)
require.Equal(t, 1, b.retries)
batchBlockUntil = wur
}
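`TestWaitUntilReady` above uses a common Go test seam: because `batchBlockUntil` is a package-level `var` rather than a plain function, the test can swap in a stub, assert it was called with the expected duration, and restore the original. A generic sketch of the same pattern with illustrative names (`sleepFn`, `waitThenPing` are not Prysm identifiers):

```go
package backoff

import (
	"testing"
	"time"
)

// Declared as a var so tests can replace it, like batchBlockUntil above.
var sleepFn = time.Sleep

func waitThenPing(d time.Duration) string {
	sleepFn(d)
	return "pinged"
}

func TestWaitThenPing(t *testing.T) {
	orig := sleepFn
	defer func() { sleepFn = orig }() // restore, mirroring `batchBlockUntil = wur`
	var got time.Duration
	sleepFn = func(d time.Duration) { got = d } // stub: record instead of sleeping
	if waitThenPing(time.Second) != "pinged" || got != time.Second {
		t.Fatal("stub not invoked as expected")
	}
}
```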
@@ -143,11 +143,6 @@ func (p *p2pBatchWorkerPool) batchRouter(pa PeerAssigner) {
return
}
for _, pid := range assigned {
if err := todo[0].waitUntilReady(p.ctx); err != nil {
log.WithError(p.ctx.Err()).Info("p2pBatchWorkerPool context canceled, shutting down")
p.shutdown(p.ctx.Err())
return
}
busy[pid] = true
todo[0].busy = pid
p.toWorkers <- todo[0].withPeer(pid)

@@ -39,8 +39,6 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
topic = p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.SyncCommitteeMessage{})]
case strings.Contains(topic, p2p.GossipBlobSidecarMessage):
topic = p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.BlobSidecar{})]
case strings.Contains(topic, p2p.GossipDataColumnSidecarMessage):
topic = p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.DataColumnSidecar{})]
}

base := p2p.GossipTopicMappings(topic, 0)

@@ -13,7 +13,6 @@ import (
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
p2ptypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -99,13 +98,6 @@ func (s *Service) registerRPCHandlersAltair() {
}

func (s *Service) registerRPCHandlersDeneb() {
if features.Get().EnablePeerDAS {
s.registerRPC(
p2p.RPCDataColumnSidecarsByRootTopicV1,
s.dataColumnSidecarByRootRPCHandler,
)
return
}
s.registerRPC(
p2p.RPCBlobSidecarsByRangeTopicV1,
s.blobSidecarsByRangeRPCHandler,

@@ -13,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
@@ -176,22 +175,3 @@ func WriteBlobSidecarChunk(stream libp2pcore.Stream, tor blockchain.TemporalOrac
_, err = encoding.EncodeWithMaxLength(stream, sidecar)
return err
}

// WriteDataColumnSidecarChunk writes data column chunk object to stream.
// response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
func WriteDataColumnSidecarChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle, encoding encoder.NetworkEncoding, sidecar *ethpb.DataColumnSidecar) error {
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return err
}
valRoot := tor.GenesisValidatorsRoot()
ctxBytes, err := forks.ForkDigestFromEpoch(slots.ToEpoch(sidecar.SignedBlockHeader.Header.Slot), valRoot[:])
if err != nil {
return err
}

if err := writeContextToStream(ctxBytes[:], stream); err != nil {
return err
}
_, err = encoding.EncodeWithMaxLength(stream, sidecar)
return err
}

@@ -1,149 +0,0 @@
package sync

import (
"context"
"fmt"
"math"
"sort"
"time"

libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)

func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
ctx, span := trace.StartSpan(ctx, "sync.dataColumnSidecarByRootRPCHandler")
defer span.End()
ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)
defer cancel()
SetRPCStreamDeadlines(stream)
log := log.WithField("handler", p2p.DataColumnSidecarsByRootName[1:]) // slice the leading slash off the name var
// We use the same type as for blobs as they are the same data structure.
// TODO: Make the type naming more generic to be extensible to data columns
ref, ok := msg.(*types.BlobSidecarsByRootReq)
if !ok {
return errors.New("message is not type BlobSidecarsByRootReq")
}

columnIdents := *ref
if err := validateDataColummnsByRootRequest(columnIdents); err != nil {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
return err
}
// Sort the identifiers so that requests for the same blob root will be adjacent, minimizing db lookups.
sort.Sort(columnIdents)

// TODO: Customize data column batches too
batchSize := flags.Get().BlobBatchLimit
var ticker *time.Ticker
if len(columnIdents) > batchSize {
ticker = time.NewTicker(time.Second)
}

// Compute the oldest slot we'll allow a peer to request, based on the current slot.
cs := s.cfg.clock.CurrentSlot()
minReqSlot, err := DataColumnsRPCMinValidSlot(cs)
if err != nil {
return errors.Wrapf(err, "unexpected error computing min valid blob request slot, current_slot=%d", cs)
}

// Compute all custodied columns.
custodiedColumns, err := peerdas.CustodyColumns(s.cfg.p2p.NodeID(), params.BeaconConfig().CustodyRequirement)
if err != nil {
log.WithError(err).Errorf("unexpected error retrieving the node id")
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
return err
}

for i := range columnIdents {
if err := ctx.Err(); err != nil {
closeStream(stream, log)
return err
}

// Throttle request processing to no more than batchSize/sec.
if ticker != nil && i != 0 && i%batchSize == 0 {
<-ticker.C
}
s.rateLimiter.add(stream, 1)
root, idx := bytesutil.ToBytes32(columnIdents[i].BlockRoot), columnIdents[i].Index

isCustodied := custodiedColumns[idx]
if !isCustodied {
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
s.writeErrorResponseToStream(responseCodeInvalidRequest, types.ErrInvalidColumnIndex.Error(), stream)
return types.ErrInvalidColumnIndex
}

// TODO: Differentiate between blobs and columns for our storage engine
sc, err := s.cfg.blobStorage.GetColumn(root, idx)
if err != nil {
if db.IsNotFound(err) {
log.WithError(err).WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", root),
"index": idx,
}).Debugf("Peer requested data column sidecar by root not found in db")
continue
}
log.WithError(err).Errorf("unexpected db error retrieving data column, root=%x, index=%d", root, idx)
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
return err
}

// If any root in the request content references a block earlier than minimum_request_epoch,
// peers MAY respond with error code 3: ResourceUnavailable or not include the data column in the response.
// note: we are deviating from the spec to allow requests for data column that are before minimum_request_epoch,
// up to the beginning of the retention period.
if sc.SignedBlockHeader.Header.Slot < minReqSlot {
s.writeErrorResponseToStream(responseCodeResourceUnavailable, types.ErrDataColumnLTMinRequest.Error(), stream)
log.WithError(types.ErrDataColumnLTMinRequest).
Debugf("requested data column for block %#x before minimum_request_epoch", columnIdents[i].BlockRoot)
return types.ErrDataColumnLTMinRequest
}

SetStreamWriteDeadline(stream, defaultWriteDuration)
if chunkErr := WriteDataColumnSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), sc); chunkErr != nil {
log.WithError(chunkErr).Debug("Could not send a chunked response")
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, chunkErr)
return chunkErr
}
}
closeStream(stream, log)
return nil
}

func validateDataColummnsByRootRequest(colIdents types.BlobSidecarsByRootReq) error {
if uint64(len(colIdents)) > params.BeaconConfig().MaxRequestDataColumnSidecars {
return types.ErrMaxDataColumnReqExceeded
}
return nil
}

func DataColumnsRPCMinValidSlot(current primitives.Slot) (primitives.Slot, error) {
// Avoid overflow if we're running on a config where deneb is set to far future epoch.
if params.BeaconConfig().DenebForkEpoch == math.MaxUint64 || !features.Get().EnablePeerDAS {
return primitives.Slot(math.MaxUint64), nil
}
minReqEpochs := params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest
currEpoch := slots.ToEpoch(current)
minStart := params.BeaconConfig().DenebForkEpoch
if currEpoch > minReqEpochs && currEpoch-minReqEpochs > minStart {
minStart = currEpoch - minReqEpochs
}
return slots.EpochStart(minStart)
}
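`DataColumnsRPCMinValidSlot` above computes the earliest servable slot as the start of `max(DenebForkEpoch, currentEpoch - minReqEpochs)`. A worked numeric sketch of that window logic, with the Prysm slot/epoch types replaced by plain uint64s and made-up epoch numbers (`slotsPerEpoch = 32` is the mainnet preset):

```go
package main

import "fmt"

const slotsPerEpoch = 32

// minValidSlot mirrors the function above: clamp the lookback window to the
// fork epoch, then convert the resulting epoch to its first slot (EpochStart).
func minValidSlot(currEpoch, denebForkEpoch, minReqEpochs uint64) uint64 {
	minStart := denebForkEpoch
	if currEpoch > minReqEpochs && currEpoch-minReqEpochs > minStart {
		minStart = currEpoch - minReqEpochs
	}
	return minStart * slotsPerEpoch
}

func main() {
	// Young chain: the window still reaches back to the fork epoch itself.
	fmt.Println(minValidSlot(2000, 100, 4096)) // 3200 = 100*32
	// Mature chain: window trails the head by minReqEpochs epochs.
	fmt.Println(minValidSlot(10000, 100, 4096)) // 188928 = (10000-4096)*32
}
```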
@@ -208,29 +208,6 @@ func SendBlobSidecarByRoot(
return readChunkEncodedBlobs(stream, p2pApi.Encoding(), ctxMap, blobValidatorFromRootReq(req), max)
}

func SendDataColumnSidecarByRoot(
ctx context.Context, tor blockchain.TemporalOracle, p2pApi p2p.P2P, pid peer.ID,
ctxMap ContextByteVersions, req *p2ptypes.BlobSidecarsByRootReq,
) ([]blocks.ROBlob, error) {
if uint64(len(*req)) > params.BeaconConfig().MaxRequestDataColumnSidecars {
return nil, errors.Wrapf(p2ptypes.ErrMaxDataColumnReqExceeded, "length=%d", len(*req))
}

topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRootName, slots.ToEpoch(tor.CurrentSlot()))
if err != nil {
return nil, err
}
log.WithField("topic", topic).Debug("Sending data column sidecar request")
stream, err := p2pApi.Send(ctx, req, topic, pid)
if err != nil {
return nil, err
}
defer closeStream(stream, log)

maxCol := params.BeaconConfig().MaxRequestDataColumnSidecars
return readChunkEncodedBlobs(stream, p2pApi.Encoding(), ctxMap, blobValidatorFromRootReq(req), maxCol)
}

// BlobResponseValidation represents a function that can validate aspects of a single unmarshaled blob
// that was received from a peer in response to an rpc request.
type BlobResponseValidation func(blocks.ROBlob) error

@@ -482,10 +482,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
func TestBlobValidatorFromRootReq(t *testing.T) {
rootA := bytesutil.PadTo([]byte("valid"), 32)
rootB := bytesutil.PadTo([]byte("invalid"), 32)
header := &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{Slot: 0},
Signature: make([]byte, fieldparams.BLSSignatureLength),
}
header := &ethpb.SignedBeaconBlockHeader{}
blobSidecarA0 := util.GenerateTestDenebBlobSidecar(t, bytesutil.ToBytes32(rootA), header, 0, []byte{}, make([][]byte, 0))
blobSidecarA1 := util.GenerateTestDenebBlobSidecar(t, bytesutil.ToBytes32(rootA), header, 1, []byte{}, make([][]byte, 0))
blobSidecarB0 := util.GenerateTestDenebBlobSidecar(t, bytesutil.ToBytes32(rootB), header, 0, []byte{}, make([][]byte, 0))
@@ -593,8 +590,7 @@ func TestBlobValidatorFromRangeReq(t *testing.T) {
t.Run(c.name, func(t *testing.T) {
vf := blobValidatorFromRangeReq(c.req)
header := &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{Slot: c.responseSlot},
Signature: make([]byte, fieldparams.BLSSignatureLength),
Header: &ethpb.BeaconBlockHeader{Slot: c.responseSlot},
}
sc := util.GenerateTestDenebBlobSidecar(t, [32]byte{}, header, 0, []byte{}, make([][]byte, 0))
err := vf(sc)

@@ -51,8 +51,7 @@ var _ runtime.Service = (*Service)(nil)

const rangeLimit uint64 = 1024
const seenBlockSize = 1000
const seenBlobSize = seenBlockSize * 6 // Each block can have max 6 blobs.
const seenDataColumnSize = seenBlockSize * 128 // Each block can have max 128 data columns.
const seenBlobSize = seenBlockSize * 4 // Each block can have max 4 blobs. Worst case 164kB for cache.
const seenUnaggregatedAttSize = 20000
const seenAggregatedAttSize = 16384
const seenSyncMsgSize = 1000 // Maximum of 512 sync committee members, 1000 is a safe amount.
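The seen-cache constants above trade memory for gossip dedup coverage: 1000 recent blocks times the per-block message count gives the LRU capacity. A back-of-the-envelope sizing sketch, assuming roughly 40 bytes per cache entry (the per-entry cost is an assumption, not a measured figure; the in-code comment's "164kB" for the 4-blob cache implies a similar per-entry size):

```go
package main

import "fmt"

func main() {
	const seenBlockSize = 1000
	const bytesPerEntry = 40 // assumed key + bookkeeping overhead

	fmt.Println("blobs, removed constant (6/block): ", seenBlockSize*6*bytesPerEntry, "bytes")
	fmt.Println("blobs, new constant (4/block):     ", seenBlockSize*4*bytesPerEntry, "bytes")
	fmt.Println("data columns (128/block):          ", seenBlockSize*128*bytesPerEntry, "bytes")
}
```

The 128-columns-per-block cache is roughly 32x the blob cache, which is why it gets its own constant and lock in the service struct below.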
@@ -103,7 +102,6 @@ type config struct {
type blockchainService interface {
blockchain.BlockReceiver
blockchain.BlobReceiver
blockchain.DataColumnReceiver
blockchain.HeadFetcher
blockchain.FinalizationFetcher
blockchain.ForkFetcher
@@ -135,8 +133,6 @@ type Service struct {
seenBlockCache *lru.Cache
seenBlobLock sync.RWMutex
seenBlobCache *lru.Cache
seenDataColumnLock sync.RWMutex
seenDataColumnCache *lru.Cache
seenAggregatedAttestationLock sync.RWMutex
seenAggregatedAttestationCache *lru.Cache
seenUnAggregatedAttestationLock sync.RWMutex
@@ -280,7 +276,6 @@ func (s *Service) Status() error {
func (s *Service) initCaches() {
s.seenBlockCache = lruwrpr.New(seenBlockSize)
s.seenBlobCache = lruwrpr.New(seenBlobSize)
s.seenDataColumnCache = lruwrpr.New(seenDataColumnSize)
s.seenAggregatedAttestationCache = lruwrpr.New(seenAggregatedAttSize)
s.seenUnAggregatedAttestationCache = lruwrpr.New(seenUnaggregatedAttSize)
s.seenSyncMessageCache = lruwrpr.New(seenSyncMsgSize)

@@ -137,32 +137,13 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {

// New Gossip Topic in Deneb
if epoch >= params.BeaconConfig().DenebForkEpoch {
if features.Get().EnablePeerDAS {
if flags.Get().SubscribeToAllSubnets {
s.subscribeStaticWithSubnets(
p2p.DataColumnSubnetTopicFormat,
s.validateDataColumn, /* validator */
s.dataColumnSubscriber, /* message handler */
digest,
params.BeaconConfig().DataColumnSidecarSubnetCount,
)
} else {
s.subscribeDynamicWithColumnSubnets(
p2p.DataColumnSubnetTopicFormat,
s.validateDataColumn, /* validator */
s.dataColumnSubscriber, /* message handler */
digest,
)
}
} else {
s.subscribeStaticWithSubnets(
p2p.BlobSubnetTopicFormat,
s.validateBlob, /* validator */
s.blobSubscriber, /* message handler */
digest,
params.BeaconConfig().BlobsidecarSubnetCount,
)
}
s.subscribeStaticWithSubnets(
p2p.BlobSubnetTopicFormat,
s.validateBlob, /* validator */
s.blobSubscriber, /* message handler */
digest,
params.BeaconConfig().BlobsidecarSubnetCount,
)
}
}
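The "static" strategy kept above joins every subnet of a fixed count up front, while the removed dynamic variant resizes its subscriptions on a slot ticker (see the next hunk). A hedged sketch of the static loop; the topic format string and join callback are illustrative stand-ins for `p2p.BlobSubnetTopicFormat` and `subscribeWithBase`, whose exact shapes are not shown in this diff:

```go
package main

import "fmt"

// subscribeStatic joins subnet topics 0..subnetCount-1 by formatting the
// fork digest and subnet index into the topic template.
func subscribeStatic(topicFormat string, digest [4]byte, subnetCount uint64, join func(topic string)) {
	for idx := uint64(0); idx < subnetCount; idx++ {
		join(fmt.Sprintf(topicFormat, digest, idx))
	}
}

func main() {
	// Illustrative template; real gossip topics also carry an encoding suffix.
	subscribeStatic("/eth2/%x/blob_sidecar_%d", [4]byte{1, 2, 3, 4}, 6, func(t string) {
		fmt.Println("subscribed:", t)
	})
}
```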
@@ -665,87 +646,6 @@ func (s *Service) subscribeDynamicWithSyncSubnets(
|
||||
}()
|
||||
}
|
||||
|
||||
// subscribe missing subnets for our persistent columns.
|
||||
func (s *Service) subscribeColumnSubnet(
|
||||
subscriptions map[uint64]*pubsub.Subscription,
|
||||
idx uint64,
|
||||
digest [4]byte,
|
||||
validate wrappedVal,
|
||||
handle subHandler,
|
||||
) {
|
||||
// do not subscribe if we have no peers in the same
|
||||
// subnet
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(ðpb.DataColumnSidecar{})]
|
||||
subnetTopic := fmt.Sprintf(topic, digest, idx)
|
||||
// check if subscription exists and if not subscribe the relevant subnet.
|
||||
if _, exists := subscriptions[idx]; !exists {
|
||||
subscriptions[idx] = s.subscribeWithBase(subnetTopic, validate, handle)
|
||||
}
|
||||
if !s.validPeersExist(subnetTopic) {
|
||||
log.Debugf("No peers found subscribed to column gossip subnet with "+
|
||||
"column index %d. Searching network for peers subscribed to the subnet.", idx)
|
||||
_, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, subnetTopic, idx, flags.Get().MinimumPeersPerSubnet)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not search for peers")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) subscribeDynamicWithColumnSubnets(
|
||||
topicFormat string,
|
||||
validate wrappedVal,
|
||||
handle subHandler,
|
||||
digest [4]byte,
|
||||
) {
|
||||
genRoot := s.cfg.clock.GenesisValidatorsRoot()
|
||||
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
base := p2p.GossipTopicMappings(topicFormat, e)
|
||||
if base == nil {
|
||||
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topicFormat))
|
||||
}
|
||||
subscriptions := make(map[uint64]*pubsub.Subscription, params.BeaconConfig().DataColumnSidecarSubnetCount)
|
||||
genesis := s.cfg.clock.GenesisTime()
|
||||
ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
ticker.Done()
|
||||
return
|
||||
case <-ticker.C():
|
||||
if s.chainStarted.IsSet() && s.cfg.initialSync.Syncing() {
|
||||
continue
|
||||
}
|
||||
valid, err := isDigestValid(digest, genesis, genRoot)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
if !valid {
|
||||
log.Warnf("Column subnets with digest %#x are no longer valid, unsubscribing from all of them.", digest)
|
||||
// Unsubscribes from all our current subnets.
|
||||
s.reValidateSubscriptions(subscriptions, []uint64{}, topicFormat, digest)
|
||||
ticker.Done()
|
||||
return
|
||||
}
|
||||
|
||||
wantedSubs := s.retrieveActiveColumnSubnets()
|
||||
// Resize as appropriate.
|
||||
s.reValidateSubscriptions(subscriptions, wantedSubs, topicFormat, digest)
|
||||
|
||||
// subscribe desired column subnets.
|
||||
for _, idx := range wantedSubs {
|
||||
s.subscribeColumnSubnet(subscriptions, idx, digest, validate, handle)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// lookup peers for attester specific subnets.
|
||||
func (s *Service) lookupAttesterSubnets(digest [4]byte, idx uint64) {
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(ðpb.Attestation{})]
|
||||
@@ -797,14 +697,6 @@ func (*Service) retrieveActiveSyncSubnets(currEpoch primitives.Epoch) []uint64 {
|
||||
return slice.SetUint64(subs)
|
||||
}
|
||||
|
||||
func (*Service) retrieveActiveColumnSubnets() []uint64 {
|
||||
subs, ok, _ := cache.ColumnSubnetIDs.GetColumnSubnets()
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return subs
|
||||
}
|
||||
|
||||
// filters out required peers for the node to function, not
|
||||
// pruning peers who are in our attestation subnets.
|
||||
func (s *Service) filterNeededPeers(pids []peer.ID) []peer.ID {
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
func (s *Service) blobSubscriber(ctx context.Context, msg proto.Message) error {
|
||||
b, ok := msg.(blocks.VerifiedROBlob)
|
||||
if !ok {
|
||||
return fmt.Errorf("message was not type blocks.VerifiedROBlob, type=%T", msg)
|
||||
return fmt.Errorf("message was not type blocks.ROBlob, type=%T", msg)
|
||||
}
|
||||
|
||||
s.setSeenBlobIndex(b.Slot(), b.ProposerIndex(), b.Index)
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
opfeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) error {
|
||||
dc, ok := msg.(blocks.VerifiedRODataColumn)
|
||||
if !ok {
|
||||
return fmt.Errorf("message was not type blocks.VerifiedRODataColumn, type=%T", msg)
|
||||
}
|
||||
|
||||
s.setSeenDataColumnIndex(dc.SignedBlockHeader.Header.Slot, dc.SignedBlockHeader.Header.ProposerIndex, dc.ColumnIndex)
|
||||
|
||||
if err := s.cfg.chain.ReceiveDataColumn(ctx, dc); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: opfeed.DataColumnSidecarReceived,
|
||||
Data: &opfeed.DataColumnSidecarReceivedData{
|
||||
DataColumn: &dc,
|
||||
},
|
||||
})
|
||||
|
||||
return nil
|
||||
}
@@ -1,160 +0,0 @@
package sync

import (
	"context"
	"fmt"
	"strings"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
	coreBlocks "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	prysmTime "github.com/prysmaticlabs/prysm/v5/time"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	"github.com/sirupsen/logrus"
)

func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
	receivedTime := prysmTime.Now()

	if pid == s.cfg.p2p.PeerID() {
		return pubsub.ValidationAccept, nil
	}
	if s.cfg.initialSync.Syncing() {
		return pubsub.ValidationIgnore, nil
	}
	if msg.Topic == nil {
		return pubsub.ValidationReject, errInvalidTopic
	}
	m, err := s.decodePubsubMessage(msg)
	if err != nil {
		log.WithError(err).Error("Failed to decode message")
		return pubsub.ValidationReject, err
	}

	ds, ok := m.(*eth.DataColumnSidecar)
	if !ok {
		log.WithField("message", m).Error("Message is not of type *eth.DataColumnSidecar")
		return pubsub.ValidationReject, errWrongMessage
	}
	if ds.ColumnIndex >= params.BeaconConfig().NumberOfColumns {
		return pubsub.ValidationReject, errors.Errorf("invalid column index provided, got %d", ds.ColumnIndex)
	}
	want := fmt.Sprintf("data_column_sidecar_%d", computeSubnetForColumnSidecar(ds.ColumnIndex))
	if !strings.Contains(*msg.Topic, want) {
		log.Debug("Column sidecar index does not match topic")
		return pubsub.ValidationReject, fmt.Errorf("wrong topic name: %s", *msg.Topic)
	}
	if err := slots.VerifyTime(uint64(s.cfg.clock.GenesisTime().Unix()), ds.SignedBlockHeader.Header.Slot, params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
		log.WithError(err).Debug("Ignored sidecar: could not verify slot time")
		return pubsub.ValidationIgnore, nil
	}
	cp := s.cfg.chain.FinalizedCheckpt()
	startSlot, err := slots.EpochStart(cp.Epoch)
	if err != nil {
		log.WithError(err).Debug("Ignored column sidecar: could not calculate epoch start slot")
		return pubsub.ValidationIgnore, nil
	}
	if startSlot >= ds.SignedBlockHeader.Header.Slot {
		err := fmt.Errorf("finalized slot %d greater or equal to block slot %d", startSlot, ds.SignedBlockHeader.Header.Slot)
		log.Debug(err)
		return pubsub.ValidationIgnore, err
	}
	// Handle sidecar when the parent is unknown.
	if !s.cfg.chain.HasBlock(ctx, [32]byte(ds.SignedBlockHeader.Header.ParentRoot)) {
		err := errors.Errorf("unknown parent for data column sidecar with slot %d and parent root %#x", ds.SignedBlockHeader.Header.Slot, ds.SignedBlockHeader.Header.ParentRoot)
		log.WithError(err).Debug("Could not identify parent for data column sidecar")
		return pubsub.ValidationIgnore, err
	}
	if s.hasBadBlock([32]byte(ds.SignedBlockHeader.Header.ParentRoot)) {
		bRoot, err := ds.SignedBlockHeader.Header.HashTreeRoot()
		if err != nil {
			return pubsub.ValidationIgnore, err
		}
		s.setBadBlock(ctx, bRoot)
		return pubsub.ValidationReject, errors.Errorf("column sidecar with bad parent provided")
	}
	parentSlot, err := s.cfg.chain.RecentBlockSlot([32]byte(ds.SignedBlockHeader.Header.ParentRoot))
	if err != nil {
		return pubsub.ValidationIgnore, err
	}
	if ds.SignedBlockHeader.Header.Slot <= parentSlot {
		return pubsub.ValidationReject, errors.Errorf("invalid column sidecar slot: %d", ds.SignedBlockHeader.Header.Slot)
	}
	if !s.cfg.chain.InForkchoice([32]byte(ds.SignedBlockHeader.Header.ParentRoot)) {
		return pubsub.ValidationReject, blockchain.ErrNotDescendantOfFinalized
	}
	// TODO: Verify KZG inclusion proof of data column sidecar.

	// TODO: Verify KZG proofs of column sidecar.

	parentState, err := s.cfg.stateGen.StateByRoot(ctx, [32]byte(ds.SignedBlockHeader.Header.ParentRoot))
	if err != nil {
		return pubsub.ValidationIgnore, err
	}

	if err := coreBlocks.VerifyBlockHeaderSignatureUsingCurrentFork(parentState, ds.SignedBlockHeader); err != nil {
		return pubsub.ValidationReject, err
	}
	// In the event the block is more than an epoch ahead of its
	// parent state, we have to advance the state forward.
	parentRoot := ds.SignedBlockHeader.Header.ParentRoot
	parentState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, parentState, parentRoot, ds.SignedBlockHeader.Header.Slot)
	if err != nil {
		return pubsub.ValidationIgnore, err
	}
	idx, err := helpers.BeaconProposerIndex(ctx, parentState)
	if err != nil {
		return pubsub.ValidationIgnore, err
	}
	if ds.SignedBlockHeader.Header.ProposerIndex != idx {
		return pubsub.ValidationReject, errors.New("incorrect proposer index")
	}

	startTime, err := slots.ToTime(uint64(s.cfg.chain.GenesisTime().Unix()), ds.SignedBlockHeader.Header.Slot)
	if err != nil {
		return pubsub.ValidationIgnore, err
	}

	sinceSlotStartTime := receivedTime.Sub(startTime)
	validationTime := s.cfg.clock.Now().Sub(receivedTime)

	log.WithFields(logrus.Fields{
		"sinceSlotStartTime": sinceSlotStartTime,
		"validationTime":     validationTime,
	}).Debug("Received data column sidecar")

	// TODO: Transform this whole function so it looks like `validateBlob`,
	// with the tiny verifiers inside.
	roDataColumn, err := blocks.NewRODataColumn(ds)
	if err != nil {
		return pubsub.ValidationReject, errors.Wrap(err, "new RO data columns")
	}

	verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)

	msg.ValidatorData = verifiedRODataColumn
	return pubsub.ValidationAccept, nil
}
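The function above follows the usual three-way gossip validation contract: IGNORE for conditions that may resolve later (node still syncing, unknown parent), REJECT for provably invalid messages (bad column index, wrong topic, wrong proposer), ACCEPT otherwise. A reduced sketch of that triage, with illustrative names:

package main

import "fmt"

type ValidationResult int

const (
	Accept ValidationResult = iota
	Ignore
	Reject
)

// triage mirrors the ordering above: transient conditions are ignored,
// provable violations are rejected, everything else is accepted.
func triage(syncing, indexValid, parentKnown bool) ValidationResult {
	if syncing {
		return Ignore // transient: the message may validate once synced
	}
	if !indexValid {
		return Reject // provably invalid: the sender should be penalized
	}
	if !parentKnown {
		return Ignore // we may simply not have seen the parent yet
	}
	return Accept
}

func main() {
	fmt.Println(triage(false, true, true))  // 0 (Accept)
	fmt.Println(triage(false, false, true)) // 2 (Reject)
}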
// Sets the data column with the same slot, proposer index, and data column index as seen.
func (s *Service) setSeenDataColumnIndex(slot primitives.Slot, proposerIndex primitives.ValidatorIndex, index uint64) {
	s.seenDataColumnLock.Lock()
	defer s.seenDataColumnLock.Unlock()

	b := append(bytesutil.Bytes32(uint64(slot)), bytesutil.Bytes32(uint64(proposerIndex))...)
	b = append(b, bytesutil.Bytes32(index)...)
	s.seenDataColumnCache.Add(string(b), true)
}
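The cache key above is just a concatenation of fixed-width encodings of (slot, proposer index, column index). A standard-library sketch of the same idea; Prysm's bytesutil.Bytes32 is assumed here to pad each integer to 32 bytes, while 8-byte fields are already enough to keep the toy key unambiguous:

package main

import (
	"encoding/binary"
	"fmt"
)

// seenKey concatenates fixed-width encodings so that no two distinct
// (slot, proposer, column) triples can produce the same string.
func seenKey(slot, proposer, column uint64) string {
	b := make([]byte, 0, 24)
	b = binary.LittleEndian.AppendUint64(b, slot)
	b = binary.LittleEndian.AppendUint64(b, proposer)
	b = binary.LittleEndian.AppendUint64(b, column)
	return string(b)
}

func main() {
	fmt.Printf("%q\n", seenKey(42, 7, 3))
}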
func computeSubnetForColumnSidecar(colIdx uint64) uint64 {
	return colIdx % params.BeaconConfig().DataColumnSidecarSubnetCount
}
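A worked example of the column-to-subnet mapping, with a hypothetical DataColumnSidecarSubnetCount of 32 (the real value comes from the beacon chain config): columns 6, 38, and 70 all map to subnet 6.

package main

import "fmt"

// computeSubnet is the modulo mapping above with the subnet count passed in.
func computeSubnet(colIdx, subnetCount uint64) uint64 {
	return colIdx % subnetCount
}

func main() {
	for _, col := range []uint64{6, 38, 70} {
		fmt.Printf("column %d -> subnet %d\n", col, computeSubnet(col, 32))
	}
}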
@@ -46,12 +46,10 @@ func (batch *BlobBatchVerifier) VerifiedROBlobs(ctx context.Context, blk blocks.
	if len(scs) == 0 {
		return nil, nil
	}
	blkSig := blk.Signature()
	// We assume the proposer is validated wrt the block in batch block processing before performing the DA check.
	// So at this stage we just need to make sure the value being signed and signature bytes match the block.
	for i := range scs {
		blobSig := bytesutil.ToBytes96(scs[i].SignedBlockHeader.Signature)
		if blkSig != blobSig {
		if blk.Signature() != bytesutil.ToBytes96(scs[i].SignedBlockHeader.Signature) {
			return nil, ErrBatchSignatureMismatch
		}
		// Extra defensive check to make sure the roots match. This should be unnecessary in practice since the root from
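Either form of the comparison in the hunk above works because converting both signatures to fixed-size [96]byte arrays makes != a value comparison in Go. A self-contained sketch (toBytes96 is a stand-in for bytesutil.ToBytes96):

package main

import "fmt"

// toBytes96 zero-pads or truncates its input to exactly 96 bytes.
func toBytes96(b []byte) [96]byte {
	var out [96]byte
	copy(out[:], b)
	return out
}

func main() {
	blkSig := toBytes96([]byte("signature-A"))
	blobSig := toBytes96([]byte("signature-A"))
	fmt.Println(blkSig == blobSig) // true: arrays compare element-wise by value
}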
@@ -6,9 +6,10 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/jwt",
    visibility = ["//visibility:public"],
    deps = [
        "//api:go_default_library",
        "//cmd:go_default_library",
        "//crypto/rand:go_default_library",
        "//io/file:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
    ],

@@ -1,10 +1,12 @@
package jwt

import (
	"errors"
	"path/filepath"

	"github.com/prysmaticlabs/prysm/v5/api"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/prysmaticlabs/prysm/v5/cmd"
	"github.com/prysmaticlabs/prysm/v5/crypto/rand"
	"github.com/prysmaticlabs/prysm/v5/io/file"
	"github.com/sirupsen/logrus"
	"github.com/urfave/cli/v2"

@@ -50,7 +52,7 @@ func generateAuthSecretInFile(c *cli.Context) error {
			return err
		}
	}
	secret, err := api.GenerateRandomHexString()
	secret, err := generateRandomHexString()
	if err != nil {
		return err
	}

@@ -60,3 +62,15 @@ func generateAuthSecretInFile(c *cli.Context) error {
	logrus.Infof("Successfully wrote JSON-RPC authentication secret to file %s", fileName)
	return nil
}

func generateRandomHexString() (string, error) {
	secret := make([]byte, 32)
	randGen := rand.NewGenerator()
	n, err := randGen.Read(secret)
	if err != nil {
		return "", err
	} else if n <= 0 {
		return "", errors.New("rand: unexpected length")
	}
	return hexutil.Encode(secret), nil
}
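For the output shape of generateRandomHexString: 32 random bytes hex-encode to 64 characters, and the 0x prefix added by hexutil.Encode brings the secret to 66 characters. A standard-library equivalent, shown only to illustrate the format:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

func randomHexSecret() (string, error) {
	secret := make([]byte, 32)
	// crypto/rand.Read fills the whole slice or returns an error.
	if _, err := rand.Read(secret); err != nil {
		return "", err
	}
	return "0x" + hex.EncodeToString(secret), nil
}

func main() {
	s, err := randomHexSecret()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(s), s) // 66 0x...
}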
@@ -15,7 +15,6 @@ go_library(
        "//validator:__subpackages__",
    ],
    deps = [
        "//api:go_default_library",
        "//config/params:go_default_library",
        "//io/file:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",

@@ -8,7 +8,6 @@ import (
	"runtime"
	"time"

	"github.com/prysmaticlabs/prysm/v5/api"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/io/file"
	"github.com/urfave/cli/v2"

@@ -134,15 +133,6 @@ var (
		Usage: "Port used to listening and respond metrics for Prometheus.",
		Value: 8081,
	}

	// AuthTokenPathFlag defines the path to the auth token used to secure the validator api.
	AuthTokenPathFlag = &cli.StringFlag{
		Name:    "keymanager-token-file",
		Usage:   "Path to auth token file used for validator apis.",
		Value:   filepath.Join(filepath.Join(DefaultValidatorDir(), WalletDefaultDirName), api.AuthTokenFileName),
		Aliases: []string{"validator-api-bearer-file"},
	}

	// WalletDirFlag defines the path to a wallet directory for Prysm accounts.
	WalletDirFlag = &cli.StringFlag{
		Name: "wallet-dir",

@@ -75,7 +75,6 @@ var appFlags = []cli.Flag{
	flags.EnableWebFlag,
	flags.GraffitiFileFlag,
	flags.EnableDistributed,
	flags.AuthTokenPathFlag,
	// Consensys' Web3Signer flags
	flags.Web3SignerURLFlag,
	flags.Web3SignerPublicValidatorKeysFlag,

@@ -123,7 +123,6 @@ var appHelpFlagGroups = []flagGroup{
		flags.BuilderGasLimitFlag,
		flags.ValidatorsRegistrationBatchSizeFlag,
		flags.EnableDistributed,
		flags.AuthTokenPathFlag,
	},
},
{

@@ -9,7 +9,6 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v5/cmd/validator/web",
    visibility = ["//visibility:public"],
    deps = [
        "//api:go_default_library",
        "//cmd:go_default_library",
        "//cmd/validator/flags:go_default_library",
        "//config/features:go_default_library",

@@ -2,9 +2,7 @@ package web

import (
	"fmt"
	"path/filepath"

	"github.com/prysmaticlabs/prysm/v5/api"
	"github.com/prysmaticlabs/prysm/v5/cmd"
	"github.com/prysmaticlabs/prysm/v5/cmd/validator/flags"
	"github.com/prysmaticlabs/prysm/v5/config/features"

@@ -26,7 +24,6 @@ var Commands = &cli.Command{
		flags.WalletDirFlag,
		flags.GRPCGatewayHost,
		flags.GRPCGatewayPort,
		flags.AuthTokenPathFlag,
		cmd.AcceptTosFlag,
	}),
	Before: func(cliCtx *cli.Context) error {

@@ -46,12 +43,7 @@ var Commands = &cli.Command{
		gatewayHost := cliCtx.String(flags.GRPCGatewayHost.Name)
		gatewayPort := cliCtx.Int(flags.GRPCGatewayPort.Name)
		validatorWebAddr := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
		authTokenPath := filepath.Join(walletDirPath, api.AuthTokenFileName)
		tempAuthTokenPath := cliCtx.String(flags.AuthTokenPathFlag.Name)
		if tempAuthTokenPath != "" {
			authTokenPath = tempAuthTokenPath
		}
		if err := rpc.CreateAuthToken(authTokenPath, validatorWebAddr); err != nil {
		if err := rpc.CreateAuthToken(walletDirPath, validatorWebAddr); err != nil {
			log.WithError(err).Fatal("Could not create web auth token")
		}
		return nil
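The new lines above derive the token path from the wallet directory, then let a non-empty --keymanager-token-file value override it. That default-with-override resolution in isolation (hypothetical paths and file name):

package main

import (
	"fmt"
	"path/filepath"
)

// resolveAuthTokenPath starts from the default location under the wallet
// directory and lets an explicit flag value win over the default.
func resolveAuthTokenPath(walletDir, flagValue, tokenFileName string) string {
	path := filepath.Join(walletDir, tokenFileName)
	if flagValue != "" {
		path = flagValue
	}
	return path
}

func main() {
	fmt.Println(resolveAuthTokenPath("/home/user/.eth2validators/prysm-wallet-v2", "", "auth-token"))
	fmt.Println(resolveAuthTokenPath("/home/user/.eth2validators/prysm-wallet-v2", "/tmp/token", "auth-token"))
}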
@@ -73,8 +73,6 @@ type Flags struct {
	PrepareAllPayloads bool // PrepareAllPayloads informs the engine to prepare a block on every slot.
	// BlobSaveFsync requires blob saving to block on fsync to ensure blobs are durably persisted before passing DA.
	BlobSaveFsync bool
	// EnablePeerDAS enables running the node with the experimental data availability sampling scheme.
	EnablePeerDAS bool

	SaveInvalidBlock bool // SaveInvalidBlock saves invalid block to temp.
	SaveInvalidBlob  bool // SaveInvalidBlob saves invalid blob to temp.

@@ -261,10 +259,6 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
		logEnabled(EnableQUIC)
		cfg.EnableQUIC = true
	}
	if ctx.IsSet(EnablePeerDAS.Name) {
		logEnabled(EnablePeerDAS)
		cfg.EnablePeerDAS = true
	}

	cfg.AggregateIntervals = [3]time.Duration{aggregateFirstInterval.Value, aggregateSecondInterval.Value, aggregateThirdInterval.Value}
	Init(cfg)
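The EnablePeerDAS wiring above is the repo's usual feature-flag gating: a registered cli.BoolFlag is checked with ctx.IsSet and copied into the feature config. A compact, runnable sketch of the same pattern using urfave/cli v2 directly (toy config struct; only the flag name is taken from the diff):

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

type featureConfig struct {
	EnablePeerDAS bool
}

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.BoolFlag{Name: "peer-das", Usage: "Enables the experimental peer data availability sampling scheme."},
		},
		Action: func(ctx *cli.Context) error {
			cfg := &featureConfig{}
			// Gate the feature on the flag being explicitly set.
			if ctx.IsSet("peer-das") {
				cfg.EnablePeerDAS = true
			}
			fmt.Printf("EnablePeerDAS=%v\n", cfg.EnablePeerDAS)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}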
@@ -170,10 +170,6 @@ var (
		Name:  "enable-quic",
		Usage: "Enables connection using the QUIC protocol for peers which support it.",
	}
	EnablePeerDAS = &cli.BoolFlag{
		Name:  "peer-das",
		Usage: "Enables Prysm to run with the experimental peer data availability sampling scheme.",
	}
)

// devModeFlags holds list of flags that are set when development mode is on.

@@ -232,7 +228,6 @@ var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []cli.Flag{
	EnableLightClient,
	BlobSaveFsync,
	EnableQUIC,
	EnablePeerDAS,
}...)...)

// E2EBeaconChainFlags contains a list of the beacon chain feature flags to be tested in E2E.