Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: v5.0.2...peerDASHac (65 commits)
Commit SHA1s:
ed55928153
c3598c6939
f2d956d688
a5af4d8a90
5976319a85
3fdb85a37d
6c22814ed1
f2019dd053
92de4a1274
e498183d74
9a98e194c0
ebcc1404b6
2bef9563a1
a6f134e48e
fdbb5136d9
2c66918594
a0dac292ff
75857e7177
0369f70b0b
276b97530f
d5daf49a9a
feb16ae4aa
219301339c
aec349f75a
5f909caedf
ba6dff3adb
8cd05f098b
425f5387fa
f2ce115ade
090a3e1ded
c0acb7d352
0d6070e6fc
bd00f851f0
1a0c07deec
04f231a400
be1bfcce63
8cf5d79852
f7912e7c20
caa8be5dd1
0c15a30a34
7bce1c0714
d1084cbe48
2cc3f69a3f
a861489a83
0e1c585f7d
9df20e616c
53fdd2d062
2b4bb5d890
38f208d70d
65b90abdda
f3b49d4eaf
5b1da7353c
9f17e65860
9b2d53b0d1
d6f9196707
1b0e09369e
12482eeb40
acc307b959
c1d75c295a
fad118cb04
cdd1d819df
97edffaff5
6de7df6b9d
14d7416c16
6782df917a
.bazelrc (1 change)
@@ -22,6 +22,7 @@ coverage --define=coverage_enabled=1
build --workspace_status_command=./hack/workspace_status.sh

build --define blst_disabled=false
build --compilation_mode=opt
run --define blst_disabled=false

build:blst_disabled --define blst_disabled=true
.gitignore, vendored (3 changes)
@@ -41,3 +41,6 @@ jwt.hex

# manual testing
tmp

# spectest coverage reports
report.txt
WORKSPACE (44 changes)
@@ -29,23 +29,7 @@ http_archive(

load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")

# Temporarily use a nightly build until 0.12.0 is released.
# See: https://github.com/prysmaticlabs/prysm/issues/13130
zig_toolchains(
host_platform_sha256 = {
"linux-aarch64": "45afb8e32adde825165f4f293fcea9ecea503f7f9ec0e9bf4435afe70e67fb70",
"linux-x86_64": "f136c6a8a0f6adcb057d73615fbcd6f88281b3593f7008d5f7ed514ff925c02e",
"macos-aarch64": "05d995853c05243151deff47b60bdc2674f1e794a939eaeca0f42312da031cee",
"macos-x86_64": "721754ba5a50f31e8a1f0e1a74cace26f8246576878ac4a8591b0ee7b6db1fc1",
"windows-x86_64": "93f5248b2ea8c5ee8175e15b1384e133edc1cd49870b3ea259062a2e04164343",
},
url_formats = [
"https://ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
"https://mirror.bazel.build/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
"https://prysmaticlabs.com/mirror/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
],
version = "0.12.0-dev.1349+fa022d1ec",
)
zig_toolchains()

# Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April, 2025.
# For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc

@@ -243,7 +227,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.4.0"
consensus_spec_version = "v1.5.0-alpha.0"

bls_test_version = "v0.1.1"

@@ -259,7 +243,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "c282c0f86f23f3d2e0f71f5975769a4077e62a7e3c7382a16bd26a7e589811a0",
sha256 = "33c5547772b6d8d6f041dff7e7d26b0358c2392daed34394a3aa81147812a81c",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -275,7 +259,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "4649c35aa3b8eb0cfdc81bee7c05649f90ef36bede5b0513e1f2e8baf37d6033",
sha256 = "06f286199cf2fedd4700487fb8feb0904e0ae18daaa4b3f70ea430ca9c388167",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -291,7 +275,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "c5a03f724f757456ffaabd2a899992a71d2baf45ee4db65ca3518f2b7ee928c8",
sha256 = "5f2a4452b323075eba6bf950003f7d91fd04ebcbde5bd087beafb5d6f6325ad4",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -306,7 +290,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "cd1c9d97baccbdde1d2454a7dceb8c6c61192a3b581eee12ffc94969f2db8453",
sha256 = "fd7e83e8cbeb3e297f2aeb93776305f7d606272c97834d8d9be673984501ed36",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

@@ -342,22 +326,6 @@ filegroup(
url = "https://github.com/eth-clients/eth2-networks/archive/934c948e69205dcf2deb87e4ae6cc140c335f94d.tar.gz",
)

http_archive(
name = "goerli_testnet",
build_file_content = """
filegroup(
name = "configs",
srcs = [
"prater/config.yaml",
],
visibility = ["//visibility:public"],
)
""",
sha256 = "43fc0f55ddff7b511713e2de07aa22846a67432df997296fb4fc09cd8ed1dcdb",
strip_prefix = "goerli-6522ac6684693740cd4ddcc2a0662e03702aa4a1",
url = "https://github.com/eth-clients/goerli/archive/6522ac6684693740cd4ddcc2a0662e03702aa4a1.tar.gz",
)

http_archive(
name = "holesky_testnet",
build_file_content = """
@@ -1,11 +1,24 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = [
"constants.go",
"headers.go",
"jwt.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api",
visibility = ["//visibility:public"],
deps = [
"//crypto/rand:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = ["jwt_test.go"],
embed = [":go_default_library"],
deps = ["//testing/require:go_default_library"],
)

@@ -4,4 +4,6 @@ const (
WebUrlPrefix = "/v2/validator/"
WebApiUrlPrefix = "/api/v2/validator/"
KeymanagerApiPrefix = "/eth/v1"

AuthTokenFileName = "auth-token"
)
api/jwt.go (new file, 32 lines)
@@ -0,0 +1,32 @@
package api

import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
)

// GenerateRandomHexString generates a random hex string that follows the standards for jwt token
// used for beacon node -> execution client
// used for web client -> validator client
func GenerateRandomHexString() (string, error) {
secret := make([]byte, 32)
randGen := rand.NewGenerator()
n, err := randGen.Read(secret)
if err != nil {
return "", err
} else if n != 32 {
return "", errors.New("rand: unexpected length")
}
return hexutil.Encode(secret), nil
}

// ValidateAuthToken validating auth token for web
func ValidateAuthToken(token string) error {
b, err := hexutil.Decode(token)
// token should be hex-encoded and at least 256 bits
if err != nil || len(b) < 32 {
return errors.New("invalid auth token: token should be hex-encoded and at least 256 bits")
}
return nil
}
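For orientation, a minimal standalone sketch (not part of the diff) of how the two new helpers are meant to be used together; the import path is taken from the importpath in the BUILD.bazel hunk above:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/api"
)

func main() {
	// GenerateRandomHexString returns a 0x-prefixed, hex-encoded 32-byte secret (66 characters).
	token, err := api.GenerateRandomHexString()
	if err != nil {
		panic(err)
	}
	// A freshly generated token decodes to 32 bytes, so it always passes validation.
	if err := api.ValidateAuthToken(token); err != nil {
		panic(err)
	}
	fmt.Println(token)
}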
api/jwt_test.go (new file, 13 lines)
@@ -0,0 +1,13 @@
package api

import (
"testing"

"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestGenerateRandomHexString(t *testing.T) {
token, err := GenerateRandomHexString()
require.NoError(t, err)
require.NoError(t, ValidateAuthToken(token))
}
@@ -88,7 +88,7 @@ func TestToggle(t *testing.T) {
}
}

func TestToogleMultipleTimes(t *testing.T) {
func TestToggleMultipleTimes(t *testing.T) {
t.Parallel()

v := New()

@@ -101,16 +101,16 @@ func TestToogleMultipleTimes(t *testing.T) {

expected := i%2 != 0
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toogle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
t.Fatalf("AtomicBool.Toggle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
}

if pre == v.IsSet() {
t.Fatalf("AtomicBool.Toogle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
t.Fatalf("AtomicBool.Toggle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
}
}
}

func TestToogleAfterOverflow(t *testing.T) {
func TestToggleAfterOverflow(t *testing.T) {
t.Parallel()

var value int32 = math.MaxInt32

@@ -122,7 +122,7 @@ func TestToogleAfterOverflow(t *testing.T) {
v.Toggle()
expected := math.MaxInt32%2 == 0
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toogle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
t.Fatalf("AtomicBool.Toggle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
}

// make sure overflow happened
@@ -135,7 +135,7 @@ func TestToogleAfterOverflow(t *testing.T) {
v.Toggle()
expected = !expected
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toogle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
t.Fatalf("AtomicBool.Toggle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
}
}
@@ -20,6 +20,7 @@ package event
import (
"errors"
"reflect"
"slices"
"sync"
)

@@ -219,12 +220,9 @@ type caseList []reflect.SelectCase

// find returns the index of a case containing the given channel.
func (cs caseList) find(channel interface{}) int {
for i, cas := range cs {
if cas.Chan.Interface() == channel {
return i
}
}
return -1
return slices.IndexFunc(cs, func(selectCase reflect.SelectCase) bool {
return selectCase.Chan.Interface() == channel
})
}

// delete removes the given case from cs.
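The refactor above swaps a hand-written scan for the standard library's slices.IndexFunc, which has the same contract. A small self-contained illustration of that contract (not Prysm code):

package main

import (
	"fmt"
	"slices"
)

func main() {
	xs := []int{2, 4, 7, 8}
	// Index of the first element matching the predicate, like the old loop in caseList.find.
	fmt.Println(slices.IndexFunc(xs, func(x int) bool { return x%2 != 0 })) // 2
	// -1 when nothing matches, mirroring the removed "return -1" branch.
	fmt.Println(slices.IndexFunc(xs, func(x int) bool { return x > 100 })) // -1
}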
@@ -63,7 +63,7 @@ func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, err
return results, nil
}

// calculateChunkSize calculates a suitable chunk size for the purposes of parallelisation.
// calculateChunkSize calculates a suitable chunk size for the purposes of parallelization.
func calculateChunkSize(items int) int {
// Start with a simple even split
chunkSize := items / runtime.GOMAXPROCS(0)
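Only the first line of calculateChunkSize is visible in this hunk. As a rough illustration of that even-split starting point (the clamp to 1 below is an assumption for the sketch, not taken from the Prysm implementation):

package main

import (
	"fmt"
	"runtime"
)

// evenChunk mirrors the visible first step of calculateChunkSize: divide the
// item count evenly across the available CPUs, never returning less than 1.
func evenChunk(items int) int {
	chunkSize := items / runtime.GOMAXPROCS(0)
	if chunkSize < 1 {
		return 1
	}
	return chunkSize
}

func main() {
	fmt.Println(evenChunk(100)) // 12 when GOMAXPROCS is 8
}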
bazel.sh (2 changes)
@@ -2,7 +2,7 @@

# This script serves as a wrapper around bazel to limit the scope of environment variables that
# may change the action output. Using this script should result in a higher cache hit ratio for
# cached actions with a more heremtic build.
# cached actions with a more hermetic build.

env -i \
PATH=/usr/bin:/bin \
@@ -26,6 +26,7 @@ go_library(
"receive_attestation.go",
"receive_blob.go",
"receive_block.go",
"receive_data_column.go",
"service.go",
"tracked_proposer.go",
"weak_subjectivity_checks.go",
@@ -48,6 +49,7 @@ go_library(
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -157,6 +159,7 @@ go_test(
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
@@ -11,7 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -399,14 +398,6 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}

// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
}

// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
@@ -518,13 +509,6 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
return ar[:], nil
}

// SetOptimisticToInvalid wraps the corresponding method in forkchoice
func (s *Service) SetOptimisticToInvalid(ctx context.Context, root, parent, lvh [32]byte) ([][32]byte, error) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parent, lvh)
}

// SetGenesisTime sets the genesis time of beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t
@@ -33,6 +33,7 @@ var (
)

var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")

// An invalid block is the block that fails state transition based on the core protocol rules.
// The beacon node shall not be accepting nor building blocks that branch off from an invalid block.
@@ -256,7 +256,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
newPayloadInvalidNodeCount.Inc()
invalidRoots, err := s.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
if err != nil {
return err
}
@@ -61,7 +61,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
require.NoError(t, err)

// NextSyncCommittee should be be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
// NextSyncCommittee should be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
require.NotEqual(t, 0, len(indices))
}
@@ -118,9 +118,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
}

// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
func WithP2PBroadcaster(p p2p.Acceser) Option {
return func(s *Service) error {
s.cfg.P2p = p
s.cfg.P2P = p
return nil
}
}
@@ -6,6 +6,7 @@ import (
"time"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"

@@ -514,12 +515,35 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
return missing, nil
}

func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
if len(expected) == 0 {
return nil, nil
}
if len(expected) > int(params.BeaconConfig().NumberOfColumns) {
return nil, errMaxDataColumnsExceeded
}
indices, err := bs.ColumnIndices(root)
if err != nil {
return nil, err
}
missing := make(map[uint64]bool, len(expected))
for col := range expected {
if !indices[col] {
missing[col] = true
}
}
return missing, nil
}

// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
// The function will first check the database to see if all sidecars have been persisted. If any
// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
if features.Get().EnablePeerDAS {
return s.isDataAvailableDataColumns(ctx, root, signed)
}
if signed.Version() < version.Deneb {
return nil
}
@@ -591,6 +615,86 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
}
}

func (s *Service) isDataAvailableDataColumns(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
if signed.Version() < version.Deneb {
return nil
}

block := signed.Block()
if block == nil {
return errors.New("invalid nil beacon block")
}
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
return nil
}
body := block.Body()
if body == nil {
return errors.New("invalid nil beacon block body")
}
kzgCommitments, err := body.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "could not get KZG commitments")
}
// If the block has no commitments there is nothing to wait for.
if len(kzgCommitments) == 0 {
return nil
}

colMap, err := peerdas.CustodyColumns(s.cfg.P2P.NodeID(), params.BeaconConfig().CustodyRequirement)
if err != nil {
return err
}
// expected is the number of custodied data columns a node is expected to have.
expected := len(colMap)
if expected == 0 {
return nil
}
// get a map of data column indices that are not currently available.
missing, err := missingDataColumns(s.blobStorage, root, colMap)
if err != nil {
return err
}
// If there are no missing indices, all data column sidecars are available.
if len(missing) == 0 {
return nil
}

// The gossip handler for data columns writes the index of each verified data column referencing the given
// root to the channel returned by blobNotifiers.forRoot.
nc := s.blobNotifiers.forRoot(root)

// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
nst := time.AfterFunc(time.Until(nextSlot), func() {
if len(missing) == 0 {
return
}
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
Error("Still waiting for DA check at slot end.")
})
defer nst.Stop()
}
for {
select {
case idx := <-nc:
// Delete each index seen in the notification channel.
delete(missing, idx)
// Read from the channel until there are no more missing sidecars.
if len(missing) > 0 {
continue
}
// Once all sidecars have been observed, clean up the notification channel.
s.blobNotifiers.delete(root)
return nil
case <-ctx.Done():
return errors.Wrapf(ctx.Err(), "context deadline waiting for blob sidecars slot: %d, BlockRoot: %#x", block.Slot(), root)
}
}
}

func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
return logrus.Fields{
"slot": slot,
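The core of the new DA check is a set difference over column indices: custodied columns that are not yet in blob storage stay in the missing map until the notifier channel reports them. A minimal sketch of that step in isolation (hypothetical helper name, not the Prysm API):

package main

import "fmt"

// missingColumns keeps every expected column index that is not already stored,
// mirroring the map-difference loop inside missingDataColumns.
func missingColumns(expected, stored map[uint64]bool) map[uint64]bool {
	missing := make(map[uint64]bool, len(expected))
	for col := range expected {
		if !stored[col] {
			missing[col] = true
		}
	}
	return missing
}

func main() {
	expected := map[uint64]bool{3: true, 17: true, 42: true}
	stored := map[uint64]bool{17: true}
	fmt.Println(missingColumns(expected, stored)) // map[3:true 42:true]
}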
@@ -60,7 +60,7 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu

// logNonCanonicalBlockReceived prints a message informing that the received
// block is not the head of the chain. It requires the caller holds a lock on
// Foprkchoice.
// Forkchoice.
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
if err != nil {
@@ -50,6 +50,12 @@ type BlobReceiver interface {
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
}

// DataColumnReceiver interface defines the methods of chain service for receiving new
// data columns
type DataColumnReceiver interface {
ReceiveDataColumn(context.Context, blocks.VerifiedRODataColumn) error
}

// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashings *ethpb.AttesterSlashing)
@@ -170,7 +176,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// Send finalized events and finalized deposits in the background
if newFinalized {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
go s.sendNewFinalizedEvent(blockCopy, postState)
go s.sendNewFinalizedEvent(ctx, postState)
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
s.insertFinalizedDeposits(depCtx, finalized.Root)
@@ -443,7 +449,7 @@ func (s *Service) updateFinalizationOnBlock(ctx context.Context, preState, postS

// sendNewFinalizedEvent sends a new finalization checkpoint event over the
// event feed. It needs to be called on the background
func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBlock, postState state.BeaconState) {
func (s *Service) sendNewFinalizedEvent(ctx context.Context, postState state.BeaconState) {
isValidPayload := false
s.headLock.RLock()
if s.head != nil {
@@ -451,8 +457,17 @@ func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBl
}
s.headLock.RUnlock()

blk, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root))
if err != nil {
log.WithError(err).Error("Could not retrieve block for finalized checkpoint root. Finalized event will not be emitted")
return
}
if blk == nil || blk.IsNil() || blk.Block() == nil || blk.Block().IsNil() {
log.WithError(err).Error("Block retrieved for finalized checkpoint root is nil. Finalized event will not be emitted")
return
}
stateRoot := blk.Block().StateRoot()
// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{
@@ -488,7 +503,10 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
if err != nil {
return false, s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
s.cfg.ForkChoiceStore.Lock()
err = s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
s.cfg.ForkChoiceStore.Unlock()
return false, err
}
if signed.Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {

@@ -8,12 +8,14 @@ import (

blockchainTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -378,3 +380,38 @@ func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
require.Equal(t, false, pool.ValidatorExists(idx))
})
}

func Test_sendNewFinalizedEvent(t *testing.T) {
s, _ := minimalTestService(t)
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
s.cfg.StateNotifier = notifier
finalizedSt, err := util.NewBeaconState()
require.NoError(t, err)
finalizedStRoot, err := finalizedSt.HashTreeRoot(s.ctx)
require.NoError(t, err)
b := util.NewBeaconBlock()
b.Block.StateRoot = finalizedStRoot[:]
sbb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
sbbRoot, err := sbb.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.cfg.BeaconDB.SaveBlock(s.ctx, sbb))
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetFinalizedCheckpoint(&ethpb.Checkpoint{
Epoch: 123,
Root: sbbRoot[:],
}))

s.sendNewFinalizedEvent(s.ctx, st)

require.Equal(t, 1, len(notifier.ReceivedEvents()))
e := notifier.ReceivedEvents()[0]
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
require.Equal(t, true, ok, "event has wrong data type")
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
assert.DeepEqual(t, sbbRoot[:], fc.Block)
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
assert.Equal(t, false, fc.ExecutionOptimistic)
}
beacon-chain/blockchain/receive_data_column.go (new file, 18 lines)
@@ -0,0 +1,18 @@
package blockchain

import (
"context"

"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)

func (s *Service) ReceiveDataColumn(ctx context.Context, ds blocks.VerifiedRODataColumn) error {
if err := s.blobStorage.SaveDataColumn(ds); err != nil {
return err
}

// TODO use a custom event or new method for data columns. For speed
// we are simply reusing blob paths here.
s.sendNewBlobEvent(ds.BlockRoot(), uint64(ds.SignedBlockHeader.Header.Slot))
return nil
}
@@ -82,7 +82,7 @@ type config struct {
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
P2p p2p.Broadcaster
P2P p2p.Acceser
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer

@@ -95,7 +95,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
WithAttestationPool(attestations.NewPool()),
WithSlashingPool(slashings.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithP2PBroadcaster(&mockBroadcaster{}),
WithP2PBroadcaster(&mockAccesser{}),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fc),
WithAttestationService(attService),
@@ -18,6 +18,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
p2pTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -44,6 +45,11 @@ type mockBroadcaster struct {
broadcastCalled bool
}

type mockAccesser struct {
mockBroadcaster
p2pTesting.MockPeerManager
}

func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
mb.broadcastCalled = true
return nil
@@ -64,6 +70,11 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
return nil
}

func (mb *mockBroadcaster) BroadcastDataColumn(_ context.Context, _ uint64, _ *ethpb.DataColumnSidecar) error {
mb.broadcastCalled = true
return nil
}

func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}

@@ -628,6 +628,11 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
return nil
}

// ReceiveDataColumn implements the same method in chain service
func (c *ChainService) ReceiveDataColumn(_ context.Context, _ blocks.VerifiedRODataColumn) error {
return nil
}

// TargetRootForEpoch mocks the same method in the chain service
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil
beacon-chain/cache/BUILD.bazel, vendored (1 change)
@@ -7,6 +7,7 @@ go_library(
"active_balance_disabled.go", # keep
"attestation_data.go",
"checkpoint_state.go",
"column_subnet_ids.go",
"committee.go",
"committee_disabled.go", # keep
"committees.go",
beacon-chain/cache/column_subnet_ids.go, vendored (new file, 65 lines)
@@ -0,0 +1,65 @@
package cache

import (
"sync"
"time"

"github.com/patrickmn/go-cache"
"github.com/prysmaticlabs/prysm/v5/config/params"
)

type columnSubnetIDs struct {
colSubCache *cache.Cache
colSubLock sync.RWMutex
}

// ColumnSubnetIDs for column subnet participants
var ColumnSubnetIDs = newColumnSubnetIDs()

const columnKey = "columns"

func newColumnSubnetIDs() *columnSubnetIDs {
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
// Set the default duration of a column subnet subscription as the column expiry period.
subLength := epochDuration * time.Duration(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
return &columnSubnetIDs{colSubCache: persistentCache}
}

// GetColumnSubnets retrieves the data column subnets.
func (s *columnSubnetIDs) GetColumnSubnets() ([]uint64, bool, time.Time) {
s.colSubLock.RLock()
defer s.colSubLock.RUnlock()

id, duration, ok := s.colSubCache.GetWithExpiration(columnKey)
if !ok {
return nil, false, time.Time{}
}
// Retrieve indices from the cache.
idxs, ok := id.([]uint64)
if !ok {
return nil, false, time.Time{}
}

return idxs, ok, duration
}

// AddColumnSubnets adds the relevant data column subnets.
func (s *columnSubnetIDs) AddColumnSubnets(colIdx []uint64) {
s.colSubLock.Lock()
defer s.colSubLock.Unlock()

s.colSubCache.Set(columnKey, colIdx, 0)
}

// EmptyAllCaches empties out all the related caches and flushes any stored
// entries on them. This should only ever be used for testing, in normal
// production, handling of the relevant subnets for each role is done
// separately.
func (s *columnSubnetIDs) EmptyAllCaches() {
// Clear the cache.
s.colSubLock.Lock()
defer s.colSubLock.Unlock()

s.colSubCache.Flush()
}
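A minimal usage sketch (not part of the diff) of the exported surface added here, assuming the package is imported under its beacon-chain/cache path:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
)

func main() {
	// Record the data column subnets this node participates in.
	cache.ColumnSubnetIDs.AddColumnSubnets([]uint64{0, 7, 63})
	// Read them back together with the cache entry's expiration time.
	ids, ok, expiry := cache.ColumnSubnetIDs.GetColumnSubnets()
	fmt.Println(ids, ok, expiry)
}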
@@ -804,7 +804,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
assert.NoError(t, err)

// Perform this in a non-sensical ordering
// Perform this in a nonsensical ordering
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 10, [32]byte{}, 0))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 3, [32]byte{}, 0))

@@ -784,7 +784,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
assert.NoError(t, err)

// Perform this in a non-sensical ordering
// Perform this in a nonsensical ordering
err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
@@ -96,6 +96,24 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
currentEpoch := slots.ToEpoch(header.Header.Slot)
fork, err := forks.Fork(currentEpoch)
if err != nil {
return err
}
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return err
}
proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
if err != nil {
return err
}
proposerPubKey := proposer.PublicKey
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.
@@ -32,6 +32,9 @@ const (

// AttesterSlashingReceived is sent after an attester slashing is received from gossip or rpc
AttesterSlashingReceived = 8

// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
DataColumnSidecarReceived = 9
)

// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -77,3 +80,7 @@ type ProposerSlashingReceivedData struct {
type AttesterSlashingReceivedData struct {
AttesterSlashing *ethpb.AttesterSlashing
}

type DataColumnSidecarReceivedData struct {
DataColumn *blocks.VerifiedRODataColumn
}
@@ -50,6 +50,8 @@ go_test(
"attestation_test.go",
"beacon_committee_test.go",
"block_test.go",
"private_access_fuzz_noop_test.go", # keep
"private_access_test.go",
"randao_test.go",
"rewards_penalties_test.go",
"shuffle_test.go",
@@ -12,7 +12,6 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
)

var (
@@ -133,9 +132,6 @@ func ComputeSubnetFromCommitteeAndSlot(activeValCount uint64, comIdx primitives.
//
// In the attestation must be within the range of 95 to 102 in the example above.
func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
if err := slots.ValidateClock(attSlot, uint64(genesisTime.Unix())); err != nil {
return err
}
attTime, err := slots.ToTime(uint64(genesisTime.Unix()), attSlot)
if err != nil {
return err
@@ -182,24 +178,15 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
}

// EIP-7045: Starting in Deneb, allow any attestations from the current or previous epoch.

currentEpoch := slots.ToEpoch(currentSlot)
prevEpoch, err := currentEpoch.SafeSub(1)
if err != nil {
log.WithError(err).Debug("Ignoring underflow for a deneb attestation inclusion check in epoch 0")
prevEpoch = 0
}
attSlotEpoch := slots.ToEpoch(attSlot)
if attSlotEpoch != currentEpoch && attSlotEpoch != prevEpoch {
if attEpoch+1 < currentEpoch {
attError = fmt.Errorf(
"attestation epoch %d not within current epoch %d or previous epoch %d",
attSlot/params.BeaconConfig().SlotsPerEpoch,
"attestation epoch %d not within current epoch %d or previous epoch",
attEpoch,
currentEpoch,
prevEpoch,
)
return errors.Join(ErrTooLate, attError)
}

return nil
}
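The replacement check reduces to a single comparison: an attestation is too late only if its epoch is older than the previous epoch. A tiny standalone sketch of that rule, using the same numbers as the updated test case below:

package main

import "fmt"

// tooLate applies the EIP-7045 style rule from the diff: attestations from the
// current or previous epoch are accepted, anything older is rejected.
func tooLate(attEpoch, currentEpoch uint64) bool {
	return attEpoch+1 < currentEpoch
}

func main() {
	fmt.Println(tooLate(8, 15))  // true, matches the "epoch 8 not within current epoch 15" test case
	fmt.Println(tooLate(14, 15)) // false, the previous epoch is still valid
}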
@@ -197,7 +197,7 @@ func Test_ValidateAttestationTime(t *testing.T) {
-500 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
).Add(200 * time.Millisecond),
},
wantedErr: "attestation epoch 8 not within current epoch 15 or previous epoch 14",
wantedErr: "attestation epoch 8 not within current epoch 15 or previous epoch",
},
{
name: "attestation.slot is well beyond current slot",
@@ -205,7 +205,7 @@ func Test_ValidateAttestationTime(t *testing.T) {
attSlot: 1 << 32,
genesisTime: prysmTime.Now().Add(-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
},
wantedErr: "which exceeds max allowed value relative to the local clock",
wantedErr: "attestation slot 4294967296 not within attestation propagation range of 0 to 15 (current slot)",
},
}
for _, tt := range tests {
@@ -140,7 +140,7 @@ func BeaconCommittee(
}
count := committeesPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch)

return computeCommittee(validatorIndices, seed, indexOffset, count)
return ComputeCommittee(validatorIndices, seed, indexOffset, count)
}

// CommitteeAssignmentContainer represents a committee list, committee index, and to be attested slot for a given epoch.
@@ -359,7 +359,7 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
if err != nil {
return err
}
proposerIndices, err := precomputeProposerIndices(state, indices, epoch)
proposerIndices, err := PrecomputeProposerIndices(state, indices, epoch)
if err != nil {
return err
}
@@ -409,7 +409,7 @@ func ClearCache() {
balanceCache.Clear()
}

// computeCommittee returns the requested shuffled committee out of the total committees using
// ComputeCommittee returns the requested shuffled committee out of the total committees using
// validator indices and seed.
//
// Spec pseudocode definition:
@@ -424,7 +424,7 @@ func ClearCache() {
//   start = (len(indices) * index) // count
//   end = (len(indices) * uint64(index + 1)) // count
//   return [indices[compute_shuffled_index(uint64(i), uint64(len(indices)), seed)] for i in range(start, end)]
func computeCommittee(
func ComputeCommittee(
indices []primitives.ValidatorIndex,
seed [32]byte,
index, count uint64,
@@ -451,9 +451,9 @@ func computeCommittee(
return shuffledList[start:end], nil
}

// This computes proposer indices of the current epoch and returns a list of proposer indices,
// PrecomputeProposerIndices computes proposer indices of the current epoch and returns a list of proposer indices,
// the index of the list represents the slot number.
func precomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
func PrecomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
hashFunc := hash.CustomSHA256Hasher()
proposerIndices := make([]primitives.ValidatorIndex, params.BeaconConfig().SlotsPerEpoch)
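The spec pseudocode quoted above ComputeCommittee assigns each committee a contiguous slice of the shuffled index list. A small sketch of just those start/end bounds (illustration only, not the Prysm helper):

package main

import "fmt"

// committeeBounds computes the half-open range [start, end) of shuffled indices
// for committee number `index` out of `count` committees, per the quoted pseudocode.
func committeeBounds(validators, index, count uint64) (start, end uint64) {
	return validators * index / count, validators * (index + 1) / count
}

func main() {
	fmt.Println(committeeBounds(64, 1, 4)) // 16 32: committee 1 of 4 over 64 validators
}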
@@ -1,4 +1,4 @@
package helpers
package helpers_test

import (
"context"
@@ -7,6 +7,7 @@ import (
"testing"

"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -21,7 +22,7 @@ import (
)

func TestComputeCommittee_WithoutCache(t *testing.T) {
ClearCache()
helpers.ClearCache()

// Create 10 committees
committeeCount := uint64(10)
@@ -48,16 +49,16 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
require.NoError(t, err)

epoch := time.CurrentEpoch(state)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(t, err)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
committees, err := computeCommittee(indices, seed, 0, 1 /* Total committee*/)
committees, err := helpers.ComputeCommittee(indices, seed, 0, 1 /* Total committee*/)
assert.NoError(t, err, "Could not compute committee")

// Test shuffled indices are correct for index 5 committee
index := uint64(5)
committee5, err := computeCommittee(indices, seed, index, committeeCount)
committee5, err := helpers.ComputeCommittee(indices, seed, index, committeeCount)
assert.NoError(t, err, "Could not compute committee")
start := slice.SplitOffset(validatorCount, committeeCount, index)
end := slice.SplitOffset(validatorCount, committeeCount, index+1)
@@ -65,7 +66,7 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {

// Test shuffled indices are correct for index 9 committee
index = uint64(9)
committee9, err := computeCommittee(indices, seed, index, committeeCount)
committee9, err := helpers.ComputeCommittee(indices, seed, index, committeeCount)
assert.NoError(t, err, "Could not compute committee")
start = slice.SplitOffset(validatorCount, committeeCount, index)
end = slice.SplitOffset(validatorCount, committeeCount, index+1)
@@ -73,42 +74,42 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
}

func TestComputeCommittee_RegressionTest(t *testing.T) {
ClearCache()
helpers.ClearCache()

indices := []primitives.ValidatorIndex{1, 3, 8, 16, 18, 19, 20, 23, 30, 35, 43, 46, 47, 54, 56, 58, 69, 70, 71, 83, 84, 85, 91, 96, 100, 103, 105, 106, 112, 121, 127, 128, 129, 140, 142, 144, 146, 147, 149, 152, 153, 154, 157, 160, 173, 175, 180, 182, 188, 189, 191, 194, 201, 204, 217, 221, 226, 228, 230, 231, 239, 241, 249, 250, 255}
seed := [32]byte{68, 110, 161, 250, 98, 230, 161, 172, 227, 226, 99, 11, 138, 124, 201, 134, 38, 197, 0, 120, 6, 165, 122, 34, 19, 216, 43, 226, 210, 114, 165, 183}
index := uint64(215)
count := uint64(32)
_, err := computeCommittee(indices, seed, index, count)
_, err := helpers.ComputeCommittee(indices, seed, index, count)
require.ErrorContains(t, "index out of range", err)
}

func TestVerifyBitfieldLength_OK(t *testing.T) {
ClearCache()
helpers.ClearCache()

bf := bitfield.Bitlist{0xFF, 0x01}
committeeSize := uint64(8)
assert.NoError(t, VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
assert.NoError(t, helpers.VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")

bf = bitfield.Bitlist{0xFF, 0x07}
committeeSize = 10
assert.NoError(t, VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
assert.NoError(t, helpers.VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
}

func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
ClearCache()
helpers.ClearCache()

epoch := primitives.Epoch(1)
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: 0, // Epoch 0.
})
require.NoError(t, err)
_, _, err = CommitteeAssignments(context.Background(), state, epoch+1)
_, _, err = helpers.CommitteeAssignments(context.Background(), state, epoch+1)
assert.ErrorContains(t, "can't be greater than next epoch", err)
}

func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
ClearCache()
helpers.ClearCache()

validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
@@ -127,7 +128,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, 0)
_, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, 0)
require.NoError(t, err, "Failed to determine CommitteeAssignments")
for _, ss := range proposerIndexToSlots {
for _, s := range ss {
@@ -198,9 +199,9 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {

for i, tt := range tests {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
ClearCache()
helpers.ClearCache()

validatorIndexToCommittee, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
validatorIndexToCommittee, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
require.NoError(t, err, "Failed to determine CommitteeAssignments")
cac := validatorIndexToCommittee[tt.index]
assert.Equal(t, tt.committeeIndex, cac.CommitteeIndex, "Unexpected committeeIndex for validator index %d", tt.index)
@@ -215,7 +216,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
}

func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
ClearCache()
helpers.ClearCache()

// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -237,17 +238,17 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, proposerIndxs, err := CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state))
_, proposerIndxs, err := helpers.CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state))
require.NoError(t, err)
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")

_, proposerIndxs, err = CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
_, proposerIndxs, err = helpers.CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
require.NoError(t, err)
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
}

func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *testing.T) {
ClearCache()
helpers.ClearCache()

// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -263,12 +264,12 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, _, err = CommitteeAssignments(context.Background(), state, 0)
_, _, err = helpers.CommitteeAssignments(context.Background(), state, 0)
require.ErrorContains(t, "start slot 0 is smaller than the minimum valid start slot 1", err)
}

func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
ClearCache()
helpers.ClearCache()

// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -285,7 +286,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
})
require.NoError(t, err)
epoch := primitives.Epoch(1)
_, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, epoch)
_, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, epoch)
require.NoError(t, err, "Failed to determine CommitteeAssignments")

slotsWithProposers := make(map[primitives.Slot]bool)
@@ -391,10 +392,10 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
}

for i, tt := range tests {
ClearCache()
helpers.ClearCache()

require.NoError(t, state.SetSlot(tt.stateSlot))
err := VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
err := helpers.VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
if tt.verificationFailure {
assert.NotNil(t, err, "Verification succeeded when it was supposed to fail")
} else {
@@ -404,7 +405,7 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
}

func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
ClearCache()
helpers.ClearCache()

validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
@@ -421,20 +422,20 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
require.NoError(t, UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))

epoch := primitives.Epoch(0)
idx := primitives.CommitteeIndex(1)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)

indices, err = committeeCache.Committee(context.Background(), params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)), seed, idx)
indices, err = helpers.CommitteeCache().Committee(context.Background(), params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)), seed, idx)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(indices)), "Did not save correct indices lengths")
}

func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
ClearCache()
helpers.ClearCache()

validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
@@ -452,19 +453,19 @@ func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
})
require.NoError(t, err)
e := time.CurrentEpoch(state)
require.NoError(t, UpdateCommitteeCache(context.Background(), state, e))
require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, e))

seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.Equal(t, true, committeeCache.HasEntry(string(seed[:])))
require.Equal(t, true, helpers.CommitteeCache().HasEntry(string(seed[:])))

nextSeed, err := Seed(state, e+1, params.BeaconConfig().DomainBeaconAttester)
nextSeed, err := helpers.Seed(state, e+1, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.Equal(t, false, committeeCache.HasEntry(string(nextSeed[:])))
require.Equal(t, false, helpers.CommitteeCache().HasEntry(string(nextSeed[:])))

require.NoError(t, UpdateCommitteeCache(context.Background(), state, e+1))
require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, e+1))

require.Equal(t, true, committeeCache.HasEntry(string(nextSeed[:])))
require.Equal(t, true, helpers.CommitteeCache().HasEntry(string(nextSeed[:])))
}

func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
@@ -481,20 +482,20 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
require.NoError(b, err)

epoch := time.CurrentEpoch(state)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)

index := uint64(3)
_, err = computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err = helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}

b.ResetTimer()
for n := 0; n < b.N; n++ {
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -515,20 +516,20 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
require.NoError(b, err)

epoch := time.CurrentEpoch(state)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)

index := uint64(3)
_, err = computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err = helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}

b.ResetTimer()
for n := 0; n < b.N; n++ {
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -549,9 +550,9 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
require.NoError(b, err)

epoch := time.CurrentEpoch(state)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
|
||||
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
|
||||
require.NoError(b, err)
|
||||
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
require.NoError(b, err)
|
||||
|
||||
i := uint64(0)
|
||||
@@ -559,7 +560,7 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
i++
|
||||
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -584,9 +585,9 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
|
||||
require.NoError(b, err)
|
||||
|
||||
epoch := time.CurrentEpoch(state)
|
||||
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
|
||||
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
|
||||
require.NoError(b, err)
|
||||
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
require.NoError(b, err)
|
||||
|
||||
i := uint64(0)
|
||||
@@ -594,7 +595,7 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
i++
|
||||
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -619,9 +620,9 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
|
||||
require.NoError(b, err)
|
||||
|
||||
epoch := time.CurrentEpoch(state)
|
||||
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
|
||||
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
|
||||
require.NoError(b, err)
|
||||
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
require.NoError(b, err)
|
||||
|
||||
i := uint64(0)
|
||||
@@ -629,7 +630,7 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
i++
|
||||
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -655,13 +656,13 @@ func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, err = BeaconCommitteeFromState(context.Background(), state, 1 /* previous epoch */, 0)
|
||||
_, err = helpers.BeaconCommitteeFromState(context.Background(), state, 1 /* previous epoch */, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify previous epoch is cached
|
||||
seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconAttester)
|
||||
seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconAttester)
|
||||
require.NoError(t, err)
|
||||
activeIndices, err := committeeCache.ActiveIndices(context.Background(), seed)
|
||||
activeIndices, err := helpers.CommitteeCache().ActiveIndices(context.Background(), seed)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, activeIndices, "Did not cache active indices")
|
||||
}
|
||||
@@ -680,19 +681,19 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
indices, err := ActiveValidatorIndices(context.Background(), state, 0)
|
||||
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
proposerIndices, err := precomputeProposerIndices(state, indices, time.CurrentEpoch(state))
|
||||
proposerIndices, err := helpers.PrecomputeProposerIndices(state, indices, time.CurrentEpoch(state))
|
||||
require.NoError(t, err)
|
||||
|
||||
var wantedProposerIndices []primitives.ValidatorIndex
|
||||
seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
|
||||
seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
|
||||
require.NoError(t, err)
|
||||
for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
|
||||
seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
|
||||
seedWithSlotHash := hash.Hash(seedWithSlot)
|
||||
index, err := ComputeProposerIndex(state, indices, seedWithSlotHash)
|
||||
index, err := helpers.ComputeProposerIndex(state, indices, seedWithSlotHash)
|
||||
require.NoError(t, err)
|
||||
wantedProposerIndices = append(wantedProposerIndices, index)
|
||||
}
|
||||
|
||||
17
beacon-chain/core/helpers/private_access_fuzz_noop_test.go
Normal file
@@ -0,0 +1,17 @@
//go:build fuzz

package helpers

import "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"

func CommitteeCache() *cache.FakeCommitteeCache {
	return committeeCache
}

func SyncCommitteeCache() *cache.FakeSyncCommitteeCache {
	return syncCommitteeCache
}

func ProposerIndicesCache() *cache.FakeProposerIndicesCache {
	return proposerIndicesCache
}
17
beacon-chain/core/helpers/private_access_test.go
Normal file
@@ -0,0 +1,17 @@
//go:build !fuzz

package helpers

import "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"

func CommitteeCache() *cache.CommitteeCache {
	return committeeCache
}

func SyncCommitteeCache() *cache.SyncCommitteeCache {
	return syncCommitteeCache
}

func ProposerIndicesCache() *cache.ProposerIndicesCache {
	return proposerIndicesCache
}
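The two accessor files above expose the package-private caches to tests that now live in the external helpers_test package: the fuzz build hands back the fake cache types, the regular build the real ones. A minimal sketch of how an external test might use these accessors (the test name and assertion are illustrative, not part of this change):

package helpers_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
)

func TestCommitteeCacheAccessor_Sketch(t *testing.T) {
	helpers.ClearCache()
	// The accessor returns the same cache instance the package uses internally,
	// so external tests can seed or inspect it directly.
	if helpers.CommitteeCache() == nil {
		t.Fatal("expected a non-nil committee cache")
	}
}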
@@ -1,9 +1,10 @@
|
||||
package helpers
|
||||
package helpers_test
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
@@ -40,10 +41,10 @@ func TestRandaoMix_OK(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
|
||||
mix, err := RandaoMix(state, test.epoch)
|
||||
mix, err := helpers.RandaoMix(state, test.epoch)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, test.randaoMix, mix, "Incorrect randao mix")
|
||||
}
|
||||
@@ -76,10 +77,10 @@ func TestRandaoMix_CopyOK(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
|
||||
mix, err := RandaoMix(state, test.epoch)
|
||||
mix, err := helpers.RandaoMix(state, test.epoch)
|
||||
require.NoError(t, err)
|
||||
uniqueNumber := uint64(params.BeaconConfig().EpochsPerHistoricalVector.Add(1000))
|
||||
binary.LittleEndian.PutUint64(mix, uniqueNumber)
|
||||
@@ -92,7 +93,7 @@ func TestRandaoMix_CopyOK(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGenerateSeed_OK(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(randaoMixes); i++ {
|
||||
@@ -107,7 +108,7 @@ func TestGenerateSeed_OK(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := Seed(state, 10, params.BeaconConfig().DomainBeaconAttester)
|
||||
got, err := helpers.Seed(state, 10, params.BeaconConfig().DomainBeaconAttester)
|
||||
require.NoError(t, err)
|
||||
|
||||
wanted := [32]byte{102, 82, 23, 40, 226, 79, 171, 11, 203, 23, 175, 7, 88, 202, 80,
|
||||
|
||||
@@ -22,7 +22,7 @@ var balanceCache = cache.NewEffectiveBalanceCache()
// """
// Return the combined effective balance of the ``indices``.
// ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
// Math safe up to ~10B ETH, afterwhich this overflows uint64.
// Math safe up to ~10B ETH, after which this overflows uint64.
// """
// return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices])))
func TotalBalance(state state.ReadOnlyValidators, indices []primitives.ValidatorIndex) uint64 {
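The spec comment above reduces to a sum with a floor at EFFECTIVE_BALANCE_INCREMENT (10^9 Gwei, i.e. 1 ETH, on mainnet). A rough sketch of that arithmetic, not code from this change:

// Illustrative only: sum the effective balances and floor the result at the
// increment so later divisions by the total balance can never divide by zero.
func totalBalanceSketch(effectiveBalances []uint64, increment uint64) uint64 {
	total := uint64(0)
	for _, b := range effectiveBalances {
		total += b
	}
	if total < increment {
		return increment
	}
	return total
}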
@@ -1,9 +1,10 @@
|
||||
package helpers
|
||||
package helpers_test
|
||||
|
||||
import (
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -14,7 +15,7 @@ import (
|
||||
)
|
||||
|
||||
func TestTotalBalance_OK(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Validators: []*ethpb.Validator{
|
||||
{EffectiveBalance: 27 * 1e9}, {EffectiveBalance: 28 * 1e9},
|
||||
@@ -22,19 +23,19 @@ func TestTotalBalance_OK(t *testing.T) {
|
||||
}})
|
||||
require.NoError(t, err)
|
||||
|
||||
balance := TotalBalance(state, []primitives.ValidatorIndex{0, 1, 2, 3})
|
||||
balance := helpers.TotalBalance(state, []primitives.ValidatorIndex{0, 1, 2, 3})
|
||||
wanted := state.Validators()[0].EffectiveBalance + state.Validators()[1].EffectiveBalance +
|
||||
state.Validators()[2].EffectiveBalance + state.Validators()[3].EffectiveBalance
|
||||
assert.Equal(t, wanted, balance, "Incorrect TotalBalance")
|
||||
}
|
||||
|
||||
func TestTotalBalance_ReturnsEffectiveBalanceIncrement(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Validators: []*ethpb.Validator{}})
|
||||
require.NoError(t, err)
|
||||
|
||||
balance := TotalBalance(state, []primitives.ValidatorIndex{})
|
||||
balance := helpers.TotalBalance(state, []primitives.ValidatorIndex{})
|
||||
wanted := params.BeaconConfig().EffectiveBalanceIncrement
|
||||
assert.Equal(t, wanted, balance, "Incorrect TotalBalance")
|
||||
}
|
||||
@@ -51,7 +52,7 @@ func TestGetBalance_OK(t *testing.T) {
|
||||
{i: 2, b: []uint64{0, 0, 0}},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Balances: test.b})
|
||||
require.NoError(t, err)
|
||||
@@ -68,7 +69,7 @@ func TestTotalActiveBalance(t *testing.T) {
|
||||
{10000},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, 0)
|
||||
for i := 0; i < test.vCount; i++ {
|
||||
@@ -76,7 +77,7 @@ func TestTotalActiveBalance(t *testing.T) {
|
||||
}
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Validators: validators})
|
||||
require.NoError(t, err)
|
||||
bal, err := TotalActiveBalance(state)
|
||||
bal, err := helpers.TotalActiveBalance(state)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(test.vCount)*params.BeaconConfig().MaxEffectiveBalance, bal)
|
||||
}
|
||||
@@ -91,7 +92,7 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
|
||||
{10000},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, 0)
|
||||
for i := 0; i < test.vCount; i++ {
|
||||
@@ -99,7 +100,7 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
|
||||
}
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Validators: validators})
|
||||
require.NoError(t, err)
|
||||
bal, err := TotalActiveBalance(state)
|
||||
bal, err := helpers.TotalActiveBalance(state)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().EffectiveBalanceIncrement, bal)
|
||||
}
|
||||
@@ -115,7 +116,7 @@ func TestTotalActiveBalance_WithCache(t *testing.T) {
|
||||
{vCount: 10000, wantCount: 10000},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, 0)
|
||||
for i := 0; i < test.vCount; i++ {
|
||||
@@ -123,7 +124,7 @@ func TestTotalActiveBalance_WithCache(t *testing.T) {
|
||||
}
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Validators: validators})
|
||||
require.NoError(t, err)
|
||||
bal, err := TotalActiveBalance(state)
|
||||
bal, err := helpers.TotalActiveBalance(state)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(test.wantCount)*params.BeaconConfig().MaxEffectiveBalance, bal)
|
||||
}
|
||||
@@ -141,7 +142,7 @@ func TestIncreaseBalance_OK(t *testing.T) {
|
||||
{i: 2, b: []uint64{27 * 1e9, 28 * 1e9, 32 * 1e9}, nb: 33 * 1e9, eb: 65 * 1e9},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Validators: []*ethpb.Validator{
|
||||
@@ -149,7 +150,7 @@ func TestIncreaseBalance_OK(t *testing.T) {
|
||||
Balances: test.b,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, IncreaseBalance(state, test.i, test.nb))
|
||||
require.NoError(t, helpers.IncreaseBalance(state, test.i, test.nb))
|
||||
assert.Equal(t, test.eb, state.Balances()[test.i], "Incorrect Validator balance")
|
||||
}
|
||||
}
|
||||
@@ -167,7 +168,7 @@ func TestDecreaseBalance_OK(t *testing.T) {
|
||||
{i: 3, b: []uint64{27 * 1e9, 28 * 1e9, 1, 28 * 1e9}, nb: 28 * 1e9, eb: 0},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Validators: []*ethpb.Validator{
|
||||
@@ -175,13 +176,13 @@ func TestDecreaseBalance_OK(t *testing.T) {
|
||||
Balances: test.b,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, DecreaseBalance(state, test.i, test.nb))
|
||||
require.NoError(t, helpers.DecreaseBalance(state, test.i, test.nb))
|
||||
assert.Equal(t, test.eb, state.Balances()[test.i], "Incorrect Validator balance")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFinalityDelay(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
|
||||
base.FinalizedCheckpoint = ðpb.Checkpoint{Epoch: 3}
|
||||
@@ -195,25 +196,25 @@ func TestFinalityDelay(t *testing.T) {
|
||||
finalizedEpoch = beaconState.FinalizedCheckpointEpoch()
|
||||
}
|
||||
setVal()
|
||||
d := FinalityDelay(prevEpoch, finalizedEpoch)
|
||||
d := helpers.FinalityDelay(prevEpoch, finalizedEpoch)
|
||||
w := time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
|
||||
assert.Equal(t, w, d, "Did not get wanted finality delay")
|
||||
|
||||
require.NoError(t, beaconState.SetFinalizedCheckpoint(ðpb.Checkpoint{Epoch: 4}))
|
||||
setVal()
|
||||
d = FinalityDelay(prevEpoch, finalizedEpoch)
|
||||
d = helpers.FinalityDelay(prevEpoch, finalizedEpoch)
|
||||
w = time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
|
||||
assert.Equal(t, w, d, "Did not get wanted finality delay")
|
||||
|
||||
require.NoError(t, beaconState.SetFinalizedCheckpoint(ðpb.Checkpoint{Epoch: 5}))
|
||||
setVal()
|
||||
d = FinalityDelay(prevEpoch, finalizedEpoch)
|
||||
d = helpers.FinalityDelay(prevEpoch, finalizedEpoch)
|
||||
w = time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
|
||||
assert.Equal(t, w, d, "Did not get wanted finality delay")
|
||||
}
|
||||
|
||||
func TestIsInInactivityLeak(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
|
||||
base.FinalizedCheckpoint = ðpb.Checkpoint{Epoch: 3}
|
||||
@@ -227,13 +228,13 @@ func TestIsInInactivityLeak(t *testing.T) {
|
||||
finalizedEpoch = beaconState.FinalizedCheckpointEpoch()
|
||||
}
|
||||
setVal()
|
||||
assert.Equal(t, true, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
|
||||
assert.Equal(t, true, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
|
||||
require.NoError(t, beaconState.SetFinalizedCheckpoint(ðpb.Checkpoint{Epoch: 4}))
|
||||
setVal()
|
||||
assert.Equal(t, true, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
|
||||
assert.Equal(t, true, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
|
||||
require.NoError(t, beaconState.SetFinalizedCheckpoint(ðpb.Checkpoint{Epoch: 5}))
|
||||
setVal()
|
||||
assert.Equal(t, false, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak false")
|
||||
assert.Equal(t, false, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak false")
|
||||
}
|
||||
|
||||
func buildState(slot primitives.Slot, validatorCount uint64) *ethpb.BeaconState {
|
||||
@@ -285,7 +286,7 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
|
||||
{i: 2, b: []uint64{math.MaxUint64, math.MaxUint64, math.MaxUint64}, nb: 33 * 1e9},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Validators: []*ethpb.Validator{
|
||||
@@ -293,6 +294,6 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
|
||||
Balances: test.b,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "addition overflows", IncreaseBalance(state, test.i, test.nb))
|
||||
require.ErrorContains(t, "addition overflows", helpers.IncreaseBalance(state, test.i, test.nb))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ var (
|
||||
// 1. Checks if the public key exists in the sync committee cache
|
||||
// 2. If 1 fails, checks if the public key exists in the input current sync committee object
|
||||
func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.ValidatorIndex) (bool, error) {
|
||||
root, err := syncPeriodBoundaryRoot(st)
|
||||
root, err := SyncPeriodBoundaryRoot(st)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -63,7 +63,7 @@ func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.Valida
|
||||
func IsNextPeriodSyncCommittee(
|
||||
st state.BeaconState, valIdx primitives.ValidatorIndex,
|
||||
) (bool, error) {
|
||||
root, err := syncPeriodBoundaryRoot(st)
|
||||
root, err := SyncPeriodBoundaryRoot(st)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -90,7 +90,7 @@ func IsNextPeriodSyncCommittee(
|
||||
func CurrentPeriodSyncSubcommitteeIndices(
|
||||
st state.BeaconState, valIdx primitives.ValidatorIndex,
|
||||
) ([]primitives.CommitteeIndex, error) {
|
||||
root, err := syncPeriodBoundaryRoot(st)
|
||||
root, err := SyncPeriodBoundaryRoot(st)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -124,7 +124,7 @@ func CurrentPeriodSyncSubcommitteeIndices(
|
||||
func NextPeriodSyncSubcommitteeIndices(
|
||||
st state.BeaconState, valIdx primitives.ValidatorIndex,
|
||||
) ([]primitives.CommitteeIndex, error) {
|
||||
root, err := syncPeriodBoundaryRoot(st)
|
||||
root, err := SyncPeriodBoundaryRoot(st)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -182,10 +182,10 @@ func findSubCommitteeIndices(pubKey []byte, pubKeys [][]byte) []primitives.Commi
	return indices
}

// Retrieve the current sync period boundary root by calculating sync period start epoch
// SyncPeriodBoundaryRoot computes the current sync period boundary root by calculating sync period start epoch
// and calling `BlockRoot`.
// It uses the boundary slot - 1 for block root. (Ex: SlotsPerEpoch * EpochsPerSyncCommitteePeriod - 1)
func syncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([32]byte, error) {
func SyncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([32]byte, error) {
	// Can't call `BlockRoot` until the first slot.
	if st.Slot() == params.BeaconConfig().GenesisSlot {
		return params.BeaconConfig().ZeroHash, nil
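The boundary-slot arithmetic described in the comment above, as a small sketch (names are assumptions, not Prysm APIs); the genesis check visible in the snippet exists because the subtraction would underflow before the first boundary:

// Illustrative only.
func syncBoundaryRootSlotSketch(currentEpoch, epochsPerPeriod, slotsPerEpoch uint64) uint64 {
	boundaryEpoch := (currentEpoch / epochsPerPeriod) * epochsPerPeriod
	if boundaryEpoch == 0 {
		return 0 // the real helper returns the zero hash in this case
	}
	// Block root is taken at the slot just before the boundary, e.g.
	// SlotsPerEpoch*EpochsPerSyncCommitteePeriod - 1 during the second period.
	return boundaryEpoch*slotsPerEpoch - 1
}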
@@ -1,4 +1,4 @@
|
||||
package helpers
|
||||
package helpers_test
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
@@ -17,7 +18,7 @@ import (
|
||||
)
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -40,15 +41,15 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
r := [32]byte{'a'}
|
||||
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
|
||||
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
|
||||
|
||||
ok, err := IsCurrentPeriodSyncCommittee(state, 0)
|
||||
ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -70,13 +71,13 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsCurrentPeriodSyncCommittee(state, 0)
|
||||
ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -98,13 +99,13 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsCurrentPeriodSyncCommittee(state, 12390192)
|
||||
ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 12390192)
|
||||
require.ErrorContains(t, "validator index 12390192 does not exist", err)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -127,15 +128,15 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
r := [32]byte{'a'}
|
||||
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
|
||||
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
|
||||
|
||||
ok, err := IsNextPeriodSyncCommittee(state, 0)
|
||||
ok, err := helpers.IsNextPeriodSyncCommittee(state, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -157,13 +158,13 @@ func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsNextPeriodSyncCommittee(state, 0)
|
||||
ok, err := helpers.IsNextPeriodSyncCommittee(state, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -185,13 +186,13 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsNextPeriodSyncCommittee(state, 120391029)
|
||||
ok, err := helpers.IsNextPeriodSyncCommittee(state, 120391029)
|
||||
require.ErrorContains(t, "validator index 120391029 does not exist", err)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -214,15 +215,15 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
r := [32]byte{'a'}
|
||||
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
|
||||
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
|
||||
|
||||
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 0)
|
||||
index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 0)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
|
||||
}
|
||||
|
||||
func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -243,27 +244,27 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
root, err := syncPeriodBoundaryRoot(state)
|
||||
root, err := helpers.SyncPeriodBoundaryRoot(state)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test that cache was empty.
|
||||
_, err = syncCommitteeCache.CurrentPeriodIndexPosition(root, 0)
|
||||
_, err = helpers.SyncCommitteeCache().CurrentPeriodIndexPosition(root, 0)
|
||||
require.Equal(t, cache.ErrNonExistingSyncCommitteeKey, err)
|
||||
|
||||
// Test that helper can retrieve the index given empty cache.
|
||||
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 0)
|
||||
index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 0)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
|
||||
|
||||
// Test that cache was able to fill on miss.
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
index, err = syncCommitteeCache.CurrentPeriodIndexPosition(root, 0)
|
||||
index, err = helpers.SyncCommitteeCache().CurrentPeriodIndexPosition(root, 0)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
|
||||
}
|
||||
|
||||
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -285,13 +286,13 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
|
||||
index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
|
||||
require.ErrorContains(t, "validator index 129301923 does not exist", err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
|
||||
}
|
||||
|
||||
func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -314,15 +315,15 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
r := [32]byte{'a'}
|
||||
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
|
||||
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
|
||||
|
||||
index, err := NextPeriodSyncSubcommitteeIndices(state, 0)
|
||||
index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 0)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
|
||||
}
|
||||
|
||||
func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -344,13 +345,13 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := NextPeriodSyncSubcommitteeIndices(state, 0)
|
||||
index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 0)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
|
||||
}
|
||||
|
||||
func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -372,43 +373,43 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := NextPeriodSyncSubcommitteeIndices(state, 21093019)
|
||||
index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 21093019)
|
||||
require.ErrorContains(t, "validator index 21093019 does not exist", err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
|
||||
}
|
||||
|
||||
func TestUpdateSyncCommitteeCache_BadSlot(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Slot: 1,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = UpdateSyncCommitteeCache(state)
|
||||
err = helpers.UpdateSyncCommitteeCache(state)
|
||||
require.ErrorContains(t, "not at the end of the epoch to update cache", err)
|
||||
|
||||
state, err = state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch - 1,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = UpdateSyncCommitteeCache(state)
|
||||
err = helpers.UpdateSyncCommitteeCache(state)
|
||||
require.ErrorContains(t, "not at sync committee period boundary to update cache", err)
|
||||
}
|
||||
|
||||
func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Slot: primitives.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch - 1,
|
||||
LatestBlockHeader: ðpb.BeaconBlockHeader{StateRoot: params.BeaconConfig().ZeroHash[:]},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = UpdateSyncCommitteeCache(state)
|
||||
err = helpers.UpdateSyncCommitteeCache(state)
|
||||
require.ErrorContains(t, "zero hash state root can't be used to update cache", err)
|
||||
}
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -435,7 +436,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
comIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
|
||||
comIdxs, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 200)
|
||||
require.NoError(t, err)
|
||||
|
||||
wantedSlot := params.BeaconConfig().EpochsPerSyncCommitteePeriod.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))
|
||||
@@ -446,7 +447,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
|
||||
syncCommittee.Pubkeys[i], syncCommittee.Pubkeys[j] = syncCommittee.Pubkeys[j], syncCommittee.Pubkeys[i]
|
||||
})
|
||||
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
|
||||
newIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
|
||||
newIdxs, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 200)
|
||||
require.NoError(t, err)
|
||||
require.DeepNotEqual(t, comIdxs, newIdxs)
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package helpers
|
||||
package helpers_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
@@ -32,7 +33,7 @@ func TestIsActiveValidator_OK(t *testing.T) {
|
||||
}
|
||||
for _, test := range tests {
|
||||
validator := ðpb.Validator{ActivationEpoch: 10, ExitEpoch: 100}
|
||||
assert.Equal(t, test.b, IsActiveValidator(validator, test.a), "IsActiveValidator(%d)", test.a)
|
||||
assert.Equal(t, test.b, helpers.IsActiveValidator(validator, test.a), "IsActiveValidator(%d)", test.a)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -53,7 +54,7 @@ func TestIsActiveValidatorUsingTrie_OK(t *testing.T) {
|
||||
for _, test := range tests {
|
||||
readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.b, IsActiveValidatorUsingTrie(readOnlyVal, test.a), "IsActiveValidatorUsingTrie(%d)", test.a)
|
||||
assert.Equal(t, test.b, helpers.IsActiveValidatorUsingTrie(readOnlyVal, test.a), "IsActiveValidatorUsingTrie(%d)", test.a)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -81,7 +82,7 @@ func TestIsActiveNonSlashedValidatorUsingTrie_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.b, IsActiveNonSlashedValidatorUsingTrie(readOnlyVal, test.a), "IsActiveNonSlashedValidatorUsingTrie(%d)", test.a)
|
||||
assert.Equal(t, test.b, helpers.IsActiveNonSlashedValidatorUsingTrie(readOnlyVal, test.a), "IsActiveNonSlashedValidatorUsingTrie(%d)", test.a)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -161,7 +162,7 @@ func TestIsSlashableValidator_OK(t *testing.T) {
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
t.Run("without trie", func(t *testing.T) {
|
||||
slashableValidator := IsSlashableValidator(test.validator.ActivationEpoch,
|
||||
slashableValidator := helpers.IsSlashableValidator(test.validator.ActivationEpoch,
|
||||
test.validator.WithdrawableEpoch, test.validator.Slashed, test.epoch)
|
||||
assert.Equal(t, test.slashable, slashableValidator, "Expected active validator slashable to be %t", test.slashable)
|
||||
})
|
||||
@@ -170,7 +171,7 @@ func TestIsSlashableValidator_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
|
||||
require.NoError(t, err)
|
||||
slashableValidator := IsSlashableValidatorUsingTrie(readOnlyVal, test.epoch)
|
||||
slashableValidator := helpers.IsSlashableValidatorUsingTrie(readOnlyVal, test.epoch)
|
||||
assert.Equal(t, test.slashable, slashableValidator, "Expected active validator slashable to be %t", test.slashable)
|
||||
})
|
||||
})
|
||||
@@ -223,17 +224,17 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
require.NoError(t, state.SetSlot(tt.slot))
|
||||
result, err := BeaconProposerIndex(context.Background(), state)
|
||||
result, err := helpers.BeaconProposerIndex(context.Background(), state)
|
||||
require.NoError(t, err, "Failed to get shard and committees at slot")
|
||||
assert.Equal(t, tt.index, result, "Result index was an unexpected value")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconProposerIndex_BadState(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
c := params.BeaconConfig()
|
||||
@@ -261,12 +262,12 @@ func TestBeaconProposerIndex_BadState(t *testing.T) {
|
||||
// Set a very high slot, so that retrieved block root will be
|
||||
// non existent for the proposer cache.
|
||||
require.NoError(t, state.SetSlot(100))
|
||||
_, err = BeaconProposerIndex(context.Background(), state)
|
||||
_, err = helpers.BeaconProposerIndex(context.Background(), state)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestComputeProposerIndex_Compatibility(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
@@ -281,22 +282,22 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
indices, err := ActiveValidatorIndices(context.Background(), state, 0)
|
||||
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
var proposerIndices []primitives.ValidatorIndex
|
||||
seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
|
||||
seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
|
||||
require.NoError(t, err)
|
||||
for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
|
||||
seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
|
||||
seedWithSlotHash := hash.Hash(seedWithSlot)
|
||||
index, err := ComputeProposerIndex(state, indices, seedWithSlotHash)
|
||||
index, err := helpers.ComputeProposerIndex(state, indices, seedWithSlotHash)
|
||||
require.NoError(t, err)
|
||||
proposerIndices = append(proposerIndices, index)
|
||||
}
|
||||
|
||||
var wantedProposerIndices []primitives.ValidatorIndex
|
||||
seed, err = Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
|
||||
seed, err = helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
|
||||
require.NoError(t, err)
|
||||
for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
|
||||
seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
|
||||
@@ -309,15 +310,15 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDelayedActivationExitEpoch_OK(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
epoch := primitives.Epoch(9999)
|
||||
wanted := epoch + 1 + params.BeaconConfig().MaxSeedLookahead
|
||||
assert.Equal(t, wanted, ActivationExitEpoch(epoch))
|
||||
assert.Equal(t, wanted, helpers.ActivationExitEpoch(epoch))
|
||||
}
|
||||
|
||||
func TestActiveValidatorCount_Genesis(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
c := 1000
|
||||
validators := make([]*ethpb.Validator, c)
|
||||
@@ -334,10 +335,10 @@ func TestActiveValidatorCount_Genesis(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Preset cache to a bad count.
|
||||
seed, err := Seed(beaconState, 0, params.BeaconConfig().DomainBeaconAttester)
|
||||
seed, err := helpers.Seed(beaconState, 0, params.BeaconConfig().DomainBeaconAttester)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, committeeCache.AddCommitteeShuffledList(context.Background(), &cache.Committees{Seed: seed, ShuffledIndices: []primitives.ValidatorIndex{1, 2, 3}}))
|
||||
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
|
||||
require.NoError(t, helpers.CommitteeCache().AddCommitteeShuffledList(context.Background(), &cache.Committees{Seed: seed, ShuffledIndices: []primitives.ValidatorIndex{1, 2, 3}}))
|
||||
validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, uint64(c), validatorCount, "Did not get the correct validator count")
|
||||
}
|
||||
@@ -353,7 +354,7 @@ func TestChurnLimit_OK(t *testing.T) {
|
||||
{validatorCount: 2000000, wantedChurn: 30 /* validatorCount/churnLimitQuotient */},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, test.validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
@@ -368,9 +369,9 @@ func TestChurnLimit_OK(t *testing.T) {
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
|
||||
validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
|
||||
require.NoError(t, err)
|
||||
resultChurn := ValidatorActivationChurnLimit(validatorCount)
|
||||
resultChurn := helpers.ValidatorActivationChurnLimit(validatorCount)
|
||||
assert.Equal(t, test.wantedChurn, resultChurn, "ValidatorActivationChurnLimit(%d)", test.validatorCount)
|
||||
}
|
||||
}
|
||||
@@ -386,7 +387,7 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
|
||||
{2000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
|
||||
}
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
// Create validators
|
||||
validators := make([]*ethpb.Validator, test.validatorCount)
|
||||
@@ -405,11 +406,11 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Get active validator count
|
||||
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
|
||||
validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test churn limit calculation
|
||||
resultChurn := ValidatorActivationChurnLimitDeneb(validatorCount)
|
||||
resultChurn := helpers.ValidatorActivationChurnLimitDeneb(validatorCount)
|
||||
assert.Equal(t, test.wantedChurn, resultChurn)
|
||||
}
|
||||
}
|
||||
@@ -574,11 +575,11 @@ func TestActiveValidatorIndices(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
s, err := state_native.InitializeFromProtoPhase0(tt.args.state)
|
||||
require.NoError(t, err)
|
||||
got, err := ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
|
||||
got, err := helpers.ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
return
|
||||
@@ -684,12 +685,12 @@ func TestComputeProposerIndex(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
bState := ðpb.BeaconState{Validators: tt.args.validators}
|
||||
stTrie, err := state_native.InitializeFromProtoUnsafePhase0(bState)
|
||||
require.NoError(t, err)
|
||||
got, err := ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
|
||||
got, err := helpers.ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
return
|
||||
@@ -718,9 +719,9 @@ func TestIsEligibleForActivationQueue(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
assert.Equal(t, tt.want, IsEligibleForActivationQueue(tt.validator), "IsEligibleForActivationQueue()")
|
||||
assert.Equal(t, tt.want, helpers.IsEligibleForActivationQueue(tt.validator), "IsEligibleForActivationQueue()")
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -747,11 +748,11 @@ func TestIsIsEligibleForActivation(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
s, err := state_native.InitializeFromProtoPhase0(tt.state)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, IsEligibleForActivation(s, tt.validator), "IsEligibleForActivation()")
|
||||
assert.Equal(t, tt.want, helpers.IsEligibleForActivation(s, tt.validator), "IsEligibleForActivation()")
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -765,7 +766,7 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
|
||||
hashFunc := hash.CustomSHA256Hasher()
|
||||
|
||||
for i := uint64(0); ; i++ {
|
||||
candidateIndex, err := ComputeShuffledIndex(primitives.ValidatorIndex(i%length), length, seed, true /* shuffle */)
|
||||
candidateIndex, err := helpers.ComputeShuffledIndex(primitives.ValidatorIndex(i%length), length, seed, true /* shuffle */)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -787,7 +788,7 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
|
||||
}
|
||||
|
||||
func TestLastActivatedValidatorIndex_OK(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
@@ -806,13 +807,13 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) {
|
||||
require.NoError(t, beaconState.SetValidators(validators))
|
||||
require.NoError(t, beaconState.SetBalances(balances))
|
||||
|
||||
index, err := LastActivatedValidatorIndex(context.Background(), beaconState)
|
||||
index, err := helpers.LastActivatedValidatorIndex(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, index, primitives.ValidatorIndex(3))
|
||||
}
|
||||
|
||||
func TestProposerIndexFromCheckpoint(t *testing.T) {
|
||||
ClearCache()
|
||||
helpers.ClearCache()
|
||||
|
||||
e := primitives.Epoch(2)
|
||||
r := [32]byte{'a'}
|
||||
@@ -820,10 +821,10 @@ func TestProposerIndexFromCheckpoint(t *testing.T) {
|
||||
ids := [32]primitives.ValidatorIndex{}
|
||||
slot := primitives.Slot(69) // slot 5 in the Epoch
|
||||
ids[5] = primitives.ValidatorIndex(19)
|
||||
proposerIndicesCache.Set(e, r, ids)
|
||||
helpers.ProposerIndicesCache().Set(e, r, ids)
|
||||
c := &forkchoicetypes.Checkpoint{Root: root, Epoch: e - 1}
|
||||
proposerIndicesCache.SetCheckpoint(*c, r)
|
||||
id, err := ProposerIndexAtSlotFromCheckpoint(c, slot)
|
||||
helpers.ProposerIndicesCache().SetCheckpoint(*c, r)
|
||||
id, err := helpers.ProposerIndexAtSlotFromCheckpoint(c, slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, ids[5], id)
|
||||
}
|
||||
|
||||
21
beacon-chain/core/peerdas/BUILD.bazel
Normal file
@@ -0,0 +1,21 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["helpers.go"],
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
    visibility = ["//visibility:public"],
    deps = [
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)
240
beacon-chain/core/peerdas/helpers.go
Normal file
@@ -0,0 +1,240 @@
package peerdas

import (
	"encoding/binary"

	cKzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
	"github.com/holiman/uint256"

	"github.com/ethereum/go-ethereum/p2p/enode"
	errors "github.com/pkg/errors"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v5/crypto/hash"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

const (
	// Bytes per cell
	bytesPerCell = cKzg4844.FieldElementsPerCell * cKzg4844.BytesPerFieldElement

	// Number of cells in the extended matrix
	extendedMatrixSize = fieldparams.MaxBlobsPerBlock * cKzg4844.CellsPerExtBlob
)

type (
	extendedMatrix []cKzg4844.Cell

	cellCoordinate struct {
		blobIndex uint64
		cellID    uint64
	}
)

var (
	errCustodySubnetCountTooLarge = errors.New("custody subnet count larger than data column sidecar subnet count")
	errCellNotFound               = errors.New("cell not found (should never happen)")
)

// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#helper-functions
func CustodyColumns(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
	dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount

	// Compute the custodied subnets.
	subnetIds, err := CustodyColumnSubnets(nodeId, custodySubnetCount)
	if err != nil {
		return nil, errors.Wrap(err, "custody subnets")
	}

	columnsPerSubnet := cKzg4844.CellsPerExtBlob / dataColumnSidecarSubnetCount

	// Knowing the subnet ID and the number of columns per subnet, select all the columns the node should custody.
	// Columns belonging to the same subnet are contiguous.
	columnIndices := make(map[uint64]bool, custodySubnetCount*columnsPerSubnet)
	for i := uint64(0); i < columnsPerSubnet; i++ {
		for subnetId := range subnetIds {
			columnIndex := dataColumnSidecarSubnetCount*i + subnetId
			columnIndices[columnIndex] = true
		}
	}

	return columnIndices, nil
}
func CustodyColumnSubnets(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
	dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount

	// Check if the custody subnet count is larger than the data column sidecar subnet count.
	if custodySubnetCount > dataColumnSidecarSubnetCount {
		return nil, errCustodySubnetCountTooLarge
	}

	// First, compute the subnet IDs that the node should participate in.
	subnetIds := make(map[uint64]bool, custodySubnetCount)

	for i := uint64(0); uint64(len(subnetIds)) < custodySubnetCount; i++ {
		nodeIdUInt256, nextNodeIdUInt256 := new(uint256.Int), new(uint256.Int)
		nodeIdUInt256.SetBytes(nodeId.Bytes())
		nextNodeIdUInt256.Add(nodeIdUInt256, uint256.NewInt(i))
		nextNodeIdUInt64 := nextNodeIdUInt256.Uint64()
		nextNodeId := bytesutil.Uint64ToBytesLittleEndian(nextNodeIdUInt64)

		hashedNextNodeId := hash.Hash(nextNodeId)
		subnetId := binary.LittleEndian.Uint64(hashedNextNodeId[:8]) % dataColumnSidecarSubnetCount

		if _, exists := subnetIds[subnetId]; !exists {
			subnetIds[subnetId] = true
		}
	}

	return subnetIds, nil
}
// ComputeExtendedMatrix computes the extended matrix from the blobs.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#compute_extended_matrix
|
||||
func ComputeExtendedMatrix(blobs []cKzg4844.Blob) (extendedMatrix, error) {
|
||||
matrix := make(extendedMatrix, 0, extendedMatrixSize)
|
||||
|
||||
for i := range blobs {
|
||||
// Chunk a non-extended blob into cells representing the corresponding extended blob.
|
||||
blob := &blobs[i]
|
||||
cells, err := cKzg4844.ComputeCells(blob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells for blob")
|
||||
}
|
||||
|
||||
matrix = append(matrix, cells[:]...)
|
||||
}
|
||||
|
||||
return matrix, nil
|
||||
}
|
||||
|
||||
// RecoverMatrix recovers the extended matrix from some cells.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
|
||||
func RecoverMatrix(cellFromCoordinate map[cellCoordinate]cKzg4844.Cell, blobCount uint64) (extendedMatrix, error) {
|
||||
matrix := make(extendedMatrix, 0, extendedMatrixSize)
|
||||
|
||||
for blobIndex := uint64(0); blobIndex < blobCount; blobIndex++ {
|
||||
// Filter all cells that belong to the current blob.
|
||||
cellIds := make([]uint64, 0, cKzg4844.CellsPerExtBlob)
|
||||
for coordinate := range cellFromCoordinate {
|
||||
if coordinate.blobIndex == blobIndex {
|
||||
cellIds = append(cellIds, coordinate.cellID)
|
||||
}
|
||||
}
|
||||
|
||||
// Retrieve cells corresponding to all `cellIds`.
|
||||
cellIdsCount := len(cellIds)
|
||||
|
||||
cells := make([]cKzg4844.Cell, 0, cellIdsCount)
|
||||
for _, cellId := range cellIds {
|
||||
coordinate := cellCoordinate{blobIndex: blobIndex, cellID: cellId}
|
||||
cell, ok := cellFromCoordinate[coordinate]
|
||||
if !ok {
|
||||
return matrix, errCellNotFound
|
||||
}
|
||||
|
||||
cells = append(cells, cell)
|
||||
}
|
||||
|
||||
// Recover all cells.
|
||||
allCellsForRow, err := cKzg4844.RecoverAllCells(cellIds, cells)
|
||||
if err != nil {
|
||||
return matrix, errors.Wrap(err, "recover all cells")
|
||||
}
|
||||
|
||||
matrix = append(matrix, allCellsForRow[:]...)
|
||||
}
|
||||
|
||||
return matrix, nil
|
||||
}
|
||||
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
|
||||
func DataColumnSidecars(signedBlock interfaces.SignedBeaconBlock, blobs []cKzg4844.Blob) ([]*ethpb.DataColumnSidecar, error) {
|
||||
blobsCount := len(blobs)
|
||||
|
||||
// Get the signed block header.
|
||||
signedBlockHeader, err := signedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "signed block header")
|
||||
}
|
||||
|
||||
// Get the block body.
|
||||
block := signedBlock.Block()
|
||||
blockBody := block.Body()
|
||||
|
||||
// Get the blob KZG commitments.
|
||||
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Compute the KZG commitments inclusion proof.
|
||||
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "merkle proof ZKG commitments")
|
||||
}
|
||||
|
||||
// Compute cells and proofs.
|
||||
cells := make([][cKzg4844.CellsPerExtBlob]cKzg4844.Cell, 0, blobsCount)
|
||||
proofs := make([][cKzg4844.CellsPerExtBlob]cKzg4844.KZGProof, 0, blobsCount)
|
||||
|
||||
for i := range blobs {
|
||||
blob := &blobs[i]
|
||||
blobCells, blobProofs, err := cKzg4844.ComputeCellsAndProofs(blob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells and proofs")
|
||||
}
|
||||
|
||||
cells = append(cells, blobCells)
|
||||
proofs = append(proofs, blobProofs)
|
||||
}
|
||||
|
||||
// Get the column sidecars.
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, cKzg4844.CellsPerExtBlob)
|
||||
for columnIndex := uint64(0); columnIndex < cKzg4844.CellsPerExtBlob; columnIndex++ {
|
||||
column := make([]cKzg4844.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]cKzg4844.KZGProof, 0, blobsCount)
|
||||
|
||||
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
|
||||
cell := cells[rowIndex][columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofs[rowIndex][columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
cell := column[i]
|
||||
|
||||
cellBytes := make([]byte, 0, bytesPerCell)
|
||||
for _, fieldElement := range cell {
|
||||
cellBytes = append(cellBytes, fieldElement[:]...)
|
||||
}
|
||||
|
||||
columnBytes = append(columnBytes, cellBytes)
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, kzgProof[:])
|
||||
}
|
||||
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
ColumnIndex: columnIndex,
|
||||
DataColumn: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProof: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
|
||||
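A minimal usage sketch of the custody helper above, not taken from the branch itself: it assumes a beacon config that defines DataColumnSidecarSubnetCount, and the node ID and custody subnet count below are made-up placeholder values.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
)

func main() {
    // Placeholder 32-byte node ID; a real node would use its own discovery identity.
    nodeID := enode.HexID("0x0000000000000000000000000000000000000000000000000000000000000001")

    // Ask which column indices this node should custody when it participates in one custody subnet.
    columns, err := peerdas.CustodyColumns(nodeID, 1)
    if err != nil {
        panic(err)
    }
    fmt.Printf("custodying %d columns: %v\n", len(columns), columns)
}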
@@ -59,7 +59,7 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch,
    return ComputeDomainAndSignWithoutState(st.Fork(), epoch, domain, st.GenesisValidatorsRoot(), obj, key)
}

// ComputeDomainAndSignWithoutState offers the same functionalit as ComputeDomainAndSign without the need to provide a BeaconState.
// ComputeDomainAndSignWithoutState offers the same functionality as ComputeDomainAndSign without the need to provide a BeaconState.
// This is particularly helpful for signing values in tests.
func ComputeDomainAndSignWithoutState(fork *ethpb.Fork, epoch primitives.Epoch, domain [4]byte, vr []byte, obj fssz.HashRoot, key bls.SecretKey) ([]byte, error) {
    // EIP-7044: Beginning in Deneb, fix the fork version to Capella for signed exits.
@@ -94,6 +94,15 @@ func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current pri
    entry := s.cache.ensure(key)
    defer s.cache.delete(key)
    root := b.Root()
    sumz, err := s.store.WaitForSummarizer(ctx)
    if err != nil {
        log.WithField("root", fmt.Sprintf("%#x", b.Root())).
            WithError(err).
            Debug("Failed to receive BlobStorageSummarizer within IsDataAvailable")
    } else {
        entry.setDiskSummary(sumz.Summary(root))
    }

    // Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
    // We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
    // ignore their response and decrease their peer score.
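The hunk above treats the disk summarizer as an optional optimization: the DA check proceeds even when no summarizer is available. A sketch of that fallback pattern, not the branch's actual code; the helper name diskSummaryOrZero is hypothetical.

package das

import (
    "context"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
)

// diskSummaryOrZero returns the on-disk summary for root, or a zero summary
// (nothing on disk) when the summarizer is unavailable or not yet warmed up.
func diskSummaryOrZero(ctx context.Context, store *filesystem.BlobStorage, root [32]byte) filesystem.BlobStorageSummary {
    sumz, err := store.WaitForSummarizer(ctx)
    if err != nil {
        return filesystem.BlobStorageSummary{}
    }
    return sumz.Summary(root)
}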
@@ -4,6 +4,7 @@ import (
    "bytes"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -59,7 +60,12 @@ func (c *cache) delete(key cacheKey) {

// cacheEntry holds a fixed-length cache of BlobSidecars.
type cacheEntry struct {
    scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
    scs         [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
    diskSummary filesystem.BlobStorageSummary
}

func (e *cacheEntry) setDiskSummary(sum filesystem.BlobStorageSummary) {
    e.diskSummary = sum
}

// stash adds an item to the in-memory cache of BlobSidecars.
@@ -81,9 +87,17 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
// the cache do not match those found in the block. If err is nil, then all expected
// commitments were found in the cache and the sidecar slice return value can be used
// to perform a DA check against the cached sidecars.
// filter only returns blobs that need to be checked. Blobs already available on disk will be excluded.
func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROBlob, error) {
    scs := make([]blocks.ROBlob, kc.count())
    if e.diskSummary.AllAvailable(kc.count()) {
        return nil, nil
    }
    scs := make([]blocks.ROBlob, 0, kc.count())
    for i := uint64(0); i < fieldparams.MaxBlobsPerBlock; i++ {
        // We already have this blob, we don't need to write it or validate it.
        if e.diskSummary.HasIndex(i) {
            continue
        }
        if kc[i] == nil {
            if e.scs[i] != nil {
                return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.scs[i].KzgCommitment)
@@ -97,7 +111,7 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
        if !bytes.Equal(kc[i], e.scs[i].KzgCommitment) {
            return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitment, kc[i])
        }
        scs[i] = *e.scs[i]
        scs = append(scs, *e.scs[i])
    }

    return scs, nil
@@ -3,9 +3,14 @@ package das
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
func TestCacheEnsureDelete(t *testing.T) {
|
||||
@@ -23,3 +28,145 @@ func TestCacheEnsureDelete(t *testing.T) {
|
||||
var nilEntry *cacheEntry
|
||||
require.Equal(t, nilEntry, c.entries[k])
|
||||
}
|
||||
|
||||
type filterTestCaseSetupFunc func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)
|
||||
|
||||
func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpected int) filterTestCaseSetupFunc {
|
||||
return func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
|
||||
blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, nBlobs)
|
||||
commits, err := commitmentsToCheck(blk, blk.Block().Slot())
|
||||
require.NoError(t, err)
|
||||
entry := &cacheEntry{}
|
||||
if len(onDisk) > 0 {
|
||||
od := map[[32]byte][]int{blk.Root(): onDisk}
|
||||
sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
|
||||
sum := sumz.Summary(blk.Root())
|
||||
entry.setDiskSummary(sum)
|
||||
}
|
||||
expected := make([]blocks.ROBlob, 0, nBlobs)
|
||||
for i := 0; i < commits.count(); i++ {
|
||||
if entry.diskSummary.HasIndex(uint64(i)) {
|
||||
continue
|
||||
}
|
||||
// If we aren't telling the cache a blob is on disk, add it to the expected list and stash.
|
||||
expected = append(expected, blobs[i])
|
||||
require.NoError(t, entry.stash(&blobs[i]))
|
||||
}
|
||||
require.Equal(t, numExpected, len(expected))
|
||||
return entry, commits, expected
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterDiskSummary(t *testing.T) {
|
||||
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
require.NoError(t, err)
|
||||
cases := []struct {
|
||||
name string
|
||||
setup filterTestCaseSetupFunc
|
||||
}{
|
||||
{
|
||||
name: "full blobs, all on disk",
|
||||
setup: filterTestCaseSetup(denebSlot, 6, []int{0, 1, 2, 3, 4, 5}, 0),
|
||||
},
|
||||
{
|
||||
name: "full blobs, first on disk",
|
||||
setup: filterTestCaseSetup(denebSlot, 6, []int{0}, 5),
|
||||
},
|
||||
{
|
||||
name: "full blobs, middle on disk",
|
||||
setup: filterTestCaseSetup(denebSlot, 6, []int{2}, 5),
|
||||
},
|
||||
{
|
||||
name: "full blobs, last on disk",
|
||||
setup: filterTestCaseSetup(denebSlot, 6, []int{5}, 5),
|
||||
},
|
||||
{
|
||||
name: "full blobs, none on disk",
|
||||
setup: filterTestCaseSetup(denebSlot, 6, []int{}, 6),
|
||||
},
|
||||
{
|
||||
name: "one commitment, on disk",
|
||||
setup: filterTestCaseSetup(denebSlot, 1, []int{0}, 0),
|
||||
},
|
||||
{
|
||||
name: "one commitment, not on disk",
|
||||
setup: filterTestCaseSetup(denebSlot, 1, []int{}, 1),
|
||||
},
|
||||
{
|
||||
name: "two commitments, first on disk",
|
||||
setup: filterTestCaseSetup(denebSlot, 2, []int{0}, 1),
|
||||
},
|
||||
{
|
||||
name: "two commitments, last on disk",
|
||||
setup: filterTestCaseSetup(denebSlot, 2, []int{1}, 1),
|
||||
},
|
||||
{
|
||||
name: "two commitments, none on disk",
|
||||
setup: filterTestCaseSetup(denebSlot, 2, []int{}, 2),
|
||||
},
|
||||
{
|
||||
name: "two commitments, all on disk",
|
||||
setup: filterTestCaseSetup(denebSlot, 2, []int{0, 1}, 0),
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
entry, commits, expected := c.setup(t)
|
||||
// first (root) argument doesn't matter, it is just for logs
|
||||
got, err := entry.filter([32]byte{}, commits)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(expected), len(got))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter(t *testing.T) {
|
||||
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
|
||||
require.NoError(t, err)
|
||||
cases := []struct {
|
||||
name string
|
||||
setup func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "commitments mismatch - extra sidecar",
|
||||
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
|
||||
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
|
||||
commits[5] = nil
|
||||
return entry, commits, expected
|
||||
},
|
||||
err: errCommitmentMismatch,
|
||||
},
|
||||
{
|
||||
name: "sidecar missing",
|
||||
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
|
||||
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
|
||||
entry.scs[5] = nil
|
||||
return entry, commits, expected
|
||||
},
|
||||
err: errMissingSidecar,
|
||||
},
|
||||
{
|
||||
name: "commitments mismatch - different bytes",
|
||||
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
|
||||
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
|
||||
entry.scs[5].KzgCommitment = []byte("nope")
|
||||
return entry, commits, expected
|
||||
},
|
||||
err: errCommitmentMismatch,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
entry, commits, expected := c.setup(t)
|
||||
// first (root) argument doesn't matter, it is just for logs
|
||||
got, err := entry.filter([32]byte{}, commits)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(expected), len(got))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,9 +4,10 @@ go_library(
    name = "go_default_library",
    srcs = [
        "blob.go",
        "ephemeral.go",
        "cache.go",
        "log.go",
        "metrics.go",
        "mock.go",
        "pruner.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
@@ -33,6 +34,7 @@ go_test(
    name = "go_default_test",
    srcs = [
        "blob_test.go",
        "cache_test.go",
        "pruner_test.go",
    ],
    embed = [":go_default_library"],
@@ -1,7 +1,9 @@
package filesystem

import (
    "context"
    "fmt"
    "math"
    "os"
    "path"
    "strconv"
@@ -103,12 +105,29 @@ func (bs *BlobStorage) WarmCache() {
        return
    }
    go func() {
        if err := bs.pruner.prune(0); err != nil {
        start := time.Now()
        if err := bs.pruner.warmCache(); err != nil {
            log.WithError(err).Error("Error encountered while warming up blob pruner cache")
        }
        log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete.")
    }()
}

// ErrBlobStorageSummarizerUnavailable is a sentinel error returned when there is no pruner/cache available.
// This should be used by code that optionally uses the summarizer to optimize rpc requests. Being able to
// fallback when there is no summarizer allows client code to avoid test complexity where the summarizer doesn't matter.
var ErrBlobStorageSummarizerUnavailable = errors.New("BlobStorage not initialized with a pruner or cache")

// WaitForSummarizer blocks until the BlobStorageSummarizer is ready to use.
// BlobStorageSummarizer is not ready immediately on node startup because it needs to sample the blob filesystem to
// determine which blobs are available.
func (bs *BlobStorage) WaitForSummarizer(ctx context.Context) (BlobStorageSummarizer, error) {
    if bs == nil || bs.pruner == nil {
        return nil, ErrBlobStorageSummarizerUnavailable
    }
    return bs.pruner.waitForCache(ctx)
}

// Save saves blobs given a list of sidecars.
func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
    startTime := time.Now()
@@ -199,6 +218,101 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
    return nil
}

// SaveDataColumn saves a data column to our local filesystem.
func (bs *BlobStorage) SaveDataColumn(column blocks.VerifiedRODataColumn) error {
    startTime := time.Now()
    fname := namerForDataColumn(column)
    sszPath := fname.path()
    exists, err := afero.Exists(bs.fs, sszPath)
    if err != nil {
        return err
    }
    if exists {
        log.Debug("Ignoring a duplicate data column sidecar save attempt")
        return nil
    }
    if bs.pruner != nil {
        hRoot, err := column.SignedBlockHeader.Header.HashTreeRoot()
        if err != nil {
            return err
        }
        if err := bs.pruner.notify(hRoot, column.SignedBlockHeader.Header.Slot, column.ColumnIndex); err != nil {
            return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", hRoot)
        }
    }

    // Serialize the ethpb.DataColumnSidecar to binary data using SSZ.
    sidecarData, err := column.MarshalSSZ()
    if err != nil {
        return errors.Wrap(err, "failed to serialize sidecar data")
    } else if len(sidecarData) == 0 {
        return errSidecarEmptySSZData
    }

    if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
        return err
    }
    partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))

    partialMoved := false
    // Ensure the partial file is deleted.
    defer func() {
        if partialMoved {
            return
        }
        // It's expected to error if the save is successful.
        err = bs.fs.Remove(partPath)
        if err == nil {
            log.WithFields(logrus.Fields{
                "partPath": partPath,
            }).Debugf("Removed partial file")
        }
    }()

    // Create a partial file and write the serialized data to it.
    partialFile, err := bs.fs.Create(partPath)
    if err != nil {
        return errors.Wrap(err, "failed to create partial file")
    }

    n, err := partialFile.Write(sidecarData)
    if err != nil {
        closeErr := partialFile.Close()
        if closeErr != nil {
            return closeErr
        }
        return errors.Wrap(err, "failed to write to partial file")
    }
    if bs.fsync {
        if err := partialFile.Sync(); err != nil {
            return err
        }
    }

    if err := partialFile.Close(); err != nil {
        return err
    }

    if n != len(sidecarData) {
        return fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
    }

    if n == 0 {
        return errEmptyBlobWritten
    }

    // Atomically rename the partial file to its final name.
    err = bs.fs.Rename(partPath, sszPath)
    if err != nil {
        return errors.Wrap(err, "failed to rename partial file to final name")
    }
    partialMoved = true
    // TODO: Use new metrics for data columns
    blobsWrittenCounter.Inc()
    blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
    return nil
}

// Get retrieves a single BlobSidecar by its root and index.
// Since BlobStorage only writes blobs that have undergone full verification, the return
// value is always a VerifiedROBlob.
@@ -224,6 +338,20 @@ func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, er
    return verification.BlobSidecarNoop(ro)
}

// GetColumn retrieves a single DataColumnSidecar by its root and index.
func (bs *BlobStorage) GetColumn(root [32]byte, idx uint64) (*ethpb.DataColumnSidecar, error) {
    expected := blobNamer{root: root, index: idx}
    encoded, err := afero.ReadFile(bs.fs, expected.path())
    if err != nil {
        return nil, err
    }
    s := &ethpb.DataColumnSidecar{}
    if err := s.UnmarshalSSZ(encoded); err != nil {
        return nil, err
    }
    return s, nil
}

// Remove removes all blobs for a given root.
func (bs *BlobStorage) Remove(root [32]byte) error {
    rootDir := blobNamer{root: root}.dir()
@@ -267,6 +395,41 @@ func (bs *BlobStorage) Indices(root [32]byte) ([fieldparams.MaxBlobsPerBlock]boo
    return mask, nil
}

// ColumnIndices retrieves the stored column indexes from our filesystem.
func (bs *BlobStorage) ColumnIndices(root [32]byte) ([fieldparams.NumberOfColumns]bool, error) {
    var mask [fieldparams.NumberOfColumns]bool
    rootDir := blobNamer{root: root}.dir()
    entries, err := afero.ReadDir(bs.fs, rootDir)
    if err != nil {
        if os.IsNotExist(err) {
            return mask, nil
        }
        return mask, err
    }
    for i := range entries {
        if entries[i].IsDir() {
            continue
        }
        name := entries[i].Name()
        if !strings.HasSuffix(name, sszExt) {
            continue
        }
        parts := strings.Split(name, ".")
        if len(parts) != 2 {
            continue
        }
        u, err := strconv.ParseUint(parts[0], 10, 64)
        if err != nil {
            return mask, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
        }
        if u >= fieldparams.NumberOfColumns {
            return mask, errIndexOutOfBounds
        }
        mask[u] = true
    }
    return mask, nil
}

// Clear deletes all files on the filesystem.
func (bs *BlobStorage) Clear() error {
    dirs, err := listDir(bs.fs, ".")
@@ -281,6 +444,15 @@ func (bs *BlobStorage) Clear() error {
    return nil
}

// WithinRetentionPeriod checks if the requested epoch is within the blob retention period.
func (bs *BlobStorage) WithinRetentionPeriod(requested, current primitives.Epoch) bool {
    if requested > math.MaxUint64-bs.retentionEpochs {
        // If there is an overflow, then the retention period was set to an extremely large number.
        return true
    }
    return requested+bs.retentionEpochs >= current
}

type blobNamer struct {
    root  [32]byte
    index uint64
@@ -290,6 +462,10 @@ func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
    return blobNamer{root: sc.BlockRoot(), index: sc.Index}
}

func namerForDataColumn(col blocks.VerifiedRODataColumn) blobNamer {
    return blobNamer{root: col.BlockRoot(), index: col.ColumnIndex}
}

func (p blobNamer) dir() string {
    return rootString(p.root)
}
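A short illustration of the retention arithmetic used by WithinRetentionPeriod above, written as a test sketch in the same package; it mirrors the boundary cases exercised by TestConfig_WithinRetentionPeriod elsewhere in this change and is not part of the branch itself.

package filesystem

import "testing"

// TestRetentionBoundarySketch: an epoch stays retained while requested+retentionEpochs >= current.
func TestRetentionBoundarySketch(t *testing.T) {
    bs := &BlobStorage{retentionEpochs: 16}
    if !bs.WithinRetentionPeriod(0, 16) { // exactly on the boundary: still retained
        t.Fatal("epoch 0 should still be retained at current epoch 16")
    }
    if bs.WithinRetentionPeriod(0, 17) { // one epoch past the boundary: prunable
        t.Fatal("epoch 0 should be outside the retention period at epoch 17")
    }
}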
@@ -2,6 +2,7 @@ package filesystem
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
@@ -24,8 +25,7 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("no error for duplicate", func(t *testing.T) {
|
||||
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
|
||||
require.NoError(t, err)
|
||||
fs, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
existingSidecar := testSidecars[0]
|
||||
|
||||
blobPath := namerForSidecar(existingSidecar).path()
|
||||
@@ -129,8 +129,7 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
|
||||
// pollUntil polls a condition function until it returns true or a timeout is reached.
|
||||
|
||||
func TestBlobIndicesBounds(t *testing.T) {
|
||||
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
|
||||
require.NoError(t, err)
|
||||
fs, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
root := [32]byte{}
|
||||
|
||||
okIdx := uint64(fieldparams.MaxBlobsPerBlock - 1)
|
||||
@@ -161,8 +160,7 @@ func writeFakeSSZ(t *testing.T, fs afero.Fs, root [32]byte, idx uint64) {
|
||||
|
||||
func TestBlobStoragePrune(t *testing.T) {
|
||||
currentSlot := primitives.Slot(200000)
|
||||
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
|
||||
require.NoError(t, err)
|
||||
fs, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
|
||||
t.Run("PruneOne", func(t *testing.T) {
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 300, fieldparams.MaxBlobsPerBlock)
|
||||
@@ -218,8 +216,7 @@ func TestBlobStoragePrune(t *testing.T) {
|
||||
|
||||
func BenchmarkPruning(b *testing.B) {
|
||||
var t *testing.T
|
||||
_, bs, err := NewEphemeralBlobStorageWithFs(t)
|
||||
require.NoError(t, err)
|
||||
_, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
|
||||
blockQty := 10000
|
||||
currentSlot := primitives.Slot(150000)
|
||||
@@ -248,3 +245,50 @@ func TestNewBlobStorage(t *testing.T) {
|
||||
_, err = NewBlobStorage(WithBasePath(path.Join(t.TempDir(), "good")))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestConfig_WithinRetentionPeriod(t *testing.T) {
|
||||
retention := primitives.Epoch(16)
|
||||
storage := &BlobStorage{retentionEpochs: retention}
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
requested primitives.Epoch
|
||||
current primitives.Epoch
|
||||
within bool
|
||||
}{
|
||||
{
|
||||
name: "before",
|
||||
requested: 0,
|
||||
current: retention + 1,
|
||||
within: false,
|
||||
},
|
||||
{
|
||||
name: "same",
|
||||
requested: 0,
|
||||
current: 0,
|
||||
within: true,
|
||||
},
|
||||
{
|
||||
name: "boundary",
|
||||
requested: 0,
|
||||
current: retention,
|
||||
within: true,
|
||||
},
|
||||
{
|
||||
name: "one less",
|
||||
requested: retention - 1,
|
||||
current: retention,
|
||||
within: true,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
require.Equal(t, c.within, storage.WithinRetentionPeriod(c.requested, c.current))
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("overflow", func(t *testing.T) {
|
||||
storage := &BlobStorage{retentionEpochs: math.MaxUint64}
|
||||
require.Equal(t, true, storage.WithinRetentionPeriod(1, 1))
|
||||
})
|
||||
}
|
||||
|
||||
119  beacon-chain/db/filesystem/cache.go  Normal file
@@ -0,0 +1,119 @@
package filesystem

import (
    "sync"

    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
type blobIndexMask [fieldparams.MaxBlobsPerBlock]bool

// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
type BlobStorageSummary struct {
    slot primitives.Slot
    mask blobIndexMask
}

// HasIndex returns true if the BlobSidecar at the given index is available in the filesystem.
func (s BlobStorageSummary) HasIndex(idx uint64) bool {
    // Protect from panic, but assume callers are sophisticated enough to not need an error telling them they have an invalid idx.
    if idx >= fieldparams.MaxBlobsPerBlock {
        return false
    }
    return s.mask[idx]
}

// AllAvailable returns true if we have all blobs for all indices from 0 to count-1.
func (s BlobStorageSummary) AllAvailable(count int) bool {
    if count > fieldparams.MaxBlobsPerBlock {
        return false
    }
    for i := 0; i < count; i++ {
        if !s.mask[i] {
            return false
        }
    }
    return true
}

// BlobStorageSummarizer can be used to receive a summary of metadata about blobs on disk for a given root.
// The BlobStorageSummary can be used to check which indices (if any) are available for a given block by root.
type BlobStorageSummarizer interface {
    Summary(root [32]byte) BlobStorageSummary
}

type blobStorageCache struct {
    mu     sync.RWMutex
    nBlobs float64
    cache  map[string]BlobStorageSummary
}

var _ BlobStorageSummarizer = &blobStorageCache{}

func newBlobStorageCache() *blobStorageCache {
    return &blobStorageCache{
        cache: make(map[string]BlobStorageSummary, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
    }
}

// Summary returns the BlobStorageSummary for `root`. The BlobStorageSummary can be used to check for the presence of
// BlobSidecars based on Index.
func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
    k := rootString(root)
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.cache[k]
}

func (s *blobStorageCache) ensure(key string, slot primitives.Slot, idx uint64) error {
    if idx >= fieldparams.MaxBlobsPerBlock {
        return errIndexOutOfBounds
    }
    s.mu.Lock()
    defer s.mu.Unlock()
    v := s.cache[key]
    v.slot = slot
    if !v.mask[idx] {
        s.updateMetrics(1)
    }
    v.mask[idx] = true
    s.cache[key] = v
    return nil
}

func (s *blobStorageCache) slot(key string) (primitives.Slot, bool) {
    s.mu.RLock()
    defer s.mu.RUnlock()
    v, ok := s.cache[key]
    if !ok {
        return 0, false
    }
    return v.slot, ok
}

func (s *blobStorageCache) evict(key string) {
    var deleted float64
    s.mu.Lock()
    v, ok := s.cache[key]
    if ok {
        for i := range v.mask {
            if v.mask[i] {
                deleted += 1
            }
        }
    }
    delete(s.cache, key)
    s.mu.Unlock()
    if deleted > 0 {
        s.updateMetrics(-deleted)
    }
}

func (s *blobStorageCache) updateMetrics(delta float64) {
    s.nBlobs += delta
    blobDiskCount.Set(s.nBlobs)
    blobDiskSize.Set(s.nBlobs * bytesPerSidecar)
}
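A sketch of how a caller reads the summary API defined above: HasIndex answers per-index presence, while AllAvailable answers whether every blob a block commits to is already on disk. It is written as a same-package test and relies on the NewMockBlobStorageSummarizer helper introduced later in this change; it is illustrative, not part of the branch.

package filesystem

import "testing"

func TestSummaryUsageSketch(t *testing.T) {
    root := [32]byte{0x01}
    // Mark indices 0..2 as present on disk for this root.
    sumz := NewMockBlobStorageSummarizer(t, map[[32]byte][]int{root: {0, 1, 2}})
    sum := sumz.Summary(root)

    if !sum.HasIndex(1) {
        t.Fatal("index 1 was marked on disk")
    }
    if !sum.AllAvailable(3) {
        t.Fatal("indices 0..2 are all on disk, so a block with 3 commitments is fully available")
    }
    if sum.AllAvailable(4) {
        t.Fatal("index 3 is missing, so 4 commitments cannot all be available")
    }
}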
150
beacon-chain/db/filesystem/cache_test.go
Normal file
150
beacon-chain/db/filesystem/cache_test.go
Normal file
@@ -0,0 +1,150 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestSlotByRoot_Summary(t *testing.T) {
|
||||
var noneSet, allSet, firstSet, lastSet, oneSet blobIndexMask
|
||||
firstSet[0] = true
|
||||
lastSet[len(lastSet)-1] = true
|
||||
oneSet[1] = true
|
||||
for i := range allSet {
|
||||
allSet[i] = true
|
||||
}
|
||||
cases := []struct {
|
||||
name string
|
||||
root [32]byte
|
||||
expected *blobIndexMask
|
||||
}{
|
||||
{
|
||||
name: "not found",
|
||||
},
|
||||
{
|
||||
name: "none set",
|
||||
expected: &noneSet,
|
||||
},
|
||||
{
|
||||
name: "index 1 set",
|
||||
expected: &oneSet,
|
||||
},
|
||||
{
|
||||
name: "all set",
|
||||
expected: &allSet,
|
||||
},
|
||||
{
|
||||
name: "first set",
|
||||
expected: &firstSet,
|
||||
},
|
||||
{
|
||||
name: "last set",
|
||||
expected: &lastSet,
|
||||
},
|
||||
}
|
||||
sc := newBlobStorageCache()
|
||||
for _, c := range cases {
|
||||
if c.expected != nil {
|
||||
key := rootString(bytesutil.ToBytes32([]byte(c.name)))
|
||||
sc.cache[key] = BlobStorageSummary{slot: 0, mask: *c.expected}
|
||||
}
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
key := bytesutil.ToBytes32([]byte(c.name))
|
||||
sum := sc.Summary(key)
|
||||
for i := range c.expected {
|
||||
ui := uint64(i)
|
||||
if c.expected == nil {
|
||||
require.Equal(t, false, sum.HasIndex(ui))
|
||||
} else {
|
||||
require.Equal(t, c.expected[i], sum.HasIndex(ui))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllAvailable(t *testing.T) {
|
||||
idxUpTo := func(u int) []int {
|
||||
r := make([]int, u)
|
||||
for i := range r {
|
||||
r[i] = i
|
||||
}
|
||||
return r
|
||||
}
|
||||
require.DeepEqual(t, []int{}, idxUpTo(0))
|
||||
require.DeepEqual(t, []int{0}, idxUpTo(1))
|
||||
require.DeepEqual(t, []int{0, 1, 2, 3, 4, 5}, idxUpTo(6))
|
||||
cases := []struct {
|
||||
name string
|
||||
idxSet []int
|
||||
count int
|
||||
aa bool
|
||||
}{
|
||||
{
|
||||
// If there are no blobs committed, then all the committed blobs are available.
|
||||
name: "none in idx, 0 arg",
|
||||
count: 0,
|
||||
aa: true,
|
||||
},
|
||||
{
|
||||
name: "none in idx, 1 arg",
|
||||
count: 1,
|
||||
aa: false,
|
||||
},
|
||||
{
|
||||
name: "first in idx, 1 arg",
|
||||
idxSet: []int{0},
|
||||
count: 1,
|
||||
aa: true,
|
||||
},
|
||||
{
|
||||
name: "second in idx, 1 arg",
|
||||
idxSet: []int{1},
|
||||
count: 1,
|
||||
aa: false,
|
||||
},
|
||||
{
|
||||
name: "first missing, 2 arg",
|
||||
idxSet: []int{1},
|
||||
count: 2,
|
||||
aa: false,
|
||||
},
|
||||
{
|
||||
name: "all missing, 1 arg",
|
||||
count: 6,
|
||||
aa: false,
|
||||
},
|
||||
{
|
||||
name: "out of bound is safe",
|
||||
count: fieldparams.MaxBlobsPerBlock + 1,
|
||||
aa: false,
|
||||
},
|
||||
{
|
||||
name: "max present",
|
||||
count: fieldparams.MaxBlobsPerBlock,
|
||||
idxSet: idxUpTo(fieldparams.MaxBlobsPerBlock),
|
||||
aa: true,
|
||||
},
|
||||
{
|
||||
name: "one present",
|
||||
count: 1,
|
||||
idxSet: idxUpTo(1),
|
||||
aa: true,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
var mask blobIndexMask
|
||||
for _, idx := range c.idxSet {
|
||||
mask[idx] = true
|
||||
}
|
||||
sum := BlobStorageSummary{mask: mask}
|
||||
require.Equal(t, c.aa, sum.AllAvailable(c.count))
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
// improving test performance and simplifying cleanup.
|
||||
func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
|
||||
fs := afero.NewMemMapFs()
|
||||
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
|
||||
if err != nil {
|
||||
t.Fatal("test setup issue", err)
|
||||
}
|
||||
@@ -21,13 +21,13 @@ func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
|
||||
|
||||
// NewEphemeralBlobStorageWithFs can be used by tests that want access to the virtual filesystem
|
||||
// in order to interact with it outside the parameters of the BlobStorage api.
|
||||
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage, error) {
|
||||
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage) {
|
||||
fs := afero.NewMemMapFs()
|
||||
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
|
||||
if err != nil {
|
||||
t.Fatal("test setup issue", err)
|
||||
}
|
||||
return fs, &BlobStorage{fs: fs, pruner: pruner}, nil
|
||||
return fs, &BlobStorage{fs: fs, pruner: pruner}
|
||||
}
|
||||
|
||||
type BlobMocker struct {
|
||||
@@ -61,3 +61,15 @@ func NewEphemeralBlobStorageWithMocker(_ testing.TB) (*BlobMocker, *BlobStorage)
|
||||
bs := &BlobStorage{fs: fs}
|
||||
return &BlobMocker{fs: fs, bs: bs}, bs
|
||||
}
|
||||
|
||||
func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStorageSummarizer {
|
||||
c := newBlobStorageCache()
|
||||
for k, v := range set {
|
||||
for i := range v {
|
||||
if err := c.ensure(rootString(k), 0, uint64(v[i])); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return c
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"path"
|
||||
@@ -12,7 +13,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
@@ -32,22 +32,39 @@ type blobPruner struct {
|
||||
sync.Mutex
|
||||
prunedBefore atomic.Uint64
|
||||
windowSize primitives.Slot
|
||||
slotMap *slotForRoot
|
||||
cache *blobStorageCache
|
||||
cacheReady chan struct{}
|
||||
warmed bool
|
||||
fs afero.Fs
|
||||
}
|
||||
|
||||
func newBlobPruner(fs afero.Fs, retain primitives.Epoch) (*blobPruner, error) {
|
||||
type prunerOpt func(*blobPruner) error
|
||||
|
||||
func withWarmedCache() prunerOpt {
|
||||
return func(p *blobPruner) error {
|
||||
return p.warmCache()
|
||||
}
|
||||
}
|
||||
|
||||
func newBlobPruner(fs afero.Fs, retain primitives.Epoch, opts ...prunerOpt) (*blobPruner, error) {
|
||||
r, err := slots.EpochStart(retain + retentionBuffer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not set retentionSlots")
|
||||
}
|
||||
return &blobPruner{fs: fs, windowSize: r, slotMap: newSlotForRoot()}, nil
|
||||
cw := make(chan struct{})
|
||||
p := &blobPruner{fs: fs, windowSize: r, cache: newBlobStorageCache(), cacheReady: cw}
|
||||
for _, o := range opts {
|
||||
if err := o(p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// notify updates the pruner's view of root->blob mappings. This allows the pruner to build a cache
|
||||
// of root->slot mappings and decide when to evict old blobs based on the age of present blobs.
|
||||
func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) error {
|
||||
if err := p.slotMap.ensure(rootString(root), latest, idx); err != nil {
|
||||
if err := p.cache.ensure(rootString(root), latest, idx); err != nil {
|
||||
return err
|
||||
}
|
||||
pruned := uint64(windowMin(latest, p.windowSize))
|
||||
@@ -55,6 +72,8 @@ func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) e
|
||||
return nil
|
||||
}
|
||||
go func() {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
if err := p.prune(primitives.Slot(pruned)); err != nil {
|
||||
log.WithError(err).Errorf("Failed to prune blobs from slot %d", latest)
|
||||
}
|
||||
@@ -62,7 +81,7 @@ func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) e
|
||||
return nil
|
||||
}
|
||||
|
||||
func windowMin(latest primitives.Slot, offset primitives.Slot) primitives.Slot {
|
||||
func windowMin(latest, offset primitives.Slot) primitives.Slot {
|
||||
// Safely compute the first slot in the epoch for the latest slot
|
||||
latest = latest - latest%params.BeaconConfig().SlotsPerEpoch
|
||||
if latest < offset {
|
||||
@@ -71,12 +90,32 @@ func windowMin(latest primitives.Slot, offset primitives.Slot) primitives.Slot {
|
||||
return latest - offset
|
||||
}
|
||||
|
||||
func (p *blobPruner) warmCache() error {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
if err := p.prune(0); err != nil {
|
||||
return err
|
||||
}
|
||||
if !p.warmed {
|
||||
p.warmed = true
|
||||
close(p.cacheReady)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *blobPruner) waitForCache(ctx context.Context) (*blobStorageCache, error) {
|
||||
select {
|
||||
case <-p.cacheReady:
|
||||
return p.cache, nil
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// Prune prunes blobs in the base directory based on the retention epoch.
|
||||
// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs).
|
||||
// This is so that we keep a slight buffer and blobs are deleted after n+2 epochs.
|
||||
func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
start := time.Now()
|
||||
totalPruned, totalErr := 0, 0
|
||||
// Customize logging/metrics behavior for the initial cache warmup when slot=0.
|
||||
@@ -122,7 +161,7 @@ func shouldRetain(slot, pruneBefore primitives.Slot) bool {
|
||||
|
||||
func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int, error) {
|
||||
root := rootFromDir(dir)
|
||||
slot, slotCached := p.slotMap.slot(root)
|
||||
slot, slotCached := p.cache.slot(root)
|
||||
// Return early if the slot is cached and doesn't need pruning.
|
||||
if slotCached && shouldRetain(slot, pruneBefore) {
|
||||
return 0, nil
|
||||
@@ -151,7 +190,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "index could not be determined for blob file %s", scFiles[i])
|
||||
}
|
||||
if err := p.slotMap.ensure(root, slot, idx); err != nil {
|
||||
if err := p.cache.ensure(root, slot, idx); err != nil {
|
||||
return 0, errors.Wrapf(err, "could not update prune cache for blob file %s", scFiles[i])
|
||||
}
|
||||
}
|
||||
@@ -179,7 +218,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
|
||||
return removed, errors.Wrapf(err, "unable to remove blob directory %s", dir)
|
||||
}
|
||||
|
||||
p.slotMap.evict(rootFromDir(dir))
|
||||
p.cache.evict(rootFromDir(dir))
|
||||
return len(scFiles), nil
|
||||
}
|
||||
|
||||
@@ -269,71 +308,3 @@ func filterSsz(s string) bool {
|
||||
func filterPart(s string) bool {
|
||||
return filepath.Ext(s) == dotPartExt
|
||||
}
|
||||
|
||||
func newSlotForRoot() *slotForRoot {
|
||||
return &slotForRoot{
|
||||
cache: make(map[string]*slotCacheEntry, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
|
||||
}
|
||||
}
|
||||
|
||||
type slotCacheEntry struct {
|
||||
slot primitives.Slot
|
||||
mask [fieldparams.MaxBlobsPerBlock]bool
|
||||
}
|
||||
|
||||
type slotForRoot struct {
|
||||
sync.RWMutex
|
||||
nBlobs float64
|
||||
cache map[string]*slotCacheEntry
|
||||
}
|
||||
|
||||
func (s *slotForRoot) updateMetrics(delta float64) {
|
||||
s.nBlobs += delta
|
||||
blobDiskCount.Set(s.nBlobs)
|
||||
blobDiskSize.Set(s.nBlobs * bytesPerSidecar)
|
||||
}
|
||||
|
||||
func (s *slotForRoot) ensure(key string, slot primitives.Slot, idx uint64) error {
|
||||
if idx >= fieldparams.MaxBlobsPerBlock {
|
||||
return errIndexOutOfBounds
|
||||
}
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
v, ok := s.cache[key]
|
||||
if !ok {
|
||||
v = &slotCacheEntry{}
|
||||
}
|
||||
v.slot = slot
|
||||
if !v.mask[idx] {
|
||||
s.updateMetrics(1)
|
||||
}
|
||||
v.mask[idx] = true
|
||||
s.cache[key] = v
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *slotForRoot) slot(key string) (primitives.Slot, bool) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
v, ok := s.cache[key]
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
return v.slot, ok
|
||||
}
|
||||
|
||||
func (s *slotForRoot) evict(key string) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
v, ok := s.cache[key]
|
||||
var deleted float64
|
||||
if ok {
|
||||
for i := range v.mask {
|
||||
if v.mask[i] {
|
||||
deleted += 1
|
||||
}
|
||||
}
|
||||
s.updateMetrics(-deleted)
|
||||
}
|
||||
delete(s.cache, key)
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ func TestTryPruneDir_CachedNotExpired(t *testing.T) {
|
||||
root := fmt.Sprintf("%#x", sc.BlockRoot())
|
||||
// This slot is right on the edge of what would need to be pruned, so by adding it to the cache and
|
||||
// skipping any other test setup, we can be certain the hot cache path never touches the filesystem.
|
||||
require.NoError(t, pr.slotMap.ensure(root, sc.Slot(), 0))
|
||||
require.NoError(t, pr.cache.ensure(root, sc.Slot(), 0))
|
||||
pruned, err := pr.tryPruneDir(root, pr.windowSize)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, pruned)
|
||||
@@ -45,14 +45,13 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
root := fmt.Sprintf("%#x", sc.BlockRoot())
|
||||
require.NoError(t, fs.Mkdir(root, directoryPermissions)) // make empty directory
|
||||
require.NoError(t, pr.slotMap.ensure(root, sc.Slot(), 0))
|
||||
require.NoError(t, pr.cache.ensure(root, sc.Slot(), 0))
|
||||
pruned, err := pr.tryPruneDir(root, slot+1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, pruned)
|
||||
})
|
||||
t.Run("blobs to delete", func(t *testing.T) {
|
||||
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
|
||||
require.NoError(t, err)
|
||||
fs, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
var slot primitives.Slot = 0
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
|
||||
scs, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
@@ -63,7 +62,7 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
|
||||
|
||||
// check that the root->slot is cached
|
||||
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
|
||||
cs, cok := bs.pruner.slotMap.slot(root)
|
||||
cs, cok := bs.pruner.cache.slot(root)
|
||||
require.Equal(t, true, cok)
|
||||
require.Equal(t, slot, cs)
|
||||
|
||||
@@ -83,8 +82,7 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
|
||||
|
||||
func TestTryPruneDir_SlotFromFile(t *testing.T) {
|
||||
t.Run("expired blobs deleted", func(t *testing.T) {
|
||||
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
|
||||
require.NoError(t, err)
|
||||
fs, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
var slot primitives.Slot = 0
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
|
||||
scs, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
@@ -95,12 +93,12 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
|
||||
|
||||
// check that the root->slot is cached
|
||||
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
|
||||
cs, ok := bs.pruner.slotMap.slot(root)
|
||||
cs, ok := bs.pruner.cache.slot(root)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, slot, cs)
|
||||
// evict it from the cache so that we trigger the file read path
|
||||
bs.pruner.slotMap.evict(root)
|
||||
_, ok = bs.pruner.slotMap.slot(root)
|
||||
bs.pruner.cache.evict(root)
|
||||
_, ok = bs.pruner.cache.slot(root)
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
// ensure that we see the saved files in the filesystem
|
||||
@@ -116,10 +114,9 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
|
||||
require.Equal(t, 0, len(files))
|
||||
})
|
||||
t.Run("not expired, intact", func(t *testing.T) {
|
||||
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
|
||||
require.NoError(t, err)
|
||||
fs, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
// Set slot equal to the window size, so it should be retained.
|
||||
var slot primitives.Slot = bs.pruner.windowSize
|
||||
slot := bs.pruner.windowSize
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
|
||||
scs, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
require.NoError(t, err)
|
||||
@@ -129,8 +126,8 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
|
||||
|
||||
// Evict slot mapping from the cache so that we trigger the file read path.
|
||||
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
|
||||
bs.pruner.slotMap.evict(root)
|
||||
_, ok := bs.pruner.slotMap.slot(root)
|
||||
bs.pruner.cache.evict(root)
|
||||
_, ok := bs.pruner.cache.slot(root)
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
// Ensure that we see the saved files in the filesystem.
|
||||
@@ -184,8 +181,7 @@ func TestSlotFromFile(t *testing.T) {
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
|
||||
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
|
||||
require.NoError(t, err)
|
||||
fs, bs := NewEphemeralBlobStorageWithFs(t)
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
|
||||
sc, err := verification.BlobSidecarNoop(sidecars[0])
|
||||
require.NoError(t, err)
|
||||
@@ -243,10 +239,8 @@ func TestListDir(t *testing.T) {
|
||||
}
|
||||
blobWithSszAndTmp := dirFiles{name: "0x1234567890", isDir: true,
|
||||
children: []dirFiles{{name: "5.ssz"}, {name: "0.part"}}}
|
||||
fsLayout.children = append(fsLayout.children, notABlob)
|
||||
fsLayout.children = append(fsLayout.children, childlessBlob)
|
||||
fsLayout.children = append(fsLayout.children, blobWithSsz)
|
||||
fsLayout.children = append(fsLayout.children, blobWithSszAndTmp)
|
||||
fsLayout.children = append(fsLayout.children,
|
||||
notABlob, childlessBlob, blobWithSsz, blobWithSszAndTmp)
|
||||
|
||||
topChildren := make([]string, len(fsLayout.children))
|
||||
for i := range fsLayout.children {
|
||||
@@ -282,10 +276,7 @@ func TestListDir(t *testing.T) {
|
||||
dirPath: ".",
|
||||
expected: []string{notABlob.name},
|
||||
filter: func(s string) bool {
|
||||
if s == notABlob.name {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return s == notABlob.name
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -118,7 +118,7 @@ type HeadAccessDatabase interface {
|
||||
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.
|
||||
type SlasherDatabase interface {
|
||||
io.Closer
|
||||
SaveLastEpochsWrittenForValidators(
|
||||
SaveLastEpochWrittenForValidators(
|
||||
ctx context.Context, epochByValidator map[primitives.ValidatorIndex]primitives.Epoch,
|
||||
) error
|
||||
SaveAttestationRecordsForValidators(
|
||||
|
||||
@@ -20,6 +20,7 @@ go_library(
|
||||
"migration.go",
|
||||
"migration_archived_index.go",
|
||||
"migration_block_slot_index.go",
|
||||
"migration_finalized_parent.go",
|
||||
"migration_state_validators.go",
|
||||
"schema.go",
|
||||
"state.go",
|
||||
|
||||
@@ -224,7 +224,7 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
|
||||
if b := bkt.Get(root[:]); b != nil {
|
||||
return ErrDeleteFinalized
|
||||
return ErrDeleteJustifiedAndFinalized
|
||||
}
|
||||
|
||||
if err := tx.Bucket(blocksBucket).Delete(root[:]); err != nil {
|
||||
|
||||
@@ -289,7 +289,7 @@ func TestStore_DeleteBlock(t *testing.T) {
|
||||
require.Equal(t, b, nil)
|
||||
require.Equal(t, false, db.HasStateSummary(ctx, root2))
|
||||
|
||||
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteFinalized)
|
||||
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
|
||||
}
|
||||
|
||||
func TestStore_DeleteJustifiedBlock(t *testing.T) {
|
||||
@@ -309,7 +309,7 @@ func TestStore_DeleteJustifiedBlock(t *testing.T) {
|
||||
require.NoError(t, db.SaveBlock(ctx, blk))
|
||||
require.NoError(t, db.SaveState(ctx, st, root))
|
||||
require.NoError(t, db.SaveJustifiedCheckpoint(ctx, cp))
|
||||
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteFinalized)
|
||||
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
|
||||
}
|
||||
|
||||
func TestStore_DeleteFinalizedBlock(t *testing.T) {
|
||||
@@ -329,7 +329,7 @@ func TestStore_DeleteFinalizedBlock(t *testing.T) {
|
||||
require.NoError(t, db.SaveState(ctx, st, root))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
|
||||
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
|
||||
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteFinalized)
|
||||
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
|
||||
}
|
||||
func TestStore_GenesisBlock(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
@@ -2,8 +2,8 @@ package kv
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
// ErrDeleteFinalized is raised when we attempt to delete a finalized block/state
|
||||
var ErrDeleteFinalized = errors.New("cannot delete finalized block or state")
|
||||
// ErrDeleteJustifiedAndFinalized is raised when we attempt to delete a finalized block/state
|
||||
var ErrDeleteJustifiedAndFinalized = errors.New("cannot delete finalized block or state")
|
||||
|
||||
// ErrNotFound can be used directly, or as a wrapped DBError, whenever a db method needs to
|
||||
// indicate that a value couldn't be found.
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -28,76 +29,72 @@ var containerFinalizedButNotCanonical = []byte("recent block needs reindexing to
|
||||
// beacon block chain using the finalized root alone as this would exclude all other blocks in the
|
||||
// finalized epoch from being indexed as "final and canonical".
|
||||
//
|
||||
// The main part of the algorithm traverses parent->child block relationships in the
|
||||
// `blockParentRootIndicesBucket` bucket to find the path between the last finalized checkpoint
|
||||
// and the current finalized checkpoint. It relies on the invariant that there is a unique path
|
||||
// between two finalized checkpoints.
|
||||
// The algorithm for building the index works as follows:
|
||||
// - De-index all finalized beacon block roots from previous_finalized_epoch to
|
||||
// new_finalized_epoch. (I.e. delete these roots from the index, to be re-indexed.)
|
||||
// - Build the canonical finalized chain by walking up the ancestry chain from the finalized block
|
||||
// root until a parent is found in the index, or the parent is genesis or the origin checkpoint.
|
||||
// - Add all block roots in the database where epoch(block.slot) == checkpoint.epoch.
|
||||
//
|
||||
// This method ensures that all blocks from the current finalized epoch are considered "final" while
|
||||
// maintaining only canonical and finalized blocks older than the current finalized epoch.
|
||||
func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, checkpoint *ethpb.Checkpoint) error {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.updateFinalizedBlockRoots")
|
||||
defer span.End()
|
||||
|
||||
finalizedBkt := tx.Bucket(finalizedBlockRootsIndexBucket)
|
||||
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
|
||||
|
||||
root := checkpoint.Root
|
||||
var previousRoot []byte
|
||||
genesisRoot := tx.Bucket(blocksBucket).Get(genesisBlockRootKey)
|
||||
initCheckpointRoot := tx.Bucket(blocksBucket).Get(originCheckpointBlockRootKey)
|
||||
|
||||
// De-index recent finalized block roots, to be re-indexed.
|
||||
previousFinalizedCheckpoint := ðpb.Checkpoint{}
|
||||
if b := finalizedBkt.Get(previousFinalizedCheckpointKey); b != nil {
|
||||
if b := bkt.Get(previousFinalizedCheckpointKey); b != nil {
|
||||
if err := decode(ctx, b, previousFinalizedCheckpoint); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Handle the case of checkpoint sync.
|
||||
if previousFinalizedCheckpoint.Root == nil && bytes.Equal(checkpoint.Root, tx.Bucket(blocksBucket).Get(originCheckpointBlockRootKey)) {
|
||||
container := ðpb.FinalizedBlockRootContainer{}
|
||||
enc, err := encode(ctx, container)
|
||||
blockRoots, err := s.BlockRoots(ctx, filters.NewFilter().
|
||||
SetStartEpoch(previousFinalizedCheckpoint.Epoch).
|
||||
SetEndEpoch(checkpoint.Epoch+1),
|
||||
)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
for _, root := range blockRoots {
|
||||
if err := bkt.Delete(root[:]); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Walk up the ancestry chain until we reach a block root present in the finalized block roots
|
||||
// index bucket or genesis block root.
|
||||
for {
|
||||
if bytes.Equal(root, genesisRoot) {
|
||||
break
|
||||
}
|
||||
|
||||
signedBlock, err := s.Block(ctx, bytesutil.ToBytes32(root))
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
if err = finalizedBkt.Put(checkpoint.Root, enc); err != nil {
|
||||
if err := blocks.BeaconBlockIsNil(signedBlock); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
return updatePrevFinalizedCheckpoint(ctx, span, finalizedBkt, checkpoint)
|
||||
}
|
||||
block := signedBlock.Block()
|
||||
|
||||
var finalized [][]byte
|
||||
if previousFinalizedCheckpoint.Root == nil {
|
||||
genesisRoot := tx.Bucket(blocksBucket).Get(genesisBlockRootKey)
|
||||
_, finalized = pathToFinalizedCheckpoint(ctx, [][]byte{genesisRoot}, checkpoint.Root, tx)
|
||||
} else {
|
||||
if err := updateChildOfPrevFinalizedCheckpoint(
|
||||
ctx,
|
||||
span,
|
||||
finalizedBkt,
|
||||
tx.Bucket(blockParentRootIndicesBucket), previousFinalizedCheckpoint.Root,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
_, finalized = pathToFinalizedCheckpoint(ctx, [][]byte{previousFinalizedCheckpoint.Root}, checkpoint.Root, tx)
|
||||
}
|
||||
|
||||
for i, r := range finalized {
|
||||
var container *ethpb.FinalizedBlockRootContainer
|
||||
switch i {
|
||||
case 0:
|
||||
container = ðpb.FinalizedBlockRootContainer{
|
||||
ParentRoot: previousFinalizedCheckpoint.Root,
|
||||
}
|
||||
if len(finalized) > 1 {
|
||||
container.ChildRoot = finalized[i+1]
|
||||
}
|
||||
case len(finalized) - 1:
|
||||
// We don't know the finalized child of the new finalized checkpoint.
|
||||
// It will be filled out in the next function call.
|
||||
container = ðpb.FinalizedBlockRootContainer{}
|
||||
if len(finalized) > 1 {
|
||||
container.ParentRoot = finalized[i-1]
|
||||
}
|
||||
default:
|
||||
container = ðpb.FinalizedBlockRootContainer{
|
||||
ParentRoot: finalized[i-1],
|
||||
ChildRoot: finalized[i+1],
|
||||
}
|
||||
parentRoot := block.ParentRoot()
|
||||
container := ðpb.FinalizedBlockRootContainer{
|
||||
ParentRoot: parentRoot[:],
|
||||
ChildRoot: previousRoot,
|
||||
}
|
||||
|
||||
enc, err := encode(ctx, container)
|
||||
@@ -105,13 +102,66 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
if err = finalizedBkt.Put(r, enc); err != nil {
|
||||
if err := bkt.Put(root, enc); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// breaking here allows the initial checkpoint root to be correctly inserted,
|
||||
// but stops the loop from trying to search for its parent.
|
||||
if bytes.Equal(root, initCheckpointRoot) {
|
||||
break
|
||||
}
|
||||
|
||||
// Found parent, loop exit condition.
|
||||
pr := block.ParentRoot()
|
||||
if parentBytes := bkt.Get(pr[:]); parentBytes != nil {
|
||||
parent := ðpb.FinalizedBlockRootContainer{}
|
||||
if err := decode(ctx, parentBytes, parent); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
parent.ChildRoot = root
|
||||
enc, err := encode(ctx, parent)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
if err := bkt.Put(pr[:], enc); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
break
|
||||
}
|
||||
previousRoot = root
|
||||
root = pr[:]
|
||||
}
|
||||
|
||||
// Upsert blocks from the current finalized epoch.
|
||||
roots, err := s.BlockRoots(ctx, filters.NewFilter().SetStartEpoch(checkpoint.Epoch).SetEndEpoch(checkpoint.Epoch+1))
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
for _, root := range roots {
|
||||
root := root[:]
|
||||
if bytes.Equal(root, checkpoint.Root) || bkt.Get(root) != nil {
|
||||
continue
|
||||
}
|
||||
if err := bkt.Put(root, containerFinalizedButNotCanonical); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return updatePrevFinalizedCheckpoint(ctx, span, finalizedBkt, checkpoint)
|
||||
// Update previous checkpoint
|
||||
enc, err := encode(ctx, checkpoint)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
return bkt.Put(previousFinalizedCheckpointKey, enc)
|
||||
}
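For intuition, here is a self-contained sketch (not Prysm code) of the shape the index takes after the indexing loop in updateFinalizedBlockRoots processes a finalized path r1 -> r2 -> r3 that extends a previous checkpoint p: every root stores its parent and child, and the newest root's child stays empty until the next finalization fills it in.

	package main

	import "fmt"

	// container mirrors the ParentRoot/ChildRoot pair kept per finalized root.
	type container struct{ parent, child string }

	func main() {
		prev := "p"                        // previously finalized checkpoint root
		path := []string{"r1", "r2", "r3"} // unique path up to the new checkpoint root
		index := map[string]container{}

		for i, r := range path {
			c := container{}
			if i == 0 {
				c.parent = prev // first block on the path links back to the old checkpoint
			} else {
				c.parent = path[i-1]
			}
			if i < len(path)-1 {
				c.child = path[i+1] // the newest entry's child is filled by the next update
			}
			index[r] = c
		}

		// The old checkpoint's own entry is then patched so its child points at r1,
		// which is what updateChildOfPrevFinalizedCheckpoint does against the bucket.
		fmt.Println(index)
	}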
|
||||
|
||||
// BackfillFinalizedIndex updates the finalized index for a contiguous chain of blocks that are the ancestors of the
|
||||
@@ -192,6 +242,8 @@ func (s *Store) BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBl
|
||||
|
||||
// IsFinalizedBlock returns true if the block root is present in the finalized block root index.
|
||||
// A beacon block root exists in this index if it is considered finalized and canonical.
|
||||
// Note: beacon blocks from the latest finalized epoch return true, whether or not they are
|
||||
// considered canonical in the "head view" of the beacon node.
|
||||
func (s *Store) IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.IsFinalizedBlock")
|
||||
defer span.End()
|
||||
@@ -244,53 +296,3 @@ func (s *Store) FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (in
|
||||
tracing.AnnotateError(span, err)
|
||||
return blk, err
|
||||
}
|
||||
|
||||
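// pathToFinalizedCheckpoint recursively follows the parent->child index starting from the given roots and
// reports whether a path to checkpointRoot exists, together with the ordered block roots on that path.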
func pathToFinalizedCheckpoint(ctx context.Context, roots [][]byte, checkpointRoot []byte, tx *bolt.Tx) (bool, [][]byte) {
|
||||
if len(roots) == 0 || (len(roots) == 1 && roots[0] == nil) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
for _, r := range roots {
|
||||
if bytes.Equal(r, checkpointRoot) {
|
||||
return true, [][]byte{r}
|
||||
}
|
||||
children := lookupValuesForIndices(ctx, map[string][]byte{string(blockParentRootIndicesBucket): r}, tx)
|
||||
if len(children) == 0 {
|
||||
children = [][][]byte{nil}
|
||||
}
|
||||
isPath, path := pathToFinalizedCheckpoint(ctx, children[0], checkpointRoot, tx)
|
||||
if isPath {
|
||||
return true, append([][]byte{r}, path...)
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
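// updatePrevFinalizedCheckpoint persists the checkpoint under previousFinalizedCheckpointKey so that the
// next index update knows where the previously finalized chain segment ended.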
func updatePrevFinalizedCheckpoint(ctx context.Context, span *trace.Span, finalizedBkt *bolt.Bucket, checkpoint *ethpb.Checkpoint) error {
|
||||
enc, err := encode(ctx, checkpoint)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
return finalizedBkt.Put(previousFinalizedCheckpointKey, enc)
|
||||
}
|
||||
|
||||
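// updateChildOfPrevFinalizedCheckpoint completes the previous finalized checkpoint's index entry by setting
// its ChildRoot to the child recorded in the block parent-root index, linking the old segment to the new one.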
func updateChildOfPrevFinalizedCheckpoint(ctx context.Context, span *trace.Span, finalizedBkt, parentBkt *bolt.Bucket, checkpointRoot []byte) error {
|
||||
container := ðpb.FinalizedBlockRootContainer{}
|
||||
if err := decode(ctx, finalizedBkt.Get(checkpointRoot), container); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
container.ChildRoot = parentBkt.Get(checkpointRoot)
|
||||
enc, err := encode(ctx, container)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
if err = finalizedBkt.Put(checkpointRoot, enc); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -26,30 +26,38 @@ func TestStore_IsFinalizedBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
|
||||
blks := makeBlocks(t, 0, slotsPerEpoch*2, genesisBlockRoot)
|
||||
|
||||
blks := makeBlocks(t, 0, slotsPerEpoch*3, genesisBlockRoot)
|
||||
require.NoError(t, db.SaveBlocks(ctx, blks))
|
||||
|
||||
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
cp := ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root[:],
|
||||
}
|
||||
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
// a state is required to save checkpoint
|
||||
require.NoError(t, db.SaveState(ctx, st, root))
|
||||
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
|
||||
|
||||
for i := uint64(0); i <= slotsPerEpoch; i++ {
|
||||
root, err = blks[i].Block().HashTreeRoot()
|
||||
// All blocks up to slotsPerEpoch*2 should be in the finalized index.
|
||||
for i := uint64(0); i < slotsPerEpoch*2; i++ {
|
||||
root, err := blks[i].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized", i)
|
||||
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized in the index", i)
|
||||
}
|
||||
for i := slotsPerEpoch + 1; i < uint64(len(blks)); i++ {
|
||||
root, err = blks[i].Block().HashTreeRoot()
|
||||
for i := slotsPerEpoch * 3; i < uint64(len(blks)); i++ {
|
||||
root, err := blks[i].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, db.IsFinalizedBlock(ctx, root), "Block at index %d was considered finalized, but should not have", i)
|
||||
assert.Equal(t, false, db.IsFinalizedBlock(ctx, root), "Block at index %d was considered finalized in the index, but should not have", i)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_IsFinalizedGenesisBlock(t *testing.T) {
|
||||
func TestStore_IsFinalizedBlockGenesis(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
@@ -61,114 +69,136 @@ func TestStore_IsFinalizedGenesisBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
|
||||
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root))
|
||||
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Finalized genesis block doesn't exist in db")
|
||||
}
|
||||
|
||||
// This test covers a specific edge case where the finalized block root is not part of
|
||||
// the finalized and canonical chain.
|
||||
//
|
||||
// Example:
|
||||
// 0 1 2 3 4 5 6 slot
|
||||
// a <- b <-- d <- e <- f <- g roots
|
||||
//
|
||||
// ^- c
|
||||
//
|
||||
// Imagine that epochs are 2 slots and that epoch 1, 2, and 3 are finalized. Checkpoint roots would
|
||||
// be c, e, and g. In this scenario, c was a finalized checkpoint root but no block built upon it so
|
||||
// it should not be considered "final and canonical" in the view at slot 6.
|
||||
func TestStore_IsFinalized_ForkEdgeCase(t *testing.T) {
|
||||
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
blocks0 := makeBlocks(t, slotsPerEpoch*0, slotsPerEpoch, genesisBlockRoot)
|
||||
blocks1 := append(
|
||||
makeBlocks(t, slotsPerEpoch*1, 1, bytesutil.ToBytes32(sszRootOrDie(t, blocks0[len(blocks0)-1]))), // No block builds off of the first block in epoch.
|
||||
makeBlocks(t, slotsPerEpoch*1+1, slotsPerEpoch-1, bytesutil.ToBytes32(sszRootOrDie(t, blocks0[len(blocks0)-1])))...,
|
||||
)
|
||||
blocks2 := makeBlocks(t, slotsPerEpoch*2, slotsPerEpoch, bytesutil.ToBytes32(sszRootOrDie(t, blocks1[len(blocks1)-1])))
|
||||
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
|
||||
require.NoError(t, db.SaveBlocks(ctx, blocks0))
|
||||
require.NoError(t, db.SaveBlocks(ctx, blocks1))
|
||||
require.NoError(t, db.SaveBlocks(ctx, blocks2))
|
||||
|
||||
// First checkpoint
|
||||
checkpoint1 := ðpb.Checkpoint{
|
||||
Root: sszRootOrDie(t, blocks1[0]),
|
||||
Epoch: 1,
|
||||
}
|
||||
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
// A state is required to save checkpoint
|
||||
require.NoError(t, db.SaveState(ctx, st, bytesutil.ToBytes32(checkpoint1.Root)))
|
||||
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, checkpoint1))
|
||||
// All blocks in blocks0 and blocks1 should be finalized and canonical.
|
||||
for i, block := range append(blocks0, blocks1...) {
|
||||
root := sszRootOrDie(t, block)
|
||||
assert.Equal(t, true, db.IsFinalizedBlock(ctx, bytesutil.ToBytes32(root)), "%d - Expected block %#x to be finalized", i, root)
|
||||
}
|
||||
|
||||
// Second checkpoint
|
||||
checkpoint2 := ðpb.Checkpoint{
|
||||
Root: sszRootOrDie(t, blocks2[0]),
|
||||
Epoch: 2,
|
||||
}
|
||||
// A state is required to save checkpoint
|
||||
require.NoError(t, db.SaveState(ctx, st, bytesutil.ToBytes32(checkpoint2.Root)))
|
||||
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, checkpoint2))
|
||||
// All blocks in blocks0 and blocks2 should be finalized and canonical.
|
||||
for i, block := range append(blocks0, blocks2...) {
|
||||
root := sszRootOrDie(t, block)
|
||||
assert.Equal(t, true, db.IsFinalizedBlock(ctx, bytesutil.ToBytes32(root)), "%d - Expected block %#x to be finalized", i, root)
|
||||
}
|
||||
// All blocks in blocks1 should be finalized and canonical, except blocks1[0].
|
||||
for i, block := range blocks1 {
|
||||
root := sszRootOrDie(t, block)
|
||||
if db.IsFinalizedBlock(ctx, bytesutil.ToBytes32(root)) == (i == 0) {
|
||||
t.Errorf("Expected db.IsFinalizedBlock(ctx, blocks1[%d]) to be %v", i, i != 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_IsFinalizedChildBlock(t *testing.T) {
|
||||
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
ctx := context.Background()
|
||||
db := setupDB(t)
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
|
||||
|
||||
blks := makeBlocks(t, 0, slotsPerEpoch*2, genesisBlockRoot)
|
||||
require.NoError(t, db.SaveBlocks(ctx, blks))
|
||||
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cp := ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root[:],
|
||||
}
|
||||
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
|
||||
|
||||
for i := uint64(0); i < slotsPerEpoch; i++ {
|
||||
root, err = blks[i].Block().HashTreeRoot()
|
||||
eval := func(t testing.TB, ctx context.Context, db *Store, blks []interfaces.ReadOnlySignedBeaconBlock) {
|
||||
require.NoError(t, db.SaveBlocks(ctx, blks))
|
||||
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized", i)
|
||||
blk, err := db.FinalizedChildBlock(ctx, root)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, false, blk == nil, "Child block at index %d was not considered finalized", i)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_ChildRootOfPrevFinalizedCheckpointIsUpdated(t *testing.T) {
|
||||
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
ctx := context.Background()
|
||||
db := setupDB(t)
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
|
||||
cp := ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root[:],
|
||||
}
|
||||
|
||||
blks := makeBlocks(t, 0, slotsPerEpoch*3, genesisBlockRoot)
|
||||
require.NoError(t, db.SaveBlocks(ctx, blks))
|
||||
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cp := ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root[:],
|
||||
}
|
||||
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
|
||||
root2, err := blks[slotsPerEpoch*2].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cp = ðpb.Checkpoint{
|
||||
Epoch: 2,
|
||||
Root: root2[:],
|
||||
}
|
||||
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
|
||||
|
||||
require.NoError(t, db.db.View(func(tx *bolt.Tx) error {
|
||||
container := ðpb.FinalizedBlockRootContainer{}
|
||||
f := tx.Bucket(finalizedBlockRootsIndexBucket).Get(root[:])
|
||||
require.NoError(t, decode(ctx, f, container))
|
||||
r, err := blks[slotsPerEpoch+1].Block().HashTreeRoot()
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, r[:], container.ChildRoot)
|
||||
return nil
|
||||
}))
|
||||
}
|
||||
// a state is required to save checkpoint
|
||||
require.NoError(t, db.SaveState(ctx, st, root))
|
||||
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
|
||||
|
||||
func TestStore_OrphanedBlockIsNotFinalized(t *testing.T) {
|
||||
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
|
||||
blk0 := util.NewBeaconBlock()
|
||||
blk0.Block.ParentRoot = genesisBlockRoot[:]
|
||||
blk0Root, err := blk0.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
blk1 := util.NewBeaconBlock()
|
||||
blk1.Block.Slot = 1
|
||||
blk1.Block.ParentRoot = blk0Root[:]
|
||||
blk2 := util.NewBeaconBlock()
|
||||
blk2.Block.Slot = 2
|
||||
// orphan block at index 1
|
||||
blk2.Block.ParentRoot = blk0Root[:]
|
||||
blk2Root, err := blk2.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
sBlk0, err := consensusblocks.NewSignedBeaconBlock(blk0)
|
||||
require.NoError(t, err)
|
||||
sBlk1, err := consensusblocks.NewSignedBeaconBlock(blk1)
|
||||
require.NoError(t, err)
|
||||
sBlk2, err := consensusblocks.NewSignedBeaconBlock(blk2)
|
||||
require.NoError(t, err)
|
||||
blks := append([]interfaces.ReadOnlySignedBeaconBlock{sBlk0, sBlk1, sBlk2}, makeBlocks(t, 3, slotsPerEpoch*2-3, blk2Root)...)
|
||||
require.NoError(t, db.SaveBlocks(ctx, blks))
|
||||
|
||||
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cp := ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root[:],
|
||||
}
|
||||
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
|
||||
|
||||
for i := uint64(0); i <= slotsPerEpoch; i++ {
|
||||
root, err = blks[i].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
if i == 1 {
|
||||
assert.Equal(t, false, db.IsFinalizedBlock(ctx, root), "Block at index 1 was considered finalized, but should not have")
|
||||
} else {
|
||||
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized", i)
|
||||
// All blocks up to slotsPerEpoch should have a finalized child block.
|
||||
for i := uint64(0); i < slotsPerEpoch; i++ {
|
||||
root, err := blks[i].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, db.IsFinalizedBlock(ctx, root), "Block at index %d was not considered finalized in the index", i)
|
||||
blk, err := db.FinalizedChildBlock(ctx, root)
|
||||
assert.NoError(t, err)
|
||||
if blk == nil {
|
||||
t.Error("Child block doesn't exist for valid finalized block.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
setup := func(t testing.TB) *Store {
|
||||
db := setupDB(t)
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
|
||||
|
||||
return db
|
||||
}
|
||||
|
||||
t.Run("phase0", func(t *testing.T) {
|
||||
db := setup(t)
|
||||
|
||||
blks := makeBlocks(t, 0, slotsPerEpoch*3, genesisBlockRoot)
|
||||
eval(t, ctx, db, blks)
|
||||
})
|
||||
|
||||
t.Run("altair", func(t *testing.T) {
|
||||
db := setup(t)
|
||||
|
||||
blks := makeBlocksAltair(t, 0, slotsPerEpoch*3, genesisBlockRoot)
|
||||
eval(t, ctx, db, blks)
|
||||
})
|
||||
}
|
||||
|
||||
func sszRootOrDie(t *testing.T, block interfaces.ReadOnlySignedBeaconBlock) []byte {
|
||||
root, err := block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
return root[:]
|
||||
}
|
||||
|
||||
func makeBlocks(t *testing.T, i, n uint64, previousRoot [32]byte) []interfaces.ReadOnlySignedBeaconBlock {
|
||||
@@ -189,6 +219,24 @@ func makeBlocks(t *testing.T, i, n uint64, previousRoot [32]byte) []interfaces.R
|
||||
return ifaceBlocks
|
||||
}
|
||||
|
||||
func makeBlocksAltair(t *testing.T, startIdx, num uint64, previousRoot [32]byte) []interfaces.ReadOnlySignedBeaconBlock {
|
||||
blocks := make([]*ethpb.SignedBeaconBlockAltair, num)
|
||||
ifaceBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, num)
|
||||
for j := startIdx; j < num+startIdx; j++ {
|
||||
parentRoot := make([]byte, fieldparams.RootLength)
|
||||
copy(parentRoot, previousRoot[:])
|
||||
blocks[j-startIdx] = util.NewBeaconBlockAltair()
|
||||
blocks[j-startIdx].Block.Slot = primitives.Slot(j + 1)
|
||||
blocks[j-startIdx].Block.ParentRoot = parentRoot
|
||||
var err error
|
||||
previousRoot, err = blocks[j-startIdx].Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
ifaceBlocks[j-startIdx], err = consensusblocks.NewSignedBeaconBlock(blocks[j-startIdx])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
return ifaceBlocks
|
||||
}
|
||||
|
||||
func TestStore_BackfillFinalizedIndexSingle(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
@@ -14,6 +14,7 @@ var migrations = []migration{
|
||||
migrateArchivedIndex,
|
||||
migrateBlockSlotIndex,
|
||||
migrateStateValidators,
|
||||
migrateFinalizedParent,
|
||||
}
|
||||
|
||||
// RunMigrations defined in the migrations array.
|
||||
|
||||
87	beacon-chain/db/kv/migration_finalized_parent.go	Normal file
@@ -0,0 +1,87 @@
package kv

import (
"bytes"
"context"
"fmt"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
bolt "go.etcd.io/bbolt"
)

var migrationFinalizedParent = []byte("parent_bug_32fb183")

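// migrateFinalizedParent repairs finalized-index entries that were written with their own key as ParentRoot
// (the parent_bug_32fb183 issue). It iterates the finalized-index bucket in reverse key order, rewrites each
// corrupt entry with the parent root taken from the stored block, and stops once roughly ten epochs' worth of
// consecutive entries are found clean.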
func migrateFinalizedParent(ctx context.Context, db *bolt.DB) error {
|
||||
if updateErr := db.Update(func(tx *bolt.Tx) error {
|
||||
mb := tx.Bucket(migrationsBucket)
|
||||
if b := mb.Get(migrationFinalizedParent); bytes.Equal(b, migrationCompleted) {
|
||||
return nil // Migration already completed.
|
||||
}
|
||||
|
||||
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
|
||||
if bkt == nil {
|
||||
return fmt.Errorf("unable to read %s bucket for migration", finalizedBlockRootsIndexBucket)
|
||||
}
|
||||
bb := tx.Bucket(blocksBucket)
|
||||
if bb == nil {
|
||||
return fmt.Errorf("unable to read %s bucket for migration", blocksBucket)
|
||||
}
|
||||
|
||||
c := bkt.Cursor()
|
||||
var slotsWithoutBug primitives.Slot
|
||||
maxBugSearch := params.BeaconConfig().SlotsPerEpoch * 10
|
||||
for k, v := c.Last(); k != nil; k, v = c.Prev() {
|
||||
// check if context is cancelled in between
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
idxEntry := ðpb.FinalizedBlockRootContainer{}
|
||||
if err := decode(ctx, v, idxEntry); err != nil {
|
||||
return errors.Wrapf(err, "unable to decode finalized block root container for root=%#x", k)
|
||||
}
|
||||
// Not one of the corrupt values
|
||||
if !bytes.Equal(idxEntry.ParentRoot, k) {
|
||||
slotsWithoutBug += 1
|
||||
if slotsWithoutBug > maxBugSearch {
|
||||
break
|
||||
}
|
||||
continue
|
||||
}
|
||||
slotsWithoutBug = 0
|
||||
log.WithField("root", fmt.Sprintf("%#x", k)).Debug("found index entry with incorrect parent root")
|
||||
|
||||
// Look up full block to get the correct parent root.
|
||||
encBlk := bb.Get(k)
|
||||
if encBlk == nil {
|
||||
return errors.Wrapf(ErrNotFound, "could not find block for corrupt finalized index entry %#x", k)
|
||||
}
|
||||
blk, err := unmarshalBlock(ctx, encBlk)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to decode block for root=%#x", k)
|
||||
}
|
||||
// Replace parent root in the index with the correct value and write it back.
|
||||
pr := blk.Block().ParentRoot()
|
||||
idxEntry.ParentRoot = pr[:]
|
||||
idxEnc, err := encode(ctx, idxEntry)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to encode finalized index entry for root=%#x", k)
|
||||
}
|
||||
if err := bkt.Put(k, idxEnc); err != nil {
|
||||
return errors.Wrapf(err, "failed to update finalized index entry for root=%#x", k)
|
||||
}
|
||||
log.WithField("root", fmt.Sprintf("%#x", k)).
|
||||
WithField("parentRoot", fmt.Sprintf("%#x", idxEntry.ParentRoot)).
|
||||
Debug("updated corrupt index entry with correct parent")
|
||||
}
|
||||
// Mark migration complete.
|
||||
return mb.Put(migrationFinalizedParent, migrationCompleted)
|
||||
}); updateErr != nil {
|
||||
log.WithError(updateErr).Errorf("could not run finalized parent root index repair migration")
|
||||
return updateErr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -458,7 +458,7 @@ func (s *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
|
||||
bkt = tx.Bucket(stateBucket)
|
||||
// Safeguard against deleting genesis, finalized, head state.
|
||||
if bytes.Equal(blockRoot[:], finalized.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], justified.Root) {
|
||||
return ErrDeleteFinalized
|
||||
return ErrDeleteJustifiedAndFinalized
|
||||
}
|
||||
|
||||
// Nothing to delete if state doesn't exist.
|
||||
|
||||
@@ -70,12 +70,12 @@ func (s *Store) LastEpochWrittenForValidators(
|
||||
return attestedEpochs, err
|
||||
}
|
||||
|
||||
// SaveLastEpochsWrittenForValidators updates the latest epoch a slice
|
||||
// of validator indices has attested to.
|
||||
func (s *Store) SaveLastEpochsWrittenForValidators(
|
||||
// SaveLastEpochWrittenForValidators saves the latest epoch
|
||||
// that each validator has attested to in the provided map.
|
||||
func (s *Store) SaveLastEpochWrittenForValidators(
|
||||
ctx context.Context, epochByValIndex map[primitives.ValidatorIndex]primitives.Epoch,
|
||||
) error {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastEpochsWrittenForValidators")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastEpochWrittenForValidators")
|
||||
defer span.End()
|
||||
|
||||
const batchSize = 10000
|
||||
@@ -157,7 +157,7 @@ func (s *Store) CheckAttesterDoubleVotes(
|
||||
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
|
||||
|
||||
encEpoch := encodeTargetEpoch(attToProcess.IndexedAttestation.Data.Target.Epoch)
|
||||
localDoubleVotes := []*slashertypes.AttesterDoubleVote{}
|
||||
localDoubleVotes := make([]*slashertypes.AttesterDoubleVote, 0)
|
||||
|
||||
for _, valIdx := range attToProcess.IndexedAttestation.AttestingIndices {
|
||||
// Check if there is signing root in the database for this combination
|
||||
@@ -166,7 +166,7 @@ func (s *Store) CheckAttesterDoubleVotes(
|
||||
validatorEpochKey := append(encEpoch, encIdx...)
|
||||
attRecordsKey := signingRootsBkt.Get(validatorEpochKey)
|
||||
|
||||
// An attestation record key is comprised of a signing root (32 bytes).
|
||||
// An attestation record key consists of a signing root (32 bytes).
|
||||
if len(attRecordsKey) < attestationRecordKeySize {
|
||||
// If there is no signing root for this combination,
|
||||
// then there is no double vote. We can continue to the next validator.
|
||||
@@ -697,7 +697,7 @@ func decodeSlasherChunk(enc []byte) ([]uint16, error) {
|
||||
}
|
||||
|
||||
// Encode attestation record to bytes.
|
||||
// The output encoded attestation record consists in the signing root concatened with the compressed attestation record.
|
||||
// The output encoded attestation record consists in the signing root concatenated with the compressed attestation record.
|
||||
func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byte, error) {
|
||||
if att == nil || att.IndexedAttestation == nil {
|
||||
return []byte{}, errors.New("nil proposal record")
|
||||
@@ -716,7 +716,7 @@ func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byt
|
||||
}
|
||||
|
||||
// Decode attestation record from bytes.
|
||||
// The input encoded attestation record consists in the signing root concatened with the compressed attestation record.
|
||||
// The input encoded attestation record consists in the signing root concatenated with the compressed attestation record.
|
||||
func decodeAttestationRecord(encoded []byte) (*slashertypes.IndexedAttestationWrapper, error) {
|
||||
if len(encoded) < rootSize {
|
||||
return nil, fmt.Errorf("wrong length for encoded attestation record, want minimum %d, got %d", rootSize, len(encoded))
|
||||
|
||||
@@ -89,7 +89,7 @@ func TestStore_LastEpochWrittenForValidators(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(attestedEpochs))
|
||||
|
||||
err = beaconDB.SaveLastEpochsWrittenForValidators(ctx, epochsByValidator)
|
||||
err = beaconDB.SaveLastEpochWrittenForValidators(ctx, epochsByValidator)
|
||||
require.NoError(t, err)
|
||||
|
||||
retrievedEpochs, err := beaconDB.LastEpochWrittenForValidators(ctx, indices)
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
)
|
||||
|
||||
// TestCleanup ensures that the cleanup function unregisters the prometheus.Collection
|
||||
// also tests the interchangability of the explicit prometheus Register/Unregister
|
||||
// also tests the interchangeability of the explicit prometheus Register/Unregister
|
||||
// and the implicit methods within the collector implementation
|
||||
func TestCleanup(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
@@ -32,11 +32,11 @@ func TestCleanup(t *testing.T) {
|
||||
assert.Equal(t, true, unregistered, "prometheus.Unregister failed to unregister PowchainCollector on final cleanup")
|
||||
}
|
||||
|
||||
// TestCancelation tests that canceling the context passed into
|
||||
// TestCancellation tests that canceling the context passed into
|
||||
// NewPowchainCollector cleans everything up as expected. This
|
||||
// does come at the cost of an extra channel cluttering up
|
||||
// PowchainCollector, just for this test.
|
||||
func TestCancelation(t *testing.T) {
|
||||
func TestCancellation(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
pc, err := NewPowchainCollector(ctx)
|
||||
assert.NoError(t, err, "Unexpected error calling NewPowchainCollector")
|
||||
|
||||
@@ -643,9 +643,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
|
||||
if err := d.ClearDB(); err != nil {
|
||||
return errors.Wrap(err, "could not clear database")
|
||||
}
|
||||
if err := b.BlobStorage.Clear(); err != nil {
|
||||
return errors.Wrap(err, "could not clear blob storage")
|
||||
}
|
||||
|
||||
d, err = slasherkv.NewKVStore(b.ctx, dbPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not create new database")
|
||||
@@ -707,6 +705,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
|
||||
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
|
||||
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
|
||||
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
|
||||
QUICPort: cliCtx.Uint(cmd.P2PQUICPort.Name),
|
||||
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
|
||||
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
|
||||
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
|
||||
|
||||
@@ -217,9 +217,9 @@ func Test_hasNetworkFlag(t *testing.T) {
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "Prater testnet",
|
||||
networkName: features.PraterTestnet.Name,
|
||||
networkValue: "prater",
|
||||
name: "Holesky testnet",
|
||||
networkName: features.HoleskyTestnet.Name,
|
||||
networkValue: "holesky",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
|
||||
@@ -46,6 +46,7 @@ go_library(
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
@@ -56,6 +57,7 @@ go_library(
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
@@ -90,6 +92,7 @@ go_library(
|
||||
"@com_github_libp2p_go_libp2p//core/peerstore:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/transport/quic:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_mplex//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
|
||||
|
||||
@@ -10,6 +10,8 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
|
||||
@@ -95,7 +97,12 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, forkDigest [4]byte) {
|
||||
func (s *Service) internalBroadcastAttestation(
|
||||
ctx context.Context,
|
||||
subnet uint64,
|
||||
att *ethpb.Attestation,
|
||||
forkDigest [fieldparams.VersionLength]byte,
|
||||
) {
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||
@@ -137,11 +144,11 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
|
||||
// In the event our attestation is outdated and beyond the
|
||||
// acceptable threshold, we exit early and do not broadcast it.
|
||||
currSlot := slots.CurrentSlot(uint64(s.genesisTime.Unix()))
|
||||
if att.Data.Slot+params.BeaconConfig().SlotsPerEpoch < currSlot {
|
||||
if err := helpers.ValidateAttestationTime(att.Data.Slot, s.genesisTime, params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"attestationSlot": att.Data.Slot,
|
||||
"currentSlot": currSlot,
|
||||
}).Warning("Attestation is too old to broadcast, discarding it")
|
||||
}).WithError(err).Warning("Attestation is too old to broadcast, discarding it")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -151,7 +158,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
|
||||
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [fieldparams.VersionLength]byte) {
|
||||
_, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||
@@ -227,7 +234,12 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
|
||||
func (s *Service) internalBroadcastBlob(
|
||||
ctx context.Context,
|
||||
subnet uint64,
|
||||
blobSidecar *ethpb.BlobSidecar,
|
||||
forkDigest [fieldparams.VersionLength]byte,
|
||||
) {
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||
@@ -242,7 +254,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
|
||||
s.subnetLocker(wrappedSubIdx).RUnlock()
|
||||
|
||||
if !hasPeer {
|
||||
blobSidecarCommitteeBroadcastAttempts.Inc()
|
||||
blobSidecarBroadcastAttempts.Inc()
|
||||
if err := func() error {
|
||||
s.subnetLocker(wrappedSubIdx).Lock()
|
||||
defer s.subnetLocker(wrappedSubIdx).Unlock()
|
||||
@@ -251,7 +263,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
|
||||
return err
|
||||
}
|
||||
if ok {
|
||||
blobSidecarCommitteeBroadcasts.Inc()
|
||||
blobSidecarBroadcasts.Inc()
|
||||
return nil
|
||||
}
|
||||
return errors.New("failed to find peers for subnet")
|
||||
@@ -267,6 +279,99 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
|
||||
}
|
||||
}
|
||||
|
||||
// BroadcastDataColumn broadcasts a data column sidecar to the p2p network. The message is
// broadcast on the current fork digest and on the given column subnet.
// TODO: Add tests
func (s *Service) BroadcastDataColumn(ctx context.Context, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error {
// Add tracing to the function.
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastDataColumn")
defer span.End()
|
||||
|
||||
// Ensure the data column sidecar is not nil.
|
||||
if dataColumnSidecar == nil {
|
||||
return errors.New("attempted to broadcast nil data column sidecar")
|
||||
}
|
||||
|
||||
// Retrieve the current fork digest.
|
||||
forkDigest, err := s.currentForkDigest()
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "current fork digest")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
|
||||
go s.internalBroadcastDataColumn(ctx, columnSubnet, dataColumnSidecar, forkDigest)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) internalBroadcastDataColumn(
|
||||
ctx context.Context,
|
||||
columnSubnet uint64,
|
||||
dataColumnSidecar *ethpb.DataColumnSidecar,
|
||||
forkDigest [fieldparams.VersionLength]byte,
|
||||
) {
|
||||
// Add tracing to the function.
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
|
||||
defer span.End()
|
||||
|
||||
// Increase the number of broadcast attempts.
|
||||
dataColumnSidecarBroadcastAttempts.Inc()
|
||||
|
||||
// Clear parent context / deadline.
|
||||
ctx = trace.NewContext(context.Background(), span)
|
||||
|
||||
// Define a one-slot length context timeout.
|
||||
oneSlot := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
ctx, cancel := context.WithTimeout(ctx, oneSlot)
|
||||
defer cancel()
|
||||
|
||||
// Build the topic corresponding to this column subnet and this fork digest.
|
||||
topic := dataColumnSubnetToTopic(columnSubnet, forkDigest)
|
||||
|
||||
// Compute the wrapped subnet index.
|
||||
wrappedSubIdx := columnSubnet + dataColumnSubnetVal
|
||||
|
||||
// Check if we have peers with this subnet.
|
||||
hasPeer := func() bool {
|
||||
s.subnetLocker(wrappedSubIdx).RLock()
|
||||
defer s.subnetLocker(wrappedSubIdx).RUnlock()
|
||||
|
||||
return s.hasPeerWithSubnet(topic)
|
||||
}()
|
||||
|
||||
// If no peers are found, attempt to find peers with this subnet.
|
||||
if !hasPeer {
|
||||
if err := func() error {
|
||||
s.subnetLocker(wrappedSubIdx).Lock()
|
||||
defer s.subnetLocker(wrappedSubIdx).Unlock()
|
||||
|
||||
ok, err := s.FindPeersWithSubnet(ctx, topic, columnSubnet, 1 /*threshold*/)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "find peers for subnet")
|
||||
}
|
||||
|
||||
if ok {
|
||||
return nil
|
||||
}
|
||||
return errors.New("failed to find peers for subnet")
|
||||
}(); err != nil {
|
||||
log.WithError(err).Error("Failed to find peers")
|
||||
tracing.AnnotateError(span, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Broadcast the data column sidecar to the network.
|
||||
if err := s.broadcastObject(ctx, dataColumnSidecar, topic); err != nil {
|
||||
log.WithError(err).Error("Failed to broadcast blob sidecar")
|
||||
tracing.AnnotateError(span, err)
|
||||
}
|
||||
|
||||
// Increase the number of successful broadcasts.
|
||||
blobSidecarBroadcasts.Inc()
|
||||
}
|
||||
|
||||
// method to broadcast messages to other peers in our gossip mesh.
|
||||
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.broadcastObject")
|
||||
@@ -296,14 +401,18 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
|
||||
return nil
|
||||
}
|
||||
|
||||
func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
|
||||
func attestationToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
|
||||
return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
|
||||
}
|
||||
|
||||
func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
|
||||
func syncCommitteeToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
|
||||
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
|
||||
}
|
||||
|
||||
func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
|
||||
func blobSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
|
||||
return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
|
||||
}
|
||||
|
||||
func dataColumnSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
|
||||
return fmt.Sprintf(DataColumnSubnetTopicFormat, forkDigest, subnet)
|
||||
}
|
||||
|
||||
@@ -24,6 +24,7 @@ type Config struct {
|
||||
PrivateKey string
|
||||
DataDir string
|
||||
MetaDataDir string
|
||||
QUICPort uint
|
||||
TCPPort uint
|
||||
UDPPort uint
|
||||
MaxPeers uint
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
@@ -39,10 +40,21 @@ const (
|
||||
udp6
|
||||
)
|
||||
|
||||
// RefreshENR uses an epoch to refresh the enr entry for our node
|
||||
// with the tracked committee ids for the epoch, allowing our node
|
||||
// to be dynamically discoverable by others given our tracked committee ids.
|
||||
func (s *Service) RefreshENR() {
|
||||
type (
|
||||
quicProtocol uint16
|
||||
custodySubnetCount uint64
|
||||
)
|
||||
|
||||
// quicProtocol is the "quic" key, which holds the QUIC port of the node.
|
||||
func (quicProtocol) ENRKey() string { return "quic" }
|
||||
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5
|
||||
func (custodySubnetCount) ENRKey() string { return "custody_subnet_count" }
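A hedged sketch of how a consumer of this ENR entry could read the custody subnet count a discovered peer advertises. The helper name is hypothetical, and falling back to CustodyRequirement for peers that do not advertise the entry is an assumption, not something this diff shows:

	// custodyCountFromNode is a hypothetical helper, not part of this change.
	func custodyCountFromNode(node *enode.Node) uint64 {
		var count custodySubnetCount
		if err := node.Load(&count); err != nil {
			if !enr.IsNotFound(err) {
				log.WithError(err).Debug("Could not load custody_subnet_count from ENR")
			}
			// Assumed fallback: peers without the entry custody the spec minimum.
			return params.BeaconConfig().CustodyRequirement
		}
		return uint64(count)
	}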
|
||||
|
||||
// RefreshPersistentSubnets checks that we are tracking our local persistent subnets for a variety of gossip topics.
|
||||
// This routine checks for our attestation, sync committee and data column subnets and updates them if they have
|
||||
// been rotated.
|
||||
func (s *Service) RefreshPersistentSubnets() {
|
||||
// Return early if discv5 isn't running.
|
||||
if s.dv5Listener == nil || !s.isInitialized() {
|
||||
return
|
||||
@@ -52,6 +64,10 @@ func (s *Service) RefreshENR() {
|
||||
log.WithError(err).Error("Could not initialize persistent subnets")
|
||||
return
|
||||
}
|
||||
if err := initializePersistentColumnSubnets(s.dv5Listener.LocalNode().ID()); err != nil {
|
||||
log.WithError(err).Error("Could not initialize persistent column subnets")
|
||||
return
|
||||
}
|
||||
|
||||
bitV := bitfield.NewBitvector64()
|
||||
committees := cache.SubnetIDs.GetAllSubnets()
|
||||
@@ -100,14 +116,15 @@ func (s *Service) RefreshENR() {
|
||||
|
||||
// listen for new nodes watches for new nodes in the network and adds them to the peerstore.
|
||||
func (s *Service) listenForNewNodes() {
|
||||
iterator := s.dv5Listener.RandomNodes()
|
||||
iterator = enode.Filter(iterator, s.filterPeer)
|
||||
iterator := enode.Filter(s.dv5Listener.RandomNodes(), s.filterPeer)
|
||||
defer iterator.Close()
|
||||
|
||||
for {
|
||||
// Exit if service's context is canceled
|
||||
// Exit if service's context is canceled.
|
||||
if s.ctx.Err() != nil {
|
||||
break
|
||||
}
|
||||
|
||||
if s.isPeerAtLimit(false /* inbound */) {
|
||||
// Pause the main loop for a period to stop looking
|
||||
// for new peers.
|
||||
@@ -115,16 +132,22 @@ func (s *Service) listenForNewNodes() {
|
||||
time.Sleep(pollingPeriod)
|
||||
continue
|
||||
}
|
||||
exists := iterator.Next()
|
||||
if !exists {
|
||||
|
||||
if exists := iterator.Next(); !exists {
|
||||
break
|
||||
}
|
||||
|
||||
node := iterator.Node()
|
||||
peerInfo, _, err := convertToAddrInfo(node)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not convert to peer info")
|
||||
continue
|
||||
}
|
||||
|
||||
if peerInfo == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Make sure that peer is not dialed too often, for each connection attempt there's a backoff period.
|
||||
s.Peers().RandomizeBackOff(peerInfo.ID)
|
||||
go func(info *peer.AddrInfo) {
|
||||
@@ -167,8 +190,7 @@ func (s *Service) createListener(
|
||||
|
||||
// Listen to all network interfaces
|
||||
// for both ip protocols.
|
||||
networkVersion := "udp"
|
||||
conn, err := net.ListenUDP(networkVersion, udpAddr)
|
||||
conn, err := net.ListenUDP("udp", udpAddr)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not listen to UDP")
|
||||
}
|
||||
@@ -178,6 +200,7 @@ func (s *Service) createListener(
|
||||
ipAddr,
|
||||
int(s.cfg.UDPPort),
|
||||
int(s.cfg.TCPPort),
|
||||
int(s.cfg.QUICPort),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create local node")
|
||||
@@ -209,7 +232,7 @@ func (s *Service) createListener(
|
||||
func (s *Service) createLocalNode(
|
||||
privKey *ecdsa.PrivateKey,
|
||||
ipAddr net.IP,
|
||||
udpPort, tcpPort int,
|
||||
udpPort, tcpPort, quicPort int,
|
||||
) (*enode.LocalNode, error) {
|
||||
db, err := enode.OpenDB("")
|
||||
if err != nil {
|
||||
@@ -218,11 +241,24 @@ func (s *Service) createLocalNode(
|
||||
localNode := enode.NewLocalNode(db, privKey)
|
||||
|
||||
ipEntry := enr.IP(ipAddr)
|
||||
udpEntry := enr.UDP(udpPort)
|
||||
tcpEntry := enr.TCP(tcpPort)
|
||||
localNode.Set(ipEntry)
|
||||
|
||||
udpEntry := enr.UDP(udpPort)
|
||||
localNode.Set(udpEntry)
|
||||
|
||||
tcpEntry := enr.TCP(tcpPort)
|
||||
localNode.Set(tcpEntry)
|
||||
|
||||
if features.Get().EnableQUIC {
|
||||
quicEntry := quicProtocol(quicPort)
|
||||
localNode.Set(quicEntry)
|
||||
}
|
||||
|
||||
if features.Get().EnablePeerDAS {
|
||||
custodySubnetEntry := custodySubnetCount(params.BeaconConfig().CustodyRequirement)
|
||||
localNode.Set(custodySubnetEntry)
|
||||
}
|
||||
|
||||
localNode.SetFallbackIP(ipAddr)
|
||||
localNode.SetFallbackUDP(udpPort)
|
||||
|
||||
@@ -277,7 +313,7 @@ func (s *Service) startDiscoveryV5(
|
||||
// filterPeer validates each node that we retrieve from our dht. We
|
||||
// try to ascertain that the peer can be a valid protocol peer.
|
||||
// Validity Conditions:
|
||||
// 1. Peer has a valid IP and TCP port set in their enr.
|
||||
// 1. Peer has a valid IP and a (QUIC and/or TCP) port set in their enr.
|
||||
// 2. Peer hasn't been marked as 'bad'.
|
||||
// 3. Peer is not currently active or connected.
|
||||
// 4. Peer is ready to receive incoming connections.
|
||||
@@ -294,17 +330,13 @@ func (s *Service) filterPeer(node *enode.Node) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Ignore nodes with their TCP ports not set.
|
||||
if err := node.Record().Load(enr.WithEntry("tcp", new(enr.TCP))); err != nil {
|
||||
if !enr.IsNotFound(err) {
|
||||
log.WithError(err).Debug("Could not retrieve tcp port")
|
||||
}
|
||||
peerData, multiAddrs, err := convertToAddrInfo(node)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not convert to peer data")
|
||||
return false
|
||||
}
|
||||
|
||||
peerData, multiAddr, err := convertToAddrInfo(node)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not convert to peer data")
|
||||
if peerData == nil || len(multiAddrs) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -337,6 +369,9 @@ func (s *Service) filterPeer(node *enode.Node) bool {
|
||||
}
|
||||
}
|
||||
|
||||
// If the peer has 2 multiaddrs, favor the QUIC address, which is in first position.
|
||||
multiAddr := multiAddrs[0]
|
||||
|
||||
// Add peer to peer handler.
|
||||
s.peers.Add(nodeENR, peerData.ID, multiAddr, network.DirUnknown)
|
||||
|
||||
@@ -380,11 +415,11 @@ func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not get enode from string")
|
||||
}
|
||||
addr, err := convertToSingleMultiAddr(enodeAddr)
|
||||
nodeAddrs, err := retrieveMultiAddrsFromNode(enodeAddr)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not get multiaddr")
|
||||
}
|
||||
allAddrs = append(allAddrs, addr)
|
||||
allAddrs = append(allAddrs, nodeAddrs...)
|
||||
}
|
||||
return allAddrs, nil
|
||||
}
|
||||
@@ -419,45 +454,139 @@ func parseGenericAddrs(addrs []string) (enodeString, multiAddrString []string) {
|
||||
}
|
||||
|
||||
func convertToMultiAddr(nodes []*enode.Node) []ma.Multiaddr {
|
||||
var multiAddrs []ma.Multiaddr
|
||||
// Expect each node to have a TCP and a QUIC address.
|
||||
multiAddrs := make([]ma.Multiaddr, 0, 2*len(nodes))
|
||||
|
||||
for _, node := range nodes {
|
||||
// ignore nodes with no ip address stored
|
||||
// Skip nodes with no ip address stored.
|
||||
if node.IP() == nil {
|
||||
continue
|
||||
}
|
||||
multiAddr, err := convertToSingleMultiAddr(node)
|
||||
|
||||
// Get up to two multiaddrs (TCP and QUIC) for each node.
|
||||
nodeMultiAddrs, err := retrieveMultiAddrsFromNode(node)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not convert to multiAddr")
|
||||
log.WithError(err).Errorf("Could not convert to multiAddr node %s", node)
|
||||
continue
|
||||
}
|
||||
multiAddrs = append(multiAddrs, multiAddr)
|
||||
|
||||
multiAddrs = append(multiAddrs, nodeMultiAddrs...)
|
||||
}
|
||||
|
||||
return multiAddrs
|
||||
}
|
||||
|
||||
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, ma.Multiaddr, error) {
|
||||
multiAddr, err := convertToSingleMultiAddr(node)
|
||||
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, []ma.Multiaddr, error) {
|
||||
multiAddrs, err := retrieveMultiAddrsFromNode(node)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
info, err := peer.AddrInfoFromP2pAddr(multiAddr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
|
||||
if len(multiAddrs) == 0 {
|
||||
return nil, nil, nil
|
||||
}
|
||||
return info, multiAddr, nil
|
||||
|
||||
infos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not convert to peer info: %v", multiAddrs)
|
||||
}
|
||||
|
||||
if len(infos) != 1 {
|
||||
return nil, nil, errors.Errorf("infos contains %v elements, expected exactly 1", len(infos))
|
||||
}
|
||||
|
||||
return &infos[0], multiAddrs, nil
|
||||
}
|
||||
|
||||
func convertToSingleMultiAddr(node *enode.Node) (ma.Multiaddr, error) {
|
||||
// retrieveMultiAddrsFromNode converts an enode.Node to a list of multiaddrs.
|
||||
// If the node has both a QUIC and a TCP port set in its ENR, then
|
||||
// the multiaddr corresponding to the QUIC port is added first, followed
|
||||
// by the multiaddr corresponding to the TCP port.
|
||||
func retrieveMultiAddrsFromNode(node *enode.Node) ([]ma.Multiaddr, error) {
|
||||
multiaddrs := make([]ma.Multiaddr, 0, 2)
|
||||
|
||||
// Retrieve the node public key.
|
||||
pubkey := node.Pubkey()
|
||||
assertedKey, err := ecdsaprysm.ConvertToInterfacePubkey(pubkey)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get pubkey")
|
||||
}
|
||||
|
||||
// Compute the node ID from the public key.
|
||||
id, err := peer.IDFromPublicKey(assertedKey)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get peer id")
|
||||
}
|
||||
return multiAddressBuilderWithID(node.IP().String(), "tcp", uint(node.TCP()), id)
|
||||
|
||||
if features.Get().EnableQUIC {
|
||||
// If the QUIC entry is present in the ENR, build the corresponding multiaddress.
|
||||
port, ok, err := getPort(node, quic)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get QUIC port")
|
||||
}
|
||||
|
||||
if ok {
|
||||
addr, err := multiAddressBuilderWithID(node.IP(), quic, port, id)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not build QUIC address")
|
||||
}
|
||||
|
||||
multiaddrs = append(multiaddrs, addr)
|
||||
}
|
||||
}
|
||||
|
||||
// If the TCP entry is present in the ENR, build the corresponding multiaddress.
|
||||
port, ok, err := getPort(node, tcp)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get TCP port")
|
||||
}
|
||||
|
||||
if ok {
|
||||
addr, err := multiAddressBuilderWithID(node.IP(), tcp, port, id)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not build TCP address")
|
||||
}
|
||||
|
||||
multiaddrs = append(multiaddrs, addr)
|
||||
}
|
||||
|
||||
return multiaddrs, nil
|
||||
}
|
||||
|
||||
// getPort retrieves the port for a given node and protocol, as well as a boolean
|
||||
// indicating whether the port was found, and an error
|
||||
func getPort(node *enode.Node, protocol internetProtocol) (uint, bool, error) {
|
||||
var (
|
||||
port uint
|
||||
err error
|
||||
)
|
||||
|
||||
switch protocol {
|
||||
case tcp:
|
||||
var entry enr.TCP
|
||||
err = node.Load(&entry)
|
||||
port = uint(entry)
|
||||
case udp:
|
||||
var entry enr.UDP
|
||||
err = node.Load(&entry)
|
||||
port = uint(entry)
|
||||
case quic:
|
||||
var entry quicProtocol
|
||||
err = node.Load(&entry)
|
||||
port = uint(entry)
|
||||
default:
|
||||
return 0, false, errors.Errorf("invalid protocol: %v", protocol)
|
||||
}
|
||||
|
||||
if enr.IsNotFound(err) {
|
||||
return port, false, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return 0, false, errors.Wrap(err, "could not get port")
|
||||
}
|
||||
|
||||
return port, true, nil
|
||||
}
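A short sketch, assuming the surrounding package's imports, of how the ordering guarantee above can be used: retrieveMultiAddrsFromNode puts the QUIC multiaddr first when the node advertises one and the feature flag is enabled, so a caller that prefers the first entry dials QUIC when possible and TCP otherwise. preferredDialAddr is a hypothetical helper, not part of this change.

	func preferredDialAddr(node *enode.Node) (ma.Multiaddr, error) {
		addrs, err := retrieveMultiAddrsFromNode(node)
		if err != nil {
			return nil, errors.Wrap(err, "retrieve multiaddrs")
		}
		if len(addrs) == 0 {
			return nil, errors.New("node advertises no supported transport")
		}
		// QUIC (when advertised and enabled) sits at index 0, TCP after it.
		return addrs[0], nil
	}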
|
||||
|
||||
func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
|
||||
@@ -475,14 +604,14 @@ func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
|
||||
var ip4 enr.IPv4
|
||||
var ip6 enr.IPv6
|
||||
if node.Load(&ip4) == nil {
|
||||
address, ipErr := multiAddressBuilderWithID(net.IP(ip4).String(), "udp", uint(node.UDP()), id)
|
||||
address, ipErr := multiAddressBuilderWithID(net.IP(ip4), udp, uint(node.UDP()), id)
|
||||
if ipErr != nil {
|
||||
return nil, errors.Wrap(ipErr, "could not build IPv4 address")
|
||||
}
|
||||
addresses = append(addresses, address)
|
||||
}
|
||||
if node.Load(&ip6) == nil {
|
||||
address, ipErr := multiAddressBuilderWithID(net.IP(ip6).String(), "udp", uint(node.UDP()), id)
|
||||
address, ipErr := multiAddressBuilderWithID(net.IP(ip6), udp, uint(node.UDP()), id)
|
||||
if ipErr != nil {
|
||||
return nil, errors.Wrap(ipErr, "could not build IPv6 address")
|
||||
}
|
||||
|
||||
@@ -166,8 +166,9 @@ func TestCreateLocalNode(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
// Define ports.
const (
udpPort = 2000
tcpPort = 3000
udpPort = 2000
tcpPort = 3000
quicPort = 3000
)

// Create a private key.
@@ -180,7 +181,7 @@ func TestCreateLocalNode(t *testing.T) {
cfg: tt.cfg,
}

localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort)
localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort, quicPort)
if tt.expectedError {
require.NotNil(t, err)
return
@@ -237,7 +238,7 @@ func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
}
node, err := s.createLocalNode(pkey, addr, 0, 0)
node, err := s.createLocalNode(pkey, addr, 0, 0, 0)
require.NoError(t, err)
multiAddr := convertToMultiAddr([]*enode.Node{node.Node()})
assert.Equal(t, 0, len(multiAddr), "Invalid ip address converted successfully")
@@ -248,8 +249,9 @@ func TestMultiAddrConversion_OK(t *testing.T) {
ipAddr, pkey := createAddrAndPrivKey(t)
s := &Service{
cfg: &Config{
TCPPort: 0,
UDPPort: 0,
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
@@ -315,7 +317,7 @@ func TestHostIsResolved(t *testing.T) {
// As defined in RFC 2606 , example.org is a
// reserved example domain name.
exampleHost := "example.org"
exampleIP := "93.184.216.34"
exampleIP := "93.184.215.14"

s := &Service{
cfg: &Config{
@@ -599,7 +601,7 @@ func TestRefreshENR_ForkBoundaries(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := tt.svcBuilder(t)
s.RefreshENR()
s.RefreshPersistentSubnets()
tt.postValidation(t, s)
s.dv5Listener.Close()
cache.SubnetIDs.EmptyAllCaches()

@@ -28,7 +28,8 @@ import (
)

func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
port := 2000
const port = 2000

ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, fieldparams.RootLength)
@@ -53,7 +54,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {

var listeners []*discover.UDPv5
for i := 1; i <= 5; i++ {
port = 3000 + i
port := 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)

@@ -98,13 +99,14 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
var addrs []ma.Multiaddr

for _, n := range nodes {
if s.filterPeer(n) {
addr, err := convertToSingleMultiAddr(n)
addrs := make([]ma.Multiaddr, 0)

for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
require.NoError(t, err)
addrs = append(addrs, addr)
addrs = append(addrs, nodeAddrs...)
}
}

@@ -114,10 +116,11 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
}

func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
const port = 2000

params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
logrus.SetLevel(logrus.TraceLevel)
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
@@ -138,7 +141,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {

var listeners []*discover.UDPv5
for i := 1; i <= 5; i++ {
port = 3000 + i
port := 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)

@@ -188,13 +191,13 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
var addrs []ma.Multiaddr
addrs := make([]ma.Multiaddr, 0, len(nodes))

for _, n := range nodes {
if s.filterPeer(n) {
addr, err := convertToSingleMultiAddr(n)
for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
require.NoError(t, err)
addrs = append(addrs, addr)
addrs = append(addrs, nodeAddrs...)
}
}
if len(addrs) == 0 {

@@ -22,6 +22,7 @@ var gossipTopicMappings = map[string]proto.Message{
SyncCommitteeSubnetTopicFormat: &ethpb.SyncCommitteeMessage{},
BlsToExecutionChangeSubnetTopicFormat: &ethpb.SignedBLSToExecutionChange{},
BlobSubnetTopicFormat: &ethpb.BlobSidecar{},
DataColumnSubnetTopicFormat: &ethpb.DataColumnSidecar{},
}

// GossipTopicMappings is a function to return the assigned data type

@@ -3,6 +3,7 @@ package p2p
import (
"context"

"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/connmgr"
@@ -30,12 +31,18 @@ type P2P interface {
MetadataProvider
}

type Acceser interface {
Broadcaster
PeerManager
}

// Broadcaster broadcasts messages to peers over the p2p pubsub protocol.
type Broadcaster interface {
Broadcast(context.Context, proto.Message) error
BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
BroadcastDataColumn(ctx context.Context, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
}

// SetStreamHandler configures p2p to handle streams of a certain topic ID.
@@ -81,8 +88,9 @@ type PeerManager interface {
PeerID() peer.ID
Host() host.Host
ENR() *enr.Record
NodeID() enode.ID
DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
RefreshENR()
RefreshPersistentSubnets()
FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
}

@@ -1,6 +1,7 @@
package p2p

import (
"net"
"strconv"
"strings"

@@ -12,32 +13,32 @@ import (
var log = logrus.WithField("prefix", "p2p")

func logIPAddr(id peer.ID, addrs ...ma.Multiaddr) {
var correctAddr ma.Multiaddr
for _, addr := range addrs {
if strings.Contains(addr.String(), "/ip4/") || strings.Contains(addr.String(), "/ip6/") {
correctAddr = addr
break
if !(strings.Contains(addr.String(), "/ip4/") || strings.Contains(addr.String(), "/ip6/")) {
continue
}
}
if correctAddr != nil {

log.WithField(
"multiAddr",
correctAddr.String()+"/p2p/"+id.String(),
addr.String()+"/p2p/"+id.String(),
).Info("Node started p2p server")
}
}

func logExternalIPAddr(id peer.ID, addr string, port uint) {
func logExternalIPAddr(id peer.ID, addr string, tcpPort, quicPort uint) {
if addr != "" {
multiAddr, err := MultiAddressBuilder(addr, port)
multiAddrs, err := MultiAddressBuilder(net.ParseIP(addr), tcpPort, quicPort)
if err != nil {
log.WithError(err).Error("Could not create multiaddress")
return
}
log.WithField(
"multiAddr",
multiAddr.String()+"/p2p/"+id.String(),
).Info("Node started external p2p server")

for _, multiAddr := range multiAddrs {
log.WithField(
"multiAddr",
multiAddr.String()+"/p2p/"+id.String(),
).Info("Node started external p2p server")
}
}
}

@@ -60,17 +60,21 @@ var (
"the subnet. The beacon node increments this counter when the broadcast is blocked " +
"until a subnet peer can be found.",
})
blobSidecarCommitteeBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
blobSidecarBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_blob_sidecar_committee_broadcasts",
Help: "The number of blob sidecar committee messages that were broadcast with no peer on.",
Help: "The number of blob sidecar messages that were broadcast with no peer on.",
})
syncCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_sync_committee_subnet_attempted_broadcasts",
Help: "The number of sync committee that were attempted to be broadcast.",
})
blobSidecarCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
blobSidecarBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_blob_sidecar_committee_attempted_broadcasts",
Help: "The number of blob sidecar committee messages that were attempted to be broadcast.",
Help: "The number of blob sidecar messages that were attempted to be broadcast.",
})
dataColumnSidecarBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_data_column_sidecar_attempted_broadcasts",
Help: "The number of data column sidecar messages that were attempted to be broadcast.",
})

// Gossip Tracer Metrics

@@ -11,40 +11,68 @@ import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/security/noise"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
gomplex "github.com/libp2p/go-mplex"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/features"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"

"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

type internetProtocol string

const (
udp = "udp"
tcp = "tcp"
quic = "quic"
)

// MultiAddressBuilder takes in an ip address string and port to produce a go multiaddr format.
func MultiAddressBuilder(ipAddr string, port uint) (ma.Multiaddr, error) {
parsedIP := net.ParseIP(ipAddr)
if parsedIP.To4() == nil && parsedIP.To16() == nil {
return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
func MultiAddressBuilder(ip net.IP, tcpPort, quicPort uint) ([]ma.Multiaddr, error) {
ipType, err := extractIpType(ip)
if err != nil {
return nil, errors.Wrap(err, "unable to determine IP type")
}
if parsedIP.To4() != nil {
return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))

// Example: /ip4/1.2.3.4./tcp/5678
multiaddrStr := fmt.Sprintf("/%s/%s/tcp/%d", ipType, ip, tcpPort)
multiAddrTCP, err := ma.NewMultiaddr(multiaddrStr)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce TCP multiaddr format from %s:%d", ip, tcpPort)
}
return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/%d", ipAddr, port))

multiaddrs := []ma.Multiaddr{multiAddrTCP}

if features.Get().EnableQUIC {
// Example: /ip4/1.2.3.4/udp/5678/quic-v1
multiAddrQUIC, err := ma.NewMultiaddr(fmt.Sprintf("/%s/%s/udp/%d/quic-v1", ipType, ip, quicPort))
if err != nil {
return nil, errors.Wrapf(err, "cannot produce QUIC multiaddr format from %s:%d", ip, tcpPort)
}

multiaddrs = append(multiaddrs, multiAddrQUIC)
}

return multiaddrs, nil
}

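A quick sketch of what the rewritten MultiAddressBuilder above is expected to return for a dual-transport node, using the public go-multiaddr API; the IP and ports below are made up for illustration, and the quic-v1 component assumes a go-multiaddr version that registers that protocol (which current Prysm dependencies do):

package main

import (
	"fmt"

	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	// Roughly the pair produced for ip=192.168.0.1, tcpPort=13000, quicPort=13000
	// when the EnableQUIC feature flag is on.
	tcpAddr, err := ma.NewMultiaddr("/ip4/192.168.0.1/tcp/13000")
	if err != nil {
		panic(err)
	}
	quicAddr, err := ma.NewMultiaddr("/ip4/192.168.0.1/udp/13000/quic-v1")
	if err != nil {
		panic(err)
	}

	for _, addr := range []ma.Multiaddr{tcpAddr, quicAddr} {
		// ValueForProtocol pulls individual components back out of the address.
		ip, _ := addr.ValueForProtocol(ma.P_IP4)
		fmt.Println(addr, "-> ip4 component:", ip)
	}
}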
// buildOptions for the libp2p host.
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Option, error) {
cfg := s.cfg
listen, err := MultiAddressBuilder(ip.String(), cfg.TCPPort)
multiaddrs, err := MultiAddressBuilder(ip, cfg.TCPPort, cfg.QUICPort)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", ip.String(), cfg.TCPPort)
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", ip, cfg.TCPPort)
}
if cfg.LocalIP != "" {
if net.ParseIP(cfg.LocalIP) == nil {
localIP := net.ParseIP(cfg.LocalIP)
if localIP == nil {
return nil, errors.Wrapf(err, "invalid local ip provided: %s:%d", cfg.LocalIP, cfg.TCPPort)
}

listen, err = MultiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
multiaddrs, err = MultiAddressBuilder(localIP, cfg.TCPPort, cfg.QUICPort)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", cfg.LocalIP, cfg.TCPPort)
}
@@ -62,36 +90,43 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op

options := []libp2p.Option{
privKeyOption(priKey),
libp2p.ListenAddrs(listen),
libp2p.ListenAddrs(multiaddrs...),
libp2p.UserAgent(version.BuildData()),
libp2p.ConnectionGater(s),
libp2p.Transport(tcp.NewTCPTransport),
libp2p.Transport(libp2ptcp.NewTCPTransport),
libp2p.DefaultMuxers,
libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
libp2p.Security(noise.ID, noise.New),
libp2p.Ping(false), // Disable Ping Service.
}

if features.Get().EnableQUIC {
options = append(options, libp2p.Transport(libp2pquic.NewTransport))
}

if cfg.EnableUPnP {
options = append(options, libp2p.NATPortMap()) // Allow to use UPnP
}

if cfg.RelayNodeAddr != "" {
options = append(options, libp2p.AddrsFactory(withRelayAddrs(cfg.RelayNodeAddr)))
} else {
// Disable relay if it has not been set.
options = append(options, libp2p.DisableRelay())
}

if cfg.HostAddress != "" {
options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
external, err := MultiAddressBuilder(cfg.HostAddress, cfg.TCPPort)
externalMultiaddrs, err := MultiAddressBuilder(net.ParseIP(cfg.HostAddress), cfg.TCPPort, cfg.QUICPort)
if err != nil {
log.WithError(err).Error("Unable to create external multiaddress")
} else {
addrs = append(addrs, external)
addrs = append(addrs, externalMultiaddrs...)
}
return addrs
}))
}

if cfg.HostDNS != "" {
options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
external, err := ma.NewMultiaddr(fmt.Sprintf("/dns4/%s/tcp/%d", cfg.HostDNS, cfg.TCPPort))
@@ -107,21 +142,47 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
if features.Get().DisableResourceManager {
options = append(options, libp2p.ResourceManager(&network.NullResourceManager{}))
}

return options, nil
}

func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) (ma.Multiaddr, error) {
parsedIP := net.ParseIP(ipAddr)
if parsedIP.To4() == nil && parsedIP.To16() == nil {
return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
func extractIpType(ip net.IP) (string, error) {
if ip.To4() != nil {
return "ip4", nil
}
if id.String() == "" {
return nil, errors.New("empty peer id given")

if ip.To16() != nil {
return "ip6", nil
}
if parsedIP.To4() != nil {
return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String()))

return "", errors.Errorf("provided IP address is neither IPv4 nor IPv6: %s", ip)
}

func multiAddressBuilderWithID(ip net.IP, protocol internetProtocol, port uint, id peer.ID) (ma.Multiaddr, error) {
var multiaddrStr string

if id == "" {
return nil, errors.Errorf("empty peer id given: %s", id)
}
return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String()))

ipType, err := extractIpType(ip)
if err != nil {
return nil, errors.Wrap(err, "unable to determine IP type")
}

switch protocol {
case udp, tcp:
// Example with UDP: /ip4/1.2.3.4/udp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
// Example with TCP: /ip6/1.2.3.4/tcp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
multiaddrStr = fmt.Sprintf("/%s/%s/%s/%d/p2p/%s", ipType, ip, protocol, port, id)
case quic:
// Example: /ip4/1.2.3.4/udp/5678/quic-v1/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
multiaddrStr = fmt.Sprintf("/%s/%s/udp/%d/quic-v1/p2p/%s", ipType, ip, port, id)
default:
return nil, errors.Errorf("unsupported protocol: %s", protocol)
}

return ma.NewMultiaddr(multiaddrStr)
}

// Adds a private key to the libp2p option if the option was provided.

@@ -13,6 +13,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -88,30 +89,34 @@ func TestIPV6Support(t *testing.T) {
lNode := enode.NewLocalNode(db, key)
mockIPV6 := net.IP{0xff, 0x02, 0xAA, 0, 0x1F, 0, 0x2E, 0, 0, 0x36, 0x45, 0, 0, 0, 0, 0x02}
lNode.Set(enr.IP(mockIPV6))
ma, err := convertToSingleMultiAddr(lNode.Node())
mas, err := retrieveMultiAddrsFromNode(lNode.Node())
if err != nil {
t.Fatal(err)
}
ipv6Exists := false
for _, p := range ma.Protocols() {
if p.Name == "ip4" {
t.Error("Got ip4 address instead of ip6")

for _, ma := range mas {
ipv6Exists := false
for _, p := range ma.Protocols() {
if p.Name == "ip4" {
t.Error("Got ip4 address instead of ip6")
}
if p.Name == "ip6" {
ipv6Exists = true
}
}
if p.Name == "ip6" {
ipv6Exists = true
if !ipv6Exists {
t.Error("Multiaddress did not have ipv6 protocol")
}
}
if !ipv6Exists {
t.Error("Multiaddress did not have ipv6 protocol")
}
}

func TestDefaultMultiplexers(t *testing.T) {
var cfg libp2p.Config
_ = cfg
p2pCfg := &Config{
TCPPort: 2000,
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
StateNotifier: &mock.MockStateNotifier{},
}
svc := &Service{cfg: p2pCfg}
@@ -127,5 +132,57 @@ func TestDefaultMultiplexers(t *testing.T) {

assert.Equal(t, protocol.ID("/yamux/1.0.0"), cfg.Muxers[0].ID)
assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[1].ID)

}

func TestMultiAddressBuilderWithID(t *testing.T) {
testCases := []struct {
name string
ip net.IP
protocol internetProtocol
port uint
id string

expectedMultiaddrStr string
}{
{
name: "UDP",
ip: net.IPv4(192, 168, 0, 1),
protocol: udp,
port: 5678,
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",

expectedMultiaddrStr: "/ip4/192.168.0.1/udp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
},
{
name: "TCP",
ip: net.IPv4(192, 168, 0, 1),
protocol: tcp,
port: 5678,
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",

expectedMultiaddrStr: "/ip4/192.168.0.1/tcp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
},
{
name: "QUIC",
ip: net.IPv4(192, 168, 0, 1),
protocol: quic,
port: 5678,
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",

expectedMultiaddrStr: "/ip4/192.168.0.1/udp/5678/quic-v1/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
},
}

for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
id, err := hex.DecodeString(tt.id)
require.NoError(t, err)

actualMultiaddr, err := multiAddressBuilderWithID(tt.ip, tt.protocol, tt.port, peer.ID(id))
require.NoError(t, err)

actualMultiaddrStr := actualMultiaddr.String()
require.Equal(t, tt.expectedMultiaddrStr, actualMultiaddrStr)
})
}
}

@@ -1,7 +1,7 @@
// Package peers provides information about peers at the Ethereum consensus protocol level.
//
// "Protocol level" is the level above the network level, so this layer never sees or interacts with
// (for example) hosts that are uncontactable due to being down, firewalled, etc. Instead, this works
// (for example) hosts that are unreachable due to being down, firewalled, etc. Instead, this works
// with peers that are contactable but may or may not be of the correct fork version, not currently
// required due to the number of current connections, etc.
//
@@ -26,6 +26,7 @@ import (
"context"
"math"
"sort"
"strings"
"time"

"github.com/ethereum/go-ethereum/p2p/enr"
@@ -59,8 +60,8 @@ const (
)

const (
// ColocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
ColocationLimit = 5
// CollocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
CollocationLimit = 5

// Additional buffer beyond current peer limit, from which we can store the relevant peer statuses.
maxLimitBuffer = 150
@@ -76,6 +77,13 @@ const (
MaxBackOffDuration = 5000
)

type InternetProtocol string

const (
TCP = "tcp"
QUIC = "quic"
)

// Status is the structure holding the peer status information.
type Status struct {
ctx context.Context
@@ -449,6 +457,19 @@ func (p *Status) InboundConnected() []peer.ID {
return peers
}

// InboundConnectedWithProtocol returns the current batch of inbound peers that are connected with a given protocol.
func (p *Status) InboundConnectedWithProtocol(protocol InternetProtocol) []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), string(protocol)) {
peers = append(peers, pid)
}
}
return peers
}

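The protocol check above is a plain substring match on the peer's stored multiaddress, which works because libp2p addresses spell the transport out in the path. A tiny illustrative sketch with invented addresses:

package main

import (
	"fmt"
	"strings"
)

func main() {
	addrs := []string{
		"/ip4/127.0.0.1/tcp/13000",          // would be counted for the "tcp" protocol
		"/ip4/127.0.0.1/udp/13000/quic-v1",  // would be counted for the "quic" protocol
	}
	for _, a := range addrs {
		fmt.Printf("%s -> quic=%t tcp=%t\n", a, strings.Contains(a, "quic"), strings.Contains(a, "tcp"))
	}
}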
// Outbound returns the current batch of outbound peers.
func (p *Status) Outbound() []peer.ID {
p.store.RLock()
@@ -475,7 +496,20 @@ func (p *Status) OutboundConnected() []peer.ID {
return peers
}

// Active returns the peers that are connecting or connected.
// OutboundConnectedWithProtocol returns the current batch of outbound peers that are connected with a given protocol.
func (p *Status) OutboundConnectedWithProtocol(protocol InternetProtocol) []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), string(protocol)) {
peers = append(peers, pid)
}
}
return peers
}

// Active returns the peers that are active (connecting or connected).
func (p *Status) Active() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
@@ -514,7 +548,7 @@ func (p *Status) Disconnected() []peer.ID {
return peers
}

// Inactive returns the peers that are disconnecting or disconnected.
// Inactive returns the peers that are inactive (disconnecting or disconnected).
func (p *Status) Inactive() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
@@ -548,7 +582,7 @@ func (p *Status) Prune() {
p.store.Lock()
defer p.store.Unlock()

// Default to old method if flag isnt enabled.
// Default to old method if flag isn't enabled.
if !features.Get().EnablePeerScorer {
p.deprecatedPrune()
return
@@ -961,7 +995,7 @@ func (p *Status) isfromBadIP(pid peer.ID) bool {
return true
}
if val, ok := p.ipTracker[ip.String()]; ok {
if val > ColocationLimit {
if val > CollocationLimit {
return true
}
}
@@ -1012,7 +1046,7 @@ func (p *Status) tallyIPTracker() {
}

func sameIP(firstAddr, secondAddr ma.Multiaddr) bool {
// Exit early if we do get nil multiaddresses
// Exit early if we do get nil multi-addresses
if firstAddr == nil || secondAddr == nil {
return false
}

@@ -565,7 +565,7 @@ func TestPeerIPTracker(t *testing.T) {

badIP := "211.227.218.116"
var badPeers []peer.ID
for i := 0; i < peers.ColocationLimit+10; i++ {
for i := 0; i < peers.CollocationLimit+10; i++ {
port := strconv.Itoa(3000 + i)
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
if err != nil {
@@ -1111,6 +1111,87 @@ func TestInbound(t *testing.T) {
assert.Equal(t, inbound.String(), result[0].String())
}

func TestInboundConnected(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 0,
},
},
})

addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirInbound, peers.PeerConnecting)

result := p.InboundConnected()
require.Equal(t, 1, len(result))
assert.Equal(t, inbound.String(), result[0].String())
}

func TestInboundConnectedWithProtocol(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 0,
},
},
})

addrsTCP := []string{
"/ip4/127.0.0.1/tcp/33333",
"/ip4/127.0.0.2/tcp/44444",
}

addrsQUIC := []string{
"/ip4/192.168.1.3/udp/13000/quic-v1",
"/ip4/192.168.1.4/udp/14000/quic-v1",
"/ip4/192.168.1.5/udp/14000/quic-v1",
}

expectedTCP := make(map[string]bool, len(addrsTCP))
for _, addr := range addrsTCP {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)

peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
expectedTCP[peer.String()] = true
}

expectedQUIC := make(map[string]bool, len(addrsQUIC))
for _, addr := range addrsQUIC {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)

peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
expectedQUIC[peer.String()] = true
}

// TCP
// ---

actualTCP := p.InboundConnectedWithProtocol(peers.TCP)
require.Equal(t, len(expectedTCP), len(actualTCP))

for _, actualPeer := range actualTCP {
_, ok := expectedTCP[actualPeer.String()]
require.Equal(t, true, ok)
}

// QUIC
// ----
actualQUIC := p.InboundConnectedWithProtocol(peers.QUIC)
require.Equal(t, len(expectedQUIC), len(actualQUIC))

for _, actualPeer := range actualQUIC {
_, ok := expectedQUIC[actualPeer.String()]
require.Equal(t, true, ok)
}
}

func TestOutbound(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
@@ -1130,6 +1211,87 @@ func TestOutbound(t *testing.T) {
assert.Equal(t, outbound.String(), result[0].String())
}

func TestOutboundConnected(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 0,
},
},
})

addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirOutbound, peers.PeerConnecting)

result := p.OutboundConnected()
require.Equal(t, 1, len(result))
assert.Equal(t, inbound.String(), result[0].String())
}

func TestOutboundConnectedWithProtocol(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 0,
},
},
})

addrsTCP := []string{
"/ip4/127.0.0.1/tcp/33333",
"/ip4/127.0.0.2/tcp/44444",
}

addrsQUIC := []string{
"/ip4/192.168.1.3/udp/13000/quic-v1",
"/ip4/192.168.1.4/udp/14000/quic-v1",
"/ip4/192.168.1.5/udp/14000/quic-v1",
}

expectedTCP := make(map[string]bool, len(addrsTCP))
for _, addr := range addrsTCP {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)

peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
expectedTCP[peer.String()] = true
}

expectedQUIC := make(map[string]bool, len(addrsQUIC))
for _, addr := range addrsQUIC {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)

peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
expectedQUIC[peer.String()] = true
}

// TCP
// ---

actualTCP := p.OutboundConnectedWithProtocol(peers.TCP)
require.Equal(t, len(expectedTCP), len(actualTCP))

for _, actualPeer := range actualTCP {
_, ok := expectedTCP[actualPeer.String()]
require.Equal(t, true, ok)
}

// QUIC
// ----
actualQUIC := p.OutboundConnectedWithProtocol(peers.QUIC)
require.Equal(t, len(expectedQUIC), len(actualQUIC))

for _, actualPeer := range actualQUIC {
_, ok := expectedQUIC[actualPeer.String()]
require.Equal(t, true, ok)
}
}

// addPeer is a helper to add a peer with a given connection state)
func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState) peer.ID {
// Set up some peers with different states

@@ -3,6 +3,7 @@ package p2p
import (
"context"
"encoding/hex"
"fmt"
"strings"
"time"

@@ -130,7 +131,7 @@ func (s *Service) peerInspector(peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) {
}
}

// Creates a list of pubsub options to configure out router with.
// pubsubOptions creates a list of options to configure our router with.
func (s *Service) pubsubOptions() []pubsub.Option {
psOpts := []pubsub.Option{
pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign),
@@ -147,9 +148,35 @@ func (s *Service) pubsubOptions() []pubsub.Option {
pubsub.WithGossipSubParams(pubsubGossipParam()),
pubsub.WithRawTracer(gossipTracer{host: s.host}),
}

if len(s.cfg.StaticPeers) > 0 {
directPeersAddrInfos, err := parsePeersEnr(s.cfg.StaticPeers)
if err != nil {
log.WithError(err).Error("Could not add direct peer option")
return psOpts
}
psOpts = append(psOpts, pubsub.WithDirectPeers(directPeersAddrInfos))
}

return psOpts
}

// parsePeersEnr takes a list of raw ENRs and converts them into a list of AddrInfos.
func parsePeersEnr(peers []string) ([]peer.AddrInfo, error) {
addrs, err := PeersFromStringAddrs(peers)
if err != nil {
return nil, fmt.Errorf("cannot convert peers raw ENRs into multiaddresses: %v", err)
}
if len(addrs) == 0 {
return nil, fmt.Errorf("converting peers raw ENRs into multiaddresses resulted in an empty list")
}
directAddrInfos, err := peer.AddrInfosFromP2pAddrs(addrs...)
if err != nil {
return nil, fmt.Errorf("cannot convert peers multiaddresses into AddrInfos: %v", err)
}
return directAddrInfos, nil
}

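The last step of parsePeersEnr relies on libp2p's AddrInfosFromP2pAddrs helper, which splits a /p2p/-suffixed multiaddress into a peer ID plus dial addresses. A small sketch with a made-up static peer address (the peer ID string is the one used in the tests above and is only for illustration):

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	// The trailing /p2p/<peer-id> component is what AddrInfosFromP2pAddrs needs.
	addr, err := ma.NewMultiaddr("/ip4/10.0.0.5/tcp/13000/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs")
	if err != nil {
		panic(err)
	}
	infos, err := peer.AddrInfosFromP2pAddrs(addr)
	if err != nil {
		panic(err)
	}
	for _, info := range infos {
		fmt.Println(info.ID, info.Addrs) // one AddrInfo per distinct peer ID
	}
}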
// creates a custom gossipsub parameter set.
func pubsubGossipParam() pubsub.GossipSubParams {
gParams := pubsub.DefaultGossipSubParams()

@@ -90,7 +90,7 @@ func TestService_CanSubscribe(t *testing.T) {
formatting := []interface{}{digest}

// Special case for attestation subnets which have a second formatting placeholder.
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat {
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat || topic == DataColumnSubnetTopicFormat {
formatting = append(formatting, 0 /* some subnet ID */)
}

@@ -43,6 +43,9 @@ const BlobSidecarsByRangeName = "/blob_sidecars_by_range"
// BlobSidecarsByRootName is the name for the BlobSidecarsByRoot v1 message topic.
const BlobSidecarsByRootName = "/blob_sidecars_by_root"

// DataColumnSidecarsByRootName is the name for the DataColumnSidecarsByRoot v1 message topic.
const DataColumnSidecarsByRootName = "/data_column_sidecars_by_root"

const (
// V1 RPC Topics
// RPCStatusTopicV1 defines the v1 topic for the status rpc method.
@@ -65,6 +68,9 @@ const (
// RPCBlobSidecarsByRootTopicV1 is a topic for requesting blob sidecars by their block root. New in deneb.
// /eth2/beacon_chain/req/blob_sidecars_by_root/1/
RPCBlobSidecarsByRootTopicV1 = protocolPrefix + BlobSidecarsByRootName + SchemaVersionV1
// RPCDataColumnSidecarsByRootTopicV1 is a topic for requesting data column sidecars by their block root. New in PeerDAS.
// /eth2/beacon_chain/req/data_column_sidecars_by_root/1
RPCDataColumnSidecarsByRootTopicV1 = protocolPrefix + DataColumnSidecarsByRootName + SchemaVersionV1

// V2 RPC Topics
// RPCBlocksByRangeTopicV2 defines v2 the topic for the blocks by range rpc method.

@@ -24,6 +24,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
prysmnetwork "github.com/prysmaticlabs/prysm/v5/network"
@@ -124,31 +125,34 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
if err != nil {
return nil, errors.Wrapf(err, "failed to build p2p options")
}

// Sets mplex timeouts
configureMplex()
h, err := libp2p.New(opts...)
if err != nil {
log.WithError(err).Error("Failed to create p2p host")
return nil, err
return nil, errors.Wrapf(err, "failed to create p2p host")
}

s.host = h

// Gossipsub registration is done before we add in any new peers
// due to libp2p's gossipsub implementation not taking into
// account previously added peers when creating the gossipsub
// object.
psOpts := s.pubsubOptions()

// Set the pubsub global parameters that we require.
setPubSubParameters()

// Reinitialize them in the event we are running a custom config.
attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount

gs, err := pubsub.NewGossipSub(s.ctx, s.host, psOpts...)
if err != nil {
log.WithError(err).Error("Failed to start pubsub")
return nil, err
return nil, errors.Wrapf(err, "failed to create p2p pubsub")
}

s.pubsub = gs

s.peers = peers.NewStatus(ctx, &peers.StatusConfig{
@@ -213,7 +217,7 @@ func (s *Service) Start() {
if len(s.cfg.StaticPeers) > 0 {
addrs, err := PeersFromStringAddrs(s.cfg.StaticPeers)
if err != nil {
log.WithError(err).Error("Could not connect to static peer")
log.WithError(err).Error("could not convert ENR to multiaddr")
}
// Set trusted peers for those that are provided as static addresses.
pids := peerIdsFromMultiAddrs(addrs)
@@ -222,7 +226,7 @@ func (s *Service) Start() {
}
// Initialize metadata according to the
// current epoch.
s.RefreshENR()
s.RefreshPersistentSubnets()

// Periodic functions.
async.RunEvery(s.ctx, params.BeaconConfig().TtfbTimeoutDuration(), func() {
@@ -230,13 +234,26 @@ func (s *Service) Start() {
})
async.RunEvery(s.ctx, 30*time.Minute, s.Peers().Prune)
async.RunEvery(s.ctx, time.Duration(params.BeaconConfig().RespTimeout)*time.Second, s.updateMetrics)
async.RunEvery(s.ctx, refreshRate, s.RefreshENR)
async.RunEvery(s.ctx, refreshRate, s.RefreshPersistentSubnets)
async.RunEvery(s.ctx, 1*time.Minute, func() {
log.WithFields(logrus.Fields{
"inbound": len(s.peers.InboundConnected()),
"outbound": len(s.peers.OutboundConnected()),
"activePeers": len(s.peers.Active()),
}).Info("Peer summary")
inboundQUICCount := len(s.peers.InboundConnectedWithProtocol(peers.QUIC))
inboundTCPCount := len(s.peers.InboundConnectedWithProtocol(peers.TCP))
outboundQUICCount := len(s.peers.OutboundConnectedWithProtocol(peers.QUIC))
outboundTCPCount := len(s.peers.OutboundConnectedWithProtocol(peers.TCP))
total := inboundQUICCount + inboundTCPCount + outboundQUICCount + outboundTCPCount

fields := logrus.Fields{
"inboundTCP": inboundTCPCount,
"outboundTCP": outboundTCPCount,
"total": total,
}

if features.Get().EnableQUIC {
fields["inboundQUIC"] = inboundQUICCount
fields["outboundQUIC"] = outboundQUICCount
}

log.WithFields(fields).Info("Connected peers")
})

multiAddrs := s.host.Network().ListenAddresses()
@@ -244,9 +261,10 @@ func (s *Service) Start() {

p2pHostAddress := s.cfg.HostAddress
p2pTCPPort := s.cfg.TCPPort
p2pQUICPort := s.cfg.QUICPort

if p2pHostAddress != "" {
logExternalIPAddr(s.host.ID(), p2pHostAddress, p2pTCPPort)
logExternalIPAddr(s.host.ID(), p2pHostAddress, p2pTCPPort, p2pQUICPort)
verifyConnectivity(p2pHostAddress, p2pTCPPort, "tcp")
}

@@ -340,6 +358,15 @@ func (s *Service) ENR() *enr.Record {
return s.dv5Listener.Self().Record()
}

// NodeID returns the local node's node ID
// for discovery.
func (s *Service) NodeID() enode.ID {
if s.dv5Listener == nil {
return [32]byte{}
}
return s.dv5Listener.Self().ID()
}

// DiscoveryAddresses represents our enr addresses as multiaddresses.
func (s *Service) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
if s.dv5Listener == nil {

@@ -102,8 +102,9 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {

cs := startup.NewClockSynchronizer()
cfg := &Config{
TCPPort: 2000,
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
ClockWaiter: cs,
}
s, err := NewService(context.Background(), cfg)
@@ -147,8 +148,9 @@ func TestService_Start_NoDiscoverFlag(t *testing.T) {

cs := startup.NewClockSynchronizer()
cfg := &Config{
TCPPort: 2000,
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
StateNotifier: &mock.MockStateNotifier{},
NoDiscovery: true, // <-- no s.dv5Listener is created
ClockWaiter: cs,

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
@@ -34,8 +35,8 @@ var syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
// The value used with the subnet, inorder
// to create an appropriate key to retrieve
// the relevant lock. This is used to differentiate
// sync subnets from attestation subnets. This is deliberately
// chosen as more than 64(attestation subnet count).
// sync subnets from others. This is deliberately
// chosen as more than 64 (attestation subnet count).
const syncLockerVal = 100

// The value used with the blob sidecar subnet, in order
@@ -45,6 +46,13 @@ const syncLockerVal = 100
// chosen more than sync and attestation subnet combined.
const blobSubnetLockerVal = 110

// The value used with the data column sidecar subnet, in order
// to create an appropriate key to retrieve
// the relevant lock. This is used to differentiate
// data column subnets from others. This is deliberately
// chosen more than sync, attestation and blob subnet (6) combined.
const dataColumnSubnetVal = 150

// FindPeersWithSubnet performs a network search for peers
// subscribed to a particular subnet. Then it tries to connect
// with those peers. This method will block until either:
@@ -93,6 +101,11 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
if err != nil {
continue
}

if info == nil {
continue
}

wg.Add(1)
go func() {
if err := s.connectWithPeer(ctx, *info); err != nil {
@@ -197,6 +210,25 @@ func initializePersistentSubnets(id enode.ID, epoch primitives.Epoch) error {
return nil
}

func initializePersistentColumnSubnets(id enode.ID) error {
_, ok, expTime := cache.ColumnSubnetIDs.GetColumnSubnets()
if ok && expTime.After(time.Now()) {
return nil
}
subsMap, err := peerdas.CustodyColumnSubnets(id, params.BeaconConfig().CustodyRequirement)
if err != nil {
return err
}

subs := make([]uint64, 0, len(subsMap))
for sub := range subsMap {
subs = append(subs, sub)
}

cache.ColumnSubnetIDs.AddColumnSubnets(subs)
return nil
}

// Spec pseudocode definition:
//
// def compute_subscribed_subnets(node_id: NodeID, epoch: Epoch) -> Sequence[SubnetID]:
@@ -346,10 +378,11 @@ func syncBitvector(record *enr.Record) (bitfield.Bitvector4, error) {

// The subnet locker is a map which keeps track of all
// mutexes stored per subnet. This locker is re-used
// between both the attestation and sync subnets. In
// order to differentiate between attestation and sync
// subnets. Sync subnets are stored by (subnet+syncLockerVal). This
// is to prevent conflicts while allowing both subnets
// between both the attestation, sync and blob subnets.
// Sync subnets are stored by (subnet+syncLockerVal).
// Blob subnets are stored by (subnet+blobSubnetLockerVal).
// Data column subnets are stored by (subnet+dataColumnSubnetVal).
// This is to prevent conflicts while allowing subnets
// to use a single locker.
func (s *Service) subnetLocker(i uint64) *sync.RWMutex {
s.subnetsLockLock.Lock()

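The comment above describes how one map of mutexes serves several subnet families through fixed offsets; the callers (not shown in this hunk) presumably add the offset before calling subnetLocker. A small illustrative sketch of that keying scheme, with the offsets copied from the constants in this diff and the helper name invented for the example:

package main

import "fmt"

const (
	syncLockerVal       = 100 // offset for sync committee subnets
	blobSubnetLockerVal = 110 // offset for blob sidecar subnets
	dataColumnSubnetVal = 150 // offset for data column sidecar subnets
)

// lockerKey shows how a subnet index from each family maps to a distinct key
// in the shared locker map; attestation subnets use the raw index.
func lockerKey(family string, subnet uint64) uint64 {
	switch family {
	case "sync":
		return subnet + syncLockerVal
	case "blob":
		return subnet + blobSubnetLockerVal
	case "dataColumn":
		return subnet + dataColumnSubnetVal
	default: // attestation
		return subnet
	}
}

func main() {
	// Subnet index 3 lands on four different locker keys depending on the family.
	fmt.Println(lockerKey("attestation", 3), lockerKey("sync", 3), lockerKey("blob", 3), lockerKey("dataColumn", 3))
}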
@@ -66,7 +66,7 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
genesisTime := time.Now()

bootNodeService := &Service{
cfg: &Config{TCPPort: 2000, UDPPort: 3000},
cfg: &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
@@ -89,8 +89,9 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
service, err := NewService(ctx, &Config{
Discv5BootStrapAddrs: []string{bootNodeENR},
MaxPeers: 30,
TCPPort: uint(2000 + i),
UDPPort: uint(3000 + i),
UDPPort: uint(2000 + i),
TCPPort: uint(3000 + i),
QUICPort: uint(3000 + i),
})

require.NoError(t, err)
@@ -133,8 +134,9 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNodeENR},
MaxPeers: 30,
TCPPort: 2010,
UDPPort: 3010,
UDPPort: 2010,
TCPPort: 3010,
QUICPort: 3010,
}

service, err := NewService(ctx, cfg)

@@ -3,6 +3,7 @@ package testing
import (
"context"

"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/control"
@@ -55,6 +56,11 @@ func (_ *FakeP2P) ENR() *enr.Record {
return new(enr.Record)
}

// NodeID returns the node id of the local peer.
func (_ *FakeP2P) NodeID() enode.ID {
return [32]byte{}
}

// DiscoveryAddresses -- fake
func (_ *FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
return nil, nil
@@ -66,7 +72,7 @@ func (_ *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ i
}

// RefreshENR mocks the p2p func.
func (_ *FakeP2P) RefreshENR() {}
func (_ *FakeP2P) RefreshPersistentSubnets() {}

// LeaveTopic -- fake.
func (_ *FakeP2P) LeaveTopic(_ string) error {
@@ -148,6 +154,11 @@ func (_ *FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSideca
return nil
}

// BroadcastDataColumn -- fake.
func (_ *FakeP2P) BroadcastDataColumn(_ context.Context, _ uint64, _ *ethpb.DataColumnSidecar) error {
return nil
}

// InterceptPeerDial -- fake.
func (_ *FakeP2P) InterceptPeerDial(peer.ID) (allow bool) {
return true

@@ -48,6 +48,12 @@ func (m *MockBroadcaster) BroadcastBlob(context.Context, uint64, *ethpb.BlobSide
return nil
}

// BroadcastDataColumn broadcasts a data column for mock.
func (m *MockBroadcaster) BroadcastDataColumn(context.Context, uint64, *ethpb.DataColumnSidecar) error {
m.BroadcastCalled.Store(true)
return nil
}

// NumMessages returns the number of messages broadcasted.
func (m *MockBroadcaster) NumMessages() int {
m.msgLock.Lock()

@@ -4,6 +4,7 @@ import (
"context"
"errors"

"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
@@ -39,6 +40,11 @@ func (m MockPeerManager) ENR() *enr.Record {
return m.Enr
}

// NodeID .
func (m MockPeerManager) NodeID() enode.ID {
return [32]byte{}
}

// DiscoveryAddresses .
func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
if m.FailDiscoveryAddr {
@@ -48,7 +54,7 @@ func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
}

// RefreshENR .
func (_ MockPeerManager) RefreshENR() {}
func (_ MockPeerManager) RefreshPersistentSubnets() {}

// FindPeersWithSubnet .
func (_ MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {

@@ -10,6 +10,7 @@ import (
"testing"
"time"

"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
pubsub "github.com/libp2p/go-libp2p-pubsub"
core "github.com/libp2p/go-libp2p/core"
@@ -183,6 +184,12 @@ func (p *TestP2P) BroadcastBlob(context.Context, uint64, *ethpb.BlobSidecar) err
return nil
}

// BroadcastDataColumn broadcasts a data column for mock.
func (p *TestP2P) BroadcastDataColumn(context.Context, uint64, *ethpb.DataColumnSidecar) error {
p.BroadcastCalled.Store(true)
return nil
}

// SetStreamHandler for RPC.
func (p *TestP2P) SetStreamHandler(topic string, handler network.StreamHandler) {
p.BHost.SetStreamHandler(protocol.ID(topic), handler)
@@ -263,6 +270,11 @@ func (_ *TestP2P) ENR() *enr.Record {
return new(enr.Record)
}

// NodeID returns the node id of the local peer.
func (_ *TestP2P) NodeID() enode.ID {
return [32]byte{}
}

// DiscoveryAddresses --
func (_ *TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
return nil, nil
@@ -361,7 +373,7 @@ func (_ *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ i
}

// RefreshENR mocks the p2p func.
func (_ *TestP2P) RefreshENR() {}
func (_ *TestP2P) RefreshPersistentSubnets() {}

// ForkDigest mocks the p2p func.
func (p *TestP2P) ForkDigest() ([4]byte, error) {

@@ -30,6 +30,9 @@ const (
GossipBlsToExecutionChangeMessage = "bls_to_execution_change"
// GossipBlobSidecarMessage is the name for the blob sidecar message type.
GossipBlobSidecarMessage = "blob_sidecar"
// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
GossipDataColumnSidecarMessage = "data_column_sidecar"

// Topic Formats
//
// AttestationSubnetTopicFormat is the topic format for the attestation subnet.
@@ -52,4 +55,6 @@ const (
BlsToExecutionChangeSubnetTopicFormat = GossipProtocolAndDigest + GossipBlsToExecutionChangeMessage
// BlobSubnetTopicFormat is the topic format for the blob subnet.
BlobSubnetTopicFormat = GossipProtocolAndDigest + GossipBlobSidecarMessage + "_%d"
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
)
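To show how these topic-format constants get turned into concrete gossip topic names, here is a hedged sketch; the real GossipProtocolAndDigest prefix is defined elsewhere in the package and is not shown in this diff, so the "/eth2/%x/" value below is an assumption for illustration:

package main

import "fmt"

func main() {
	// Hypothetical stand-ins for the constants referenced in the diff above.
	const gossipProtocolAndDigest = "/eth2/%x/" // assumed prefix with a fork-digest placeholder
	const dataColumnSubnetTopicFormat = gossipProtocolAndDigest + "data_column_sidecar" + "_%d"

	digest := [4]byte{0x01, 0x02, 0x03, 0x04} // fork digest
	subnet := uint64(7)                       // data column subnet index

	// Prints something like: /eth2/01020304/data_column_sidecar_7
	fmt.Printf(dataColumnSubnetTopicFormat+"\n", digest, subnet)
}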