Compare commits

...

48 Commits

Author SHA1 Message Date
nisdas
ed55928153 Fix it For Real 2024-05-14 16:41:50 +08:00
nisdas
c3598c6939 Hack E2E 2024-05-13 22:29:08 +08:00
Nishant Das
f2d956d688 Add DA Check For Data Columns (#13938)
* Add new DA check

* Exit early in the event no commitments exist.

* Gazelle

* Fix Mock Broadcaster

* Fix Test Setup

* Update beacon-chain/blockchain/process_block.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Manu's Review

* Fix Build

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2024-05-02 15:57:35 +08:00
Manu NALEPA
a5af4d8a90 Implement peer DAS proposer RPC (#13922)
* Remove capital letter from error messages.

* `[4]byte` => `[fieldparams.VersionLength]byte`.

* Prometheus: Remove extra `committee`.

They are probably due to a bad copy/paste.

Note: The name of the probe itself remains,
to ensure backward compatibility.

* Implement Proposer RPC for data columns.

* Fix TestProposer_ProposeBlock_OK test.

* Remove default peerDAS activation.

* `validateDataColumn`: Workaround to return a `VerifiedRODataColumn`
2024-04-30 20:45:43 +08:00
Nishant Das
5976319a85 Update .bazelrc (#13931) 2024-04-29 10:17:22 +02:00
Manu NALEPA
3fdb85a37d Implement custody_subnet_count ENR field. (#13915)
https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5
2024-04-24 16:04:12 +02:00
Manu NALEPA
6c22814ed1 Peer das core (#13877)
* Bump `c-kzg-4844` lib to the `das` branch.

* Implement `MerkleProofKZGCommitments`.

* Implement `das-core.md`.

* Use `peerdas.CustodyColumnSubnets` and `peerdas.CustodyColumns`.

* `CustodyColumnSubnets`: Include `i` in the for loop.

* Remove `computeSubscribedColumnSubnet`.

* Remove `peerdas.CustodyColumns` out of the for loop.
2024-04-24 15:17:32 +02:00
Nishant Das
f2019dd053 Add Request And Response RPC Methods For Data Columns (#13909)
* Add RPC Handler

* Add Column Requests

* Update beacon-chain/db/filesystem/blob.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Update beacon-chain/p2p/rpc_topic_mappings.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Manu's Review

* Manu's Review

* Interface Fixes

* mock manager

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2024-04-24 15:17:32 +02:00
Nishant Das
92de4a1274 Add Data Column Gossip Handlers (#13894)
* Add Data Column Subscriber

* Add Data Column Validator

* Wire all Handlers In

* Fix Build

* Fix Test

* Fix IP in Test

* Fix IP in Test
2024-04-24 15:17:32 +02:00
Nishant Das
e498183d74 Add Support For Discovery Of Column Subnets (#13883)
* Add Support For Discovery Of Column Subnets

* Lint for SubnetsPerNode

* Manu's Review

* Change to a better name
2024-04-24 15:17:32 +02:00
Nishant Das
9a98e194c0 add in networking params (#13866) 2024-04-24 15:17:32 +02:00
Nishant Das
ebcc1404b6 add it (#13865) 2024-04-24 15:16:21 +02:00
Nishant Das
2bef9563a1 Add in column sidecars protos (#13862) 2024-04-24 15:16:21 +02:00
Manu NALEPA
a6f134e48e Revert "zig: Update zig to recent main branch commit (#13142)" (#13908)
This reverts commit b24b60dbd8.
2024-04-24 03:56:52 +00:00
Preston Van Loon
fdbb5136d9 spectests: fail hard on missing test folders (#13913) 2024-04-24 02:49:33 +00:00
Preston Van Loon
2c66918594 Refactor beacon-chain/core/helpers tests to be black box (#13906) 2024-04-23 15:24:47 +00:00
Manu NALEPA
a0dac292ff Do not remove blobs DB in slasher. (#13881) 2024-04-23 08:09:26 +00:00
terence
75857e7177 Simplify prune invalid by reusing existing fork choice store call (#13878) 2024-04-23 03:09:16 +00:00
Preston Van Loon
0369f70b0b Electra beacon config (#13907)
* Update spectests to v1.5.0

* Add electra config

* Fix tests in beacon-chain/rpc/eth/config

* gofmt
2024-04-22 22:14:57 +00:00
Alec Thomas
276b97530f Change example.org DNS record (#13904)
DNS changed for this record, causing tests to fail

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-04-22 16:36:30 +00:00
Radosław Kapka
d5daf49a9a Use correct port for health check in Beacon API e2e evaluator (#13892) 2024-04-22 16:18:07 +00:00
james-prysm
feb16ae4aa consistent auth token for validator apis (#13747)
* wip

* fixing tests

* adding more tests especially to handle legacy

* fixing linting

* fixing deepsource issues and flags

* fixing some deepsource issues,pathing issues, and logs

* some review items

* adding additional review feedback

* updating to follow updates from https://github.com/ethereum/keymanager-APIs/pull/74

* adjusting functions to match changes in keymanagers PR

* Update validator/rpc/auth_token.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/auth_token.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/auth_token.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* review feedback

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-04-18 16:26:49 +00:00
kasey
219301339c Don't return error that can be internally handled (#13887)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-04-17 18:28:01 +00:00
Radosław Kapka
aec349f75a Upgrade the Beacon API e2e evaluator (#13868)
* GET

* POST

* Revert "Auxiliary commit to revert individual files from 615feb104004d6a945ededf5862ae38325fc7ec2"

This reverts commit 55cf071c684019f3d6124179154c10b2277fda49.

* comment fix

* deepsource
2024-04-15 05:56:47 +00:00
terence
5f909caedf Remove unused IsViableForCheckpoint (#13879) 2024-04-14 16:38:25 +00:00
Radosław Kapka
ba6dff3adb Return syncing status when node is optimistic (#13875) 2024-04-12 10:24:30 +00:00
Radosław Kapka
8cd05f098b Use read only validators in Beacon API (#13873) 2024-04-12 07:19:40 +00:00
Radosław Kapka
425f5387fa Handle overflow in retention period calculation (#13874) 2024-04-12 07:17:12 +00:00
Nishant Das
f2ce115ade Revert Peer Log Changes (#13872) 2024-04-12 06:49:01 +00:00
kasey
090a3e1ded Fix bug from PR 13827 (#13871)
* fix AS cache bug, tighten ro constructors

* additional coverage on AS cache filter

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-04-11 23:07:44 +00:00
kasey
c0acb7d352 Backfill throttling (#13855)
* add a sleep between retries as a simple throttle

* unit test

* deepsource

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-04-11 15:22:29 +00:00
Radosław Kapka
0d6070e6fc Use retention period when fetching blobs (#13869) 2024-04-11 14:06:00 +00:00
Manu NALEPA
bd00f851f0 e2e: Expected log Running node with peerId= -> Running node with. (#13861)
Rationale:
The `FindFollowingTextInFile` seems to have trouble with `logrus` fields.
2024-04-09 07:51:56 +00:00
Nishant Das
1a0c07deec Extend Broadcast Window For Attestations (#13858)
* fix it

* make check better
2024-04-08 04:49:20 +00:00
kasey
04f231a400 Initsync skip local blobs (#13827)
* wip - init-sync skip available blob req

* satisfy deep source

* gaz

* don't need to sort blobs; simplify blobRequest stack

* wip debug log to watch blob skip behavior

* unit tests for new blob req generator

* refactor to reduce blob req func count

* log when WaitForSummarizer fails

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-04-05 19:09:43 +00:00
Manu NALEPA
be1bfcce63 P2P: Add QUIC support (#13786)
* (Unrelated) DoppelGanger: Improve message.

* `beacon-blocks-by-range`: Add `--network` option.

* `ensurePeerConnections`: Remove capital letter in error message.

* `MultiAddressBuilder{WithID}`: Refactor.

* `buildOptions`: Improve log.

* `NewService`: Bubbles up errors.

* `tcp` ==> `libp2ptcp`

* `multiAddressBuilderWithID`: Add the ability to build QUIC multiaddr

* `p2p Start`: Fix error message.

* `p2p`: Add QUIC support.

* Status: Implement `{Inbound,Outbound}Connected{TCP,QUIC}`.

* Logging: Display the number of TCP/QUIC connected peers.

* P2P: Implement `{Inbound,Outbound}ConnectedWithProtocol`.

* Hide QUIC protocol behind the `--enable-quic` feature flag.

* `e2e`: Add `--enable-quic` flag.

* Add `--enable-quic` in `devModeFlag`.

* `convertToMultiAddrs` ==> `retrieveMultiAddrsFromNode`.

* `convertToAddrInfo`: Ensure `len(infos) == 1`.
2024-04-04 12:21:35 +00:00
Manu NALEPA
8cf5d79852 Remove the Goerli/Prater support. (#13846) 2024-04-03 19:19:17 +00:00
redistay
f7912e7c20 chore: fix some comments (#13843)
Signed-off-by: redistay <wujunjing@outlook.com>
2024-04-02 22:19:15 +00:00
terence
caa8be5dd1 Beacon-api: broadcast blobs in the event of seen block (#13830)
* Beacon-api: broadcast blobs in the event of seen block

* Fix parameters

* Fix test

* Check forkchoice

* Ran go format

* Revert "Ran go format"

This reverts commit 091e77e81d6e2b9861fecc27c0bad1898033f9a3.

* James feedback

* Radek's feedback

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Fix bad tests

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-04-02 18:12:58 +00:00
cui
0c15a30a34 using slices.Index (#13836) 2024-04-02 16:30:05 +00:00
cui
7bce1c0714 using slices.IndexFunc (#13839) 2024-04-02 16:06:27 +00:00
Radosław Kapka
d1084cbe48 Send correct state root with finalization event (#13842) 2024-04-02 15:32:32 +00:00
cui
2cc3f69a3f using slices.Contains (#13835) 2024-04-01 21:26:52 +00:00
cui
a861489a83 using slices.ContainsFunc (#13838) 2024-04-01 21:15:38 +00:00
cui
0e1c585f7d using slices.Contains (#13837) 2024-04-01 21:10:46 +00:00
cui
9df20e616c using slices.IndexFunc (#13834) 2024-04-01 20:04:40 +00:00
kasey
53fdd2d062 allow other pkgs to check for blobs in pruning cache (#13788)
* allow other pkgs to check for blobs in pruning cache

* address deepsource complaints

* custom error to simplify test setup

* add AllAvailable method

* make storage summary slot field private

* unit test and off-by-one fix

* remove comment with copy of tested function

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-04-01 14:19:51 +00:00
Sammy Rosso
2b4bb5d890 Fixed spelling mistakes in comments (#13833) 2024-04-01 11:12:20 +00:00
237 changed files with 5932 additions and 2030 deletions

View File

@@ -22,6 +22,7 @@ coverage --define=coverage_enabled=1
build --workspace_status_command=./hack/workspace_status.sh
build --define blst_disabled=false
build --compilation_mode=opt
run --define blst_disabled=false
build:blst_disabled --define blst_disabled=true

View File

@@ -29,23 +29,7 @@ http_archive(
load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")
# Temporarily use a nightly build until 0.12.0 is released.
# See: https://github.com/prysmaticlabs/prysm/issues/13130
zig_toolchains(
host_platform_sha256 = {
"linux-aarch64": "45afb8e32adde825165f4f293fcea9ecea503f7f9ec0e9bf4435afe70e67fb70",
"linux-x86_64": "f136c6a8a0f6adcb057d73615fbcd6f88281b3593f7008d5f7ed514ff925c02e",
"macos-aarch64": "05d995853c05243151deff47b60bdc2674f1e794a939eaeca0f42312da031cee",
"macos-x86_64": "721754ba5a50f31e8a1f0e1a74cace26f8246576878ac4a8591b0ee7b6db1fc1",
"windows-x86_64": "93f5248b2ea8c5ee8175e15b1384e133edc1cd49870b3ea259062a2e04164343",
},
url_formats = [
"https://ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
"https://mirror.bazel.build/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
"https://prysmaticlabs.com/mirror/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
],
version = "0.12.0-dev.1349+fa022d1ec",
)
zig_toolchains()
# Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April, 2025.
# For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc
@@ -243,7 +227,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.4.0"
consensus_spec_version = "v1.5.0-alpha.0"
bls_test_version = "v0.1.1"
@@ -259,7 +243,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "c282c0f86f23f3d2e0f71f5975769a4077e62a7e3c7382a16bd26a7e589811a0",
sha256 = "33c5547772b6d8d6f041dff7e7d26b0358c2392daed34394a3aa81147812a81c",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -275,7 +259,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "4649c35aa3b8eb0cfdc81bee7c05649f90ef36bede5b0513e1f2e8baf37d6033",
sha256 = "06f286199cf2fedd4700487fb8feb0904e0ae18daaa4b3f70ea430ca9c388167",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -291,7 +275,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "c5a03f724f757456ffaabd2a899992a71d2baf45ee4db65ca3518f2b7ee928c8",
sha256 = "5f2a4452b323075eba6bf950003f7d91fd04ebcbde5bd087beafb5d6f6325ad4",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -306,7 +290,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "cd1c9d97baccbdde1d2454a7dceb8c6c61192a3b581eee12ffc94969f2db8453",
sha256 = "fd7e83e8cbeb3e297f2aeb93776305f7d606272c97834d8d9be673984501ed36",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -342,22 +326,6 @@ filegroup(
url = "https://github.com/eth-clients/eth2-networks/archive/934c948e69205dcf2deb87e4ae6cc140c335f94d.tar.gz",
)
http_archive(
name = "goerli_testnet",
build_file_content = """
filegroup(
name = "configs",
srcs = [
"prater/config.yaml",
],
visibility = ["//visibility:public"],
)
""",
sha256 = "43fc0f55ddff7b511713e2de07aa22846a67432df997296fb4fc09cd8ed1dcdb",
strip_prefix = "goerli-6522ac6684693740cd4ddcc2a0662e03702aa4a1",
url = "https://github.com/eth-clients/goerli/archive/6522ac6684693740cd4ddcc2a0662e03702aa4a1.tar.gz",
)
http_archive(
name = "holesky_testnet",
build_file_content = """

View File

@@ -1,11 +1,24 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"constants.go",
"headers.go",
"jwt.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api",
visibility = ["//visibility:public"],
deps = [
"//crypto/rand:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["jwt_test.go"],
embed = [":go_default_library"],
deps = ["//testing/require:go_default_library"],
)

View File

@@ -4,4 +4,6 @@ const (
WebUrlPrefix = "/v2/validator/"
WebApiUrlPrefix = "/api/v2/validator/"
KeymanagerApiPrefix = "/eth/v1"
AuthTokenFileName = "auth-token"
)

api/jwt.go (new file, +32)
View File

@@ -0,0 +1,32 @@
package api
import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
)
// GenerateRandomHexString generates a random hex string that follows the standards for a JWT token,
// used for beacon node -> execution client
// and for web client -> validator client authentication
func GenerateRandomHexString() (string, error) {
secret := make([]byte, 32)
randGen := rand.NewGenerator()
n, err := randGen.Read(secret)
if err != nil {
return "", err
} else if n != 32 {
return "", errors.New("rand: unexpected length")
}
return hexutil.Encode(secret), nil
}
// ValidateAuthToken validates the auth token for the web client
func ValidateAuthToken(token string) error {
b, err := hexutil.Decode(token)
// token should be hex-encoded and at least 256 bits
if err != nil || len(b) < 32 {
return errors.New("invalid auth token: token should be hex-encoded and at least 256 bits")
}
return nil
}
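
Not part of the diff: a minimal usage sketch for the two helpers above, assuming only the importpath declared in the BUILD file (github.com/prysmaticlabs/prysm/v5/api). A freshly generated token round-trips through validation, since it is hex-encoded and exactly 256 bits.

package main

import (
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v5/api"
)

func main() {
	// Generate a 32-byte, hex-encoded secret suitable for a JWT signing key.
	token, err := api.GenerateRandomHexString()
	if err != nil {
		log.Fatal(err)
	}
	// Round-trip: the token is hex-encoded and exactly 256 bits, so it validates.
	if err := api.ValidateAuthToken(token); err != nil {
		log.Fatal(err)
	}
	fmt.Println("token:", token)
}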

api/jwt_test.go (new file, +13)
View File

@@ -0,0 +1,13 @@
package api
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestGenerateRandomHexString(t *testing.T) {
token, err := GenerateRandomHexString()
require.NoError(t, err)
require.NoError(t, ValidateAuthToken(token))
}

View File

@@ -88,7 +88,7 @@ func TestToggle(t *testing.T) {
}
}
func TestToogleMultipleTimes(t *testing.T) {
func TestToggleMultipleTimes(t *testing.T) {
t.Parallel()
v := New()
@@ -101,16 +101,16 @@ func TestToogleMultipleTimes(t *testing.T) {
expected := i%2 != 0
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toogle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
t.Fatalf("AtomicBool.Toggle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
}
if pre == v.IsSet() {
t.Fatalf("AtomicBool.Toogle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
t.Fatalf("AtomicBool.Toggle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
}
}
}
func TestToogleAfterOverflow(t *testing.T) {
func TestToggleAfterOverflow(t *testing.T) {
t.Parallel()
var value int32 = math.MaxInt32
@@ -122,7 +122,7 @@ func TestToogleAfterOverflow(t *testing.T) {
v.Toggle()
expected := math.MaxInt32%2 == 0
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toogle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
t.Fatalf("AtomicBool.Toggle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
}
// make sure overflow happened
@@ -135,7 +135,7 @@ func TestToogleAfterOverflow(t *testing.T) {
v.Toggle()
expected = !expected
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toogle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
t.Fatalf("AtomicBool.Toggle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
}
}

View File

@@ -20,6 +20,7 @@ package event
import (
"errors"
"reflect"
"slices"
"sync"
)
@@ -219,12 +220,9 @@ type caseList []reflect.SelectCase
// find returns the index of a case containing the given channel.
func (cs caseList) find(channel interface{}) int {
for i, cas := range cs {
if cas.Chan.Interface() == channel {
return i
}
}
return -1
return slices.IndexFunc(cs, func(selectCase reflect.SelectCase) bool {
return selectCase.Chan.Interface() == channel
})
}
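
The rewrite is behavior-preserving: slices.IndexFunc (Go 1.21+ standard library) returns the index of the first element satisfying the predicate and -1 when none does, exactly the sentinel the removed loop returned. A standalone sketch of that contract, with illustrative values:

package main

import (
	"fmt"
	"slices"
)

func main() {
	xs := []int{3, 5, 8}
	// First element matching the predicate: prints 2.
	fmt.Println(slices.IndexFunc(xs, func(x int) bool { return x%2 == 0 }))
	// No match: prints -1, the same fallthrough value as the old loop.
	fmt.Println(slices.IndexFunc(xs, func(x int) bool { return x > 10 }))
}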
// delete removes the given case from cs.

View File

@@ -63,7 +63,7 @@ func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, err
return results, nil
}
// calculateChunkSize calculates a suitable chunk size for the purposes of parallelisation.
// calculateChunkSize calculates a suitable chunk size for the purposes of parallelization.
func calculateChunkSize(items int) int {
// Start with a simple even split
chunkSize := items / runtime.GOMAXPROCS(0)
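
For intuition on the even split (a sketch with illustrative numbers, not code from this diff): 1000 items on a machine where GOMAXPROCS(0) reports 8 yields a starting chunk size of 1000 / 8 = 125; the rest of the function, outside this hunk, adjusts from there.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	items := 1000
	// Simple even split across available processors; with 8 procs this prints 125.
	fmt.Println(items / runtime.GOMAXPROCS(0))
}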

View File

@@ -26,6 +26,7 @@ go_library(
"receive_attestation.go",
"receive_blob.go",
"receive_block.go",
"receive_data_column.go",
"service.go",
"tracked_proposer.go",
"weak_subjectivity_checks.go",
@@ -48,6 +49,7 @@ go_library(
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -157,6 +159,7 @@ go_test(
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",

View File

@@ -11,7 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -399,14 +398,6 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}
// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
}
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
@@ -518,13 +509,6 @@ func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slo
return ar[:], nil
}
// SetOptimisticToInvalid wraps the corresponding method in forkchoice
func (s *Service) SetOptimisticToInvalid(ctx context.Context, root, parent, lvh [32]byte) ([][32]byte, error) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parent, lvh)
}
// SetGenesisTime sets the genesis time of beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t

View File

@@ -33,6 +33,7 @@ var (
)
var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")
// An invalid block is a block that fails the state transition based on the core protocol rules.
// The beacon node shall not be accepting nor building blocks that branch off from an invalid block.

View File

@@ -256,7 +256,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
newPayloadInvalidNodeCount.Inc()
invalidRoots, err := s.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
if err != nil {
return err
}

View File

@@ -61,7 +61,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee should be be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
// NextSyncCommittee should be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
require.NotEqual(t, 0, len(indices))
}

View File

@@ -118,9 +118,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
}
// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
func WithP2PBroadcaster(p p2p.Acceser) Option {
return func(s *Service) error {
s.cfg.P2p = p
s.cfg.P2P = p
return nil
}
}

View File

@@ -6,6 +6,7 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -514,12 +515,35 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
return missing, nil
}
func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
if len(expected) == 0 {
return nil, nil
}
if len(expected) > int(params.BeaconConfig().NumberOfColumns) {
return nil, errMaxDataColumnsExceeded
}
indices, err := bs.ColumnIndices(root)
if err != nil {
return nil, err
}
missing := make(map[uint64]bool, len(expected))
for col := range expected {
if !indices[col] {
missing[col] = true
}
}
return missing, nil
}
// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
// The function will first check the database to see if all sidecars have been persisted. If any
// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
if features.Get().EnablePeerDAS {
return s.isDataAvailableDataColumns(ctx, root, signed)
}
if signed.Version() < version.Deneb {
return nil
}
@@ -591,6 +615,86 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
}
}
func (s *Service) isDataAvailableDataColumns(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
if signed.Version() < version.Deneb {
return nil
}
block := signed.Block()
if block == nil {
return errors.New("invalid nil beacon block")
}
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
return nil
}
body := block.Body()
if body == nil {
return errors.New("invalid nil beacon block body")
}
kzgCommitments, err := body.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "could not get KZG commitments")
}
// If the block has no commitments, there is nothing to wait for.
if len(kzgCommitments) == 0 {
return nil
}
colMap, err := peerdas.CustodyColumns(s.cfg.P2P.NodeID(), params.BeaconConfig().CustodyRequirement)
if err != nil {
return err
}
// expected is the number of custodied data columns a node is expected to have.
expected := len(colMap)
if expected == 0 {
return nil
}
// get a map of data column indices that are not currently available.
missing, err := missingDataColumns(s.blobStorage, root, colMap)
if err != nil {
return err
}
// If there are no missing indices, all data column sidecars are available.
if len(missing) == 0 {
return nil
}
// The gossip handler for data columns writes the index of each verified data column referencing the given
// root to the channel returned by blobNotifiers.forRoot.
nc := s.blobNotifiers.forRoot(root)
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
nst := time.AfterFunc(time.Until(nextSlot), func() {
if len(missing) == 0 {
return
}
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
Error("Still waiting for DA check at slot end.")
})
defer nst.Stop()
}
for {
select {
case idx := <-nc:
// Delete each index seen in the notification channel.
delete(missing, idx)
// Read from the channel until there are no more missing sidecars.
if len(missing) > 0 {
continue
}
// Once all sidecars have been observed, clean up the notification channel.
s.blobNotifiers.delete(root)
return nil
case <-ctx.Done():
return errors.Wrapf(ctx.Err(), "context deadline waiting for blob sidecars slot: %d, BlockRoot: %#x", block.Slot(), root)
}
}
}
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
return logrus.Fields{
"slot": slot,

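The heart of the new check is a map difference: the node's custody columns (from peerdas.CustodyColumns) minus the column indices already persisted (from ColumnIndices). A self-contained sketch of that logic, with hypothetical inputs in place of real storage:

package main

import "fmt"

// missing mirrors the map difference at the core of missingDataColumns:
// every expected custody column not already stored is still missing.
func missing(expected, stored map[uint64]bool) map[uint64]bool {
	out := make(map[uint64]bool, len(expected))
	for col := range expected {
		if !stored[col] {
			out[col] = true
		}
	}
	return out
}

func main() {
	expected := map[uint64]bool{0: true, 37: true, 74: true} // hypothetical custody columns
	stored := map[uint64]bool{0: true}                       // hypothetical indices on disk
	fmt.Println(missing(expected, stored))                   // map[37:true 74:true]
}
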
View File

@@ -60,7 +60,7 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu
// logNonCanonicalBlockReceived prints a message informing that the received
// block is not the head of the chain. It requires the caller holds a lock on
// Foprkchoice.
// Forkchoice.
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
if err != nil {

View File

@@ -50,6 +50,12 @@ type BlobReceiver interface {
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
}
// DataColumnReceiver interface defines the methods of chain service for receiving new
// data columns
type DataColumnReceiver interface {
ReceiveDataColumn(context.Context, blocks.VerifiedRODataColumn) error
}
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashings *ethpb.AttesterSlashing)
@@ -170,7 +176,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// Send finalized events and finalized deposits in the background
if newFinalized {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
go s.sendNewFinalizedEvent(blockCopy, postState)
go s.sendNewFinalizedEvent(ctx, postState)
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
s.insertFinalizedDeposits(depCtx, finalized.Root)
@@ -443,7 +449,7 @@ func (s *Service) updateFinalizationOnBlock(ctx context.Context, preState, postS
// sendNewFinalizedEvent sends a new finalization checkpoint event over the
// event feed. It needs to be called in the background
func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBlock, postState state.BeaconState) {
func (s *Service) sendNewFinalizedEvent(ctx context.Context, postState state.BeaconState) {
isValidPayload := false
s.headLock.RLock()
if s.head != nil {
@@ -451,8 +457,17 @@ func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBl
}
s.headLock.RUnlock()
blk, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root))
if err != nil {
log.WithError(err).Error("Could not retrieve block for finalized checkpoint root. Finalized event will not be emitted")
return
}
if blk == nil || blk.IsNil() || blk.Block() == nil || blk.Block().IsNil() {
log.WithError(err).Error("Block retrieved for finalized checkpoint root is nil. Finalized event will not be emitted")
return
}
stateRoot := blk.Block().StateRoot()
// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{
@@ -488,7 +503,10 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
if err != nil {
return false, s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
s.cfg.ForkChoiceStore.Lock()
err = s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
s.cfg.ForkChoiceStore.Unlock()
return false, err
}
if signed.Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {

View File

@@ -8,12 +8,14 @@ import (
blockchainTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -378,3 +380,38 @@ func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
require.Equal(t, false, pool.ValidatorExists(idx))
})
}
func Test_sendNewFinalizedEvent(t *testing.T) {
s, _ := minimalTestService(t)
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
s.cfg.StateNotifier = notifier
finalizedSt, err := util.NewBeaconState()
require.NoError(t, err)
finalizedStRoot, err := finalizedSt.HashTreeRoot(s.ctx)
require.NoError(t, err)
b := util.NewBeaconBlock()
b.Block.StateRoot = finalizedStRoot[:]
sbb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
sbbRoot, err := sbb.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.cfg.BeaconDB.SaveBlock(s.ctx, sbb))
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetFinalizedCheckpoint(&ethpb.Checkpoint{
Epoch: 123,
Root: sbbRoot[:],
}))
s.sendNewFinalizedEvent(s.ctx, st)
require.Equal(t, 1, len(notifier.ReceivedEvents()))
e := notifier.ReceivedEvents()[0]
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
require.Equal(t, true, ok, "event has wrong data type")
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
assert.DeepEqual(t, sbbRoot[:], fc.Block)
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
assert.Equal(t, false, fc.ExecutionOptimistic)
}

View File

@@ -0,0 +1,18 @@
package blockchain
import (
"context"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)
func (s *Service) ReceiveDataColumn(ctx context.Context, ds blocks.VerifiedRODataColumn) error {
if err := s.blobStorage.SaveDataColumn(ds); err != nil {
return err
}
// TODO use a custom event or new method for data columns. For speed
// we are simply reusing blob paths here.
s.sendNewBlobEvent(ds.BlockRoot(), uint64(ds.SignedBlockHeader.Header.Slot))
return nil
}

View File

@@ -82,7 +82,7 @@ type config struct {
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
P2p p2p.Broadcaster
P2P p2p.Acceser
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer

View File

@@ -95,7 +95,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
WithAttestationPool(attestations.NewPool()),
WithSlashingPool(slashings.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithP2PBroadcaster(&mockBroadcaster{}),
WithP2PBroadcaster(&mockAccesser{}),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fc),
WithAttestationService(attService),

View File

@@ -18,6 +18,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
p2pTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -44,6 +45,11 @@ type mockBroadcaster struct {
broadcastCalled bool
}
type mockAccesser struct {
mockBroadcaster
p2pTesting.MockPeerManager
}
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
mb.broadcastCalled = true
return nil
@@ -64,6 +70,11 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
return nil
}
func (mb *mockBroadcaster) BroadcastDataColumn(_ context.Context, _ uint64, _ *ethpb.DataColumnSidecar) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}

View File

@@ -628,6 +628,11 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
return nil
}
// ReceiveDataColumn implements the same method in chain service
func (c *ChainService) ReceiveDataColumn(_ context.Context, _ blocks.VerifiedRODataColumn) error {
return nil
}
// TargetRootForEpoch mocks the same method in the chain service
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil

View File

@@ -7,6 +7,7 @@ go_library(
"active_balance_disabled.go", # keep
"attestation_data.go",
"checkpoint_state.go",
"column_subnet_ids.go",
"committee.go",
"committee_disabled.go", # keep
"committees.go",

beacon-chain/cache/column_subnet_ids.go (new file, vendored, +65)
View File

@@ -0,0 +1,65 @@
package cache
import (
"sync"
"time"
"github.com/patrickmn/go-cache"
"github.com/prysmaticlabs/prysm/v5/config/params"
)
type columnSubnetIDs struct {
colSubCache *cache.Cache
colSubLock sync.RWMutex
}
// ColumnSubnetIDs for column subnet participants
var ColumnSubnetIDs = newColumnSubnetIDs()
const columnKey = "columns"
func newColumnSubnetIDs() *columnSubnetIDs {
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
// Set the default duration of a column subnet subscription as the column expiry period.
subLength := epochDuration * time.Duration(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
return &columnSubnetIDs{colSubCache: persistentCache}
}
// GetColumnSubnets retrieves the data column subnets.
func (s *columnSubnetIDs) GetColumnSubnets() ([]uint64, bool, time.Time) {
s.colSubLock.RLock()
defer s.colSubLock.RUnlock()
id, duration, ok := s.colSubCache.GetWithExpiration(columnKey)
if !ok {
return nil, false, time.Time{}
}
// Retrieve indices from the cache.
idxs, ok := id.([]uint64)
if !ok {
return nil, false, time.Time{}
}
return idxs, ok, duration
}
// AddColumnSubnets adds the relevant data column subnets.
func (s *columnSubnetIDs) AddColumnSubnets(colIdx []uint64) {
s.colSubLock.Lock()
defer s.colSubLock.Unlock()
s.colSubCache.Set(columnKey, colIdx, 0)
}
// EmptyAllCaches empties out all the related caches and flushes any stored
// entries on them. This should only ever be used for testing; in normal
// production, handling of the relevant subnets for each role is done
// separately.
func (s *columnSubnetIDs) EmptyAllCaches() {
// Clear the cache.
s.colSubLock.Lock()
defer s.colSubLock.Unlock()
s.colSubCache.Flush()
}
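
A usage sketch for the new cache, not part of the diff (the column indices are hypothetical): a subscriber records the subnets it joined, and a later reader gets them back together with the entry's expiration time.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
)

func main() {
	// Record the column subnets this node participates in (hypothetical indices).
	cache.ColumnSubnetIDs.AddColumnSubnets([]uint64{1, 17, 33})

	// Read them back, plus whether an entry exists and when it expires.
	subnets, ok, expiry := cache.ColumnSubnetIDs.GetColumnSubnets()
	fmt.Println(subnets, ok, expiry)
}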

View File

@@ -804,7 +804,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
assert.NoError(t, err)
// Perform this in a non-sensical ordering
// Perform this in a nonsensical ordering
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 10, [32]byte{}, 0))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 3, [32]byte{}, 0))

View File

@@ -784,7 +784,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
assert.NoError(t, err)
// Perform this in a non-sensical ordering
// Perform this in a nonsensical ordering
err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)

View File

@@ -96,6 +96,24 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}
func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
currentEpoch := slots.ToEpoch(header.Header.Slot)
fork, err := forks.Fork(currentEpoch)
if err != nil {
return err
}
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return err
}
proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
if err != nil {
return err
}
proposerPubKey := proposer.PublicKey
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}
// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.
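
A hedged call sketch for the new helper. The import path is an assumption (the surrounding block-header helpers live in beacon-chain/core/blocks); the point is that the fork data is derived from the header's own slot via forks.Fork rather than from the passed-in state, so the state need not be from the same fork as the header.

package sketch

import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// verifyHeader is a hypothetical wrapper: the signing domain is computed from
// the header's epoch, so st only supplies the genesis validators root and the
// proposer's public key.
func verifyHeader(st state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
	return blocks.VerifyBlockHeaderSignatureUsingCurrentFork(st, header)
}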

View File

@@ -32,6 +32,9 @@ const (
// AttesterSlashingReceived is sent after an attester slashing is received from gossip or rpc
AttesterSlashingReceived = 8
// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
DataColumnSidecarReceived = 9
)
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -77,3 +80,7 @@ type ProposerSlashingReceivedData struct {
type AttesterSlashingReceivedData struct {
AttesterSlashing *ethpb.AttesterSlashing
}
type DataColumnSidecarReceivedData struct {
DataColumn *blocks.VerifiedRODataColumn
}

View File

@@ -50,6 +50,8 @@ go_test(
"attestation_test.go",
"beacon_committee_test.go",
"block_test.go",
"private_access_fuzz_noop_test.go", # keep
"private_access_test.go",
"randao_test.go",
"rewards_penalties_test.go",
"shuffle_test.go",

View File

@@ -140,7 +140,7 @@ func BeaconCommittee(
}
count := committeesPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch)
return computeCommittee(validatorIndices, seed, indexOffset, count)
return ComputeCommittee(validatorIndices, seed, indexOffset, count)
}
// CommitteeAssignmentContainer represents a committee list, committee index, and to be attested slot for a given epoch.
@@ -359,7 +359,7 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
if err != nil {
return err
}
proposerIndices, err := precomputeProposerIndices(state, indices, epoch)
proposerIndices, err := PrecomputeProposerIndices(state, indices, epoch)
if err != nil {
return err
}
@@ -409,7 +409,7 @@ func ClearCache() {
balanceCache.Clear()
}
// computeCommittee returns the requested shuffled committee out of the total committees using
// ComputeCommittee returns the requested shuffled committee out of the total committees using
// validator indices and seed.
//
// Spec pseudocode definition:
@@ -424,7 +424,7 @@ func ClearCache() {
// start = (len(indices) * index) // count
// end = (len(indices) * uint64(index + 1)) // count
// return [indices[compute_shuffled_index(uint64(i), uint64(len(indices)), seed)] for i in range(start, end)]
func computeCommittee(
func ComputeCommittee(
indices []primitives.ValidatorIndex,
seed [32]byte,
index, count uint64,
@@ -451,9 +451,9 @@ func computeCommittee(
return shuffledList[start:end], nil
}
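
A worked instance of the spec pseudocode above (illustrative numbers, not from this diff): with len(indices) = 100 and count = 10 committees, committee index = 3 spans start = 100*3/10 = 30 to end = 100*4/10 = 40, i.e. ten shuffled positions. Just the offset arithmetic, shuffling omitted:

package main

import "fmt"

func main() {
	// Spec slicing from compute_committee: start = len(indices)*index/count,
	// end = len(indices)*(index+1)/count, integer division throughout.
	n, index, count := uint64(100), uint64(3), uint64(10)
	start := n * index / count
	end := n * (index + 1) / count
	fmt.Println(start, end, end-start) // 30 40 10
}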
// This computes proposer indices of the current epoch and returns a list of proposer indices,
// PrecomputeProposerIndices computes proposer indices of the current epoch and returns a list of proposer indices,
// the index of the list represents the slot number.
func precomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
func PrecomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
hashFunc := hash.CustomSHA256Hasher()
proposerIndices := make([]primitives.ValidatorIndex, params.BeaconConfig().SlotsPerEpoch)

View File

@@ -1,4 +1,4 @@
package helpers
package helpers_test
import (
"context"
@@ -7,6 +7,7 @@ import (
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -21,7 +22,7 @@ import (
)
func TestComputeCommittee_WithoutCache(t *testing.T) {
ClearCache()
helpers.ClearCache()
// Create 10 committees
committeeCount := uint64(10)
@@ -48,16 +49,16 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
require.NoError(t, err)
epoch := time.CurrentEpoch(state)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(t, err)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
committees, err := computeCommittee(indices, seed, 0, 1 /* Total committee*/)
committees, err := helpers.ComputeCommittee(indices, seed, 0, 1 /* Total committee*/)
assert.NoError(t, err, "Could not compute committee")
// Test shuffled indices are correct for index 5 committee
index := uint64(5)
committee5, err := computeCommittee(indices, seed, index, committeeCount)
committee5, err := helpers.ComputeCommittee(indices, seed, index, committeeCount)
assert.NoError(t, err, "Could not compute committee")
start := slice.SplitOffset(validatorCount, committeeCount, index)
end := slice.SplitOffset(validatorCount, committeeCount, index+1)
@@ -65,7 +66,7 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
// Test shuffled indices are correct for index 9 committee
index = uint64(9)
committee9, err := computeCommittee(indices, seed, index, committeeCount)
committee9, err := helpers.ComputeCommittee(indices, seed, index, committeeCount)
assert.NoError(t, err, "Could not compute committee")
start = slice.SplitOffset(validatorCount, committeeCount, index)
end = slice.SplitOffset(validatorCount, committeeCount, index+1)
@@ -73,42 +74,42 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
}
func TestComputeCommittee_RegressionTest(t *testing.T) {
ClearCache()
helpers.ClearCache()
indices := []primitives.ValidatorIndex{1, 3, 8, 16, 18, 19, 20, 23, 30, 35, 43, 46, 47, 54, 56, 58, 69, 70, 71, 83, 84, 85, 91, 96, 100, 103, 105, 106, 112, 121, 127, 128, 129, 140, 142, 144, 146, 147, 149, 152, 153, 154, 157, 160, 173, 175, 180, 182, 188, 189, 191, 194, 201, 204, 217, 221, 226, 228, 230, 231, 239, 241, 249, 250, 255}
seed := [32]byte{68, 110, 161, 250, 98, 230, 161, 172, 227, 226, 99, 11, 138, 124, 201, 134, 38, 197, 0, 120, 6, 165, 122, 34, 19, 216, 43, 226, 210, 114, 165, 183}
index := uint64(215)
count := uint64(32)
_, err := computeCommittee(indices, seed, index, count)
_, err := helpers.ComputeCommittee(indices, seed, index, count)
require.ErrorContains(t, "index out of range", err)
}
func TestVerifyBitfieldLength_OK(t *testing.T) {
ClearCache()
helpers.ClearCache()
bf := bitfield.Bitlist{0xFF, 0x01}
committeeSize := uint64(8)
assert.NoError(t, VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
assert.NoError(t, helpers.VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
bf = bitfield.Bitlist{0xFF, 0x07}
committeeSize = 10
assert.NoError(t, VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
assert.NoError(t, helpers.VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
}
func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
ClearCache()
helpers.ClearCache()
epoch := primitives.Epoch(1)
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: 0, // Epoch 0.
})
require.NoError(t, err)
_, _, err = CommitteeAssignments(context.Background(), state, epoch+1)
_, _, err = helpers.CommitteeAssignments(context.Background(), state, epoch+1)
assert.ErrorContains(t, "can't be greater than next epoch", err)
}
func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
@@ -127,7 +128,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, 0)
_, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, 0)
require.NoError(t, err, "Failed to determine CommitteeAssignments")
for _, ss := range proposerIndexToSlots {
for _, s := range ss {
@@ -198,9 +199,9 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
for i, tt := range tests {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
ClearCache()
helpers.ClearCache()
validatorIndexToCommittee, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
validatorIndexToCommittee, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
require.NoError(t, err, "Failed to determine CommitteeAssignments")
cac := validatorIndexToCommittee[tt.index]
assert.Equal(t, tt.committeeIndex, cac.CommitteeIndex, "Unexpected committeeIndex for validator index %d", tt.index)
@@ -215,7 +216,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
}
func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
ClearCache()
helpers.ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -237,17 +238,17 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, proposerIndxs, err := CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state))
_, proposerIndxs, err := helpers.CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state))
require.NoError(t, err)
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
_, proposerIndxs, err = CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
_, proposerIndxs, err = helpers.CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
require.NoError(t, err)
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
}
func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *testing.T) {
ClearCache()
helpers.ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -263,12 +264,12 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, _, err = CommitteeAssignments(context.Background(), state, 0)
_, _, err = helpers.CommitteeAssignments(context.Background(), state, 0)
require.ErrorContains(t, "start slot 0 is smaller than the minimum valid start slot 1", err)
}
func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
ClearCache()
helpers.ClearCache()
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
@@ -285,7 +286,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
})
require.NoError(t, err)
epoch := primitives.Epoch(1)
_, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, epoch)
_, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, epoch)
require.NoError(t, err, "Failed to determine CommitteeAssignments")
slotsWithProposers := make(map[primitives.Slot]bool)
@@ -391,10 +392,10 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
}
for i, tt := range tests {
ClearCache()
helpers.ClearCache()
require.NoError(t, state.SetSlot(tt.stateSlot))
err := VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
err := helpers.VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
if tt.verificationFailure {
assert.NotNil(t, err, "Verification succeeded when it was supposed to fail")
} else {
@@ -404,7 +405,7 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
}
func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
ClearCache()
helpers.ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
@@ -421,20 +422,20 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
require.NoError(t, UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
epoch := primitives.Epoch(0)
idx := primitives.CommitteeIndex(1)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
indices, err = committeeCache.Committee(context.Background(), params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)), seed, idx)
indices, err = helpers.CommitteeCache().Committee(context.Background(), params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)), seed, idx)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(indices)), "Did not save correct indices lengths")
}
func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
ClearCache()
helpers.ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
@@ -452,19 +453,19 @@ func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
})
require.NoError(t, err)
e := time.CurrentEpoch(state)
require.NoError(t, UpdateCommitteeCache(context.Background(), state, e))
require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, e))
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.Equal(t, true, committeeCache.HasEntry(string(seed[:])))
require.Equal(t, true, helpers.CommitteeCache().HasEntry(string(seed[:])))
nextSeed, err := Seed(state, e+1, params.BeaconConfig().DomainBeaconAttester)
nextSeed, err := helpers.Seed(state, e+1, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.Equal(t, false, committeeCache.HasEntry(string(nextSeed[:])))
require.Equal(t, false, helpers.CommitteeCache().HasEntry(string(nextSeed[:])))
require.NoError(t, UpdateCommitteeCache(context.Background(), state, e+1))
require.NoError(t, helpers.UpdateCommitteeCache(context.Background(), state, e+1))
require.Equal(t, true, committeeCache.HasEntry(string(nextSeed[:])))
require.Equal(t, true, helpers.CommitteeCache().HasEntry(string(nextSeed[:])))
}
func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
@@ -481,20 +482,20 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
require.NoError(b, err)
epoch := time.CurrentEpoch(state)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)
index := uint64(3)
_, err = computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err = helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -515,20 +516,20 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
require.NoError(b, err)
epoch := time.CurrentEpoch(state)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)
index := uint64(3)
_, err = computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err = helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -549,9 +550,9 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
require.NoError(b, err)
epoch := time.CurrentEpoch(state)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)
i := uint64(0)
@@ -559,7 +560,7 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
b.ResetTimer()
for n := 0; n < b.N; n++ {
i++
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -584,9 +585,9 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
require.NoError(b, err)
epoch := time.CurrentEpoch(state)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)
i := uint64(0)
@@ -594,7 +595,7 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
b.ResetTimer()
for n := 0; n < b.N; n++ {
i++
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -619,9 +620,9 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
require.NoError(b, err)
epoch := time.CurrentEpoch(state)
indices, err := ActiveValidatorIndices(context.Background(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, epoch)
require.NoError(b, err)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)
i := uint64(0)
@@ -629,7 +630,7 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
b.ResetTimer()
for n := 0; n < b.N; n++ {
i++
_, err := computeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
panic(err)
}
@@ -655,13 +656,13 @@ func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, err = BeaconCommitteeFromState(context.Background(), state, 1 /* previous epoch */, 0)
_, err = helpers.BeaconCommitteeFromState(context.Background(), state, 1 /* previous epoch */, 0)
require.NoError(t, err)
// Verify previous epoch is cached
seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
activeIndices, err := committeeCache.ActiveIndices(context.Background(), seed)
activeIndices, err := helpers.CommitteeCache().ActiveIndices(context.Background(), seed)
require.NoError(t, err)
assert.NotNil(t, activeIndices, "Did not cache active indices")
}
@@ -680,19 +681,19 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
})
require.NoError(t, err)
indices, err := ActiveValidatorIndices(context.Background(), state, 0)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, 0)
require.NoError(t, err)
proposerIndices, err := precomputeProposerIndices(state, indices, time.CurrentEpoch(state))
proposerIndices, err := helpers.PrecomputeProposerIndices(state, indices, time.CurrentEpoch(state))
require.NoError(t, err)
var wantedProposerIndices []primitives.ValidatorIndex
seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
require.NoError(t, err)
for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
seedWithSlotHash := hash.Hash(seedWithSlot)
index, err := ComputeProposerIndex(state, indices, seedWithSlotHash)
index, err := helpers.ComputeProposerIndex(state, indices, seedWithSlotHash)
require.NoError(t, err)
wantedProposerIndices = append(wantedProposerIndices, index)
}

View File

@@ -0,0 +1,17 @@
//go:build fuzz
package helpers
import "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
func CommitteeCache() *cache.FakeCommitteeCache {
return committeeCache
}
func SyncCommitteeCache() *cache.FakeSyncCommitteeCache {
return syncCommitteeCache
}
func ProposerIndicesCache() *cache.FakeProposerIndicesCache {
return proposerIndicesCache
}

View File

@@ -0,0 +1,17 @@
//go:build !fuzz
package helpers
import "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
func CommitteeCache() *cache.CommitteeCache {
return committeeCache
}
func SyncCommitteeCache() *cache.SyncCommitteeCache {
return syncCommitteeCache
}
func ProposerIndicesCache() *cache.ProposerIndicesCache {
return proposerIndicesCache
}
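// The two files above use Go build tags to expose one stable accessor API
// over the package-private caches to black-box tests, whichever cache
// implementation the build selects. A minimal consumer sketch follows; the
// test is hypothetical, and HasEntry is assumed to exist on both the fuzz
// and non-fuzz committee cache types so it compiles under either build tag.

package helpers_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
)

func TestCacheAccessors_Sketch(t *testing.T) {
	helpers.ClearCache()
	// After ClearCache, no seed should be present in the committee cache.
	if helpers.CommitteeCache().HasEntry("no-such-seed") {
		t.Fatal("expected an empty committee cache after ClearCache")
	}
}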

View File

@@ -1,9 +1,10 @@
package helpers
package helpers_test
import (
"encoding/binary"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -40,10 +41,10 @@ func TestRandaoMix_OK(t *testing.T) {
},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
mix, err := RandaoMix(state, test.epoch)
mix, err := helpers.RandaoMix(state, test.epoch)
require.NoError(t, err)
assert.DeepEqual(t, test.randaoMix, mix, "Incorrect randao mix")
}
@@ -76,10 +77,10 @@ func TestRandaoMix_CopyOK(t *testing.T) {
},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(test.epoch+1))))
mix, err := RandaoMix(state, test.epoch)
mix, err := helpers.RandaoMix(state, test.epoch)
require.NoError(t, err)
uniqueNumber := uint64(params.BeaconConfig().EpochsPerHistoricalVector.Add(1000))
binary.LittleEndian.PutUint64(mix, uniqueNumber)
@@ -92,7 +93,7 @@ func TestRandaoMix_CopyOK(t *testing.T) {
}
func TestGenerateSeed_OK(t *testing.T) {
ClearCache()
helpers.ClearCache()
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
for i := 0; i < len(randaoMixes); i++ {
@@ -107,7 +108,7 @@ func TestGenerateSeed_OK(t *testing.T) {
})
require.NoError(t, err)
got, err := Seed(state, 10, params.BeaconConfig().DomainBeaconAttester)
got, err := helpers.Seed(state, 10, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
wanted := [32]byte{102, 82, 23, 40, 226, 79, 171, 11, 203, 23, 175, 7, 88, 202, 80,

View File

@@ -22,7 +22,7 @@ var balanceCache = cache.NewEffectiveBalanceCache()
// """
// Return the combined effective balance of the ``indices``.
// ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
// Math safe up to ~10B ETH, afterwhich this overflows uint64.
// Math safe up to ~10B ETH, after which this overflows uint64.
// """
// return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices])))
func TotalBalance(state state.ReadOnlyValidators, indices []primitives.ValidatorIndex) uint64 {

View File

@@ -1,9 +1,10 @@
package helpers
package helpers_test
import (
"math"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -14,7 +15,7 @@ import (
)
func TestTotalBalance_OK(t *testing.T) {
ClearCache()
helpers.ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: []*ethpb.Validator{
{EffectiveBalance: 27 * 1e9}, {EffectiveBalance: 28 * 1e9},
@@ -22,19 +23,19 @@ func TestTotalBalance_OK(t *testing.T) {
}})
require.NoError(t, err)
balance := TotalBalance(state, []primitives.ValidatorIndex{0, 1, 2, 3})
balance := helpers.TotalBalance(state, []primitives.ValidatorIndex{0, 1, 2, 3})
wanted := state.Validators()[0].EffectiveBalance + state.Validators()[1].EffectiveBalance +
state.Validators()[2].EffectiveBalance + state.Validators()[3].EffectiveBalance
assert.Equal(t, wanted, balance, "Incorrect TotalBalance")
}
func TestTotalBalance_ReturnsEffectiveBalanceIncrement(t *testing.T) {
ClearCache()
helpers.ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: []*ethpb.Validator{}})
require.NoError(t, err)
balance := TotalBalance(state, []primitives.ValidatorIndex{})
balance := helpers.TotalBalance(state, []primitives.ValidatorIndex{})
wanted := params.BeaconConfig().EffectiveBalanceIncrement
assert.Equal(t, wanted, balance, "Incorrect TotalBalance")
}
@@ -51,7 +52,7 @@ func TestGetBalance_OK(t *testing.T) {
{i: 2, b: []uint64{0, 0, 0}},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Balances: test.b})
require.NoError(t, err)
@@ -68,7 +69,7 @@ func TestTotalActiveBalance(t *testing.T) {
{10000},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, 0)
for i := 0; i < test.vCount; i++ {
@@ -76,7 +77,7 @@ func TestTotalActiveBalance(t *testing.T) {
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
require.NoError(t, err)
bal, err := TotalActiveBalance(state)
bal, err := helpers.TotalActiveBalance(state)
require.NoError(t, err)
require.Equal(t, uint64(test.vCount)*params.BeaconConfig().MaxEffectiveBalance, bal)
}
@@ -91,7 +92,7 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
{10000},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, 0)
for i := 0; i < test.vCount; i++ {
@@ -99,7 +100,7 @@ func TestTotalActiveBal_ReturnMin(t *testing.T) {
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
require.NoError(t, err)
bal, err := TotalActiveBalance(state)
bal, err := helpers.TotalActiveBalance(state)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().EffectiveBalanceIncrement, bal)
}
@@ -115,7 +116,7 @@ func TestTotalActiveBalance_WithCache(t *testing.T) {
{vCount: 10000, wantCount: 10000},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, 0)
for i := 0; i < test.vCount; i++ {
@@ -123,7 +124,7 @@ func TestTotalActiveBalance_WithCache(t *testing.T) {
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
require.NoError(t, err)
bal, err := TotalActiveBalance(state)
bal, err := helpers.TotalActiveBalance(state)
require.NoError(t, err)
require.Equal(t, uint64(test.wantCount)*params.BeaconConfig().MaxEffectiveBalance, bal)
}
@@ -141,7 +142,7 @@ func TestIncreaseBalance_OK(t *testing.T) {
{i: 2, b: []uint64{27 * 1e9, 28 * 1e9, 32 * 1e9}, nb: 33 * 1e9, eb: 65 * 1e9},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{
@@ -149,7 +150,7 @@ func TestIncreaseBalance_OK(t *testing.T) {
Balances: test.b,
})
require.NoError(t, err)
require.NoError(t, IncreaseBalance(state, test.i, test.nb))
require.NoError(t, helpers.IncreaseBalance(state, test.i, test.nb))
assert.Equal(t, test.eb, state.Balances()[test.i], "Incorrect Validator balance")
}
}
@@ -167,7 +168,7 @@ func TestDecreaseBalance_OK(t *testing.T) {
{i: 3, b: []uint64{27 * 1e9, 28 * 1e9, 1, 28 * 1e9}, nb: 28 * 1e9, eb: 0},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{
@@ -175,13 +176,13 @@ func TestDecreaseBalance_OK(t *testing.T) {
Balances: test.b,
})
require.NoError(t, err)
require.NoError(t, DecreaseBalance(state, test.i, test.nb))
require.NoError(t, helpers.DecreaseBalance(state, test.i, test.nb))
assert.Equal(t, test.eb, state.Balances()[test.i], "Incorrect Validator balance")
}
}
func TestFinalityDelay(t *testing.T) {
ClearCache()
helpers.ClearCache()
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
base.FinalizedCheckpoint = &ethpb.Checkpoint{Epoch: 3}
@@ -195,25 +196,25 @@ func TestFinalityDelay(t *testing.T) {
finalizedEpoch = beaconState.FinalizedCheckpointEpoch()
}
setVal()
d := FinalityDelay(prevEpoch, finalizedEpoch)
d := helpers.FinalityDelay(prevEpoch, finalizedEpoch)
w := time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
assert.Equal(t, w, d, "Did not get wanted finality delay")
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 4}))
setVal()
d = FinalityDelay(prevEpoch, finalizedEpoch)
d = helpers.FinalityDelay(prevEpoch, finalizedEpoch)
w = time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
assert.Equal(t, w, d, "Did not get wanted finality delay")
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 5}))
setVal()
d = FinalityDelay(prevEpoch, finalizedEpoch)
d = helpers.FinalityDelay(prevEpoch, finalizedEpoch)
w = time.PrevEpoch(beaconState) - beaconState.FinalizedCheckpointEpoch()
assert.Equal(t, w, d, "Did not get wanted finality delay")
}
func TestIsInInactivityLeak(t *testing.T) {
ClearCache()
helpers.ClearCache()
base := buildState(params.BeaconConfig().SlotsPerEpoch*10, 1)
base.FinalizedCheckpoint = &ethpb.Checkpoint{Epoch: 3}
@@ -227,13 +228,13 @@ func TestIsInInactivityLeak(t *testing.T) {
finalizedEpoch = beaconState.FinalizedCheckpointEpoch()
}
setVal()
assert.Equal(t, true, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
assert.Equal(t, true, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 4}))
setVal()
assert.Equal(t, true, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
assert.Equal(t, true, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak true")
require.NoError(t, beaconState.SetFinalizedCheckpoint(&ethpb.Checkpoint{Epoch: 5}))
setVal()
assert.Equal(t, false, IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak false")
assert.Equal(t, false, helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch), "Wanted inactivity leak false")
}
func buildState(slot primitives.Slot, validatorCount uint64) *ethpb.BeaconState {
@@ -285,7 +286,7 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
{i: 2, b: []uint64{math.MaxUint64, math.MaxUint64, math.MaxUint64}, nb: 33 * 1e9},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{
@@ -293,6 +294,6 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
Balances: test.b,
})
require.NoError(t, err)
require.ErrorContains(t, "addition overflows", IncreaseBalance(state, test.i, test.nb))
require.ErrorContains(t, "addition overflows", helpers.IncreaseBalance(state, test.i, test.nb))
}
}

View File

@@ -26,7 +26,7 @@ var (
// 1. Checks if the public key exists in the sync committee cache
// 2. If 1 fails, checks if the public key exists in the input current sync committee object
func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.ValidatorIndex) (bool, error) {
root, err := syncPeriodBoundaryRoot(st)
root, err := SyncPeriodBoundaryRoot(st)
if err != nil {
return false, err
}
@@ -63,7 +63,7 @@ func IsCurrentPeriodSyncCommittee(st state.BeaconState, valIdx primitives.Valida
func IsNextPeriodSyncCommittee(
st state.BeaconState, valIdx primitives.ValidatorIndex,
) (bool, error) {
root, err := syncPeriodBoundaryRoot(st)
root, err := SyncPeriodBoundaryRoot(st)
if err != nil {
return false, err
}
@@ -90,7 +90,7 @@ func IsNextPeriodSyncCommittee(
func CurrentPeriodSyncSubcommitteeIndices(
st state.BeaconState, valIdx primitives.ValidatorIndex,
) ([]primitives.CommitteeIndex, error) {
root, err := syncPeriodBoundaryRoot(st)
root, err := SyncPeriodBoundaryRoot(st)
if err != nil {
return nil, err
}
@@ -124,7 +124,7 @@ func CurrentPeriodSyncSubcommitteeIndices(
func NextPeriodSyncSubcommitteeIndices(
st state.BeaconState, valIdx primitives.ValidatorIndex,
) ([]primitives.CommitteeIndex, error) {
root, err := syncPeriodBoundaryRoot(st)
root, err := SyncPeriodBoundaryRoot(st)
if err != nil {
return nil, err
}
@@ -182,10 +182,10 @@ func findSubCommitteeIndices(pubKey []byte, pubKeys [][]byte) []primitives.Commi
return indices
}
// Retrieve the current sync period boundary root by calculating sync period start epoch
// SyncPeriodBoundaryRoot computes the current sync period boundary root by calculating sync period start epoch
// and calling `BlockRoot`.
// It uses the boundary slot - 1 for block root. (Ex: SlotsPerEpoch * EpochsPerSyncCommitteePeriod - 1)
func syncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([32]byte, error) {
func SyncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([32]byte, error) {
// Can't call `BlockRoot` until the first slot.
if st.Slot() == params.BeaconConfig().GenesisSlot {
return params.BeaconConfig().ZeroHash, nil

View File

@@ -1,4 +1,4 @@
package helpers
package helpers_test
import (
"math/rand"
@@ -7,6 +7,7 @@ import (
"time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -17,7 +18,7 @@ import (
)
func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -40,15 +41,15 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
ok, err := IsCurrentPeriodSyncCommittee(state, 0)
ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
}
func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -70,13 +71,13 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := IsCurrentPeriodSyncCommittee(state, 0)
ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
}
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -98,13 +99,13 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := IsCurrentPeriodSyncCommittee(state, 12390192)
ok, err := helpers.IsCurrentPeriodSyncCommittee(state, 12390192)
require.ErrorContains(t, "validator index 12390192 does not exist", err)
require.Equal(t, false, ok)
}
func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -127,15 +128,15 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
ok, err := IsNextPeriodSyncCommittee(state, 0)
ok, err := helpers.IsNextPeriodSyncCommittee(state, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
}
func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -157,13 +158,13 @@ func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := IsNextPeriodSyncCommittee(state, 0)
ok, err := helpers.IsNextPeriodSyncCommittee(state, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
}
func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -185,13 +186,13 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
ok, err := IsNextPeriodSyncCommittee(state, 120391029)
ok, err := helpers.IsNextPeriodSyncCommittee(state, 120391029)
require.ErrorContains(t, "validator index 120391029 does not exist", err)
require.Equal(t, false, ok)
}
func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -214,15 +215,15 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 0)
index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 0)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}
func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -243,27 +244,27 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
root, err := syncPeriodBoundaryRoot(state)
root, err := helpers.SyncPeriodBoundaryRoot(state)
require.NoError(t, err)
// Test that cache was empty.
_, err = syncCommitteeCache.CurrentPeriodIndexPosition(root, 0)
_, err = helpers.SyncCommitteeCache().CurrentPeriodIndexPosition(root, 0)
require.Equal(t, cache.ErrNonExistingSyncCommitteeKey, err)
// Test that helper can retrieve the index given empty cache.
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 0)
index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 0)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
// Test that cache was able to fill on miss.
time.Sleep(100 * time.Millisecond)
index, err = syncCommitteeCache.CurrentPeriodIndexPosition(root, 0)
index, err = helpers.SyncCommitteeCache().CurrentPeriodIndexPosition(root, 0)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -285,13 +286,13 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
index, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
require.ErrorContains(t, "validator index 129301923 does not exist", err)
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}
func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -314,15 +315,15 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
r := [32]byte{'a'}
require.NoError(t, err, syncCommitteeCache.UpdatePositionsInCommittee(r, state))
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee(r, state))
index, err := NextPeriodSyncSubcommitteeIndices(state, 0)
index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 0)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}
func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -344,13 +345,13 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := NextPeriodSyncSubcommitteeIndices(state, 0)
index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 0)
require.NoError(t, err)
require.DeepEqual(t, []primitives.CommitteeIndex{0}, index)
}
func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -372,43 +373,43 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
index, err := NextPeriodSyncSubcommitteeIndices(state, 21093019)
index, err := helpers.NextPeriodSyncSubcommitteeIndices(state, 21093019)
require.ErrorContains(t, "validator index 21093019 does not exist", err)
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
}
func TestUpdateSyncCommitteeCache_BadSlot(t *testing.T) {
ClearCache()
helpers.ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: 1,
})
require.NoError(t, err)
err = UpdateSyncCommitteeCache(state)
err = helpers.UpdateSyncCommitteeCache(state)
require.ErrorContains(t, "not at the end of the epoch to update cache", err)
state, err = state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: params.BeaconConfig().SlotsPerEpoch - 1,
})
require.NoError(t, err)
err = UpdateSyncCommitteeCache(state)
err = helpers.UpdateSyncCommitteeCache(state)
require.ErrorContains(t, "not at sync committee period boundary to update cache", err)
}
func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
ClearCache()
helpers.ClearCache()
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Slot: primitives.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch - 1,
LatestBlockHeader: &ethpb.BeaconBlockHeader{StateRoot: params.BeaconConfig().ZeroHash[:]},
})
require.NoError(t, err)
err = UpdateSyncCommitteeCache(state)
err = helpers.UpdateSyncCommitteeCache(state)
require.ErrorContains(t, "zero hash state root can't be used to update cache", err)
}
func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -435,7 +436,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
comIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
comIdxs, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 200)
require.NoError(t, err)
wantedSlot := params.BeaconConfig().EpochsPerSyncCommitteePeriod.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))
@@ -446,7 +447,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
syncCommittee.Pubkeys[i], syncCommittee.Pubkeys[j] = syncCommittee.Pubkeys[j], syncCommittee.Pubkeys[i]
})
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
newIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
newIdxs, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, 200)
require.NoError(t, err)
require.DeepNotEqual(t, comIdxs, newIdxs)
}

View File

@@ -1,4 +1,4 @@
package helpers
package helpers_test
import (
"context"
@@ -6,6 +6,7 @@ import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
@@ -32,7 +33,7 @@ func TestIsActiveValidator_OK(t *testing.T) {
}
for _, test := range tests {
validator := &ethpb.Validator{ActivationEpoch: 10, ExitEpoch: 100}
assert.Equal(t, test.b, IsActiveValidator(validator, test.a), "IsActiveValidator(%d)", test.a)
assert.Equal(t, test.b, helpers.IsActiveValidator(validator, test.a), "IsActiveValidator(%d)", test.a)
}
}
@@ -53,7 +54,7 @@ func TestIsActiveValidatorUsingTrie_OK(t *testing.T) {
for _, test := range tests {
readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
require.NoError(t, err)
assert.Equal(t, test.b, IsActiveValidatorUsingTrie(readOnlyVal, test.a), "IsActiveValidatorUsingTrie(%d)", test.a)
assert.Equal(t, test.b, helpers.IsActiveValidatorUsingTrie(readOnlyVal, test.a), "IsActiveValidatorUsingTrie(%d)", test.a)
}
}
@@ -81,7 +82,7 @@ func TestIsActiveNonSlashedValidatorUsingTrie_OK(t *testing.T) {
require.NoError(t, err)
readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
require.NoError(t, err)
assert.Equal(t, test.b, IsActiveNonSlashedValidatorUsingTrie(readOnlyVal, test.a), "IsActiveNonSlashedValidatorUsingTrie(%d)", test.a)
assert.Equal(t, test.b, helpers.IsActiveNonSlashedValidatorUsingTrie(readOnlyVal, test.a), "IsActiveNonSlashedValidatorUsingTrie(%d)", test.a)
}
}
@@ -161,7 +162,7 @@ func TestIsSlashableValidator_OK(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
t.Run("without trie", func(t *testing.T) {
slashableValidator := IsSlashableValidator(test.validator.ActivationEpoch,
slashableValidator := helpers.IsSlashableValidator(test.validator.ActivationEpoch,
test.validator.WithdrawableEpoch, test.validator.Slashed, test.epoch)
assert.Equal(t, test.slashable, slashableValidator, "Expected active validator slashable to be %t", test.slashable)
})
@@ -170,7 +171,7 @@ func TestIsSlashableValidator_OK(t *testing.T) {
require.NoError(t, err)
readOnlyVal, err := beaconState.ValidatorAtIndexReadOnly(0)
require.NoError(t, err)
slashableValidator := IsSlashableValidatorUsingTrie(readOnlyVal, test.epoch)
slashableValidator := helpers.IsSlashableValidatorUsingTrie(readOnlyVal, test.epoch)
assert.Equal(t, test.slashable, slashableValidator, "Expected active validator slashable to be %t", test.slashable)
})
})
@@ -223,17 +224,17 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
}
for _, tt := range tests {
ClearCache()
helpers.ClearCache()
require.NoError(t, state.SetSlot(tt.slot))
result, err := BeaconProposerIndex(context.Background(), state)
result, err := helpers.BeaconProposerIndex(context.Background(), state)
require.NoError(t, err, "Failed to get shard and committees at slot")
assert.Equal(t, tt.index, result, "Result index was an unexpected value")
}
}
func TestBeaconProposerIndex_BadState(t *testing.T) {
ClearCache()
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig()
@@ -261,12 +262,12 @@ func TestBeaconProposerIndex_BadState(t *testing.T) {
// Set a very high slot, so that the retrieved block root will be
// nonexistent for the proposer cache.
require.NoError(t, state.SetSlot(100))
_, err = BeaconProposerIndex(context.Background(), state)
_, err = helpers.BeaconProposerIndex(context.Background(), state)
require.NoError(t, err)
}
func TestComputeProposerIndex_Compatibility(t *testing.T) {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
@@ -281,22 +282,22 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
})
require.NoError(t, err)
indices, err := ActiveValidatorIndices(context.Background(), state, 0)
indices, err := helpers.ActiveValidatorIndices(context.Background(), state, 0)
require.NoError(t, err)
var proposerIndices []primitives.ValidatorIndex
seed, err := Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
seed, err := helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
require.NoError(t, err)
for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
seedWithSlotHash := hash.Hash(seedWithSlot)
index, err := ComputeProposerIndex(state, indices, seedWithSlotHash)
index, err := helpers.ComputeProposerIndex(state, indices, seedWithSlotHash)
require.NoError(t, err)
proposerIndices = append(proposerIndices, index)
}
var wantedProposerIndices []primitives.ValidatorIndex
seed, err = Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
seed, err = helpers.Seed(state, 0, params.BeaconConfig().DomainBeaconProposer)
require.NoError(t, err)
for i := uint64(0); i < uint64(params.BeaconConfig().SlotsPerEpoch); i++ {
seedWithSlot := append(seed[:], bytesutil.Bytes8(i)...)
@@ -309,15 +310,15 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
}
func TestDelayedActivationExitEpoch_OK(t *testing.T) {
ClearCache()
helpers.ClearCache()
epoch := primitives.Epoch(9999)
wanted := epoch + 1 + params.BeaconConfig().MaxSeedLookahead
assert.Equal(t, wanted, ActivationExitEpoch(epoch))
assert.Equal(t, wanted, helpers.ActivationExitEpoch(epoch))
}
func TestActiveValidatorCount_Genesis(t *testing.T) {
ClearCache()
helpers.ClearCache()
c := 1000
validators := make([]*ethpb.Validator, c)
@@ -334,10 +335,10 @@ func TestActiveValidatorCount_Genesis(t *testing.T) {
require.NoError(t, err)
// Preset cache to a bad count.
seed, err := Seed(beaconState, 0, params.BeaconConfig().DomainBeaconAttester)
seed, err := helpers.Seed(beaconState, 0, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.NoError(t, committeeCache.AddCommitteeShuffledList(context.Background(), &cache.Committees{Seed: seed, ShuffledIndices: []primitives.ValidatorIndex{1, 2, 3}}))
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
require.NoError(t, helpers.CommitteeCache().AddCommitteeShuffledList(context.Background(), &cache.Committees{Seed: seed, ShuffledIndices: []primitives.ValidatorIndex{1, 2, 3}}))
validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
require.NoError(t, err)
assert.Equal(t, uint64(c), validatorCount, "Did not get the correct validator count")
}
@@ -353,7 +354,7 @@ func TestChurnLimit_OK(t *testing.T) {
{validatorCount: 2000000, wantedChurn: 30 /* validatorCount/churnLimitQuotient */},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
validators := make([]*ethpb.Validator, test.validatorCount)
for i := 0; i < len(validators); i++ {
@@ -368,9 +369,9 @@ func TestChurnLimit_OK(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
require.NoError(t, err)
resultChurn := ValidatorActivationChurnLimit(validatorCount)
resultChurn := helpers.ValidatorActivationChurnLimit(validatorCount)
assert.Equal(t, test.wantedChurn, resultChurn, "ValidatorActivationChurnLimit(%d)", test.validatorCount)
}
}
@@ -386,7 +387,7 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
{2000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
}
for _, test := range tests {
ClearCache()
helpers.ClearCache()
// Create validators
validators := make([]*ethpb.Validator, test.validatorCount)
@@ -405,11 +406,11 @@ func TestChurnLimitDeneb_OK(t *testing.T) {
require.NoError(t, err)
// Get active validator count
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
validatorCount, err := helpers.ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
require.NoError(t, err)
// Test churn limit calculation
resultChurn := ValidatorActivationChurnLimitDeneb(validatorCount)
resultChurn := helpers.ValidatorActivationChurnLimitDeneb(validatorCount)
assert.Equal(t, test.wantedChurn, resultChurn)
}
}
@@ -574,11 +575,11 @@ func TestActiveValidatorIndices(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ClearCache()
helpers.ClearCache()
s, err := state_native.InitializeFromProtoPhase0(tt.args.state)
require.NoError(t, err)
got, err := ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
got, err := helpers.ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
return
@@ -684,12 +685,12 @@ func TestComputeProposerIndex(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ClearCache()
helpers.ClearCache()
bState := &ethpb.BeaconState{Validators: tt.args.validators}
stTrie, err := state_native.InitializeFromProtoUnsafePhase0(bState)
require.NoError(t, err)
got, err := ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
got, err := helpers.ComputeProposerIndex(stTrie, tt.args.indices, tt.args.seed)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
return
@@ -718,9 +719,9 @@ func TestIsEligibleForActivationQueue(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ClearCache()
helpers.ClearCache()
assert.Equal(t, tt.want, IsEligibleForActivationQueue(tt.validator), "IsEligibleForActivationQueue()")
assert.Equal(t, tt.want, helpers.IsEligibleForActivationQueue(tt.validator), "IsEligibleForActivationQueue()")
})
}
}
@@ -747,11 +748,11 @@ func TestIsIsEligibleForActivation(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ClearCache()
helpers.ClearCache()
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
assert.Equal(t, tt.want, IsEligibleForActivation(s, tt.validator), "IsEligibleForActivation()")
assert.Equal(t, tt.want, helpers.IsEligibleForActivation(s, tt.validator), "IsEligibleForActivation()")
})
}
}
@@ -765,7 +766,7 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
hashFunc := hash.CustomSHA256Hasher()
for i := uint64(0); ; i++ {
candidateIndex, err := ComputeShuffledIndex(primitives.ValidatorIndex(i%length), length, seed, true /* shuffle */)
candidateIndex, err := helpers.ComputeShuffledIndex(primitives.ValidatorIndex(i%length), length, seed, true /* shuffle */)
if err != nil {
return 0, err
}
@@ -787,7 +788,7 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
}
func TestLastActivatedValidatorIndex_OK(t *testing.T) {
ClearCache()
helpers.ClearCache()
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
require.NoError(t, err)
@@ -806,13 +807,13 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) {
require.NoError(t, beaconState.SetValidators(validators))
require.NoError(t, beaconState.SetBalances(balances))
index, err := LastActivatedValidatorIndex(context.Background(), beaconState)
index, err := helpers.LastActivatedValidatorIndex(context.Background(), beaconState)
require.NoError(t, err)
require.Equal(t, index, primitives.ValidatorIndex(3))
}
func TestProposerIndexFromCheckpoint(t *testing.T) {
ClearCache()
helpers.ClearCache()
e := primitives.Epoch(2)
r := [32]byte{'a'}
@@ -820,10 +821,10 @@ func TestProposerIndexFromCheckpoint(t *testing.T) {
ids := [32]primitives.ValidatorIndex{}
slot := primitives.Slot(69) // slot 5 in the epoch
ids[5] = primitives.ValidatorIndex(19)
proposerIndicesCache.Set(e, r, ids)
helpers.ProposerIndicesCache().Set(e, r, ids)
c := &forkchoicetypes.Checkpoint{Root: root, Epoch: e - 1}
proposerIndicesCache.SetCheckpoint(*c, r)
id, err := ProposerIndexAtSlotFromCheckpoint(c, slot)
helpers.ProposerIndicesCache().SetCheckpoint(*c, r)
id, err := helpers.ProposerIndexAtSlotFromCheckpoint(c, slot)
require.NoError(t, err)
require.Equal(t, ids[5], id)
}

View File

@@ -0,0 +1,21 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["helpers.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
visibility = ["//visibility:public"],
deps = [
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -0,0 +1,240 @@
package peerdas
import (
"encoding/binary"
cKzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
"github.com/holiman/uint256"
"github.com/ethereum/go-ethereum/p2p/enode"
errors "github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
const (
// Bytes per cell
bytesPerCell = cKzg4844.FieldElementsPerCell * cKzg4844.BytesPerFieldElement
// Number of cells in the extended matrix
extendedMatrixSize = fieldparams.MaxBlobsPerBlock * cKzg4844.CellsPerExtBlob
)
type (
extendedMatrix []cKzg4844.Cell
cellCoordinate struct {
blobIndex uint64
cellID uint64
}
)
var (
errCustodySubnetCountTooLarge = errors.New("custody subnet count larger than data column sidecar subnet count")
errCellNotFound = errors.New("cell not found (should never happen)")
)
// CustodyColumns computes the columns the node with the given ID should custody.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#helper-functions
func CustodyColumns(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
// Compute the custodied subnets.
subnetIds, err := CustodyColumnSubnets(nodeId, custodySubnetCount)
if err != nil {
return nil, errors.Wrap(err, "custody subnets")
}
columnsPerSubnet := cKzg4844.CellsPerExtBlob / dataColumnSidecarSubnetCount
// Knowing the subnet ID and the number of columns per subnet, select all the columns the node should custody.
// Columns belonging to the same subnet are contiguous.
columnIndices := make(map[uint64]bool, custodySubnetCount*columnsPerSubnet)
for i := uint64(0); i < columnsPerSubnet; i++ {
for subnetId := range subnetIds {
columnIndex := dataColumnSidecarSubnetCount*i + subnetId
columnIndices[columnIndex] = true
}
}
return columnIndices, nil
}
// CustodyColumnSubnets computes the subnets the node with the given ID should custody.
func CustodyColumnSubnets(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
// Check if the custody subnet count is larger than the data column sidecar subnet count.
if custodySubnetCount > dataColumnSidecarSubnetCount {
return nil, errCustodySubnetCountTooLarge
}
// First, compute the subnet IDs that the node should participate in.
subnetIds := make(map[uint64]bool, custodySubnetCount)
for i := uint64(0); uint64(len(subnetIds)) < custodySubnetCount; i++ {
nodeIdUInt256, nextNodeIdUInt256 := new(uint256.Int), new(uint256.Int)
nodeIdUInt256.SetBytes(nodeId.Bytes())
nextNodeIdUInt256.Add(nodeIdUInt256, uint256.NewInt(i))
nextNodeIdUInt64 := nextNodeIdUInt256.Uint64()
nextNodeId := bytesutil.Uint64ToBytesLittleEndian(nextNodeIdUInt64)
hashedNextNodeId := hash.Hash(nextNodeId)
subnetId := binary.LittleEndian.Uint64(hashedNextNodeId[:8]) % dataColumnSidecarSubnetCount
if _, exists := subnetIds[subnetId]; !exists {
subnetIds[subnetId] = true
}
}
return subnetIds, nil
}
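// A minimal sketch tying the two helpers together. Since a custodied column
// index is dataColumnSidecarSubnetCount*i + subnetId, every column returned
// by CustodyColumns must land in a subnet returned by CustodyColumnSubnets.
// The custody subnet count of 4 below is an illustrative assumption, not a
// spec constant.
func exampleCustodyConsistency(nodeId enode.ID) error {
	const custodySubnetCount = 4
	subnets, err := CustodyColumnSubnets(nodeId, custodySubnetCount)
	if err != nil {
		return errors.Wrap(err, "custody column subnets")
	}
	columns, err := CustodyColumns(nodeId, custodySubnetCount)
	if err != nil {
		return errors.Wrap(err, "custody columns")
	}
	subnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
	for column := range columns {
		if !subnets[column%subnetCount] {
			return errors.New("custodied column outside custodied subnets")
		}
	}
	return nil
}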
// ComputeExtendedMatrix computes the extended matrix from the blobs.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#compute_extended_matrix
func ComputeExtendedMatrix(blobs []cKzg4844.Blob) (extendedMatrix, error) {
matrix := make(extendedMatrix, 0, extendedMatrixSize)
for i := range blobs {
// Chunk a non-extended blob into cells representing the corresponding extended blob.
blob := &blobs[i]
cells, err := cKzg4844.ComputeCells(blob)
if err != nil {
return nil, errors.Wrap(err, "compute cells for blob")
}
matrix = append(matrix, cells[:]...)
}
return matrix, nil
}
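// The extended matrix is flattened row-major: row = blob index, column =
// cell ID. A small indexing sketch over that layout (bounds checks omitted):
func (m extendedMatrix) cellAt(blobIndex, cellID uint64) cKzg4844.Cell {
	return m[blobIndex*cKzg4844.CellsPerExtBlob+cellID]
}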
// RecoverMatrix recovers the extended matrix from some cells.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
func RecoverMatrix(cellFromCoordinate map[cellCoordinate]cKzg4844.Cell, blobCount uint64) (extendedMatrix, error) {
matrix := make(extendedMatrix, 0, extendedMatrixSize)
for blobIndex := uint64(0); blobIndex < blobCount; blobIndex++ {
// Filter all cells that belong to the current blob.
cellIds := make([]uint64, 0, cKzg4844.CellsPerExtBlob)
for coordinate := range cellFromCoordinate {
if coordinate.blobIndex == blobIndex {
cellIds = append(cellIds, coordinate.cellID)
}
}
// Retrieve cells corresponding to all `cellIds`.
cellIdsCount := len(cellIds)
cells := make([]cKzg4844.Cell, 0, cellIdsCount)
for _, cellId := range cellIds {
coordinate := cellCoordinate{blobIndex: blobIndex, cellID: cellId}
cell, ok := cellFromCoordinate[coordinate]
if !ok {
return matrix, errCellNotFound
}
cells = append(cells, cell)
}
// Recover all cells.
allCellsForRow, err := cKzg4844.RecoverAllCells(cellIds, cells)
if err != nil {
return matrix, errors.Wrap(err, "recover all cells")
}
matrix = append(matrix, allCellsForRow[:]...)
}
return matrix, nil
}
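// A recovery usage sketch with assumed inputs: rebuild the full matrix for
// two blobs from sampled cells. c-kzg-4844's RecoverAllCells needs at least
// half of each blob's extended cells, so custodying 50% or more of the
// columns is enough to reconstruct the rest.
func exampleRecoverMatrix(sampled map[cellCoordinate]cKzg4844.Cell) (extendedMatrix, error) {
	const blobCount = 2 // illustrative
	return RecoverMatrix(sampled, blobCount)
}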
// DataColumnSidecars computes the data column sidecars for a signed block from its blobs.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#get_data_column_sidecars
func DataColumnSidecars(signedBlock interfaces.SignedBeaconBlock, blobs []cKzg4844.Blob) ([]*ethpb.DataColumnSidecar, error) {
blobsCount := len(blobs)
// Get the signed block header.
signedBlockHeader, err := signedBlock.Header()
if err != nil {
return nil, errors.Wrap(err, "signed block header")
}
// Get the block body.
block := signedBlock.Block()
blockBody := block.Body()
// Get the blob KZG commitments.
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "blob KZG commitments")
}
// Compute the KZG commitments inclusion proof.
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
if err != nil {
return nil, errors.Wrap(err, "merkle proof KZG commitments")
}
// Compute cells and proofs.
cells := make([][cKzg4844.CellsPerExtBlob]cKzg4844.Cell, 0, blobsCount)
proofs := make([][cKzg4844.CellsPerExtBlob]cKzg4844.KZGProof, 0, blobsCount)
for i := range blobs {
blob := &blobs[i]
blobCells, blobProofs, err := cKzg4844.ComputeCellsAndProofs(blob)
if err != nil {
return nil, errors.Wrap(err, "compute cells and proofs")
}
cells = append(cells, blobCells)
proofs = append(proofs, blobProofs)
}
// Get the column sidecars.
sidecars := make([]*ethpb.DataColumnSidecar, 0, cKzg4844.CellsPerExtBlob)
for columnIndex := uint64(0); columnIndex < cKzg4844.CellsPerExtBlob; columnIndex++ {
column := make([]cKzg4844.Cell, 0, blobsCount)
kzgProofOfColumn := make([]cKzg4844.KZGProof, 0, blobsCount)
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
cell := cells[rowIndex][columnIndex]
column = append(column, cell)
kzgProof := proofs[rowIndex][columnIndex]
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
}
columnBytes := make([][]byte, 0, blobsCount)
for i := range column {
cell := column[i]
cellBytes := make([]byte, 0, bytesPerCell)
for _, fieldElement := range cell {
cellBytes = append(cellBytes, fieldElement[:]...)
}
columnBytes = append(columnBytes, cellBytes)
}
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
for _, kzgProof := range kzgProofOfColumn {
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, kzgProof[:])
}
sidecar := &ethpb.DataColumnSidecar{
ColumnIndex: columnIndex,
DataColumn: columnBytes,
KzgCommitments: blobKzgCommitments,
KzgProof: kzgProofOfColumnBytes,
SignedBlockHeader: signedBlockHeader,
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
}
sidecars = append(sidecars, sidecar)
}
return sidecars, nil
}
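// A proposer-side usage sketch (the wiring is hypothetical): build every
// sidecar from the block's blobs, then keep only the columns this node
// custodies.
func exampleCustodiedSidecars(
	block interfaces.SignedBeaconBlock,
	blobs []cKzg4844.Blob,
	nodeId enode.ID,
	custodySubnetCount uint64,
) ([]*ethpb.DataColumnSidecar, error) {
	sidecars, err := DataColumnSidecars(block, blobs)
	if err != nil {
		return nil, errors.Wrap(err, "data column sidecars")
	}
	custody, err := CustodyColumns(nodeId, custodySubnetCount)
	if err != nil {
		return nil, errors.Wrap(err, "custody columns")
	}
	kept := make([]*ethpb.DataColumnSidecar, 0, len(custody))
	for _, sidecar := range sidecars {
		if custody[sidecar.ColumnIndex] {
			kept = append(kept, sidecar)
		}
	}
	return kept, nil
}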

View File

@@ -59,7 +59,7 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch,
return ComputeDomainAndSignWithoutState(st.Fork(), epoch, domain, st.GenesisValidatorsRoot(), obj, key)
}
// ComputeDomainAndSignWithoutState offers the same functionalit as ComputeDomainAndSign without the need to provide a BeaconState.
// ComputeDomainAndSignWithoutState offers the same functionality as ComputeDomainAndSign without the need to provide a BeaconState.
// This is particularly helpful for signing values in tests.
func ComputeDomainAndSignWithoutState(fork *ethpb.Fork, epoch primitives.Epoch, domain [4]byte, vr []byte, obj fssz.HashRoot, key bls.SecretKey) ([]byte, error) {
// EIP-7044: Beginning in Deneb, fix the fork version to Capella for signed exits.

View File

@@ -94,6 +94,15 @@ func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current pri
entry := s.cache.ensure(key)
defer s.cache.delete(key)
root := b.Root()
sumz, err := s.store.WaitForSummarizer(ctx)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", b.Root())).
WithError(err).
Debug("Failed to receive BlobStorageSummarizer within IsDataAvailable")
} else {
entry.setDiskSummary(sumz.Summary(root))
}
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
// ignore their response and decrease their peer score.

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -59,7 +60,12 @@ func (c *cache) delete(key cacheKey) {
// cacheEntry holds a fixed-length cache of BlobSidecars.
type cacheEntry struct {
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
diskSummary filesystem.BlobStorageSummary
}
func (e *cacheEntry) setDiskSummary(sum filesystem.BlobStorageSummary) {
e.diskSummary = sum
}
// stash adds an item to the in-memory cache of BlobSidecars.
@@ -81,9 +87,17 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
// the cache do not match those found in the block. If err is nil, then all expected
// commitments were found in the cache and the sidecar slice return value can be used
// to perform a DA check against the cached sidecars.
// filter only returns blobs that need to be checked. Blobs already available on disk will be excluded.
func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROBlob, error) {
scs := make([]blocks.ROBlob, kc.count())
if e.diskSummary.AllAvailable(kc.count()) {
return nil, nil
}
scs := make([]blocks.ROBlob, 0, kc.count())
for i := uint64(0); i < fieldparams.MaxBlobsPerBlock; i++ {
// We already have this blob; we don't need to write it or validate it.
if e.diskSummary.HasIndex(i) {
continue
}
if kc[i] == nil {
if e.scs[i] != nil {
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.scs[i].KzgCommitment)
@@ -97,7 +111,7 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
if !bytes.Equal(kc[i], e.scs[i].KzgCommitment) {
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitment, kc[i])
}
scs[i] = *e.scs[i]
scs = append(scs, *e.scs[i])
}
return scs, nil

View File

@@ -3,9 +3,14 @@ package das
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func TestCacheEnsureDelete(t *testing.T) {
@@ -23,3 +28,145 @@ func TestCacheEnsureDelete(t *testing.T) {
var nilEntry *cacheEntry
require.Equal(t, nilEntry, c.entries[k])
}
type filterTestCaseSetupFunc func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)
func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpected int) filterTestCaseSetupFunc {
return func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, nBlobs)
commits, err := commitmentsToCheck(blk, blk.Block().Slot())
require.NoError(t, err)
entry := &cacheEntry{}
if len(onDisk) > 0 {
od := map[[32]byte][]int{blk.Root(): onDisk}
sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
sum := sumz.Summary(blk.Root())
entry.setDiskSummary(sum)
}
expected := make([]blocks.ROBlob, 0, nBlobs)
for i := 0; i < commits.count(); i++ {
if entry.diskSummary.HasIndex(uint64(i)) {
continue
}
// If we aren't telling the cache a blob is on disk, add it to the expected list and stash.
expected = append(expected, blobs[i])
require.NoError(t, entry.stash(&blobs[i]))
}
require.Equal(t, numExpected, len(expected))
return entry, commits, expected
}
}
func TestFilterDiskSummary(t *testing.T) {
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
cases := []struct {
name string
setup filterTestCaseSetupFunc
}{
{
name: "full blobs, all on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{0, 1, 2, 3, 4, 5}, 0),
},
{
name: "full blobs, first on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{0}, 5),
},
{
name: "full blobs, middle on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{2}, 5),
},
{
name: "full blobs, last on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{5}, 5),
},
{
name: "full blobs, none on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{}, 6),
},
{
name: "one commitment, on disk",
setup: filterTestCaseSetup(denebSlot, 1, []int{0}, 0),
},
{
name: "one commitment, not on disk",
setup: filterTestCaseSetup(denebSlot, 1, []int{}, 1),
},
{
name: "two commitments, first on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{0}, 1),
},
{
name: "two commitments, last on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{1}, 1),
},
{
name: "two commitments, none on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{}, 2),
},
{
name: "two commitments, all on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{0, 1}, 0),
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
entry, commits, expected := c.setup(t)
// first (root) argument doesn't matter, it is just for logs
got, err := entry.filter([32]byte{}, commits)
require.NoError(t, err)
require.Equal(t, len(expected), len(got))
})
}
}
func TestFilter(t *testing.T) {
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
cases := []struct {
name string
setup func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)
err error
}{
{
name: "commitments mismatch - extra sidecar",
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
commits[5] = nil
return entry, commits, expected
},
err: errCommitmentMismatch,
},
{
name: "sidecar missing",
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
entry.scs[5] = nil
return entry, commits, expected
},
err: errMissingSidecar,
},
{
name: "commitments mismatch - different bytes",
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
entry.scs[5].KzgCommitment = []byte("nope")
return entry, commits, expected
},
err: errCommitmentMismatch,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
entry, commits, expected := c.setup(t)
// first (root) argument doesn't matter, it is just for logs
got, err := entry.filter([32]byte{}, commits)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, len(expected), len(got))
})
}
}

View File

@@ -4,9 +4,10 @@ go_library(
name = "go_default_library",
srcs = [
"blob.go",
"ephemeral.go",
"cache.go",
"log.go",
"metrics.go",
"mock.go",
"pruner.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
@@ -33,6 +34,7 @@ go_test(
name = "go_default_test",
srcs = [
"blob_test.go",
"cache_test.go",
"pruner_test.go",
],
embed = [":go_default_library"],

View File

@@ -1,7 +1,9 @@
package filesystem
import (
"context"
"fmt"
"math"
"os"
"path"
"strconv"
@@ -103,12 +105,29 @@ func (bs *BlobStorage) WarmCache() {
return
}
go func() {
if err := bs.pruner.prune(0); err != nil {
start := time.Now()
if err := bs.pruner.warmCache(); err != nil {
log.WithError(err).Error("Error encountered while warming up blob pruner cache")
}
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete.")
}()
}
// ErrBlobStorageSummarizerUnavailable is a sentinel error returned when there is no pruner/cache available.
// This should be used by code that optionally uses the summarizer to optimize RPC requests. Being able to
// fall back when there is no summarizer allows client code to avoid test complexity where the summarizer doesn't matter.
var ErrBlobStorageSummarizerUnavailable = errors.New("BlobStorage not initialized with a pruner or cache")
// WaitForSummarizer blocks until the BlobStorageSummarizer is ready to use.
// BlobStorageSummarizer is not ready immediately on node startup because it needs to sample the blob filesystem to
// determine which blobs are available.
func (bs *BlobStorage) WaitForSummarizer(ctx context.Context) (BlobStorageSummarizer, error) {
if bs == nil || bs.pruner == nil {
return nil, ErrBlobStorageSummarizerUnavailable
}
return bs.pruner.waitForCache(ctx)
}
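A hypothetical call site for the fallback described above, inside a made-up helper; serveWithoutSummary is invented for illustration, and errors.Is is assumed available from the errors package in use:
func serveByRoot(ctx context.Context, bs *BlobStorage, root [32]byte, idx uint64) error {
	sum, err := bs.WaitForSummarizer(ctx)
	if err != nil {
		if errors.Is(err, ErrBlobStorageSummarizerUnavailable) {
			return serveWithoutSummary(root, idx) // hypothetical slow path reading straight from disk
		}
		return err // e.g. ctx was cancelled while waiting for the cache warm-up
	}
	if !sum.Summary(root).HasIndex(idx) {
		return nil // nothing on disk for this index; skip the read entirely
	}
	// ... read and serve the sidecar ...
	return nil
}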
// Save saves blobs given a list of sidecars.
func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
startTime := time.Now()
@@ -199,6 +218,101 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
return nil
}
// SaveDataColumn saves a data column to our local filesystem.
func (bs *BlobStorage) SaveDataColumn(column blocks.VerifiedRODataColumn) error {
startTime := time.Now()
fname := namerForDataColumn(column)
sszPath := fname.path()
exists, err := afero.Exists(bs.fs, sszPath)
if err != nil {
return err
}
if exists {
log.Debug("Ignoring a duplicate data column sidecar save attempt")
return nil
}
if bs.pruner != nil {
hRoot, err := column.SignedBlockHeader.Header.HashTreeRoot()
if err != nil {
return err
}
if err := bs.pruner.notify(hRoot, column.SignedBlockHeader.Header.Slot, column.ColumnIndex); err != nil {
return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", hRoot)
}
}
// Serialize the ethpb.DataColumnSidecar to binary data using SSZ.
sidecarData, err := column.MarshalSSZ()
if err != nil {
return errors.Wrap(err, "failed to serialize sidecar data")
} else if len(sidecarData) == 0 {
return errSidecarEmptySSZData
}
if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
return err
}
partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))
partialMoved := false
// Ensure the partial file is deleted.
defer func() {
if partialMoved {
return
}
// Best-effort cleanup: the partial file may not exist if the save failed before it was created.
err = bs.fs.Remove(partPath)
if err == nil {
log.WithFields(logrus.Fields{
"partPath": partPath,
}).Debugf("Removed partial file")
}
}()
// Create a partial file and write the serialized data to it.
partialFile, err := bs.fs.Create(partPath)
if err != nil {
return errors.Wrap(err, "failed to create partial file")
}
n, err := partialFile.Write(sidecarData)
if err != nil {
closeErr := partialFile.Close()
if closeErr != nil {
return closeErr
}
return errors.Wrap(err, "failed to write to partial file")
}
if bs.fsync {
if err := partialFile.Sync(); err != nil {
return err
}
}
if err := partialFile.Close(); err != nil {
return err
}
if n != len(sidecarData) {
return fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
}
if n == 0 {
return errEmptyBlobWritten
}
// Atomically rename the partial file to its final name.
err = bs.fs.Rename(partPath, sszPath)
if err != nil {
return errors.Wrap(err, "failed to rename partial file to final name")
}
partialMoved = true
// TODO: Use new metrics for data columns
blobsWrittenCounter.Inc()
blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
return nil
}
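The save path above follows the write-to-partial-then-rename recipe, so a crash can never leave a torn .ssz file in place. A minimal standalone sketch of the same invariant using only the standard library (helper name hypothetical; assumes os is imported):
// atomicWrite writes data to path by staging it in a .part file first.
// The rename is atomic on POSIX filesystems, so readers observe either
// the old state or the complete new file, never a partial write.
func atomicWrite(path string, data []byte) (err error) {
	part := path + ".part"
	defer func() {
		if err != nil {
			_ = os.Remove(part) // best-effort cleanup of the partial file
		}
	}()
	if err = os.WriteFile(part, data, 0o600); err != nil {
		return err
	}
	return os.Rename(part, path)
}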
// Get retrieves a single BlobSidecar by its root and index.
// Since BlobStorage only writes blobs that have undergone full verification, the return
// value is always a VerifiedROBlob.
@@ -224,6 +338,20 @@ func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, er
return verification.BlobSidecarNoop(ro)
}
// GetColumn retrieves a single DataColumnSidecar by its root and index.
func (bs *BlobStorage) GetColumn(root [32]byte, idx uint64) (*ethpb.DataColumnSidecar, error) {
expected := blobNamer{root: root, index: idx}
encoded, err := afero.ReadFile(bs.fs, expected.path())
if err != nil {
return nil, err
}
s := &ethpb.DataColumnSidecar{}
if err := s.UnmarshalSSZ(encoded); err != nil {
return nil, err
}
return s, nil
}
// Remove removes all blobs for a given root.
func (bs *BlobStorage) Remove(root [32]byte) error {
rootDir := blobNamer{root: root}.dir()
@@ -267,6 +395,41 @@ func (bs *BlobStorage) Indices(root [32]byte) ([fieldparams.MaxBlobsPerBlock]boo
return mask, nil
}
// ColumnIndices retrieve the stored column indexes from our filesystem.
func (bs *BlobStorage) ColumnIndices(root [32]byte) ([fieldparams.NumberOfColumns]bool, error) {
var mask [fieldparams.NumberOfColumns]bool
rootDir := blobNamer{root: root}.dir()
entries, err := afero.ReadDir(bs.fs, rootDir)
if err != nil {
if os.IsNotExist(err) {
return mask, nil
}
return mask, err
}
for i := range entries {
if entries[i].IsDir() {
continue
}
name := entries[i].Name()
if !strings.HasSuffix(name, sszExt) {
continue
}
parts := strings.Split(name, ".")
if len(parts) != 2 {
continue
}
u, err := strconv.ParseUint(parts[0], 10, 64)
if err != nil {
return mask, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
}
if u >= fieldparams.NumberOfColumns {
return mask, errIndexOutOfBounds
}
mask[u] = true
}
return mask, nil
}
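A hypothetical caller walking the returned mask, inside a helper that returns an error (bs and root assumed in scope):
mask, err := bs.ColumnIndices(root)
if err != nil {
	return err
}
for i := range mask {
	if mask[i] {
		// Data column i for this root is present on disk.
	}
}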
// Clear deletes all files on the filesystem.
func (bs *BlobStorage) Clear() error {
dirs, err := listDir(bs.fs, ".")
@@ -281,6 +444,15 @@ func (bs *BlobStorage) Clear() error {
return nil
}
// WithinRetentionPeriod checks if the requested epoch is within the blob retention period.
func (bs *BlobStorage) WithinRetentionPeriod(requested, current primitives.Epoch) bool {
if requested > math.MaxUint64-bs.retentionEpochs {
// If there is an overflow, then the retention period was set to an extremely large number.
return true
}
return requested+bs.retentionEpochs >= current
}
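A quick worked example with illustrative numbers, assuming a 16-epoch retention:
storage := &BlobStorage{retentionEpochs: 16}
_ = storage.WithinRetentionPeriod(4, 20) // true:  4 + 16 >= 20
_ = storage.WithinRetentionPeriod(3, 20) // false: 3 + 16 <  20
// With retentionEpochs = math.MaxUint64, requested+retentionEpochs would wrap,
// so the overflow guard short-circuits to true instead.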
type blobNamer struct {
root [32]byte
index uint64
@@ -290,6 +462,10 @@ func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
return blobNamer{root: sc.BlockRoot(), index: sc.Index}
}
func namerForDataColumn(col blocks.VerifiedRODataColumn) blobNamer {
return blobNamer{root: col.BlockRoot(), index: col.ColumnIndex}
}
func (p blobNamer) dir() string {
return rootString(p.root)
}

View File

@@ -2,6 +2,7 @@ package filesystem
import (
"bytes"
"math"
"os"
"path"
"sync"
@@ -24,8 +25,7 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
require.NoError(t, err)
t.Run("no error for duplicate", func(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
fs, bs := NewEphemeralBlobStorageWithFs(t)
existingSidecar := testSidecars[0]
blobPath := namerForSidecar(existingSidecar).path()
@@ -129,8 +129,7 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
// pollUntil polls a condition function until it returns true or a timeout is reached.
func TestBlobIndicesBounds(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
fs, bs := NewEphemeralBlobStorageWithFs(t)
root := [32]byte{}
okIdx := uint64(fieldparams.MaxBlobsPerBlock - 1)
@@ -161,8 +160,7 @@ func writeFakeSSZ(t *testing.T, fs afero.Fs, root [32]byte, idx uint64) {
func TestBlobStoragePrune(t *testing.T) {
currentSlot := primitives.Slot(200000)
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
fs, bs := NewEphemeralBlobStorageWithFs(t)
t.Run("PruneOne", func(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 300, fieldparams.MaxBlobsPerBlock)
@@ -218,8 +216,7 @@ func TestBlobStoragePrune(t *testing.T) {
func BenchmarkPruning(b *testing.B) {
var t *testing.T
_, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
_, bs := NewEphemeralBlobStorageWithFs(t)
blockQty := 10000
currentSlot := primitives.Slot(150000)
@@ -248,3 +245,50 @@ func TestNewBlobStorage(t *testing.T) {
_, err = NewBlobStorage(WithBasePath(path.Join(t.TempDir(), "good")))
require.NoError(t, err)
}
func TestConfig_WithinRetentionPeriod(t *testing.T) {
retention := primitives.Epoch(16)
storage := &BlobStorage{retentionEpochs: retention}
cases := []struct {
name string
requested primitives.Epoch
current primitives.Epoch
within bool
}{
{
name: "before",
requested: 0,
current: retention + 1,
within: false,
},
{
name: "same",
requested: 0,
current: 0,
within: true,
},
{
name: "boundary",
requested: 0,
current: retention,
within: true,
},
{
name: "one less",
requested: retention - 1,
current: retention,
within: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
require.Equal(t, c.within, storage.WithinRetentionPeriod(c.requested, c.current))
})
}
t.Run("overflow", func(t *testing.T) {
storage := &BlobStorage{retentionEpochs: math.MaxUint64}
require.Equal(t, true, storage.WithinRetentionPeriod(1, 1))
})
}

View File

@@ -0,0 +1,119 @@
package filesystem
import (
"sync"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
type blobIndexMask [fieldparams.MaxBlobsPerBlock]bool
// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
type BlobStorageSummary struct {
slot primitives.Slot
mask blobIndexMask
}
// HasIndex returns true if the BlobSidecar at the given index is available in the filesystem.
func (s BlobStorageSummary) HasIndex(idx uint64) bool {
// Protect from panic, but assume callers are sophisticated enough to not need an error telling them they have an invalid idx.
if idx >= fieldparams.MaxBlobsPerBlock {
return false
}
return s.mask[idx]
}
// AllAvailable returns true if we have all blobs for all indices from 0 to count-1.
func (s BlobStorageSummary) AllAvailable(count int) bool {
if count > fieldparams.MaxBlobsPerBlock {
return false
}
for i := 0; i < count; i++ {
if !s.mask[i] {
return false
}
}
return true
}
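Concretely, for a mask where only indices 0 and 1 are set:
// AllAvailable(2) == true   (indices 0 and 1 are both present)
// AllAvailable(3) == false  (index 2 is missing)
// AllAvailable(0) == true   (vacuously: a block with no commitments needs no blobs)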
// BlobStorageSummarizer can be used to receive a summary of metadata about blobs on disk for a given root.
// The BlobStorageSummary can be used to check which indices (if any) are available for a given block by root.
type BlobStorageSummarizer interface {
Summary(root [32]byte) BlobStorageSummary
}
type blobStorageCache struct {
mu sync.RWMutex
nBlobs float64
cache map[string]BlobStorageSummary
}
var _ BlobStorageSummarizer = &blobStorageCache{}
func newBlobStorageCache() *blobStorageCache {
return &blobStorageCache{
cache: make(map[string]BlobStorageSummary, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
}
}
// Summary returns the BlobStorageSummary for `root`. The BlobStorageSummary can be used to check for the presence of
// BlobSidecars based on Index.
func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
k := rootString(root)
s.mu.RLock()
defer s.mu.RUnlock()
return s.cache[k]
}
func (s *blobStorageCache) ensure(key string, slot primitives.Slot, idx uint64) error {
if idx >= fieldparams.MaxBlobsPerBlock {
return errIndexOutOfBounds
}
s.mu.Lock()
defer s.mu.Unlock()
v := s.cache[key]
v.slot = slot
if !v.mask[idx] {
s.updateMetrics(1)
}
v.mask[idx] = true
s.cache[key] = v
return nil
}
func (s *blobStorageCache) slot(key string) (primitives.Slot, bool) {
s.mu.RLock()
defer s.mu.RUnlock()
v, ok := s.cache[key]
if !ok {
return 0, false
}
return v.slot, ok
}
func (s *blobStorageCache) evict(key string) {
var deleted float64
s.mu.Lock()
v, ok := s.cache[key]
if ok {
for i := range v.mask {
if v.mask[i] {
deleted += 1
}
}
}
delete(s.cache, key)
s.mu.Unlock()
if deleted > 0 {
s.updateMetrics(-deleted)
}
}
func (s *blobStorageCache) updateMetrics(delta float64) {
s.nBlobs += delta
blobDiskCount.Set(s.nBlobs)
blobDiskSize.Set(s.nBlobs * bytesPerSidecar)
}

View File

@@ -0,0 +1,150 @@
package filesystem
import (
"testing"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestSlotByRoot_Summary(t *testing.T) {
var noneSet, allSet, firstSet, lastSet, oneSet blobIndexMask
firstSet[0] = true
lastSet[len(lastSet)-1] = true
oneSet[1] = true
for i := range allSet {
allSet[i] = true
}
cases := []struct {
name string
root [32]byte
expected *blobIndexMask
}{
{
name: "not found",
},
{
name: "none set",
expected: &noneSet,
},
{
name: "index 1 set",
expected: &oneSet,
},
{
name: "all set",
expected: &allSet,
},
{
name: "first set",
expected: &firstSet,
},
{
name: "last set",
expected: &lastSet,
},
}
sc := newBlobStorageCache()
for _, c := range cases {
if c.expected != nil {
key := rootString(bytesutil.ToBytes32([]byte(c.name)))
sc.cache[key] = BlobStorageSummary{slot: 0, mask: *c.expected}
}
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
key := bytesutil.ToBytes32([]byte(c.name))
sum := sc.Summary(key)
for i := range c.expected {
ui := uint64(i)
if c.expected == nil {
require.Equal(t, false, sum.HasIndex(ui))
} else {
require.Equal(t, c.expected[i], sum.HasIndex(ui))
}
}
})
}
}
func TestAllAvailable(t *testing.T) {
idxUpTo := func(u int) []int {
r := make([]int, u)
for i := range r {
r[i] = i
}
return r
}
require.DeepEqual(t, []int{}, idxUpTo(0))
require.DeepEqual(t, []int{0}, idxUpTo(1))
require.DeepEqual(t, []int{0, 1, 2, 3, 4, 5}, idxUpTo(6))
cases := []struct {
name string
idxSet []int
count int
aa bool
}{
{
// If there are no blobs committed, then all the committed blobs are available.
name: "none in idx, 0 arg",
count: 0,
aa: true,
},
{
name: "none in idx, 1 arg",
count: 1,
aa: false,
},
{
name: "first in idx, 1 arg",
idxSet: []int{0},
count: 1,
aa: true,
},
{
name: "second in idx, 1 arg",
idxSet: []int{1},
count: 1,
aa: false,
},
{
name: "first missing, 2 arg",
idxSet: []int{1},
count: 2,
aa: false,
},
{
name: "all missing, 1 arg",
count: 6,
aa: false,
},
{
name: "out of bound is safe",
count: fieldparams.MaxBlobsPerBlock + 1,
aa: false,
},
{
name: "max present",
count: fieldparams.MaxBlobsPerBlock,
idxSet: idxUpTo(fieldparams.MaxBlobsPerBlock),
aa: true,
},
{
name: "one present",
count: 1,
idxSet: idxUpTo(1),
aa: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
var mask blobIndexMask
for _, idx := range c.idxSet {
mask[idx] = true
}
sum := BlobStorageSummary{mask: mask}
require.Equal(t, c.aa, sum.AllAvailable(c.count))
})
}
}

View File

@@ -12,7 +12,7 @@ import (
// improving test performance and simplifying cleanup.
func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
fs := afero.NewMemMapFs()
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
if err != nil {
t.Fatal("test setup issue", err)
}
@@ -21,13 +21,13 @@ func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
// NewEphemeralBlobStorageWithFs can be used by tests that want access to the virtual filesystem
// in order to interact with it outside the parameters of the BlobStorage api.
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage, error) {
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage) {
fs := afero.NewMemMapFs()
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
if err != nil {
t.Fatal("test setup issue", err)
}
return fs, &BlobStorage{fs: fs, pruner: pruner}, nil
return fs, &BlobStorage{fs: fs, pruner: pruner}
}
type BlobMocker struct {
@@ -61,3 +61,15 @@ func NewEphemeralBlobStorageWithMocker(_ testing.TB) (*BlobMocker, *BlobStorage)
bs := &BlobStorage{fs: fs}
return &BlobMocker{fs: fs, bs: bs}, bs
}
func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStorageSummarizer {
c := newBlobStorageCache()
for k, v := range set {
for i := range v {
if err := c.ensure(rootString(k), 0, uint64(v[i])); err != nil {
t.Fatal(err)
}
}
}
return c
}
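A hypothetical test usage, mirroring how the das cache tests construct their summarizers (root assumed in scope):
od := map[[32]byte][]int{root: {0, 2}}
sum := NewMockBlobStorageSummarizer(t, od).Summary(root)
require.Equal(t, true, sum.HasIndex(0))
require.Equal(t, false, sum.HasIndex(1))
require.Equal(t, true, sum.HasIndex(2))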

View File

@@ -1,6 +1,7 @@
package filesystem
import (
"context"
"encoding/binary"
"io"
"path"
@@ -12,7 +13,6 @@ import (
"time"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -32,22 +32,39 @@ type blobPruner struct {
sync.Mutex
prunedBefore atomic.Uint64
windowSize primitives.Slot
slotMap *slotForRoot
cache *blobStorageCache
cacheReady chan struct{}
warmed bool
fs afero.Fs
}
func newBlobPruner(fs afero.Fs, retain primitives.Epoch) (*blobPruner, error) {
type prunerOpt func(*blobPruner) error
func withWarmedCache() prunerOpt {
return func(p *blobPruner) error {
return p.warmCache()
}
}
func newBlobPruner(fs afero.Fs, retain primitives.Epoch, opts ...prunerOpt) (*blobPruner, error) {
r, err := slots.EpochStart(retain + retentionBuffer)
if err != nil {
return nil, errors.Wrap(err, "could not set retentionSlots")
}
return &blobPruner{fs: fs, windowSize: r, slotMap: newSlotForRoot()}, nil
cw := make(chan struct{})
p := &blobPruner{fs: fs, windowSize: r, cache: newBlobStorageCache(), cacheReady: cw}
for _, o := range opts {
if err := o(p); err != nil {
return nil, err
}
}
return p, nil
}
// notify updates the pruner's view of root->blob mappings. This allows the pruner to build a cache
// of root->slot mappings and decide when to evict old blobs based on the age of present blobs.
func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) error {
if err := p.slotMap.ensure(rootString(root), latest, idx); err != nil {
if err := p.cache.ensure(rootString(root), latest, idx); err != nil {
return err
}
pruned := uint64(windowMin(latest, p.windowSize))
@@ -55,6 +72,8 @@ func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) e
return nil
}
go func() {
p.Lock()
defer p.Unlock()
if err := p.prune(primitives.Slot(pruned)); err != nil {
log.WithError(err).Errorf("Failed to prune blobs from slot %d", latest)
}
@@ -62,7 +81,7 @@ func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) e
return nil
}
func windowMin(latest primitives.Slot, offset primitives.Slot) primitives.Slot {
func windowMin(latest, offset primitives.Slot) primitives.Slot {
// Safely compute the first slot in the epoch for the latest slot
latest = latest - latest%params.BeaconConfig().SlotsPerEpoch
if latest < offset {
@@ -71,12 +90,32 @@ func windowMin(latest primitives.Slot, offset primitives.Slot) primitives.Slot {
return latest - offset
}
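Worked example, assuming 32-slot epochs:
// windowMin(4101, 64) floors 4101 to its epoch start 4096 and returns 4096-64 = 4032.
// windowMin(31, 64) floors to 0, which is below the offset, so the elided branch
// returns 0 rather than underflowing the unsigned slot type.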
func (p *blobPruner) warmCache() error {
p.Lock()
defer p.Unlock()
if err := p.prune(0); err != nil {
return err
}
if !p.warmed {
p.warmed = true
close(p.cacheReady)
}
return nil
}
func (p *blobPruner) waitForCache(ctx context.Context) (*blobStorageCache, error) {
select {
case <-p.cacheReady:
return p.cache, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
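Together, cacheReady and the warmed flag form a one-shot readiness latch: warmCache closes the channel exactly once, and waitForCache blocks on it or on the caller's context. A self-contained sketch of the same pattern, with sync.Once standing in for the mutex-guarded warmed flag (names hypothetical; assumes context and sync are imported):
type latch struct {
	once  sync.Once
	ready chan struct{}
}

func newLatch() *latch { return &latch{ready: make(chan struct{})} }

// signal marks the latch ready; safe to call any number of times.
func (l *latch) signal() { l.once.Do(func() { close(l.ready) }) }

// wait blocks until the latch is signaled or the context is done.
func (l *latch) wait(ctx context.Context) error {
	select {
	case <-l.ready:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}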
// Prune prunes blobs in the base directory based on the retention epoch.
// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs).
// This is so that we keep a slight buffer and blobs are deleted after n+2 epochs.
func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
p.Lock()
defer p.Unlock()
start := time.Now()
totalPruned, totalErr := 0, 0
// Customize logging/metrics behavior for the initial cache warmup when slot=0.
@@ -122,7 +161,7 @@ func shouldRetain(slot, pruneBefore primitives.Slot) bool {
func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int, error) {
root := rootFromDir(dir)
slot, slotCached := p.slotMap.slot(root)
slot, slotCached := p.cache.slot(root)
// Return early if the slot is cached and doesn't need pruning.
if slotCached && shouldRetain(slot, pruneBefore) {
return 0, nil
@@ -151,7 +190,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
if err != nil {
return 0, errors.Wrapf(err, "index could not be determined for blob file %s", scFiles[i])
}
if err := p.slotMap.ensure(root, slot, idx); err != nil {
if err := p.cache.ensure(root, slot, idx); err != nil {
return 0, errors.Wrapf(err, "could not update prune cache for blob file %s", scFiles[i])
}
}
@@ -179,7 +218,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
return removed, errors.Wrapf(err, "unable to remove blob directory %s", dir)
}
p.slotMap.evict(rootFromDir(dir))
p.cache.evict(rootFromDir(dir))
return len(scFiles), nil
}
@@ -269,71 +308,3 @@ func filterSsz(s string) bool {
func filterPart(s string) bool {
return filepath.Ext(s) == dotPartExt
}
func newSlotForRoot() *slotForRoot {
return &slotForRoot{
cache: make(map[string]*slotCacheEntry, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
}
}
type slotCacheEntry struct {
slot primitives.Slot
mask [fieldparams.MaxBlobsPerBlock]bool
}
type slotForRoot struct {
sync.RWMutex
nBlobs float64
cache map[string]*slotCacheEntry
}
func (s *slotForRoot) updateMetrics(delta float64) {
s.nBlobs += delta
blobDiskCount.Set(s.nBlobs)
blobDiskSize.Set(s.nBlobs * bytesPerSidecar)
}
func (s *slotForRoot) ensure(key string, slot primitives.Slot, idx uint64) error {
if idx >= fieldparams.MaxBlobsPerBlock {
return errIndexOutOfBounds
}
s.Lock()
defer s.Unlock()
v, ok := s.cache[key]
if !ok {
v = &slotCacheEntry{}
}
v.slot = slot
if !v.mask[idx] {
s.updateMetrics(1)
}
v.mask[idx] = true
s.cache[key] = v
return nil
}
func (s *slotForRoot) slot(key string) (primitives.Slot, bool) {
s.RLock()
defer s.RUnlock()
v, ok := s.cache[key]
if !ok {
return 0, false
}
return v.slot, ok
}
func (s *slotForRoot) evict(key string) {
s.Lock()
defer s.Unlock()
v, ok := s.cache[key]
var deleted float64
if ok {
for i := range v.mask {
if v.mask[i] {
deleted += 1
}
}
s.updateMetrics(-deleted)
}
delete(s.cache, key)
}

View File

@@ -28,7 +28,7 @@ func TestTryPruneDir_CachedNotExpired(t *testing.T) {
root := fmt.Sprintf("%#x", sc.BlockRoot())
// This slot is right on the edge of what would need to be pruned, so by adding it to the cache and
// skipping any other test setup, we can be certain the hot cache path never touches the filesystem.
require.NoError(t, pr.slotMap.ensure(root, sc.Slot(), 0))
require.NoError(t, pr.cache.ensure(root, sc.Slot(), 0))
pruned, err := pr.tryPruneDir(root, pr.windowSize)
require.NoError(t, err)
require.Equal(t, 0, pruned)
@@ -45,14 +45,13 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
require.NoError(t, err)
root := fmt.Sprintf("%#x", sc.BlockRoot())
require.NoError(t, fs.Mkdir(root, directoryPermissions)) // make empty directory
require.NoError(t, pr.slotMap.ensure(root, sc.Slot(), 0))
require.NoError(t, pr.cache.ensure(root, sc.Slot(), 0))
pruned, err := pr.tryPruneDir(root, slot+1)
require.NoError(t, err)
require.Equal(t, 0, pruned)
})
t.Run("blobs to delete", func(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
fs, bs := NewEphemeralBlobStorageWithFs(t)
var slot primitives.Slot = 0
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
scs, err := verification.BlobSidecarSliceNoop(sidecars)
@@ -63,7 +62,7 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
// check that the root->slot is cached
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
cs, cok := bs.pruner.slotMap.slot(root)
cs, cok := bs.pruner.cache.slot(root)
require.Equal(t, true, cok)
require.Equal(t, slot, cs)
@@ -83,8 +82,7 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
func TestTryPruneDir_SlotFromFile(t *testing.T) {
t.Run("expired blobs deleted", func(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
fs, bs := NewEphemeralBlobStorageWithFs(t)
var slot primitives.Slot = 0
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
scs, err := verification.BlobSidecarSliceNoop(sidecars)
@@ -95,12 +93,12 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
// check that the root->slot is cached
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
cs, ok := bs.pruner.slotMap.slot(root)
cs, ok := bs.pruner.cache.slot(root)
require.Equal(t, true, ok)
require.Equal(t, slot, cs)
// evict it from the cache so that we trigger the file read path
bs.pruner.slotMap.evict(root)
_, ok = bs.pruner.slotMap.slot(root)
bs.pruner.cache.evict(root)
_, ok = bs.pruner.cache.slot(root)
require.Equal(t, false, ok)
// ensure that we see the saved files in the filesystem
@@ -116,10 +114,9 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
require.Equal(t, 0, len(files))
})
t.Run("not expired, intact", func(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
fs, bs := NewEphemeralBlobStorageWithFs(t)
// Set slot equal to the window size, so it should be retained.
var slot primitives.Slot = bs.pruner.windowSize
slot := bs.pruner.windowSize
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
scs, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
@@ -129,8 +126,8 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
// Evict slot mapping from the cache so that we trigger the file read path.
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
bs.pruner.slotMap.evict(root)
_, ok := bs.pruner.slotMap.slot(root)
bs.pruner.cache.evict(root)
_, ok := bs.pruner.cache.slot(root)
require.Equal(t, false, ok)
// Ensure that we see the saved files in the filesystem.
@@ -184,8 +181,7 @@ func TestSlotFromFile(t *testing.T) {
}
for _, c := range cases {
t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
fs, bs := NewEphemeralBlobStorageWithFs(t)
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
sc, err := verification.BlobSidecarNoop(sidecars[0])
require.NoError(t, err)
@@ -243,10 +239,8 @@ func TestListDir(t *testing.T) {
}
blobWithSszAndTmp := dirFiles{name: "0x1234567890", isDir: true,
children: []dirFiles{{name: "5.ssz"}, {name: "0.part"}}}
fsLayout.children = append(fsLayout.children, notABlob)
fsLayout.children = append(fsLayout.children, childlessBlob)
fsLayout.children = append(fsLayout.children, blobWithSsz)
fsLayout.children = append(fsLayout.children, blobWithSszAndTmp)
fsLayout.children = append(fsLayout.children,
notABlob, childlessBlob, blobWithSsz, blobWithSszAndTmp)
topChildren := make([]string, len(fsLayout.children))
for i := range fsLayout.children {
@@ -282,10 +276,7 @@ func TestListDir(t *testing.T) {
dirPath: ".",
expected: []string{notABlob.name},
filter: func(s string) bool {
if s == notABlob.name {
return true
}
return false
return s == notABlob.name
},
},
{

View File

@@ -697,7 +697,7 @@ func decodeSlasherChunk(enc []byte) ([]uint16, error) {
}
// Encode attestation record to bytes.
// The output encoded attestation record consists in the signing root concatened with the compressed attestation record.
// The output encoded attestation record consists in the signing root concatenated with the compressed attestation record.
func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byte, error) {
if att == nil || att.IndexedAttestation == nil {
return []byte{}, errors.New("nil proposal record")
@@ -716,7 +716,7 @@ func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byt
}
// Decode attestation record from bytes.
// The input encoded attestation record consists in the signing root concatened with the compressed attestation record.
// The input encoded attestation record consists in the signing root concatenated with the compressed attestation record.
func decodeAttestationRecord(encoded []byte) (*slashertypes.IndexedAttestationWrapper, error) {
if len(encoded) < rootSize {
return nil, fmt.Errorf("wrong length for encoded attestation record, want minimum %d, got %d", rootSize, len(encoded))

View File

@@ -10,7 +10,7 @@ import (
)
// TestCleanup ensures that the cleanup function unregisters the prometheus.Collection
// also tests the interchangability of the explicit prometheus Register/Unregister
// also tests the interchangeability of the explicit prometheus Register/Unregister
// and the implicit methods within the collector implementation
func TestCleanup(t *testing.T) {
ctx := context.Background()
@@ -32,11 +32,11 @@ func TestCleanup(t *testing.T) {
assert.Equal(t, true, unregistered, "prometheus.Unregister failed to unregister PowchainCollector on final cleanup")
}
// TestCancelation tests that canceling the context passed into
// TestCancellation tests that canceling the context passed into
// NewPowchainCollector cleans everything up as expected. This
// does come at the cost of an extra channel cluttering up
// PowchainCollector, just for this test.
func TestCancelation(t *testing.T) {
func TestCancellation(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
pc, err := NewPowchainCollector(ctx)
assert.NoError(t, err, "Unexpected error calling NewPowchainCollector")

View File

@@ -643,9 +643,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
if err := d.ClearDB(); err != nil {
return errors.Wrap(err, "could not clear database")
}
if err := b.BlobStorage.Clear(); err != nil {
return errors.Wrap(err, "could not clear blob storage")
}
d, err = slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrap(err, "could not create new database")
@@ -707,6 +705,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
QUICPort: cliCtx.Uint(cmd.P2PQUICPort.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),

View File

@@ -217,9 +217,9 @@ func Test_hasNetworkFlag(t *testing.T) {
want bool
}{
{
name: "Prater testnet",
networkName: features.PraterTestnet.Name,
networkValue: "prater",
name: "Holesky testnet",
networkName: features.HoleskyTestnet.Name,
networkValue: "holesky",
want: true,
},
{

View File

@@ -46,6 +46,7 @@ go_library(
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
@@ -56,6 +57,7 @@ go_library(
"//beacon-chain/startup:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
@@ -90,6 +92,7 @@ go_library(
"@com_github_libp2p_go_libp2p//core/peerstore:go_default_library",
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/transport/quic:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
"@com_github_libp2p_go_libp2p_mplex//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",

View File

@@ -10,6 +10,8 @@ import (
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
@@ -95,7 +97,12 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
return nil
}
func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, forkDigest [4]byte) {
func (s *Service) internalBroadcastAttestation(
ctx context.Context,
subnet uint64,
att *ethpb.Attestation,
forkDigest [fieldparams.VersionLength]byte,
) {
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -137,11 +144,11 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
// In the event our attestation is outdated and beyond the
// acceptable threshold, we exit early and do not broadcast it.
currSlot := slots.CurrentSlot(uint64(s.genesisTime.Unix()))
if att.Data.Slot+params.BeaconConfig().SlotsPerEpoch < currSlot {
if err := helpers.ValidateAttestationTime(att.Data.Slot, s.genesisTime, params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
log.WithFields(logrus.Fields{
"attestationSlot": att.Data.Slot,
"currentSlot": currSlot,
}).Warning("Attestation is too old to broadcast, discarding it")
}).WithError(err).Warning("Attestation is too old to broadcast, discarding it")
return
}
@@ -151,7 +158,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
}
}
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [fieldparams.VersionLength]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -227,7 +234,12 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
return nil
}
func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
func (s *Service) internalBroadcastBlob(
ctx context.Context,
subnet uint64,
blobSidecar *ethpb.BlobSidecar,
forkDigest [fieldparams.VersionLength]byte,
) {
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -242,7 +254,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
s.subnetLocker(wrappedSubIdx).RUnlock()
if !hasPeer {
blobSidecarCommitteeBroadcastAttempts.Inc()
blobSidecarBroadcastAttempts.Inc()
if err := func() error {
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()
@@ -251,7 +263,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
return err
}
if ok {
blobSidecarCommitteeBroadcasts.Inc()
blobSidecarBroadcasts.Inc()
return nil
}
return errors.New("failed to find peers for subnet")
@@ -267,6 +279,99 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
}
}
// BroadcastDataColumn broadcasts a data column to the p2p network. The message is assumed to be
// broadcast on the current fork and to the input column subnet.
// TODO: Add tests
func (s *Service) BroadcastDataColumn(ctx context.Context, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error {
// Add tracing to the function.
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastBlob")
defer span.End()
// Ensure the data column sidecar is not nil.
if dataColumnSidecar == nil {
return errors.New("attempted to broadcast nil data column sidecar")
}
// Retrieve the current fork digest.
forkDigest, err := s.currentForkDigest()
if err != nil {
err := errors.Wrap(err, "current fork digest")
tracing.AnnotateError(span, err)
return err
}
// Non-blocking broadcast, with attempts to discover a column subnet peer if none is available.
go s.internalBroadcastDataColumn(ctx, columnSubnet, dataColumnSidecar, forkDigest)
return nil
}
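A hypothetical call site; deriving the subnet as column index modulo the subnet count, and the DataColumnSidecarSubnetCount parameter name, are assumptions for illustration and not part of this diff:
subnet := sidecar.ColumnIndex % params.BeaconConfig().DataColumnSidecarSubnetCount
if err := s.BroadcastDataColumn(ctx, subnet, sidecar); err != nil {
	log.WithError(err).Error("Could not broadcast data column sidecar")
}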
func (s *Service) internalBroadcastDataColumn(
ctx context.Context,
columnSubnet uint64,
dataColumnSidecar *ethpb.DataColumnSidecar,
forkDigest [fieldparams.VersionLength]byte,
) {
// Add tracing to the function.
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
defer span.End()
// Increase the number of broadcast attempts.
dataColumnSidecarBroadcastAttempts.Inc()
// Clear parent context / deadline.
ctx = trace.NewContext(context.Background(), span)
// Define a one-slot length context timeout.
oneSlot := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
ctx, cancel := context.WithTimeout(ctx, oneSlot)
defer cancel()
// Build the topic corresponding to this column subnet and this fork digest.
topic := dataColumnSubnetToTopic(columnSubnet, forkDigest)
// Compute the wrapped subnet index.
wrappedSubIdx := columnSubnet + dataColumnSubnetVal
// Check if we have peers with this subnet.
hasPeer := func() bool {
s.subnetLocker(wrappedSubIdx).RLock()
defer s.subnetLocker(wrappedSubIdx).RUnlock()
return s.hasPeerWithSubnet(topic)
}()
// If no peers are found, attempt to find peers with this subnet.
if !hasPeer {
if err := func() error {
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()
ok, err := s.FindPeersWithSubnet(ctx, topic, columnSubnet, 1 /*threshold*/)
if err != nil {
return errors.Wrap(err, "find peers for subnet")
}
if ok {
return nil
}
return errors.New("failed to find peers for subnet")
}(); err != nil {
log.WithError(err).Error("Failed to find peers")
tracing.AnnotateError(span, err)
}
}
// Broadcast the data column sidecar to the network.
if err := s.broadcastObject(ctx, dataColumnSidecar, topic); err != nil {
log.WithError(err).Error("Failed to broadcast blob sidecar")
tracing.AnnotateError(span, err)
}
// Increase the number of successful broadcasts (the blob sidecar metric is reused until data-column-specific metrics exist).
blobSidecarBroadcasts.Inc()
}
// broadcastObject broadcasts messages to other peers in our gossip mesh.
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
ctx, span := trace.StartSpan(ctx, "p2p.broadcastObject")
@@ -296,14 +401,18 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
return nil
}
func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
func attestationToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
}
func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
func syncCommitteeToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}
func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
func blobSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
}
func dataColumnSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
return fmt.Sprintf(DataColumnSubnetTopicFormat, forkDigest, subnet)
}

View File

@@ -24,6 +24,7 @@ type Config struct {
PrivateKey string
DataDir string
MetaDataDir string
QUICPort uint
TCPPort uint
UDPPort uint
MaxPeers uint

View File

@@ -15,6 +15,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
@@ -39,10 +40,21 @@ const (
udp6
)
// RefreshENR uses an epoch to refresh the enr entry for our node
// with the tracked committee ids for the epoch, allowing our node
// to be dynamically discoverable by others given our tracked committee ids.
func (s *Service) RefreshENR() {
type (
quicProtocol uint16
custodySubnetCount uint64
)
// quicProtocol is the "quic" key, which holds the QUIC port of the node.
func (quicProtocol) ENRKey() string { return "quic" }
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5
func (custodySubnetCount) ENRKey() string { return "custody_subnet_count" }
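A sketch of reading the new entry back off a peer's record, falling back to the spec minimum when it is absent (helper name hypothetical; treats any load error as absence for brevity):
func custodyCountFromRecord(node *enode.Node) uint64 {
	var count custodySubnetCount
	if err := node.Record().Load(&count); err != nil {
		return params.BeaconConfig().CustodyRequirement
	}
	return uint64(count)
}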
// RefreshPersistentSubnets checks that we are tracking our local persistent subnets for a variety of gossip topics.
// This routine checks for our attestation, sync committee and data column subnets and updates them if they have
// been rotated.
func (s *Service) RefreshPersistentSubnets() {
// Return early if discv5 isn't running.
if s.dv5Listener == nil || !s.isInitialized() {
return
@@ -52,6 +64,10 @@ func (s *Service) RefreshENR() {
log.WithError(err).Error("Could not initialize persistent subnets")
return
}
if err := initializePersistentColumnSubnets(s.dv5Listener.LocalNode().ID()); err != nil {
log.WithError(err).Error("Could not initialize persistent column subnets")
return
}
bitV := bitfield.NewBitvector64()
committees := cache.SubnetIDs.GetAllSubnets()
@@ -100,14 +116,15 @@ func (s *Service) RefreshENR() {
// listen for new nodes watches for new nodes in the network and adds them to the peerstore.
func (s *Service) listenForNewNodes() {
iterator := s.dv5Listener.RandomNodes()
iterator = enode.Filter(iterator, s.filterPeer)
iterator := enode.Filter(s.dv5Listener.RandomNodes(), s.filterPeer)
defer iterator.Close()
for {
// Exit if service's context is canceled
// Exit if service's context is canceled.
if s.ctx.Err() != nil {
break
}
if s.isPeerAtLimit(false /* inbound */) {
// Pause the main loop for a period to stop looking
// for new peers.
@@ -115,16 +132,22 @@ func (s *Service) listenForNewNodes() {
time.Sleep(pollingPeriod)
continue
}
exists := iterator.Next()
if !exists {
if exists := iterator.Next(); !exists {
break
}
node := iterator.Node()
peerInfo, _, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Error("Could not convert to peer info")
continue
}
if peerInfo == nil {
continue
}
// Make sure that the peer is not dialed too often; for each connection attempt there's a backoff period.
s.Peers().RandomizeBackOff(peerInfo.ID)
go func(info *peer.AddrInfo) {
@@ -167,8 +190,7 @@ func (s *Service) createListener(
// Listen to all network interfaces
// for both ip protocols.
networkVersion := "udp"
conn, err := net.ListenUDP(networkVersion, udpAddr)
conn, err := net.ListenUDP("udp", udpAddr)
if err != nil {
return nil, errors.Wrap(err, "could not listen to UDP")
}
@@ -178,6 +200,7 @@ func (s *Service) createListener(
ipAddr,
int(s.cfg.UDPPort),
int(s.cfg.TCPPort),
int(s.cfg.QUICPort),
)
if err != nil {
return nil, errors.Wrap(err, "could not create local node")
@@ -209,7 +232,7 @@ func (s *Service) createListener(
func (s *Service) createLocalNode(
privKey *ecdsa.PrivateKey,
ipAddr net.IP,
udpPort, tcpPort int,
udpPort, tcpPort, quicPort int,
) (*enode.LocalNode, error) {
db, err := enode.OpenDB("")
if err != nil {
@@ -218,11 +241,24 @@ func (s *Service) createLocalNode(
localNode := enode.NewLocalNode(db, privKey)
ipEntry := enr.IP(ipAddr)
udpEntry := enr.UDP(udpPort)
tcpEntry := enr.TCP(tcpPort)
localNode.Set(ipEntry)
udpEntry := enr.UDP(udpPort)
localNode.Set(udpEntry)
tcpEntry := enr.TCP(tcpPort)
localNode.Set(tcpEntry)
if features.Get().EnableQUIC {
quicEntry := quicProtocol(quicPort)
localNode.Set(quicEntry)
}
if features.Get().EnablePeerDAS {
custodySubnetEntry := custodySubnetCount(params.BeaconConfig().CustodyRequirement)
localNode.Set(custodySubnetEntry)
}
localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)
@@ -277,7 +313,7 @@ func (s *Service) startDiscoveryV5(
// filterPeer validates each node that we retrieve from our dht. We
// try to ascertain that the peer can be a valid protocol peer.
// Validity Conditions:
// 1. Peer has a valid IP and TCP port set in their enr.
// 1. Peer has a valid IP and a (QUIC and/or TCP) port set in their enr.
// 2. Peer hasn't been marked as 'bad'.
// 3. Peer is not currently active or connected.
// 4. Peer is ready to receive incoming connections.
@@ -294,17 +330,13 @@ func (s *Service) filterPeer(node *enode.Node) bool {
return false
}
// Ignore nodes with their TCP ports not set.
if err := node.Record().Load(enr.WithEntry("tcp", new(enr.TCP))); err != nil {
if !enr.IsNotFound(err) {
log.WithError(err).Debug("Could not retrieve tcp port")
}
peerData, multiAddrs, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Debug("Could not convert to peer data")
return false
}
peerData, multiAddr, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Debug("Could not convert to peer data")
if peerData == nil || len(multiAddrs) == 0 {
return false
}
@@ -337,6 +369,9 @@ func (s *Service) filterPeer(node *enode.Node) bool {
}
}
// If the peer has 2 multiaddrs, favor the QUIC address, which is in first position.
multiAddr := multiAddrs[0]
// Add peer to peer handler.
s.peers.Add(nodeENR, peerData.ID, multiAddr, network.DirUnknown)
@@ -380,11 +415,11 @@ func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
if err != nil {
return nil, errors.Wrapf(err, "Could not get enode from string")
}
addr, err := convertToSingleMultiAddr(enodeAddr)
nodeAddrs, err := retrieveMultiAddrsFromNode(enodeAddr)
if err != nil {
return nil, errors.Wrapf(err, "Could not get multiaddr")
}
allAddrs = append(allAddrs, addr)
allAddrs = append(allAddrs, nodeAddrs...)
}
return allAddrs, nil
}
@@ -419,45 +454,139 @@ func parseGenericAddrs(addrs []string) (enodeString, multiAddrString []string) {
}
func convertToMultiAddr(nodes []*enode.Node) []ma.Multiaddr {
var multiAddrs []ma.Multiaddr
// Expect each node to have a TCP and a QUIC address.
multiAddrs := make([]ma.Multiaddr, 0, 2*len(nodes))
for _, node := range nodes {
// ignore nodes with no ip address stored
// Skip nodes with no ip address stored.
if node.IP() == nil {
continue
}
multiAddr, err := convertToSingleMultiAddr(node)
// Get up to two multiaddrs (TCP and QUIC) for each node.
nodeMultiAddrs, err := retrieveMultiAddrsFromNode(node)
if err != nil {
log.WithError(err).Error("Could not convert to multiAddr")
log.WithError(err).Errorf("Could not convert to multiAddr node %s", node)
continue
}
multiAddrs = append(multiAddrs, multiAddr)
multiAddrs = append(multiAddrs, nodeMultiAddrs...)
}
return multiAddrs
}
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, ma.Multiaddr, error) {
multiAddr, err := convertToSingleMultiAddr(node)
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, []ma.Multiaddr, error) {
multiAddrs, err := retrieveMultiAddrsFromNode(node)
if err != nil {
return nil, nil, err
}
info, err := peer.AddrInfoFromP2pAddr(multiAddr)
if err != nil {
return nil, nil, err
if len(multiAddrs) == 0 {
return nil, nil, nil
}
return info, multiAddr, nil
infos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not convert to peer info: %v", multiAddrs)
}
if len(infos) != 1 {
return nil, nil, errors.Errorf("infos contains %v elements, expected exactly 1", len(infos))
}
return &infos[0], multiAddrs, nil
}
func convertToSingleMultiAddr(node *enode.Node) (ma.Multiaddr, error) {
// retrieveMultiAddrsFromNode converts an enode.Node to a list of multiaddrs.
// If the node has both a QUIC and a TCP port set in its ENR, then
// the multiaddr corresponding to the QUIC port is added first, followed
// by the multiaddr corresponding to the TCP port.
func retrieveMultiAddrsFromNode(node *enode.Node) ([]ma.Multiaddr, error) {
multiaddrs := make([]ma.Multiaddr, 0, 2)
// Retrieve the node public key.
pubkey := node.Pubkey()
assertedKey, err := ecdsaprysm.ConvertToInterfacePubkey(pubkey)
if err != nil {
return nil, errors.Wrap(err, "could not get pubkey")
}
// Compute the node ID from the public key.
id, err := peer.IDFromPublicKey(assertedKey)
if err != nil {
return nil, errors.Wrap(err, "could not get peer id")
}
return multiAddressBuilderWithID(node.IP().String(), "tcp", uint(node.TCP()), id)
if features.Get().EnableQUIC {
// If the QUIC entry is present in the ENR, build the corresponding multiaddress.
port, ok, err := getPort(node, quic)
if err != nil {
return nil, errors.Wrap(err, "could not get QUIC port")
}
if ok {
addr, err := multiAddressBuilderWithID(node.IP(), quic, port, id)
if err != nil {
return nil, errors.Wrap(err, "could not build QUIC address")
}
multiaddrs = append(multiaddrs, addr)
}
}
// If the TCP entry is present in the ENR, build the corresponding multiaddress.
port, ok, err := getPort(node, tcp)
if err != nil {
return nil, errors.Wrap(err, "could not get TCP port")
}
if ok {
addr, err := multiAddressBuilderWithID(node.IP(), tcp, port, id)
if err != nil {
return nil, errors.Wrap(err, "could not build TCP address")
}
multiaddrs = append(multiaddrs, addr)
}
return multiaddrs, nil
}
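Concretely, for a node advertising both transports on port 13000, the returned slice would look roughly like this (multiaddr forms assumed):
// /ip4/192.0.2.1/udp/13000/quic-v1/p2p/16Uiu2HA...   (only when EnableQUIC is set)
// /ip4/192.0.2.1/tcp/13000/p2p/16Uiu2HA...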
// getPort retrieves the port for a given node and protocol, as well as a boolean
// indicating whether the port was found, and an error.
func getPort(node *enode.Node, protocol internetProtocol) (uint, bool, error) {
var (
port uint
err error
)
switch protocol {
case tcp:
var entry enr.TCP
err = node.Load(&entry)
port = uint(entry)
case udp:
var entry enr.UDP
err = node.Load(&entry)
port = uint(entry)
case quic:
var entry quicProtocol
err = node.Load(&entry)
port = uint(entry)
default:
return 0, false, errors.Errorf("invalid protocol: %v", protocol)
}
if enr.IsNotFound(err) {
return port, false, nil
}
if err != nil {
return 0, false, errors.Wrap(err, "could not get port")
}
return port, true, nil
}
func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
@@ -475,14 +604,14 @@ func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
var ip4 enr.IPv4
var ip6 enr.IPv6
if node.Load(&ip4) == nil {
address, ipErr := multiAddressBuilderWithID(net.IP(ip4).String(), "udp", uint(node.UDP()), id)
address, ipErr := multiAddressBuilderWithID(net.IP(ip4), udp, uint(node.UDP()), id)
if ipErr != nil {
return nil, errors.Wrap(ipErr, "could not build IPv4 address")
}
addresses = append(addresses, address)
}
if node.Load(&ip6) == nil {
address, ipErr := multiAddressBuilderWithID(net.IP(ip6).String(), "udp", uint(node.UDP()), id)
address, ipErr := multiAddressBuilderWithID(net.IP(ip6), udp, uint(node.UDP()), id)
if ipErr != nil {
return nil, errors.Wrap(ipErr, "could not build IPv6 address")
}

View File

@@ -166,8 +166,9 @@ func TestCreateLocalNode(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
// Define ports.
const (
udpPort = 2000
tcpPort = 3000
udpPort = 2000
tcpPort = 3000
quicPort = 3000
)
// Create a private key.
@@ -180,7 +181,7 @@ func TestCreateLocalNode(t *testing.T) {
cfg: tt.cfg,
}
localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort)
localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort, quicPort)
if tt.expectedError {
require.NotNil(t, err)
return
@@ -237,7 +238,7 @@ func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
}
node, err := s.createLocalNode(pkey, addr, 0, 0)
node, err := s.createLocalNode(pkey, addr, 0, 0, 0)
require.NoError(t, err)
multiAddr := convertToMultiAddr([]*enode.Node{node.Node()})
assert.Equal(t, 0, len(multiAddr), "Invalid ip address converted successfully")
@@ -248,8 +249,9 @@ func TestMultiAddrConversion_OK(t *testing.T) {
ipAddr, pkey := createAddrAndPrivKey(t)
s := &Service{
cfg: &Config{
TCPPort: 0,
UDPPort: 0,
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
@@ -315,7 +317,7 @@ func TestHostIsResolved(t *testing.T) {
// As defined in RFC 2606 , example.org is a
// reserved example domain name.
exampleHost := "example.org"
exampleIP := "93.184.216.34"
exampleIP := "93.184.215.14"
s := &Service{
cfg: &Config{
@@ -599,7 +601,7 @@ func TestRefreshENR_ForkBoundaries(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := tt.svcBuilder(t)
s.RefreshENR()
s.RefreshPersistentSubnets()
tt.postValidation(t, s)
s.dv5Listener.Close()
cache.SubnetIDs.EmptyAllCaches()

View File

@@ -28,7 +28,8 @@ import (
)
func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
port := 2000
const port = 2000
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, fieldparams.RootLength)
@@ -53,7 +54,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
var listeners []*discover.UDPv5
for i := 1; i <= 5; i++ {
port = 3000 + i
port := 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)
@@ -98,13 +99,14 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
var addrs []ma.Multiaddr
for _, n := range nodes {
if s.filterPeer(n) {
addr, err := convertToSingleMultiAddr(n)
addrs := make([]ma.Multiaddr, 0)
for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
require.NoError(t, err)
addrs = append(addrs, addr)
addrs = append(addrs, nodeAddrs...)
}
}
@@ -114,10 +116,11 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
}
func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
const port = 2000
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
logrus.SetLevel(logrus.TraceLevel)
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
@@ -138,7 +141,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
var listeners []*discover.UDPv5
for i := 1; i <= 5; i++ {
port = 3000 + i
port := 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)
@@ -188,13 +191,13 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
var addrs []ma.Multiaddr
addrs := make([]ma.Multiaddr, 0, len(nodes))
for _, n := range nodes {
if s.filterPeer(n) {
addr, err := convertToSingleMultiAddr(n)
for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
require.NoError(t, err)
addrs = append(addrs, addr)
addrs = append(addrs, nodeAddrs...)
}
}
if len(addrs) == 0 {

View File

@@ -22,6 +22,7 @@ var gossipTopicMappings = map[string]proto.Message{
SyncCommitteeSubnetTopicFormat: &ethpb.SyncCommitteeMessage{},
BlsToExecutionChangeSubnetTopicFormat: &ethpb.SignedBLSToExecutionChange{},
BlobSubnetTopicFormat: &ethpb.BlobSidecar{},
DataColumnSubnetTopicFormat: &ethpb.DataColumnSidecar{},
}
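A quick sketch of what the new mapping entry resolves to (illustrative only):

    // Sketch: the map hands back the proto type used to decode a data column topic.
    msg := gossipTopicMappings[DataColumnSubnetTopicFormat] // *ethpb.DataColumnSidecar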
// GossipTopicMappings is a function to return the assigned data type

View File

@@ -3,6 +3,7 @@ package p2p
import (
"context"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/connmgr"
@@ -30,12 +31,18 @@ type P2P interface {
MetadataProvider
}
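// Acceser provides access to both broadcasting and peer management capabilities.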
type Acceser interface {
Broadcaster
PeerManager
}
// Broadcaster broadcasts messages to peers over the p2p pubsub protocol.
type Broadcaster interface {
Broadcast(context.Context, proto.Message) error
BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
BroadcastDataColumn(ctx context.Context, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
}
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
@@ -81,8 +88,9 @@ type PeerManager interface {
PeerID() peer.ID
Host() host.Host
ENR() *enr.Record
NodeID() enode.ID
DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
RefreshENR()
RefreshPersistentSubnets()
FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
}
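For orientation, a minimal sketch of how a caller might use the extended Broadcaster to publish a data column sidecar; the subnet value is a placeholder, not part of this change:

    // Sketch: publish one data column sidecar to a subnet (subnet chosen arbitrarily here).
    func publishColumnSidecar(ctx context.Context, b Broadcaster, sidecar *ethpb.DataColumnSidecar) error {
        const columnSubnet = uint64(0) // placeholder; real callers derive this from the column index
        return b.BroadcastDataColumn(ctx, columnSubnet, sidecar)
    }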

View File

@@ -1,6 +1,7 @@
package p2p
import (
"net"
"strconv"
"strings"
@@ -12,32 +13,32 @@ import (
var log = logrus.WithField("prefix", "p2p")
func logIPAddr(id peer.ID, addrs ...ma.Multiaddr) {
var correctAddr ma.Multiaddr
for _, addr := range addrs {
if strings.Contains(addr.String(), "/ip4/") || strings.Contains(addr.String(), "/ip6/") {
correctAddr = addr
break
if !(strings.Contains(addr.String(), "/ip4/") || strings.Contains(addr.String(), "/ip6/")) {
continue
}
}
if correctAddr != nil {
log.WithField(
"multiAddr",
correctAddr.String()+"/p2p/"+id.String(),
addr.String()+"/p2p/"+id.String(),
).Info("Node started p2p server")
}
}
func logExternalIPAddr(id peer.ID, addr string, port uint) {
func logExternalIPAddr(id peer.ID, addr string, tcpPort, quicPort uint) {
if addr != "" {
multiAddr, err := MultiAddressBuilder(addr, port)
multiAddrs, err := MultiAddressBuilder(net.ParseIP(addr), tcpPort, quicPort)
if err != nil {
log.WithError(err).Error("Could not create multiaddress")
return
}
log.WithField(
"multiAddr",
multiAddr.String()+"/p2p/"+id.String(),
).Info("Node started external p2p server")
for _, multiAddr := range multiAddrs {
log.WithField(
"multiAddr",
multiAddr.String()+"/p2p/"+id.String(),
).Info("Node started external p2p server")
}
}
}

View File

@@ -60,17 +60,21 @@ var (
"the subnet. The beacon node increments this counter when the broadcast is blocked " +
"until a subnet peer can be found.",
})
blobSidecarCommitteeBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
blobSidecarBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_blob_sidecar_committee_broadcasts",
Help: "The number of blob sidecar committee messages that were broadcast with no peer on.",
Help: "The number of blob sidecar messages that were broadcast with no peer on.",
})
syncCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_sync_committee_subnet_attempted_broadcasts",
Help: "The number of sync committee that were attempted to be broadcast.",
})
blobSidecarCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
blobSidecarBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_blob_sidecar_committee_attempted_broadcasts",
Help: "The number of blob sidecar committee messages that were attempted to be broadcast.",
Help: "The number of blob sidecar messages that were attempted to be broadcast.",
})
dataColumnSidecarBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_data_column_sidecar_attempted_broadcasts",
Help: "The number of data column sidecar messages that were attempted to be broadcast.",
})
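A hypothetical call site for the new counter, mirroring how the blob counters are used (sketch only; the surrounding function is illustrative, not part of this change):

    // Sketch: bump the attempt counter just before publishing a data column sidecar.
    func (s *Service) broadcastDataColumnSidecar(ctx context.Context, subnet uint64, sidecar *ethpb.DataColumnSidecar) {
        dataColumnSidecarBroadcastAttempts.Inc()
        if err := s.BroadcastDataColumn(ctx, subnet, sidecar); err != nil {
            log.WithError(err).Error("Failed to broadcast data column sidecar")
        }
    }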
// Gossip Tracer Metrics

View File

@@ -11,40 +11,68 @@ import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/security/noise"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
gomplex "github.com/libp2p/go-mplex"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/features"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
type internetProtocol string
const (
udp = "udp"
tcp = "tcp"
quic = "quic"
)
// MultiAddressBuilder takes in an ip address string and port to produce a go multiaddr format.
func MultiAddressBuilder(ipAddr string, port uint) (ma.Multiaddr, error) {
parsedIP := net.ParseIP(ipAddr)
if parsedIP.To4() == nil && parsedIP.To16() == nil {
return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
func MultiAddressBuilder(ip net.IP, tcpPort, quicPort uint) ([]ma.Multiaddr, error) {
ipType, err := extractIpType(ip)
if err != nil {
return nil, errors.Wrap(err, "unable to determine IP type")
}
if parsedIP.To4() != nil {
return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
// Example: /ip4/1.2.3.4/tcp/5678
multiaddrStr := fmt.Sprintf("/%s/%s/tcp/%d", ipType, ip, tcpPort)
multiAddrTCP, err := ma.NewMultiaddr(multiaddrStr)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce TCP multiaddr format from %s:%d", ip, tcpPort)
}
return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/%d", ipAddr, port))
multiaddrs := []ma.Multiaddr{multiAddrTCP}
if features.Get().EnableQUIC {
// Example: /ip4/1.2.3.4/udp/5678/quic-v1
multiAddrQUIC, err := ma.NewMultiaddr(fmt.Sprintf("/%s/%s/udp/%d/quic-v1", ipType, ip, quicPort))
if err != nil {
return nil, errors.Wrapf(err, "cannot produce QUIC multiaddr format from %s:%d", ip, tcpPort)
}
multiaddrs = append(multiaddrs, multiAddrQUIC)
}
return multiaddrs, nil
}
// buildOptions for the libp2p host.
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Option, error) {
cfg := s.cfg
listen, err := MultiAddressBuilder(ip.String(), cfg.TCPPort)
multiaddrs, err := MultiAddressBuilder(ip, cfg.TCPPort, cfg.QUICPort)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", ip.String(), cfg.TCPPort)
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", ip, cfg.TCPPort)
}
if cfg.LocalIP != "" {
if net.ParseIP(cfg.LocalIP) == nil {
localIP := net.ParseIP(cfg.LocalIP)
if localIP == nil {
return nil, errors.Wrapf(err, "invalid local ip provided: %s:%d", cfg.LocalIP, cfg.TCPPort)
}
listen, err = MultiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
multiaddrs, err = MultiAddressBuilder(localIP, cfg.TCPPort, cfg.QUICPort)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", cfg.LocalIP, cfg.TCPPort)
}
@@ -62,36 +90,43 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
options := []libp2p.Option{
privKeyOption(priKey),
libp2p.ListenAddrs(listen),
libp2p.ListenAddrs(multiaddrs...),
libp2p.UserAgent(version.BuildData()),
libp2p.ConnectionGater(s),
libp2p.Transport(tcp.NewTCPTransport),
libp2p.Transport(libp2ptcp.NewTCPTransport),
libp2p.DefaultMuxers,
libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
libp2p.Security(noise.ID, noise.New),
libp2p.Ping(false), // Disable Ping Service.
}
if features.Get().EnableQUIC {
options = append(options, libp2p.Transport(libp2pquic.NewTransport))
}
if cfg.EnableUPnP {
options = append(options, libp2p.NATPortMap()) // Allow to use UPnP
}
if cfg.RelayNodeAddr != "" {
options = append(options, libp2p.AddrsFactory(withRelayAddrs(cfg.RelayNodeAddr)))
} else {
// Disable relay if it has not been set.
options = append(options, libp2p.DisableRelay())
}
if cfg.HostAddress != "" {
options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
external, err := MultiAddressBuilder(cfg.HostAddress, cfg.TCPPort)
externalMultiaddrs, err := MultiAddressBuilder(net.ParseIP(cfg.HostAddress), cfg.TCPPort, cfg.QUICPort)
if err != nil {
log.WithError(err).Error("Unable to create external multiaddress")
} else {
addrs = append(addrs, external)
addrs = append(addrs, externalMultiaddrs...)
}
return addrs
}))
}
if cfg.HostDNS != "" {
options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
external, err := ma.NewMultiaddr(fmt.Sprintf("/dns4/%s/tcp/%d", cfg.HostDNS, cfg.TCPPort))
@@ -107,21 +142,47 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
if features.Get().DisableResourceManager {
options = append(options, libp2p.ResourceManager(&network.NullResourceManager{}))
}
return options, nil
}
func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) (ma.Multiaddr, error) {
parsedIP := net.ParseIP(ipAddr)
if parsedIP.To4() == nil && parsedIP.To16() == nil {
return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
func extractIpType(ip net.IP) (string, error) {
if ip.To4() != nil {
return "ip4", nil
}
if id.String() == "" {
return nil, errors.New("empty peer id given")
if ip.To16() != nil {
return "ip6", nil
}
if parsedIP.To4() != nil {
return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String()))
return "", errors.Errorf("provided IP address is neither IPv4 nor IPv6: %s", ip)
}
func multiAddressBuilderWithID(ip net.IP, protocol internetProtocol, port uint, id peer.ID) (ma.Multiaddr, error) {
var multiaddrStr string
if id == "" {
return nil, errors.Errorf("empty peer id given: %s", id)
}
return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String()))
ipType, err := extractIpType(ip)
if err != nil {
return nil, errors.Wrap(err, "unable to determine IP type")
}
switch protocol {
case udp, tcp:
// Example with UDP: /ip4/1.2.3.4/udp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
// Example with TCP: /ip6/1.2.3.4/tcp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
multiaddrStr = fmt.Sprintf("/%s/%s/%s/%d/p2p/%s", ipType, ip, protocol, port, id)
case quic:
// Example: /ip4/1.2.3.4/udp/5678/quic-v1/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
multiaddrStr = fmt.Sprintf("/%s/%s/udp/%d/quic-v1/p2p/%s", ipType, ip, port, id)
default:
return nil, errors.Errorf("unsupported protocol: %s", protocol)
}
return ma.NewMultiaddr(multiaddrStr)
}
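To make the new builder behavior concrete, a hedged sketch of the multiaddrs produced (assumes the EnableQUIC feature flag is on; the IP, ports and peer ID are illustrative):

    // Sketch: expected outputs of the new builders.
    ip := net.ParseIP("192.168.0.1")
    addrs, _ := MultiAddressBuilder(ip, 13000, 13001)
    // addrs[0]: /ip4/192.168.0.1/tcp/13000
    // addrs[1]: /ip4/192.168.0.1/udp/13001/quic-v1 (only with the QUIC flag enabled)

    maddr, _ := multiAddressBuilderWithID(ip, quic, 13001, somePeerID) // somePeerID: placeholder peer.ID
    // maddr: /ip4/192.168.0.1/udp/13001/quic-v1/p2p/<somePeerID>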
// Adds a private key to the libp2p option if the option was provided.

View File

@@ -13,6 +13,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -88,30 +89,34 @@ func TestIPV6Support(t *testing.T) {
lNode := enode.NewLocalNode(db, key)
mockIPV6 := net.IP{0xff, 0x02, 0xAA, 0, 0x1F, 0, 0x2E, 0, 0, 0x36, 0x45, 0, 0, 0, 0, 0x02}
lNode.Set(enr.IP(mockIPV6))
ma, err := convertToSingleMultiAddr(lNode.Node())
mas, err := retrieveMultiAddrsFromNode(lNode.Node())
if err != nil {
t.Fatal(err)
}
ipv6Exists := false
for _, p := range ma.Protocols() {
if p.Name == "ip4" {
t.Error("Got ip4 address instead of ip6")
for _, ma := range mas {
ipv6Exists := false
for _, p := range ma.Protocols() {
if p.Name == "ip4" {
t.Error("Got ip4 address instead of ip6")
}
if p.Name == "ip6" {
ipv6Exists = true
}
}
if p.Name == "ip6" {
ipv6Exists = true
if !ipv6Exists {
t.Error("Multiaddress did not have ipv6 protocol")
}
}
if !ipv6Exists {
t.Error("Multiaddress did not have ipv6 protocol")
}
}
func TestDefaultMultiplexers(t *testing.T) {
var cfg libp2p.Config
_ = cfg
p2pCfg := &Config{
TCPPort: 2000,
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
StateNotifier: &mock.MockStateNotifier{},
}
svc := &Service{cfg: p2pCfg}
@@ -127,5 +132,57 @@ func TestDefaultMultiplexers(t *testing.T) {
assert.Equal(t, protocol.ID("/yamux/1.0.0"), cfg.Muxers[0].ID)
assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[1].ID)
}
func TestMultiAddressBuilderWithID(t *testing.T) {
testCases := []struct {
name string
ip net.IP
protocol internetProtocol
port uint
id string
expectedMultiaddrStr string
}{
{
name: "UDP",
ip: net.IPv4(192, 168, 0, 1),
protocol: udp,
port: 5678,
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",
expectedMultiaddrStr: "/ip4/192.168.0.1/udp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
},
{
name: "TCP",
ip: net.IPv4(192, 168, 0, 1),
protocol: tcp,
port: 5678,
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",
expectedMultiaddrStr: "/ip4/192.168.0.1/tcp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
},
{
name: "QUIC",
ip: net.IPv4(192, 168, 0, 1),
protocol: quic,
port: 5678,
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",
expectedMultiaddrStr: "/ip4/192.168.0.1/udp/5678/quic-v1/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
id, err := hex.DecodeString(tt.id)
require.NoError(t, err)
actualMultiaddr, err := multiAddressBuilderWithID(tt.ip, tt.protocol, tt.port, peer.ID(id))
require.NoError(t, err)
actualMultiaddrStr := actualMultiaddr.String()
require.Equal(t, tt.expectedMultiaddrStr, actualMultiaddrStr)
})
}
}

View File

@@ -1,7 +1,7 @@
// Package peers provides information about peers at the Ethereum consensus protocol level.
//
// "Protocol level" is the level above the network level, so this layer never sees or interacts with
// (for example) hosts that are uncontactable due to being down, firewalled, etc. Instead, this works
// (for example) hosts that are unreachable due to being down, firewalled, etc. Instead, this works
// with peers that are contactable but may or may not be of the correct fork version, not currently
// required due to the number of current connections, etc.
//
@@ -26,6 +26,7 @@ import (
"context"
"math"
"sort"
"strings"
"time"
"github.com/ethereum/go-ethereum/p2p/enr"
@@ -59,8 +60,8 @@ const (
)
const (
// ColocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
ColocationLimit = 5
// CollocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
CollocationLimit = 5
// Additional buffer beyond current peer limit, from which we can store the relevant peer statuses.
maxLimitBuffer = 150
@@ -76,6 +77,13 @@ const (
MaxBackOffDuration = 5000
)
type InternetProtocol string
const (
TCP = "tcp"
QUIC = "quic"
)
// Status is the structure holding the peer status information.
type Status struct {
ctx context.Context
@@ -449,6 +457,19 @@ func (p *Status) InboundConnected() []peer.ID {
return peers
}
// InboundConnectedWithProtocol returns the current batch of inbound peers that are connected with a given protocol.
func (p *Status) InboundConnectedWithProtocol(protocol InternetProtocol) []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), string(protocol)) {
peers = append(peers, pid)
}
}
return peers
}
// Outbound returns the current batch of outbound peers.
func (p *Status) Outbound() []peer.ID {
p.store.RLock()
@@ -475,7 +496,20 @@ func (p *Status) OutboundConnected() []peer.ID {
return peers
}
// Active returns the peers that are connecting or connected.
// OutboundConnectedWithProtocol returns the current batch of outbound peers that are connected with a given protocol.
func (p *Status) OutboundConnectedWithProtocol(protocol InternetProtocol) []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), string(protocol)) {
peers = append(peers, pid)
}
}
return peers
}
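Taken together with the TCP/QUIC constants above, these accessors allow a transport breakdown of the connected peer set, which is the pattern the service's periodic peer log adopts later in this change; a sketch:

    // Sketch: tally connected peers by transport.
    inboundQUIC := len(p.InboundConnectedWithProtocol(QUIC))
    inboundTCP := len(p.InboundConnectedWithProtocol(TCP))
    outboundQUIC := len(p.OutboundConnectedWithProtocol(QUIC))
    outboundTCP := len(p.OutboundConnectedWithProtocol(TCP))
    total := inboundQUIC + inboundTCP + outboundQUIC + outboundTCP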
// Active returns the peers that are active (connecting or connected).
func (p *Status) Active() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
@@ -514,7 +548,7 @@ func (p *Status) Disconnected() []peer.ID {
return peers
}
// Inactive returns the peers that are disconnecting or disconnected.
// Inactive returns the peers that are inactive (disconnecting or disconnected).
func (p *Status) Inactive() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
@@ -548,7 +582,7 @@ func (p *Status) Prune() {
p.store.Lock()
defer p.store.Unlock()
// Default to old method if flag isnt enabled.
// Default to old method if flag isn't enabled.
if !features.Get().EnablePeerScorer {
p.deprecatedPrune()
return
@@ -961,7 +995,7 @@ func (p *Status) isfromBadIP(pid peer.ID) bool {
return true
}
if val, ok := p.ipTracker[ip.String()]; ok {
if val > ColocationLimit {
if val > CollocationLimit {
return true
}
}
@@ -1012,7 +1046,7 @@ func (p *Status) tallyIPTracker() {
}
func sameIP(firstAddr, secondAddr ma.Multiaddr) bool {
// Exit early if we do get nil multiaddresses
// Exit early if we do get nil multi-addresses
if firstAddr == nil || secondAddr == nil {
return false
}

View File

@@ -565,7 +565,7 @@ func TestPeerIPTracker(t *testing.T) {
badIP := "211.227.218.116"
var badPeers []peer.ID
for i := 0; i < peers.ColocationLimit+10; i++ {
for i := 0; i < peers.CollocationLimit+10; i++ {
port := strconv.Itoa(3000 + i)
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
if err != nil {
@@ -1111,6 +1111,87 @@ func TestInbound(t *testing.T) {
assert.Equal(t, inbound.String(), result[0].String())
}
func TestInboundConnected(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 0,
},
},
})
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirInbound, peers.PeerConnecting)
result := p.InboundConnected()
require.Equal(t, 1, len(result))
assert.Equal(t, inbound.String(), result[0].String())
}
func TestInboundConnectedWithProtocol(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 0,
},
},
})
addrsTCP := []string{
"/ip4/127.0.0.1/tcp/33333",
"/ip4/127.0.0.2/tcp/44444",
}
addrsQUIC := []string{
"/ip4/192.168.1.3/udp/13000/quic-v1",
"/ip4/192.168.1.4/udp/14000/quic-v1",
"/ip4/192.168.1.5/udp/14000/quic-v1",
}
expectedTCP := make(map[string]bool, len(addrsTCP))
for _, addr := range addrsTCP {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
expectedTCP[peer.String()] = true
}
expectedQUIC := make(map[string]bool, len(addrsQUIC))
for _, addr := range addrsQUIC {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
expectedQUIC[peer.String()] = true
}
// TCP
// ---
actualTCP := p.InboundConnectedWithProtocol(peers.TCP)
require.Equal(t, len(expectedTCP), len(actualTCP))
for _, actualPeer := range actualTCP {
_, ok := expectedTCP[actualPeer.String()]
require.Equal(t, true, ok)
}
// QUIC
// ----
actualQUIC := p.InboundConnectedWithProtocol(peers.QUIC)
require.Equal(t, len(expectedQUIC), len(actualQUIC))
for _, actualPeer := range actualQUIC {
_, ok := expectedQUIC[actualPeer.String()]
require.Equal(t, true, ok)
}
}
func TestOutbound(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
@@ -1130,6 +1211,87 @@ func TestOutbound(t *testing.T) {
assert.Equal(t, outbound.String(), result[0].String())
}
func TestOutboundConnected(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 0,
},
},
})
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirOutbound, peers.PeerConnecting)
result := p.OutboundConnected()
require.Equal(t, 1, len(result))
assert.Equal(t, inbound.String(), result[0].String())
}
func TestOutboundConnectedWithProtocol(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 0,
},
},
})
addrsTCP := []string{
"/ip4/127.0.0.1/tcp/33333",
"/ip4/127.0.0.2/tcp/44444",
}
addrsQUIC := []string{
"/ip4/192.168.1.3/udp/13000/quic-v1",
"/ip4/192.168.1.4/udp/14000/quic-v1",
"/ip4/192.168.1.5/udp/14000/quic-v1",
}
expectedTCP := make(map[string]bool, len(addrsTCP))
for _, addr := range addrsTCP {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
expectedTCP[peer.String()] = true
}
expectedQUIC := make(map[string]bool, len(addrsQUIC))
for _, addr := range addrsQUIC {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
expectedQUIC[peer.String()] = true
}
// TCP
// ---
actualTCP := p.OutboundConnectedWithProtocol(peers.TCP)
require.Equal(t, len(expectedTCP), len(actualTCP))
for _, actualPeer := range actualTCP {
_, ok := expectedTCP[actualPeer.String()]
require.Equal(t, true, ok)
}
// QUIC
// ----
actualQUIC := p.OutboundConnectedWithProtocol(peers.QUIC)
require.Equal(t, len(expectedQUIC), len(actualQUIC))
for _, actualPeer := range actualQUIC {
_, ok := expectedQUIC[actualPeer.String()]
require.Equal(t, true, ok)
}
}
// addPeer is a helper to add a peer with a given connection state
func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState) peer.ID {
// Set up some peers with different states

View File

@@ -165,14 +165,14 @@ func (s *Service) pubsubOptions() []pubsub.Option {
func parsePeersEnr(peers []string) ([]peer.AddrInfo, error) {
addrs, err := PeersFromStringAddrs(peers)
if err != nil {
return nil, fmt.Errorf("Cannot convert peers raw ENRs into multiaddresses: %v", err)
return nil, fmt.Errorf("cannot convert peers raw ENRs into multiaddresses: %v", err)
}
if len(addrs) == 0 {
return nil, fmt.Errorf("Converting peers raw ENRs into multiaddresses resulted in an empty list")
return nil, fmt.Errorf("converting peers raw ENRs into multiaddresses resulted in an empty list")
}
directAddrInfos, err := peer.AddrInfosFromP2pAddrs(addrs...)
if err != nil {
return nil, fmt.Errorf("Cannot convert peers multiaddresses into AddrInfos: %v", err)
return nil, fmt.Errorf("cannot convert peers multiaddresses into AddrInfos: %v", err)
}
return directAddrInfos, nil
}

View File

@@ -90,7 +90,7 @@ func TestService_CanSubscribe(t *testing.T) {
formatting := []interface{}{digest}
// Special case for attestation subnets which have a second formatting placeholder.
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat {
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat || topic == DataColumnSubnetTopicFormat {
formatting = append(formatting, 0 /* some subnet ID */)
}

View File

@@ -43,6 +43,9 @@ const BlobSidecarsByRangeName = "/blob_sidecars_by_range"
// BlobSidecarsByRootName is the name for the BlobSidecarsByRoot v1 message topic.
const BlobSidecarsByRootName = "/blob_sidecars_by_root"
// DataColumnSidecarsByRootName is the name for the DataColumnSidecarsByRoot v1 message topic.
const DataColumnSidecarsByRootName = "/data_column_sidecars_by_root"
const (
// V1 RPC Topics
// RPCStatusTopicV1 defines the v1 topic for the status rpc method.
@@ -65,6 +68,9 @@ const (
// RPCBlobSidecarsByRootTopicV1 is a topic for requesting blob sidecars by their block root. New in deneb.
// /eth2/beacon_chain/req/blob_sidecars_by_root/1/
RPCBlobSidecarsByRootTopicV1 = protocolPrefix + BlobSidecarsByRootName + SchemaVersionV1
// RPCDataColumnSidecarsByRootTopicV1 is a topic for requesting data column sidecars by their block root. New in PeerDAS.
// /eth2/beacon_chain/req/data_column_sidecars_by_root/1/
RPCDataColumnSidecarsByRootTopicV1 = protocolPrefix + DataColumnSidecarsByRootName + SchemaVersionV1
// V2 RPC Topics
// RPCBlocksByRangeTopicV2 defines v2 the topic for the blocks by range rpc method.

View File

@@ -24,6 +24,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
prysmnetwork "github.com/prysmaticlabs/prysm/v5/network"
@@ -124,31 +125,34 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
if err != nil {
return nil, errors.Wrapf(err, "failed to build p2p options")
}
// Sets mplex timeouts
configureMplex()
h, err := libp2p.New(opts...)
if err != nil {
log.WithError(err).Error("Failed to create p2p host")
return nil, err
return nil, errors.Wrapf(err, "failed to create p2p host")
}
s.host = h
// Gossipsub registration is done before we add in any new peers
// due to libp2p's gossipsub implementation not taking into
// account previously added peers when creating the gossipsub
// object.
psOpts := s.pubsubOptions()
// Set the pubsub global parameters that we require.
setPubSubParameters()
// Reinitialize them in the event we are running a custom config.
attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount
gs, err := pubsub.NewGossipSub(s.ctx, s.host, psOpts...)
if err != nil {
log.WithError(err).Error("Failed to start pubsub")
return nil, err
return nil, errors.Wrapf(err, "failed to create p2p pubsub")
}
s.pubsub = gs
s.peers = peers.NewStatus(ctx, &peers.StatusConfig{
@@ -213,7 +217,7 @@ func (s *Service) Start() {
if len(s.cfg.StaticPeers) > 0 {
addrs, err := PeersFromStringAddrs(s.cfg.StaticPeers)
if err != nil {
log.WithError(err).Error("Could not connect to static peer")
log.WithError(err).Error("could not convert ENR to multiaddr")
}
// Set trusted peers for those that are provided as static addresses.
pids := peerIdsFromMultiAddrs(addrs)
@@ -222,7 +226,7 @@ func (s *Service) Start() {
}
// Initialize metadata according to the
// current epoch.
s.RefreshENR()
s.RefreshPersistentSubnets()
// Periodic functions.
async.RunEvery(s.ctx, params.BeaconConfig().TtfbTimeoutDuration(), func() {
@@ -230,13 +234,26 @@ func (s *Service) Start() {
})
async.RunEvery(s.ctx, 30*time.Minute, s.Peers().Prune)
async.RunEvery(s.ctx, time.Duration(params.BeaconConfig().RespTimeout)*time.Second, s.updateMetrics)
async.RunEvery(s.ctx, refreshRate, s.RefreshENR)
async.RunEvery(s.ctx, refreshRate, s.RefreshPersistentSubnets)
async.RunEvery(s.ctx, 1*time.Minute, func() {
log.WithFields(logrus.Fields{
"inbound": len(s.peers.InboundConnected()),
"outbound": len(s.peers.OutboundConnected()),
"activePeers": len(s.peers.Active()),
}).Info("Peer summary")
inboundQUICCount := len(s.peers.InboundConnectedWithProtocol(peers.QUIC))
inboundTCPCount := len(s.peers.InboundConnectedWithProtocol(peers.TCP))
outboundQUICCount := len(s.peers.OutboundConnectedWithProtocol(peers.QUIC))
outboundTCPCount := len(s.peers.OutboundConnectedWithProtocol(peers.TCP))
total := inboundQUICCount + inboundTCPCount + outboundQUICCount + outboundTCPCount
fields := logrus.Fields{
"inboundTCP": inboundTCPCount,
"outboundTCP": outboundTCPCount,
"total": total,
}
if features.Get().EnableQUIC {
fields["inboundQUIC"] = inboundQUICCount
fields["outboundQUIC"] = outboundQUICCount
}
log.WithFields(fields).Info("Connected peers")
})
multiAddrs := s.host.Network().ListenAddresses()
@@ -244,9 +261,10 @@ func (s *Service) Start() {
p2pHostAddress := s.cfg.HostAddress
p2pTCPPort := s.cfg.TCPPort
p2pQUICPort := s.cfg.QUICPort
if p2pHostAddress != "" {
logExternalIPAddr(s.host.ID(), p2pHostAddress, p2pTCPPort)
logExternalIPAddr(s.host.ID(), p2pHostAddress, p2pTCPPort, p2pQUICPort)
verifyConnectivity(p2pHostAddress, p2pTCPPort, "tcp")
}
@@ -340,6 +358,15 @@ func (s *Service) ENR() *enr.Record {
return s.dv5Listener.Self().Record()
}
// NodeID returns the local node's node ID
// for discovery.
func (s *Service) NodeID() enode.ID {
if s.dv5Listener == nil {
return [32]byte{}
}
return s.dv5Listener.Self().ID()
}
// DiscoveryAddresses represents our enr addresses as multiaddresses.
func (s *Service) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
if s.dv5Listener == nil {

View File

@@ -102,8 +102,9 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
cs := startup.NewClockSynchronizer()
cfg := &Config{
TCPPort: 2000,
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
ClockWaiter: cs,
}
s, err := NewService(context.Background(), cfg)
@@ -147,8 +148,9 @@ func TestService_Start_NoDiscoverFlag(t *testing.T) {
cs := startup.NewClockSynchronizer()
cfg := &Config{
TCPPort: 2000,
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
StateNotifier: &mock.MockStateNotifier{},
NoDiscovery: true, // <-- no s.dv5Listener is created
ClockWaiter: cs,

View File

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
@@ -34,8 +35,8 @@ var syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
// The value used with the subnet, in order
// to create an appropriate key to retrieve
// the relevant lock. This is used to differentiate
// sync subnets from attestation subnets. This is deliberately
// chosen as more than 64(attestation subnet count).
// sync subnets from others. This is deliberately
// chosen as more than 64 (attestation subnet count).
const syncLockerVal = 100
// The value used with the blob sidecar subnet, in order
@@ -45,6 +46,13 @@ const syncLockerVal = 100
// chosen to be more than the sync and attestation subnets combined.
const blobSubnetLockerVal = 110
// The value used with the data column sidecar subnet, in order
// to create an appropriate key to retrieve
// the relevant lock. This is used to differentiate
// data column subnets from others. This is deliberately
// chosen to be more than the sync, attestation and blob subnets (6) combined.
const dataColumnSubnetVal = 150
// FindPeersWithSubnet performs a network search for peers
// subscribed to a particular subnet. Then it tries to connect
// with those peers. This method will block until either:
@@ -93,6 +101,11 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
if err != nil {
continue
}
if info == nil {
continue
}
wg.Add(1)
go func() {
if err := s.connectWithPeer(ctx, *info); err != nil {
@@ -197,6 +210,25 @@ func initializePersistentSubnets(id enode.ID, epoch primitives.Epoch) error {
return nil
}
func initializePersistentColumnSubnets(id enode.ID) error {
_, ok, expTime := cache.ColumnSubnetIDs.GetColumnSubnets()
if ok && expTime.After(time.Now()) {
return nil
}
subsMap, err := peerdas.CustodyColumnSubnets(id, params.BeaconConfig().CustodyRequirement)
if err != nil {
return err
}
subs := make([]uint64, 0, len(subsMap))
for sub := range subsMap {
subs = append(subs, sub)
}
cache.ColumnSubnetIDs.AddColumnSubnets(subs)
return nil
}
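For illustration, the custody derivation in isolation, using the signature implied by the call above (node ID and custody requirement in, a set keyed by subnet out); the logging is a placeholder:

    // Sketch: derive and log this node's custody column subnets.
    // nodeID is a placeholder for the local enode.ID.
    subnetsMap, err := peerdas.CustodyColumnSubnets(nodeID, params.BeaconConfig().CustodyRequirement)
    if err != nil {
        return err
    }
    for subnet := range subnetsMap {
        log.WithField("subnet", subnet).Debug("Custodying data column subnet")
    }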
// Spec pseudocode definition:
//
// def compute_subscribed_subnets(node_id: NodeID, epoch: Epoch) -> Sequence[SubnetID]:
@@ -346,10 +378,11 @@ func syncBitvector(record *enr.Record) (bitfield.Bitvector4, error) {
// The subnet locker is a map which keeps track of all
// mutexes stored per subnet. This locker is re-used
// between both the attestation and sync subnets. In
// order to differentiate between attestation and sync
// subnets. Sync subnets are stored by (subnet+syncLockerVal). This
// is to prevent conflicts while allowing both subnets
// between the attestation, sync, blob and data column subnets.
// Sync subnets are stored by (subnet+syncLockerVal).
// Blob subnets are stored by (subnet+blobSubnetLockerVal).
// Data column subnets are stored by (subnet+dataColumnSubnetVal).
// This is to prevent conflicts while allowing subnets
// to use a single locker.
func (s *Service) subnetLocker(i uint64) *sync.RWMutex {
s.subnetsLockLock.Lock()
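In short, each subnet family is offset into a disjoint key range of the shared locker; a sketch for the data column case, following directly from the constants above:

    // Sketch: lock data column subnet 3. Key 153 cannot collide with
    // attestation subnet 3 (key 3), sync subnet 3 (key 103) or blob subnet 3 (key 113).
    lock := s.subnetLocker(3 + dataColumnSubnetVal)
    lock.Lock()
    defer lock.Unlock()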

View File

@@ -66,7 +66,7 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
genesisTime := time.Now()
bootNodeService := &Service{
cfg: &Config{TCPPort: 2000, UDPPort: 3000},
cfg: &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
@@ -89,8 +89,9 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
service, err := NewService(ctx, &Config{
Discv5BootStrapAddrs: []string{bootNodeENR},
MaxPeers: 30,
TCPPort: uint(2000 + i),
UDPPort: uint(3000 + i),
UDPPort: uint(2000 + i),
TCPPort: uint(3000 + i),
QUICPort: uint(3000 + i),
})
require.NoError(t, err)
@@ -133,8 +134,9 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNodeENR},
MaxPeers: 30,
TCPPort: 2010,
UDPPort: 3010,
UDPPort: 2010,
TCPPort: 3010,
QUICPort: 3010,
}
service, err := NewService(ctx, cfg)

View File

@@ -3,6 +3,7 @@ package testing
import (
"context"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/control"
@@ -55,6 +56,11 @@ func (_ *FakeP2P) ENR() *enr.Record {
return new(enr.Record)
}
// NodeID returns the node id of the local peer.
func (_ *FakeP2P) NodeID() enode.ID {
return [32]byte{}
}
// DiscoveryAddresses -- fake
func (_ *FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
return nil, nil
@@ -66,7 +72,7 @@ func (_ *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ i
}
// RefreshENR mocks the p2p func.
func (_ *FakeP2P) RefreshENR() {}
func (_ *FakeP2P) RefreshPersistentSubnets() {}
// LeaveTopic -- fake.
func (_ *FakeP2P) LeaveTopic(_ string) error {
@@ -148,6 +154,11 @@ func (_ *FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSideca
return nil
}
// BroadcastDataColumn -- fake.
func (_ *FakeP2P) BroadcastDataColumn(_ context.Context, _ uint64, _ *ethpb.DataColumnSidecar) error {
return nil
}
// InterceptPeerDial -- fake.
func (_ *FakeP2P) InterceptPeerDial(peer.ID) (allow bool) {
return true

View File

@@ -48,6 +48,12 @@ func (m *MockBroadcaster) BroadcastBlob(context.Context, uint64, *ethpb.BlobSide
return nil
}
// BroadcastDataColumn broadcasts a data column for mock.
func (m *MockBroadcaster) BroadcastDataColumn(context.Context, uint64, *ethpb.DataColumnSidecar) error {
m.BroadcastCalled.Store(true)
return nil
}
// NumMessages returns the number of messages broadcasted.
func (m *MockBroadcaster) NumMessages() int {
m.msgLock.Lock()

View File

@@ -4,6 +4,7 @@ import (
"context"
"errors"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
@@ -39,6 +40,11 @@ func (m MockPeerManager) ENR() *enr.Record {
return m.Enr
}
// NodeID .
func (m MockPeerManager) NodeID() enode.ID {
return [32]byte{}
}
// DiscoveryAddresses .
func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
if m.FailDiscoveryAddr {
@@ -48,7 +54,7 @@ func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
}
// RefreshENR .
func (_ MockPeerManager) RefreshENR() {}
func (_ MockPeerManager) RefreshPersistentSubnets() {}
// FindPeersWithSubnet .
func (_ MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {

View File

@@ -10,6 +10,7 @@ import (
"testing"
"time"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
pubsub "github.com/libp2p/go-libp2p-pubsub"
core "github.com/libp2p/go-libp2p/core"
@@ -183,6 +184,12 @@ func (p *TestP2P) BroadcastBlob(context.Context, uint64, *ethpb.BlobSidecar) err
return nil
}
// BroadcastDataColumn broadcasts a data column for mock.
func (p *TestP2P) BroadcastDataColumn(context.Context, uint64, *ethpb.DataColumnSidecar) error {
p.BroadcastCalled.Store(true)
return nil
}
// SetStreamHandler for RPC.
func (p *TestP2P) SetStreamHandler(topic string, handler network.StreamHandler) {
p.BHost.SetStreamHandler(protocol.ID(topic), handler)
@@ -263,6 +270,11 @@ func (_ *TestP2P) ENR() *enr.Record {
return new(enr.Record)
}
// NodeID returns the node id of the local peer.
func (_ *TestP2P) NodeID() enode.ID {
return [32]byte{}
}
// DiscoveryAddresses --
func (_ *TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
return nil, nil
@@ -361,7 +373,7 @@ func (_ *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ i
}
// RefreshENR mocks the p2p func.
func (_ *TestP2P) RefreshENR() {}
func (_ *TestP2P) RefreshPersistentSubnets() {}
// ForkDigest mocks the p2p func.
func (p *TestP2P) ForkDigest() ([4]byte, error) {

View File

@@ -30,6 +30,9 @@ const (
GossipBlsToExecutionChangeMessage = "bls_to_execution_change"
// GossipBlobSidecarMessage is the name for the blob sidecar message type.
GossipBlobSidecarMessage = "blob_sidecar"
// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
GossipDataColumnSidecarMessage = "data_column_sidecar"
// Topic Formats
//
// AttestationSubnetTopicFormat is the topic format for the attestation subnet.
@@ -52,4 +55,6 @@ const (
BlsToExecutionChangeSubnetTopicFormat = GossipProtocolAndDigest + GossipBlsToExecutionChangeMessage
// BlobSubnetTopicFormat is the topic format for the blob subnet.
BlobSubnetTopicFormat = GossipProtocolAndDigest + GossipBlobSidecarMessage + "_%d"
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
)
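Assuming the usual /eth2/%x/ digest prefix in GossipProtocolAndDigest, the fully-qualified topic for a given subnet would render as follows (the digest is a placeholder):

    // Sketch: wire-level gossip topic for data column subnet 5.
    digest := [4]byte{0x01, 0x02, 0x03, 0x04} // placeholder fork digest
    topic := fmt.Sprintf(DataColumnSubnetTopicFormat, digest, 5)
    // topic: /eth2/01020304/data_column_sidecar_5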

View File

@@ -9,10 +9,15 @@ var (
ErrInvalidSequenceNum = errors.New("invalid sequence number provided")
ErrGeneric = errors.New("internal service error")
ErrRateLimited = errors.New("rate limited")
ErrIODeadline = errors.New("i/o deadline exceeded")
ErrInvalidRequest = errors.New("invalid range, step or count")
ErrBlobLTMinRequest = errors.New("blob slot < minimum_request_epoch")
ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
ErrRateLimited = errors.New("rate limited")
ErrIODeadline = errors.New("i/o deadline exceeded")
ErrInvalidRequest = errors.New("invalid range, step or count")
ErrBlobLTMinRequest = errors.New("blob epoch < minimum_request_epoch")
ErrDataColumnLTMinRequest = errors.New("data column epoch < minimum_request_epoch")
ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
ErrMaxDataColumnReqExceeded = errors.New("requested more than MAX_REQUEST_DATA_COLUMN_SIDECARS")
ErrResourceUnavailable = errors.New("resource requested unavailable")
ErrInvalidColumnIndex = errors.New("invalid column index requested")
)

View File

@@ -50,7 +50,7 @@ func ensurePeerConnections(ctx context.Context, h host.Host, peers *peers.Status
c := h.Network().ConnsToPeer(p.ID)
if len(c) == 0 {
if err := connectWithTimeout(ctx, h, p); err != nil {
log.WithField("peer", p.ID).WithField("addrs", p.Addrs).WithError(err).Errorf("Failed to reconnect to peer")
log.WithField("peer", p.ID).WithField("addrs", p.Addrs).WithError(err).Errorf("failed to reconnect to peer")
continue
}
}

View File

@@ -37,6 +37,7 @@ go_library(
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
@@ -121,6 +122,7 @@ go_test(
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@com_github_stretchr_testify//mock:go_default_library",
"@org_uber_go_mock//gomock:go_default_library",
],

View File

@@ -21,6 +21,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/v1alpha1/validator"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
@@ -32,6 +33,7 @@ import (
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -42,7 +44,8 @@ const (
)
var (
errNilBlock = errors.New("nil block")
errNilBlock = errors.New("nil block")
errEquivocatedBlock = errors.New("block is equivocated")
)
type handled bool
@@ -1254,6 +1257,16 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
},
}
if err = s.validateBroadcast(ctx, r, genericBlock); err != nil {
if errors.Is(err, errEquivocatedBlock) {
b, err := blocks.NewSignedBeaconBlock(genericBlock)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
if err := s.broadcastSeenBlockSidecars(ctx, b, genericBlock.GetDeneb().Blobs, genericBlock.GetDeneb().KzgProofs); err != nil {
log.WithError(err).Error("Failed to broadcast blob sidecars")
}
}
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
@@ -1383,6 +1396,16 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
consensusBlock, err = denebBlockContents.ToGeneric()
if err == nil {
if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
if errors.Is(err, errEquivocatedBlock) {
b, err := blocks.NewSignedBeaconBlock(consensusBlock)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
if err := s.broadcastSeenBlockSidecars(ctx, b, consensusBlock.GetDeneb().Blobs, consensusBlock.GetDeneb().KzgProofs); err != nil {
log.WithError(err).Error("Failed to broadcast blob sidecars")
}
}
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
@@ -1547,7 +1570,7 @@ func (s *Server) validateConsensus(ctx context.Context, blk interfaces.ReadOnlyS
func (s *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error {
if s.ForkchoiceFetcher.HighestReceivedBlockSlot() == blk.Slot() {
return fmt.Errorf("block for slot %d already exists in fork choice", blk.Slot())
return errors.Wrapf(errEquivocatedBlock, "block for slot %d already exists in fork choice", blk.Slot())
}
return nil
}
@@ -2072,3 +2095,37 @@ func (s *Server) GetDepositSnapshot(w http.ResponseWriter, r *http.Request) {
},
)
}
// Broadcast blob sidecars even if the block of the same slot has been imported.
// To ensure safety, we will only broadcast blob sidecars if the header references the same block that was previously seen.
// Otherwise, a proposer could get slashed through a different blob sidecar header reference.
func (s *Server) broadcastSeenBlockSidecars(
ctx context.Context,
b interfaces.SignedBeaconBlock,
blobs [][]byte,
kzgProofs [][]byte) error {
scs, err := validator.BuildBlobSidecars(b, blobs, kzgProofs)
if err != nil {
return err
}
for _, sc := range scs {
r, err := sc.SignedBlockHeader.Header.HashTreeRoot()
if err != nil {
log.WithError(err).Error("Failed to hash block header for blob sidecar")
continue
}
if !s.FinalizationFetcher.InForkchoice(r) {
log.WithField("root", fmt.Sprintf("%#x", r)).Debug("Block header not in forkchoice, skipping blob sidecar broadcast")
continue
}
if err := s.Broadcaster.BroadcastBlob(ctx, sc.Index, sc); err != nil {
log.WithError(err).Error("Failed to broadcast blob sidecar for index ", sc.Index)
}
log.WithFields(logrus.Fields{
"index": sc.Index,
"slot": sc.SignedBlockHeader.Header.Slot,
"kzgCommitment": fmt.Sprintf("%#x", sc.KzgCommitment),
}).Info("Broadcasted blob sidecar for already seen block")
}
return nil
}

View File

@@ -13,6 +13,8 @@ import (
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
mockp2p "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
logTest "github.com/sirupsen/logrus/hooks/test"
"go.uber.org/mock/gomock"
"github.com/gorilla/mux"
@@ -2472,7 +2474,9 @@ func TestValidateEquivocation(t *testing.T) {
require.NoError(t, err)
blk.SetSlot(st.Slot())
assert.ErrorContains(t, "already exists", server.validateEquivocation(blk.Block()))
err = server.validateEquivocation(blk.Block())
assert.ErrorContains(t, "already exists", err)
require.ErrorIs(t, err, errEquivocatedBlock)
})
}
@@ -3630,3 +3634,27 @@ func TestGetDepositSnapshot(t *testing.T) {
assert.Equal(t, finalized, len(resp.Finalized))
})
}
func TestServer_broadcastBlobSidecars(t *testing.T) {
hook := logTest.NewGlobal()
blockToPropose := util.NewBeaconBlockContentsDeneb()
blockToPropose.Blobs = [][]byte{{0x01}, {0x02}, {0x03}}
blockToPropose.KzgProofs = [][]byte{{0x01}, {0x02}, {0x03}}
blockToPropose.Block.Block.Body.BlobKzgCommitments = [][]byte{bytesutil.PadTo([]byte("kc"), 48), bytesutil.PadTo([]byte("kc1"), 48), bytesutil.PadTo([]byte("kc2"), 48)}
d := &eth.GenericSignedBeaconBlock_Deneb{Deneb: blockToPropose}
b := &eth.GenericSignedBeaconBlock{Block: d}
server := &Server{
Broadcaster: &mockp2p.MockBroadcaster{},
FinalizationFetcher: &chainMock.ChainService{NotFinalized: true},
}
blk, err := blocks.NewSignedBeaconBlock(b.Block)
require.NoError(t, err)
require.NoError(t, server.broadcastSeenBlockSidecars(context.Background(), blk, b.GetDeneb().Blobs, b.GetDeneb().KzgProofs))
require.LogsDoNotContain(t, hook, "Broadcasted blob sidecar for already seen block")
server.FinalizationFetcher = &chainMock.ChainService{NotFinalized: false}
require.NoError(t, server.broadcastSeenBlockSidecars(context.Background(), blk, b.GetDeneb().Blobs, b.GetDeneb().KzgProofs))
require.LogsContain(t, hook, "Broadcasted blob sidecar for already seen block")
}

View File

@@ -369,16 +369,7 @@ func decodeIds(w http.ResponseWriter, st state.BeaconState, rawIds []string, ign
func valsFromIds(w http.ResponseWriter, st state.BeaconState, ids []primitives.ValidatorIndex) ([]state.ReadOnlyValidator, bool) {
var vals []state.ReadOnlyValidator
if len(ids) == 0 {
allVals := st.Validators()
vals = make([]state.ReadOnlyValidator, len(allVals))
for i, val := range allVals {
readOnlyVal, err := statenative.NewValidator(val)
if err != nil {
httputil.HandleError(w, "Could not convert validator: "+err.Error(), http.StatusInternalServerError)
return nil, false
}
vals[i] = readOnlyVal
}
vals = st.ValidatorsReadOnly()
} else {
vals = make([]state.ReadOnlyValidator, 0, len(ids))
for _, id := range ids {

View File

@@ -78,6 +78,8 @@ func TestGetSpec(t *testing.T) {
config.CapellaForkEpoch = 103
config.DenebForkVersion = []byte("DenebForkVersion")
config.DenebForkEpoch = 105
config.ElectraForkVersion = []byte("ElectraForkVersion")
config.ElectraForkEpoch = 107
config.BLSWithdrawalPrefixByte = byte('b')
config.ETH1AddressWithdrawalPrefixByte = byte('c')
config.GenesisDelay = 24
@@ -170,293 +172,309 @@ func TestGetSpec(t *testing.T) {
data, ok := resp.Data.(map[string]interface{})
require.Equal(t, true, ok)
assert.Equal(t, 129, len(data))
assert.Equal(t, 136, len(data))
for k, v := range data {
switch k {
case "CONFIG_NAME":
assert.Equal(t, "ConfigName", v)
case "PRESET_BASE":
assert.Equal(t, "PresetBase", v)
case "MAX_COMMITTEES_PER_SLOT":
assert.Equal(t, "1", v)
case "TARGET_COMMITTEE_SIZE":
assert.Equal(t, "2", v)
case "MAX_VALIDATORS_PER_COMMITTEE":
assert.Equal(t, "3", v)
case "MIN_PER_EPOCH_CHURN_LIMIT":
assert.Equal(t, "4", v)
case "CHURN_LIMIT_QUOTIENT":
assert.Equal(t, "5", v)
case "SHUFFLE_ROUND_COUNT":
assert.Equal(t, "6", v)
case "MIN_GENESIS_ACTIVE_VALIDATOR_COUNT":
assert.Equal(t, "7", v)
case "MIN_GENESIS_TIME":
assert.Equal(t, "8", v)
case "HYSTERESIS_QUOTIENT":
assert.Equal(t, "9", v)
case "HYSTERESIS_DOWNWARD_MULTIPLIER":
assert.Equal(t, "10", v)
case "HYSTERESIS_UPWARD_MULTIPLIER":
assert.Equal(t, "11", v)
case "SAFE_SLOTS_TO_UPDATE_JUSTIFIED":
assert.Equal(t, "0", v)
case "ETH1_FOLLOW_DISTANCE":
assert.Equal(t, "13", v)
case "TARGET_AGGREGATORS_PER_COMMITTEE":
assert.Equal(t, "14", v)
case "RANDOM_SUBNETS_PER_VALIDATOR":
assert.Equal(t, "15", v)
case "EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION":
assert.Equal(t, "16", v)
case "SECONDS_PER_ETH1_BLOCK":
assert.Equal(t, "17", v)
case "DEPOSIT_CHAIN_ID":
assert.Equal(t, "18", v)
case "DEPOSIT_NETWORK_ID":
assert.Equal(t, "19", v)
case "DEPOSIT_CONTRACT_ADDRESS":
assert.Equal(t, "DepositContractAddress", v)
case "MIN_DEPOSIT_AMOUNT":
assert.Equal(t, "20", v)
case "MAX_EFFECTIVE_BALANCE":
assert.Equal(t, "21", v)
case "EJECTION_BALANCE":
assert.Equal(t, "22", v)
case "EFFECTIVE_BALANCE_INCREMENT":
assert.Equal(t, "23", v)
case "GENESIS_FORK_VERSION":
assert.Equal(t, "0x"+hex.EncodeToString([]byte("GenesisForkVersion")), v)
case "ALTAIR_FORK_VERSION":
assert.Equal(t, "0x"+hex.EncodeToString([]byte("AltairForkVersion")), v)
case "ALTAIR_FORK_EPOCH":
assert.Equal(t, "100", v)
case "BELLATRIX_FORK_VERSION":
assert.Equal(t, "0x"+hex.EncodeToString([]byte("BellatrixForkVersion")), v)
case "BELLATRIX_FORK_EPOCH":
assert.Equal(t, "101", v)
case "CAPELLA_FORK_VERSION":
assert.Equal(t, "0x"+hex.EncodeToString([]byte("CapellaForkVersion")), v)
case "CAPELLA_FORK_EPOCH":
assert.Equal(t, "103", v)
case "DENEB_FORK_VERSION":
assert.Equal(t, "0x"+hex.EncodeToString([]byte("DenebForkVersion")), v)
case "DENEB_FORK_EPOCH":
assert.Equal(t, "105", v)
case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
assert.Equal(t, "1000", v)
case "BLS_WITHDRAWAL_PREFIX":
assert.Equal(t, "0x62", v)
case "ETH1_ADDRESS_WITHDRAWAL_PREFIX":
assert.Equal(t, "0x63", v)
case "GENESIS_DELAY":
assert.Equal(t, "24", v)
case "SECONDS_PER_SLOT":
assert.Equal(t, "25", v)
case "MIN_ATTESTATION_INCLUSION_DELAY":
assert.Equal(t, "26", v)
case "SLOTS_PER_EPOCH":
assert.Equal(t, "27", v)
case "MIN_SEED_LOOKAHEAD":
assert.Equal(t, "28", v)
case "MAX_SEED_LOOKAHEAD":
assert.Equal(t, "29", v)
case "EPOCHS_PER_ETH1_VOTING_PERIOD":
assert.Equal(t, "30", v)
case "SLOTS_PER_HISTORICAL_ROOT":
assert.Equal(t, "31", v)
case "MIN_VALIDATOR_WITHDRAWABILITY_DELAY":
assert.Equal(t, "32", v)
case "SHARD_COMMITTEE_PERIOD":
assert.Equal(t, "33", v)
case "MIN_EPOCHS_TO_INACTIVITY_PENALTY":
assert.Equal(t, "34", v)
case "EPOCHS_PER_HISTORICAL_VECTOR":
assert.Equal(t, "35", v)
case "EPOCHS_PER_SLASHINGS_VECTOR":
assert.Equal(t, "36", v)
case "HISTORICAL_ROOTS_LIMIT":
assert.Equal(t, "37", v)
case "VALIDATOR_REGISTRY_LIMIT":
assert.Equal(t, "38", v)
case "BASE_REWARD_FACTOR":
assert.Equal(t, "39", v)
case "WHISTLEBLOWER_REWARD_QUOTIENT":
assert.Equal(t, "40", v)
case "PROPOSER_REWARD_QUOTIENT":
assert.Equal(t, "41", v)
case "INACTIVITY_PENALTY_QUOTIENT":
assert.Equal(t, "42", v)
case "HF1_INACTIVITY_PENALTY_QUOTIENT":
assert.Equal(t, "43", v)
case "MIN_SLASHING_PENALTY_QUOTIENT":
assert.Equal(t, "44", v)
case "HF1_MIN_SLASHING_PENALTY_QUOTIENT":
assert.Equal(t, "45", v)
case "PROPORTIONAL_SLASHING_MULTIPLIER":
assert.Equal(t, "46", v)
case "HF1_PROPORTIONAL_SLASHING_MULTIPLIER":
assert.Equal(t, "47", v)
case "MAX_PROPOSER_SLASHINGS":
assert.Equal(t, "48", v)
case "MAX_ATTESTER_SLASHINGS":
assert.Equal(t, "49", v)
case "MAX_ATTESTATIONS":
assert.Equal(t, "50", v)
case "MAX_DEPOSITS":
assert.Equal(t, "51", v)
case "MAX_VOLUNTARY_EXITS":
assert.Equal(t, "52", v)
case "MAX_BLOBS_PER_BLOCK":
assert.Equal(t, "4", v)
case "TIMELY_HEAD_FLAG_INDEX":
assert.Equal(t, "0x35", v)
case "TIMELY_SOURCE_FLAG_INDEX":
assert.Equal(t, "0x36", v)
case "TIMELY_TARGET_FLAG_INDEX":
assert.Equal(t, "0x37", v)
case "TIMELY_HEAD_WEIGHT":
assert.Equal(t, "56", v)
case "TIMELY_SOURCE_WEIGHT":
assert.Equal(t, "57", v)
case "TIMELY_TARGET_WEIGHT":
assert.Equal(t, "58", v)
case "SYNC_REWARD_WEIGHT":
assert.Equal(t, "59", v)
case "WEIGHT_DENOMINATOR":
assert.Equal(t, "60", v)
case "TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE":
assert.Equal(t, "61", v)
case "SYNC_COMMITTEE_SUBNET_COUNT":
assert.Equal(t, "62", v)
case "SYNC_COMMITTEE_SIZE":
assert.Equal(t, "63", v)
case "SYNC_PUBKEYS_PER_AGGREGATE":
assert.Equal(t, "64", v)
case "INACTIVITY_SCORE_BIAS":
assert.Equal(t, "65", v)
case "EPOCHS_PER_SYNC_COMMITTEE_PERIOD":
assert.Equal(t, "66", v)
case "INACTIVITY_PENALTY_QUOTIENT_ALTAIR":
assert.Equal(t, "67", v)
case "MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR":
assert.Equal(t, "68", v)
case "PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR":
assert.Equal(t, "69", v)
case "INACTIVITY_SCORE_RECOVERY_RATE":
assert.Equal(t, "70", v)
case "MIN_SYNC_COMMITTEE_PARTICIPANTS":
assert.Equal(t, "71", v)
case "PROPOSER_WEIGHT":
assert.Equal(t, "8", v)
case "DOMAIN_BEACON_PROPOSER":
assert.Equal(t, "0x30303031", v)
case "DOMAIN_BEACON_ATTESTER":
assert.Equal(t, "0x30303032", v)
case "DOMAIN_RANDAO":
assert.Equal(t, "0x30303033", v)
case "DOMAIN_DEPOSIT":
assert.Equal(t, "0x30303034", v)
case "DOMAIN_VOLUNTARY_EXIT":
assert.Equal(t, "0x30303035", v)
case "DOMAIN_SELECTION_PROOF":
assert.Equal(t, "0x30303036", v)
case "DOMAIN_AGGREGATE_AND_PROOF":
assert.Equal(t, "0x30303037", v)
case "DOMAIN_APPLICATION_MASK":
assert.Equal(t, "0x31303030", v)
case "DOMAIN_SYNC_COMMITTEE":
assert.Equal(t, "0x07000000", v)
case "DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF":
assert.Equal(t, "0x08000000", v)
case "DOMAIN_CONTRIBUTION_AND_PROOF":
assert.Equal(t, "0x09000000", v)
case "DOMAIN_BLS_TO_EXECUTION_CHANGE":
assert.Equal(t, "0x0a000000", v)
case "DOMAIN_APPLICATION_BUILDER":
assert.Equal(t, "0x00000001", v)
case "DOMAIN_BLOB_SIDECAR":
assert.Equal(t, "0x00000000", v)
case "TRANSITION_TOTAL_DIFFICULTY":
assert.Equal(t, "0", v)
case "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":
assert.Equal(t, "72", v)
case "TERMINAL_BLOCK_HASH":
s, ok := v.(string)
require.Equal(t, true, ok)
assert.Equal(t, common.HexToHash("TerminalBlockHash"), common.HexToHash(s))
case "TERMINAL_TOTAL_DIFFICULTY":
assert.Equal(t, "73", v)
case "DefaultFeeRecipient":
assert.Equal(t, common.HexToAddress("DefaultFeeRecipient"), v)
case "PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":
assert.Equal(t, "3", v)
case "MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":
assert.Equal(t, "32", v)
case "INACTIVITY_PENALTY_QUOTIENT_BELLATRIX":
assert.Equal(t, "16777216", v)
case "PROPOSER_SCORE_BOOST":
assert.Equal(t, "40", v)
case "INTERVALS_PER_SLOT":
assert.Equal(t, "3", v)
case "MAX_WITHDRAWALS_PER_PAYLOAD":
assert.Equal(t, "74", v)
case "MAX_BLS_TO_EXECUTION_CHANGES":
assert.Equal(t, "75", v)
case "MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP":
assert.Equal(t, "76", v)
case "REORG_MAX_EPOCHS_SINCE_FINALIZATION":
assert.Equal(t, "2", v)
case "REORG_WEIGHT_THRESHOLD":
assert.Equal(t, "20", v)
case "REORG_PARENT_WEIGHT_THRESHOLD":
assert.Equal(t, "160", v)
case "MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":
assert.Equal(t, "8", v)
case "MAX_REQUEST_LIGHT_CLIENT_UPDATES":
assert.Equal(t, "128", v)
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
case "NODE_ID_BITS":
assert.Equal(t, "256", v)
case "ATTESTATION_SUBNET_EXTRA_BITS":
assert.Equal(t, "0", v)
case "ATTESTATION_SUBNET_PREFIX_BITS":
assert.Equal(t, "6", v)
case "SUBNETS_PER_NODE":
assert.Equal(t, "2", v)
case "EPOCHS_PER_SUBNET_SUBSCRIPTION":
assert.Equal(t, "256", v)
case "MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS":
assert.Equal(t, "4096", v)
case "MAX_REQUEST_BLOB_SIDECARS":
assert.Equal(t, "768", v)
case "MESSAGE_DOMAIN_INVALID_SNAPPY":
assert.Equal(t, "0x00000000", v)
case "MESSAGE_DOMAIN_VALID_SNAPPY":
assert.Equal(t, "0x01000000", v)
case "ATTESTATION_PROPAGATION_SLOT_RANGE":
assert.Equal(t, "32", v)
case "RESP_TIMEOUT":
assert.Equal(t, "10", v)
case "TTFB_TIMEOUT":
assert.Equal(t, "5", v)
case "MIN_EPOCHS_FOR_BLOCK_REQUESTS":
assert.Equal(t, "33024", v)
case "GOSSIP_MAX_SIZE":
assert.Equal(t, "10485760", v)
case "MAX_CHUNK_SIZE":
assert.Equal(t, "10485760", v)
case "ATTESTATION_SUBNET_COUNT":
assert.Equal(t, "64", v)
case "MAXIMUM_GOSSIP_CLOCK_DISPARITY":
assert.Equal(t, "500", v)
case "MAX_REQUEST_BLOCKS":
assert.Equal(t, "1024", v)
case "MAX_REQUEST_BLOCKS_DENEB":
assert.Equal(t, "128", v)
default:
t.Errorf("Incorrect key: %s", k)
}
t.Run(k, func(t *testing.T) {
switch k {
case "CONFIG_NAME":
assert.Equal(t, "ConfigName", v)
case "PRESET_BASE":
assert.Equal(t, "PresetBase", v)
case "MAX_COMMITTEES_PER_SLOT":
assert.Equal(t, "1", v)
case "TARGET_COMMITTEE_SIZE":
assert.Equal(t, "2", v)
case "MAX_VALIDATORS_PER_COMMITTEE":
assert.Equal(t, "3", v)
case "MIN_PER_EPOCH_CHURN_LIMIT":
assert.Equal(t, "4", v)
case "CHURN_LIMIT_QUOTIENT":
assert.Equal(t, "5", v)
case "SHUFFLE_ROUND_COUNT":
assert.Equal(t, "6", v)
case "MIN_GENESIS_ACTIVE_VALIDATOR_COUNT":
assert.Equal(t, "7", v)
case "MIN_GENESIS_TIME":
assert.Equal(t, "8", v)
case "HYSTERESIS_QUOTIENT":
assert.Equal(t, "9", v)
case "HYSTERESIS_DOWNWARD_MULTIPLIER":
assert.Equal(t, "10", v)
case "HYSTERESIS_UPWARD_MULTIPLIER":
assert.Equal(t, "11", v)
case "SAFE_SLOTS_TO_UPDATE_JUSTIFIED":
assert.Equal(t, "0", v)
case "ETH1_FOLLOW_DISTANCE":
assert.Equal(t, "13", v)
case "TARGET_AGGREGATORS_PER_COMMITTEE":
assert.Equal(t, "14", v)
case "RANDOM_SUBNETS_PER_VALIDATOR":
assert.Equal(t, "15", v)
case "EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION":
assert.Equal(t, "16", v)
case "SECONDS_PER_ETH1_BLOCK":
assert.Equal(t, "17", v)
case "DEPOSIT_CHAIN_ID":
assert.Equal(t, "18", v)
case "DEPOSIT_NETWORK_ID":
assert.Equal(t, "19", v)
case "DEPOSIT_CONTRACT_ADDRESS":
assert.Equal(t, "DepositContractAddress", v)
case "MIN_DEPOSIT_AMOUNT":
assert.Equal(t, "20", v)
case "MAX_EFFECTIVE_BALANCE":
assert.Equal(t, "21", v)
case "EJECTION_BALANCE":
assert.Equal(t, "22", v)
case "EFFECTIVE_BALANCE_INCREMENT":
assert.Equal(t, "23", v)
case "GENESIS_FORK_VERSION":
assert.Equal(t, "0x"+hex.EncodeToString([]byte("GenesisForkVersion")), v)
case "ALTAIR_FORK_VERSION":
assert.Equal(t, "0x"+hex.EncodeToString([]byte("AltairForkVersion")), v)
case "ALTAIR_FORK_EPOCH":
assert.Equal(t, "100", v)
case "BELLATRIX_FORK_VERSION":
assert.Equal(t, "0x"+hex.EncodeToString([]byte("BellatrixForkVersion")), v)
case "BELLATRIX_FORK_EPOCH":
assert.Equal(t, "101", v)
case "CAPELLA_FORK_VERSION":
assert.Equal(t, "0x"+hex.EncodeToString([]byte("CapellaForkVersion")), v)
case "CAPELLA_FORK_EPOCH":
assert.Equal(t, "103", v)
case "DENEB_FORK_VERSION":
assert.Equal(t, "0x"+hex.EncodeToString([]byte("DenebForkVersion")), v)
case "DENEB_FORK_EPOCH":
assert.Equal(t, "105", v)
case "ELECTRA_FORK_VERSION":
assert.Equal(t, "0x"+hex.EncodeToString([]byte("ElectraForkVersion")), v)
case "ELECTRA_FORK_EPOCH":
assert.Equal(t, "107", v)
case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
assert.Equal(t, "1000", v)
case "BLS_WITHDRAWAL_PREFIX":
assert.Equal(t, "0x62", v)
case "ETH1_ADDRESS_WITHDRAWAL_PREFIX":
assert.Equal(t, "0x63", v)
case "GENESIS_DELAY":
assert.Equal(t, "24", v)
case "SECONDS_PER_SLOT":
assert.Equal(t, "25", v)
case "MIN_ATTESTATION_INCLUSION_DELAY":
assert.Equal(t, "26", v)
case "SLOTS_PER_EPOCH":
assert.Equal(t, "27", v)
case "MIN_SEED_LOOKAHEAD":
assert.Equal(t, "28", v)
case "MAX_SEED_LOOKAHEAD":
assert.Equal(t, "29", v)
case "EPOCHS_PER_ETH1_VOTING_PERIOD":
assert.Equal(t, "30", v)
case "SLOTS_PER_HISTORICAL_ROOT":
assert.Equal(t, "31", v)
case "MIN_VALIDATOR_WITHDRAWABILITY_DELAY":
assert.Equal(t, "32", v)
case "SHARD_COMMITTEE_PERIOD":
assert.Equal(t, "33", v)
case "MIN_EPOCHS_TO_INACTIVITY_PENALTY":
assert.Equal(t, "34", v)
case "EPOCHS_PER_HISTORICAL_VECTOR":
assert.Equal(t, "35", v)
case "EPOCHS_PER_SLASHINGS_VECTOR":
assert.Equal(t, "36", v)
case "HISTORICAL_ROOTS_LIMIT":
assert.Equal(t, "37", v)
case "VALIDATOR_REGISTRY_LIMIT":
assert.Equal(t, "38", v)
case "BASE_REWARD_FACTOR":
assert.Equal(t, "39", v)
case "WHISTLEBLOWER_REWARD_QUOTIENT":
assert.Equal(t, "40", v)
case "PROPOSER_REWARD_QUOTIENT":
assert.Equal(t, "41", v)
case "INACTIVITY_PENALTY_QUOTIENT":
assert.Equal(t, "42", v)
case "HF1_INACTIVITY_PENALTY_QUOTIENT":
assert.Equal(t, "43", v)
case "MIN_SLASHING_PENALTY_QUOTIENT":
assert.Equal(t, "44", v)
case "HF1_MIN_SLASHING_PENALTY_QUOTIENT":
assert.Equal(t, "45", v)
case "PROPORTIONAL_SLASHING_MULTIPLIER":
assert.Equal(t, "46", v)
case "HF1_PROPORTIONAL_SLASHING_MULTIPLIER":
assert.Equal(t, "47", v)
case "MAX_PROPOSER_SLASHINGS":
assert.Equal(t, "48", v)
case "MAX_ATTESTER_SLASHINGS":
assert.Equal(t, "49", v)
case "MAX_ATTESTATIONS":
assert.Equal(t, "50", v)
case "MAX_DEPOSITS":
assert.Equal(t, "51", v)
case "MAX_VOLUNTARY_EXITS":
assert.Equal(t, "52", v)
case "MAX_BLOBS_PER_BLOCK":
assert.Equal(t, "4", v)
case "TIMELY_HEAD_FLAG_INDEX":
assert.Equal(t, "0x35", v)
case "TIMELY_SOURCE_FLAG_INDEX":
assert.Equal(t, "0x36", v)
case "TIMELY_TARGET_FLAG_INDEX":
assert.Equal(t, "0x37", v)
case "TIMELY_HEAD_WEIGHT":
assert.Equal(t, "56", v)
case "TIMELY_SOURCE_WEIGHT":
assert.Equal(t, "57", v)
case "TIMELY_TARGET_WEIGHT":
assert.Equal(t, "58", v)
case "SYNC_REWARD_WEIGHT":
assert.Equal(t, "59", v)
case "WEIGHT_DENOMINATOR":
assert.Equal(t, "60", v)
case "TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE":
assert.Equal(t, "61", v)
case "SYNC_COMMITTEE_SUBNET_COUNT":
assert.Equal(t, "62", v)
case "SYNC_COMMITTEE_SIZE":
assert.Equal(t, "63", v)
case "SYNC_PUBKEYS_PER_AGGREGATE":
assert.Equal(t, "64", v)
case "INACTIVITY_SCORE_BIAS":
assert.Equal(t, "65", v)
case "EPOCHS_PER_SYNC_COMMITTEE_PERIOD":
assert.Equal(t, "66", v)
case "INACTIVITY_PENALTY_QUOTIENT_ALTAIR":
assert.Equal(t, "67", v)
case "MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR":
assert.Equal(t, "68", v)
case "PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR":
assert.Equal(t, "69", v)
case "INACTIVITY_SCORE_RECOVERY_RATE":
assert.Equal(t, "70", v)
case "MIN_SYNC_COMMITTEE_PARTICIPANTS":
assert.Equal(t, "71", v)
case "PROPOSER_WEIGHT":
assert.Equal(t, "8", v)
case "DOMAIN_BEACON_PROPOSER":
assert.Equal(t, "0x30303031", v)
case "DOMAIN_BEACON_ATTESTER":
assert.Equal(t, "0x30303032", v)
case "DOMAIN_RANDAO":
assert.Equal(t, "0x30303033", v)
case "DOMAIN_DEPOSIT":
assert.Equal(t, "0x30303034", v)
case "DOMAIN_VOLUNTARY_EXIT":
assert.Equal(t, "0x30303035", v)
case "DOMAIN_SELECTION_PROOF":
assert.Equal(t, "0x30303036", v)
case "DOMAIN_AGGREGATE_AND_PROOF":
assert.Equal(t, "0x30303037", v)
case "DOMAIN_APPLICATION_MASK":
assert.Equal(t, "0x31303030", v)
case "DOMAIN_SYNC_COMMITTEE":
assert.Equal(t, "0x07000000", v)
case "DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF":
assert.Equal(t, "0x08000000", v)
case "DOMAIN_CONTRIBUTION_AND_PROOF":
assert.Equal(t, "0x09000000", v)
case "DOMAIN_BLS_TO_EXECUTION_CHANGE":
assert.Equal(t, "0x0a000000", v)
case "DOMAIN_APPLICATION_BUILDER":
assert.Equal(t, "0x00000001", v)
case "DOMAIN_BLOB_SIDECAR":
assert.Equal(t, "0x00000000", v)
case "TRANSITION_TOTAL_DIFFICULTY":
assert.Equal(t, "0", v)
case "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":
assert.Equal(t, "72", v)
case "TERMINAL_BLOCK_HASH":
s, ok := v.(string)
require.Equal(t, true, ok)
assert.Equal(t, common.HexToHash("TerminalBlockHash"), common.HexToHash(s))
case "TERMINAL_TOTAL_DIFFICULTY":
assert.Equal(t, "73", v)
case "DefaultFeeRecipient":
assert.Equal(t, common.HexToAddress("DefaultFeeRecipient"), v)
case "PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":
assert.Equal(t, "3", v)
case "MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":
assert.Equal(t, "32", v)
case "INACTIVITY_PENALTY_QUOTIENT_BELLATRIX":
assert.Equal(t, "16777216", v)
case "PROPOSER_SCORE_BOOST":
assert.Equal(t, "40", v)
case "INTERVALS_PER_SLOT":
assert.Equal(t, "3", v)
case "MAX_WITHDRAWALS_PER_PAYLOAD":
assert.Equal(t, "74", v)
case "MAX_BLS_TO_EXECUTION_CHANGES":
assert.Equal(t, "75", v)
case "MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP":
assert.Equal(t, "76", v)
case "REORG_MAX_EPOCHS_SINCE_FINALIZATION":
assert.Equal(t, "2", v)
case "REORG_WEIGHT_THRESHOLD":
assert.Equal(t, "20", v)
case "REORG_PARENT_WEIGHT_THRESHOLD":
assert.Equal(t, "160", v)
case "MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":
assert.Equal(t, "8", v)
case "MAX_REQUEST_LIGHT_CLIENT_UPDATES":
assert.Equal(t, "128", v)
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
case "NODE_ID_BITS":
assert.Equal(t, "256", v)
case "ATTESTATION_SUBNET_EXTRA_BITS":
assert.Equal(t, "0", v)
case "ATTESTATION_SUBNET_PREFIX_BITS":
assert.Equal(t, "6", v)
case "SUBNETS_PER_NODE":
assert.Equal(t, "2", v)
case "EPOCHS_PER_SUBNET_SUBSCRIPTION":
assert.Equal(t, "256", v)
case "MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS":
assert.Equal(t, "4096", v)
case "MAX_REQUEST_BLOB_SIDECARS":
assert.Equal(t, "768", v)
case "MESSAGE_DOMAIN_INVALID_SNAPPY":
assert.Equal(t, "0x00000000", v)
case "MESSAGE_DOMAIN_VALID_SNAPPY":
assert.Equal(t, "0x01000000", v)
case "ATTESTATION_PROPAGATION_SLOT_RANGE":
assert.Equal(t, "32", v)
case "RESP_TIMEOUT":
assert.Equal(t, "10", v)
case "TTFB_TIMEOUT":
assert.Equal(t, "5", v)
case "MIN_EPOCHS_FOR_BLOCK_REQUESTS":
assert.Equal(t, "33024", v)
case "GOSSIP_MAX_SIZE":
assert.Equal(t, "10485760", v)
case "MAX_CHUNK_SIZE":
assert.Equal(t, "10485760", v)
case "ATTESTATION_SUBNET_COUNT":
assert.Equal(t, "64", v)
case "MAXIMUM_GOSSIP_CLOCK_DISPARITY":
assert.Equal(t, "500", v)
case "MAX_REQUEST_BLOCKS":
assert.Equal(t, "1024", v)
case "MAX_REQUEST_BLOCKS_DENEB":
assert.Equal(t, "128", v)
case "NUMBER_OF_COLUMNS":
assert.Equal(t, "128", v)
case "MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA":
assert.Equal(t, "128000000000", v)
case "MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT":
assert.Equal(t, "256000000000", v)
case "DATA_COLUMN_SIDECAR_SUBNET_COUNT":
assert.Equal(t, "32", v)
case "MAX_REQUEST_DATA_COLUMN_SIDECARS":
assert.Equal(t, "16384", v)
default:
t.Errorf("Incorrect key: %s", k)
}
})
}
}
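
For orientation: the switch above executes once per key/value pair returned by the spec endpoint, and the rewritten version wraps each comparison in t.Run so that every config key passes or fails as its own subtest. A minimal sketch of the driving loop, assuming a hypothetical fetchSpec helper in place of the real HTTP round-trip the test performs:

package config_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/testing/assert"
)

// runSpecChecks sketches the loop that drives the per-key switch above.
// fetchSpec is a hypothetical stand-in for decoding the GetSpec response
// body into a map; all spec values arrive serialized as strings.
func runSpecChecks(t *testing.T, fetchSpec func() map[string]interface{}) {
	for k, v := range fetchSpec() {
		k, v := k, v // capture range variables for the subtest closure
		t.Run(k, func(t *testing.T) {
			switch k {
			case "SECONDS_PER_SLOT":
				assert.Equal(t, "25", v)
			default:
				t.Errorf("Incorrect key: %s", k)
			}
		})
	}
}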

View File

@@ -108,7 +108,7 @@ func (*Server) GetVersion(w http.ResponseWriter, r *http.Request) {
// GetHealth returns node health status in http status codes. Useful for load balancers.
func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
- _, span := trace.StartSpan(r.Context(), "node.GetHealth")
+ ctx, span := trace.StartSpan(r.Context(), "node.GetHealth")
defer span.End()
rawSyncingStatus, syncingStatus, ok := shared.UintFromQuery(w, r, "syncing_status", false)
@@ -119,10 +119,14 @@ func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
return
}
- if s.SyncChecker.Synced() {
+ optimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
+ if err != nil {
+ httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
+ }
+ if s.SyncChecker.Synced() && !optimistic {
return
}
- if s.SyncChecker.Syncing() || s.SyncChecker.Initialized() {
+ if s.SyncChecker.Syncing() || optimistic {
if rawSyncingStatus != "" {
w.WriteHeader(intSyncingStatus)
} else {
@@ -130,5 +134,6 @@ func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
}
return
}
w.WriteHeader(http.StatusServiceUnavailable)
}
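
The net effect is that an optimistically synced node no longer reports itself as fully healthy. A small sketch of the resulting state-to-status mapping, mirroring the branch order of the handler above (when a valid syncing_status query parameter is supplied, it replaces 206 in the middle case):

package main

import "net/http"

// healthStatus mirrors the updated handler: full health requires being
// synced and not optimistic; a syncing or optimistic node reports partial
// content; anything else is unavailable.
func healthStatus(synced, syncing, optimistic bool) int {
	switch {
	case synced && !optimistic:
		return http.StatusOK // 200
	case syncing || optimistic:
		return http.StatusPartialContent // 206, or the syncing_status override
	default:
		return http.StatusServiceUnavailable // 503
	}
}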

View File

@@ -91,8 +91,10 @@ func TestGetVersion(t *testing.T) {
func TestGetHealth(t *testing.T) {
checker := &syncmock.Sync{}
+ optimisticFetcher := &mock.ChainService{Optimistic: false}
s := &Server{
- SyncChecker: checker,
+ SyncChecker:           checker,
+ OptimisticModeFetcher: optimisticFetcher,
}
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/health", nil)
@@ -101,25 +103,30 @@ func TestGetHealth(t *testing.T) {
s.GetHealth(writer, request)
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
checker.IsInitialized = true
request = httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/health", nil)
writer = httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetHealth(writer, request)
assert.Equal(t, http.StatusPartialContent, writer.Code)
checker.IsSyncing = true
+ checker.IsSynced = false
request = httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://example.com/eth/v1/node/health?syncing_status=%d", http.StatusPaymentRequired), nil)
writer = httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetHealth(writer, request)
assert.Equal(t, http.StatusPaymentRequired, writer.Code)
checker.IsSyncing = false
checker.IsSynced = true
request = httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/health", nil)
writer = httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetHealth(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
+ checker.IsSyncing = false
+ checker.IsSynced = true
+ optimisticFetcher.Optimistic = true
+ request = httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/health", nil)
+ writer = httptest.NewRecorder()
+ writer.Body = &bytes.Buffer{}
+ s.GetHealth(writer, request)
+ assert.Equal(t, http.StatusPartialContent, writer.Code)
}
func TestGetIdentity(t *testing.T) {

View File

@@ -223,7 +223,7 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
return nil, &core.RpcError{Err: errors.Wrap(err, "failed to retrieve block from db"), Reason: core.Internal}
}
// if block is not in the retention window return 200 w/ empty list
- if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
+ if !p.BlobStorage.WithinRetentionPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
return make([]*blocks.VerifiedROBlob, 0), nil
}
commitments, err := b.Block().Body().BlobKzgCommitments()
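
Switching from the static params.WithinDAPeriod to a method on BlobStorage lets the store's own retention window, which operators can typically extend beyond the spec minimum, decide what the API serves. A minimal sketch of such a check, assuming the store tracks its retention in epochs:

package main

import "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"

// withinRetentionPeriod reports whether a block's epoch still falls inside
// a retention window measured against the current epoch. Addition is used
// instead of subtraction to avoid unsigned underflow for future epochs.
func withinRetentionPeriod(requested, current, retentionEpochs primitives.Epoch) bool {
	return requested+retentionEpochs >= current
}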

View File

@@ -164,8 +164,7 @@ func TestGetBlob(t *testing.T) {
db := testDB.SetupDB(t)
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
require.NoError(t, db.SaveBlock(context.Background(), denebBlock))
- _, bs, err := filesystem.NewEphemeralBlobStorageWithFs(t)
- require.NoError(t, err)
+ _, bs := filesystem.NewEphemeralBlobStorageWithFs(t)
testSidecars, err := verification.BlobSidecarSliceNoop(blobs)
require.NoError(t, err)
for i := range testSidecars {

View File

@@ -1,3 +1,5 @@
+ # gazelle:ignore
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
@@ -44,6 +46,7 @@ go_library(
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -88,6 +91,7 @@ go_library(
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_protobuf//ptypes/empty",
@@ -173,7 +177,6 @@ common_deps = [
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
]
- # gazelle:ignore
go_test(
name = "go_default_test",
timeout = "moderate",

View File

@@ -7,6 +7,7 @@ import (
"sync"
"time"
cKzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
emptypb "github.com/golang/protobuf/ptypes/empty"
@@ -18,9 +19,12 @@ import (
blockfeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -251,7 +255,15 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
}
// ProposeBeaconBlock handles the proposal of beacon blocks.
+ // TODO: Add tests
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
+ var (
+ blobSidecars []*ethpb.BlobSidecar
+ dataColumnSideCars []*ethpb.DataColumnSidecar
+ )
+ isPeerDASEnabled := features.Get().EnablePeerDAS
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
defer span.End()
@@ -264,14 +276,16 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
return nil, status.Errorf(codes.InvalidArgument, "%s: %v", "decode block failed", err)
}
- var sidecars []*ethpb.BlobSidecar
if block.IsBlinded() {
- block, sidecars, err = vs.handleBlindedBlock(ctx, block)
+ block, blobSidecars, dataColumnSideCars, err = vs.handleBlindedBlock(ctx, block, isPeerDASEnabled)
if err != nil {
return nil, status.Errorf(codes.Internal, "%s: %v", "handle blinded block", err)
}
} else {
- sidecars, err = vs.handleUnblindedBlock(block, req)
- }
- if err != nil {
- return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
+ blobSidecars, dataColumnSideCars, err = handleUnblindedBlock(block, req, isPeerDASEnabled)
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "%s: %v", "handle unblinded block", err)
+ }
}
root, err := block.Block().HashTreeRoot()
@@ -292,8 +306,14 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
errChan <- nil
}()
- if err := vs.broadcastAndReceiveBlobs(ctx, sidecars, root); err != nil {
- return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
+ if isPeerDASEnabled {
+ if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSideCars, root); err != nil {
+ return nil, status.Errorf(codes.Internal, "Could not broadcast/receive data columns: %v", err)
+ }
+ } else {
+ if err := vs.broadcastAndReceiveBlobs(ctx, blobSidecars, root); err != nil {
+ return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
+ }
+ }
wg.Wait()
@@ -305,47 +325,83 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
}
// handleBlindedBlock processes blinded beacon blocks.
- func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, error) {
+ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock, isPeerDASEnabled bool) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
if block.Version() < version.Bellatrix {
- return nil, nil, errors.New("pre-Bellatrix blinded block")
+ return nil, nil, nil, errors.New("pre-Bellatrix blinded block")
}
if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
- return nil, nil, errors.New("unconfigured block builder")
+ return nil, nil, nil, errors.New("unconfigured block builder")
}
copiedBlock, err := block.Copy()
if err != nil {
- return nil, nil, err
+ return nil, nil, nil, errors.Wrap(err, "block copy")
}
payload, bundle, err := vs.BlockBuilder.SubmitBlindedBlock(ctx, block)
if err != nil {
- return nil, nil, errors.Wrap(err, "submit blinded block failed")
+ return nil, nil, nil, errors.Wrap(err, "submit blinded block")
}
if err := copiedBlock.Unblind(payload); err != nil {
- return nil, nil, errors.Wrap(err, "unblind failed")
+ return nil, nil, nil, errors.Wrap(err, "unblind")
}
- sidecars, err := unblindBlobsSidecars(copiedBlock, bundle)
+ if isPeerDASEnabled {
+ dataColumnSideCars, err := unblindDataColumnsSidecars(copiedBlock, bundle)
+ if err != nil {
+ return nil, nil, nil, errors.Wrap(err, "unblind data columns sidecars")
+ }
+ return copiedBlock, nil, dataColumnSideCars, nil
+ }
+ blobSidecars, err := unblindBlobsSidecars(copiedBlock, bundle)
if err != nil {
- return nil, nil, errors.Wrap(err, "unblind sidecars failed")
+ return nil, nil, nil, errors.Wrap(err, "unblind blobs sidecars")
}
- return copiedBlock, sidecars, nil
+ return copiedBlock, blobSidecars, nil, nil
}
// handleUnblindedBlock processes unblinded beacon blocks.
- func (vs *Server) handleUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock) ([]*ethpb.BlobSidecar, error) {
+ func handleUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock, isPeerDASEnabled bool) ([]*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
dbBlockContents := req.GetDeneb()
if dbBlockContents == nil {
- return nil, nil
+ return nil, nil, nil
}
- return buildBlobSidecars(block, dbBlockContents.Blobs, dbBlockContents.KzgProofs)
+ if isPeerDASEnabled {
+ // Convert blobs from slices to arrays.
+ blobs := make([]cKzg4844.Blob, 0, len(dbBlockContents.Blobs))
+ for _, blob := range dbBlockContents.Blobs {
+ if len(blob) != cKzg4844.BytesPerBlob {
+ return nil, nil, errors.Errorf("invalid blob size. expected %d bytes, got %d bytes", cKzg4844.BytesPerBlob, len(blob))
+ }
+ blobs = append(blobs, cKzg4844.Blob(blob))
+ }
+ dataColumnSideCars, err := peerdas.DataColumnSidecars(block, blobs)
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "data column sidecars")
+ }
+ return nil, dataColumnSideCars, nil
+ }
+ blobSidecars, err := BuildBlobSidecars(block, dbBlockContents.Blobs, dbBlockContents.KzgProofs)
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "build blob sidecars")
+ }
+ return blobSidecars, nil, nil
}
// broadcastReceiveBlock broadcasts a block and handles its reception.
- func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [32]byte) error {
+ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
protoBlock, err := block.Proto()
if err != nil {
return errors.Wrap(err, "protobuf conversion failed")
@@ -361,7 +417,7 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.Si
}
// broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars.
- func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [32]byte) error {
+ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [fieldparams.RootLength]byte) error {
for i, sc := range sidecars {
if err := vs.P2P.BroadcastBlob(ctx, uint64(i), sc); err != nil {
return errors.Wrap(err, "broadcast blob failed")
@@ -371,10 +427,12 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
if err != nil {
return errors.Wrap(err, "ROBlob creation failed")
}
verifiedBlob := blocks.NewVerifiedROBlob(readOnlySc)
if err := vs.BlobReceiver.ReceiveBlob(ctx, verifiedBlob); err != nil {
return errors.Wrap(err, "receive blob failed")
}
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.BlobSidecarReceived,
Data: &operation.BlobSidecarReceivedData{Blob: &verifiedBlob},
@@ -383,6 +441,31 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
return nil
}
+ // broadcastAndReceiveDataColumns handles the broadcasting and reception of data column sidecars.
+ func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, sidecars []*ethpb.DataColumnSidecar, root [fieldparams.RootLength]byte) error {
+ for i, sidecar := range sidecars {
+ if err := vs.P2P.BroadcastDataColumn(ctx, uint64(i), sidecar); err != nil {
+ return errors.Wrap(err, "broadcast data column")
+ }
+ roDataColumn, err := blocks.NewRODataColumnWithRoot(sidecar, root)
+ if err != nil {
+ return errors.Wrap(err, "new read-only data column with root")
+ }
+ verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
+ if err := vs.DataColumnReceiver.ReceiveDataColumn(ctx, verifiedRODataColumn); err != nil {
+ return errors.Wrap(err, "receive data column")
+ }
+ vs.OperationNotifier.OperationFeed().Send(&feed.Event{
+ Type: operation.DataColumnSidecarReceived,
+ Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn},
+ })
+ }
+ return nil
+ }
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
func (vs *Server) PrepareBeaconProposer(
_ context.Context, request *ethpb.PrepareBeaconProposerRequest,
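
One step worth isolating from handleUnblindedBlock above is the blob conversion: the wire format carries blobs as variable-length byte slices, while the KZG bindings expect a fixed-size array type, so every slice must be length-checked before conversion. A self-contained sketch of that step (bytesToBlobs is a hypothetical name):

package main

import (
	"fmt"

	cKzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
)

// bytesToBlobs length-checks each raw blob and converts it to the
// fixed-size array type expected by the c-kzg-4844 bindings.
func bytesToBlobs(raw [][]byte) ([]cKzg4844.Blob, error) {
	blobs := make([]cKzg4844.Blob, 0, len(raw))
	for i, b := range raw {
		if len(b) != cKzg4844.BytesPerBlob {
			return nil, fmt.Errorf("blob %d: expected %d bytes, got %d", i, cKzg4844.BytesPerBlob, len(b))
		}
		blobs = append(blobs, cKzg4844.Blob(b)) // slice-to-array conversion, Go 1.20+
	}
	return blobs, nil
}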

View File

@@ -56,8 +56,8 @@ func (c *blobsBundleCache) prune(minSlot primitives.Slot) {
}
}
- // buildBlobSidecars given a block, builds the blob sidecars for the block.
- func buildBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, kzgProofs [][]byte) ([]*ethpb.BlobSidecar, error) {
+ // BuildBlobSidecars builds the blob sidecars for the given block.
+ func BuildBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, kzgProofs [][]byte) ([]*ethpb.BlobSidecar, error) {
if blk.Version() < version.Deneb {
return nil, nil // No blobs before deneb.
}
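
Exporting the helper is what lets the proposer's unblinded-block path above call it from outside this file. A hedged usage sketch; the validator package path is an assumption about where the helper lives:

package main

import (
	"log"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/v1alpha1/validator"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
)

// buildSidecars illustrates the exported helper's contract: one blob and
// one KZG proof per commitment in the block body; pre-Deneb blocks yield nil.
func buildSidecars(blk interfaces.SignedBeaconBlock, blobs, proofs [][]byte) {
	sidecars, err := validator.BuildBlobSidecars(blk, blobs, proofs)
	if err != nil {
		log.Fatalf("build blob sidecars: %v", err)
	}
	log.Printf("built %d blob sidecars", len(sidecars))
}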

View File

@@ -51,7 +51,7 @@ func TestServer_buildBlobSidecars(t *testing.T) {
require.NoError(t, blk.SetBlobKzgCommitments(kzgCommitments))
proof, err := hexutil.Decode("0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a")
require.NoError(t, err)
- scs, err := buildBlobSidecars(blk, [][]byte{
+ scs, err := BuildBlobSidecars(blk, [][]byte{
make([]byte, fieldparams.BlobLength), make([]byte, fieldparams.BlobLength),
}, [][]byte{
proof, proof,

Some files were not shown because too many files have changed in this diff.