Compare commits

...

54 Commits

Author SHA1 Message Date
terence tsao
cdd44dd0b4 Add disable-get-blobs flag to skip data column sidecars from execution layer 2025-08-11 20:59:11 -07:00
terence tsao
edfa363c47 Clean up builder bid validation logic and improve version compatibility checking 2025-08-01 09:20:21 -07:00
Kasey Kirkham
88b480fd45 omit NetworkScheduleEntry fields that are not part of BlobScheduleEntry 2025-07-31 20:45:55 +02:00
Manu NALEPA
448903a05e Fix tests. 2025-07-31 16:03:00 +02:00
Manu NALEPA
b3a1d47f3d Merge branch 'fusaka-nfd' into fusaka-devnet-3 2025-07-31 16:02:38 +02:00
Manu NALEPA
fbcb686636 Merge branch 'refactor-fork-schedules' into fusaka-devnet-3 2025-07-31 16:01:34 +02:00
Manu NALEPA
0f5509119b Merge branch 'init-genesis-asap' into fusaka-devnet-3 2025-07-31 15:55:28 +02:00
Manu NALEPA
6d4e1d5f7a Merge branch 'develop' into peerDAS 2025-07-31 15:53:56 +02:00
james-prysm
bd6b4ecd5b wrapping goodbye messages in goroutine to speed up node shutdown (#15542)
* wrapping goodbye messages in goroutine to speed up node shutdown

* fixing requirement
2025-07-31 12:52:31 +00:00
Manu NALEPA
415622ec49 Merge branch 'develop' into peerDAS 2025-07-31 14:42:39 +02:00
Manu NALEPA
df65458834 refactor 2025-07-31 14:42:17 +02:00
Manu NALEPA
2005d5c6f2 step 2: Reconstruct if needed. 2025-07-31 14:42:17 +02:00
Manu NALEPA
7d72fbebe7 step 1: Retrieve from DB. 2025-07-31 14:42:17 +02:00
Potuz
d7d8764a91 Trigger payload attribute event on early blocks (#15541)
Currently the payload attribute event is triggered in
`forkchoiceUpdateWithExecution`. However, when we import an early block we
do not call this function; instead we make two calls to FCU. The first one is
on a locked path at the end of `postBlockProcess`, and this call is made
without any payload attributes to avoid updating the shuffling caches.

The second call is made in `handleSecondFCUCall`, which calls
`notifyForkchoiceUpdate` directly, bypassing
`forkchoiceUpdateWithExecution`, yet this is the call that actually
computes the payload attributes. So the event handler is never called
with the new attributes.

This PR moves the event trigger to the same place where we actually call
FCU with the computed payload attributes.

A note on forkchoice locking: since the event is always fired in a
goroutine, the routine will simply wait for forkchoice to be unlocked
before proceeding.

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-07-30 19:34:34 +00:00
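The actual change appears in the `notifyForkchoiceUpdate` hunk further down in this compare (the added `go s.firePayloadAttributesEvent(...)` call). As a rough, self-contained Go sketch of the pattern, with simplified stand-in types rather than Prysm's real ones:

package main

import "fmt"

// payloadAttrEvent is an illustrative stand-in for Prysm's payload attribute event.
type payloadAttrEvent struct {
    headRoot [32]byte
    nextSlot uint64
}

// notifyForkchoiceUpdate stands in for the function that computes payload attributes
// and calls engine_forkchoiceUpdated; the event is emitted here, in a goroutine, so the
// early-block path can no longer skip it and forkchoice locking never blocks the caller.
func notifyForkchoiceUpdate(events chan<- payloadAttrEvent, headRoot [32]byte, nextSlot uint64, hasAttr bool) {
    // ... send forkchoiceUpdated with the computed attributes to the execution client ...
    if hasAttr {
        go func() { events <- payloadAttrEvent{headRoot: headRoot, nextSlot: nextSlot} }()
    }
}

func main() {
    events := make(chan payloadAttrEvent, 1)
    notifyForkchoiceUpdate(events, [32]byte{0xaa}, 42, true)
    ev := <-events
    fmt.Printf("payload attributes event for slot %d\n", ev.nextSlot)
}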
Muzry
9b7f91d947 bugfix: submitPoolSyncCommitteeSignatures response inconsistent (#15516)
* fix: submitPoolSyncCommitteeSignatures response inconsistent

* update: bazel build file

* update: add changelog fragment file

* update api/server/structs/BUILD.bazel format

* update the unit test

* update: the error format

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-07-29 16:08:28 +00:00
terence
57e27199bd Fix builder bid version compatibility to support Electra bids with Fulu blocks (#15536) 2025-07-29 14:16:05 +00:00
Manu NALEPA
685761666d Merge branch 'develop' into peerDAS 2025-07-28 20:31:48 +02:00
Potuz
11ca766ed6 Add timing metric for PublishBlockV2 endpoint (#15539)
This commit adds a Prometheus histogram metric to measure the processing
duration of the PublishBlockV2 beacon API endpoint in milliseconds.

The metric covers the complete request processing time including:
- Request validation and parsing
- Block decoding (SSZ/JSON)
- Broadcast validation checks
- Block proposal through ProposeBeaconBlock
- All synchronous operations and awaited goroutines

Background operations that run in goroutines (block broadcasting, blob
sidecar processing) are included in the timing since the main function
waits for their completion before returning.

Files changed:
- beacon-chain/rpc/eth/beacon/metrics.go: New metric definition
- beacon-chain/rpc/eth/beacon/handlers.go: Timing instrumentation
- beacon-chain/rpc/eth/beacon/BUILD.bazel: Added metrics.go and Prometheus deps
- changelog/potuz_add_publishv2_metric.md: Changelog entry

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-authored-by: Claude <noreply@anthropic.com>
2025-07-28 18:20:57 +00:00
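A minimal sketch of this kind of instrumentation, assuming the standard prometheus/client_golang API; the metric name, buckets, and handler body below are illustrative, not necessarily the ones added in this commit:

package main

import (
    "net/http"
    "time"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

// publishBlockV2Duration records end-to-end handler time in milliseconds.
var publishBlockV2Duration = promauto.NewHistogram(prometheus.HistogramOpts{
    Name:    "publish_block_v2_duration_milliseconds",
    Help:    "Processing duration of the PublishBlockV2 endpoint in milliseconds.",
    Buckets: []float64{10, 50, 100, 250, 500, 1000, 2500, 5000},
})

func publishBlockV2(w http.ResponseWriter, r *http.Request) {
    start := time.Now()
    // Observe on every exit path, including errors and awaited goroutines.
    defer func() { publishBlockV2Duration.Observe(float64(time.Since(start).Milliseconds())) }()

    // ... decode SSZ/JSON, run broadcast validation, propose the block ...
    w.WriteHeader(http.StatusOK)
}

func main() {
    http.HandleFunc("/eth/v2/beacon/blocks", publishBlockV2)
    http.Handle("/metrics", promhttp.Handler())
    _ = http.ListenAndServe(":8080", nil)
}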
terence
cd6cc76d58 Add BLOB_SCHEDULE to eth/v1/config/spec endpoint (#15485)
* Beacon api: fix get config blob schedule

* Numbers should be string instead of float

* more generalized implementation for nested objects

* removing unused function

* fixing linting

* removing redundant switch fields

* adding additional log for debugging

* Fix build.

* adding skip function based on kasey's recommendation

* fixing test

---------

Co-authored-by: james-prysm <james@prysmaticlabs.com>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2025-07-25 19:06:10 +00:00
terence
fc4a1469f0 Include requested state root in StateNotFoundError message for debugging (#15533) 2025-07-25 17:50:26 +00:00
Justin Traglia
f3dc4c283e Improve das-core functions (#15524)
* Improve das-core functions

* Add changelog fragment

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-07-25 15:35:05 +00:00
raulk
6ddf271688 fix(beacon-api): return syncnets and cgc in Metadata. (#15506)
* fix(beacon-api): return syncnets and cgc in Metadata.

* changelog

* fixing implementation and adding unit tests

* gaz

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: james-prysm <james@prysmaticlabs.com>
2025-07-25 15:26:56 +00:00
Manu NALEPA
a75974b5f5 Fix TestCreateLocalNode. 2025-07-25 16:17:28 +02:00
Manu NALEPA
0725dff5e8 Merge branch 'develop' into peerDAS 2025-07-25 13:26:58 +02:00
Manu NALEPA
0d95d3d022 Validator custody: Update earliest available slot. (#15527) 2025-07-25 13:20:54 +02:00
Justin Traglia
af7afba26e Move reconstruction lock to prevent unnecessary work (#15528)
* Move reconstruction lock to prevent unnecessary work

* Add changelog fragment
2025-07-24 21:06:53 +00:00
james-prysm
b740a4ff83 implements the proposer lookahead api (#15525)
* implements the proposer lookahead api

* radek's feedback
2025-07-24 15:00:43 +00:00
Radosław Kapka
385c2224e8 Return zero value for Eth-Consensus-Block-Value on error (#15526) 2025-07-24 13:58:23 +00:00
Justin Traglia
04b39d1a4d Fix some nits associated with data column sidecar verification (#15521)
* Fix some nits associated with data column sidecar verification

* Add changelog fragment
2025-07-23 20:02:32 +00:00
james-prysm
4c40caf7fd persistent enode db to persist seq number (#15519)
* adding persistent db path for persistent seq data information

* fixing typo

* Update changelog/James-prysm_persistent-seq-number.md

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update beacon-chain/p2p/discovery_test.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* adding log updated based on manu's suggestion

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-23 13:55:05 +00:00
Justin Traglia
bc209cadab Use MinEpochsForDataColumnSidecarsRequest in WithinDAPeriod for Fulu (#15522)
* Use MinEpochsForDataColumnSidecarsRequest in WithinDAPeriod for Fulu

* Make fixes

* Add blank line
2025-07-23 13:35:27 +00:00
Manu NALEPA
ede560bee1 Merge branch 'develop' into peerDAS 2025-07-23 11:07:19 +02:00
Justin Traglia
856742ff68 Update links to consensus-specs to point to master branch (#15523)
* Update links to consensus-specs to point to master branch

* Add changelog fragment
2025-07-23 08:32:09 +00:00
Manu NALEPA
abe16a9cb4 Fix downscore by peers when a node gracefully stops. (#15505)
* Log when downscoring a peer.

* `validateSequenceNumber`: Downscore peer in function, clarify and add logs

* `AddConnectionHandler`: Send majority code to the outer scope (no functional change).

* `disconnectBadPeer`: Improve log.

* `sendRPCStatusRequest`: Improve log.

* `findPeersWithSubnets`: Add preventive peer filtering.
(As done in `s.findPeers`.)

* `Stop`: Use one `defer` for the whole function.
Reminder: `defer`s are executed backwards.

* `Stop`: Send a goodbye message to all connected peers when stopping the service.

Before this commit, stopping the service did not send any goodbye message to connected peers. The problem with this approach is that each peer still thinks we are alive and keeps trying to communicate with us. Because we are offline, we cannot respond, so the peer starts downscoring us and eventually bans us. As a consequence, when we restart, the peer refuses our connection request.

By sending a goodbye message when stopping the service, we ensure the peer stops expecting anything from us, and everything works again after a restart (see the sketch after this commit entry).

* `ConnectedF` and `DisconnectedF`: Work around a very probable libp2p bug by preventing outbound connections to very recently disconnected peers.

* Fix James' comment.

* Fix James' comment.

* Fix James' comment.

* Fix James' comment.

* Fix James' comment.

* `AddDisconnectionHandler`: Handle multiple close calls to `DisconnectedF` for the same peer.
2025-07-22 20:15:18 +00:00
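A minimal Go sketch of the shutdown pattern described in this commit and in the earlier "wrapping goodbye messages in goroutine" commit, using stand-in types rather than Prysm's p2p service: say goodbye to every connected peer concurrently and bound the total wait so shutdown stays fast even when peers are unresponsive.

package main

import (
    "context"
    "fmt"
    "sync"
    "time"
)

type peerID string

// sendGoodbye stands in for the RPC that tells a peer we are shutting down,
// so it does not downscore or ban us for going silent.
func sendGoodbye(ctx context.Context, p peerID) error {
    select {
    case <-time.After(10 * time.Millisecond): // pretend network round trip
        return nil
    case <-ctx.Done():
        return ctx.Err()
    }
}

// broadcastGoodbye sends the goodbye to all peers in parallel, one goroutine per peer,
// and waits at most `timeout` for the slowest of them.
func broadcastGoodbye(peers []peerID, timeout time.Duration) {
    ctx, cancel := context.WithTimeout(context.Background(), timeout)
    defer cancel()

    var wg sync.WaitGroup
    for _, p := range peers {
        wg.Add(1)
        go func(p peerID) {
            defer wg.Done()
            if err := sendGoodbye(ctx, p); err != nil {
                fmt.Printf("goodbye to %s failed: %v\n", p, err)
            }
        }(p)
    }
    wg.Wait()
}

func main() {
    broadcastGoodbye([]peerID{"peerA", "peerB", "peerC"}, time.Second)
    fmt.Println("service stopped")
}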
Kasey Kirkham
4086b9c6c3 fusaka fork digest enr changes 2025-07-22 11:09:30 -05:00
Kasey
49c607ea84 overhaul fork schedule management for bpos 2025-07-22 11:08:59 -05:00
james-prysm
77958022e7 removing ssz-only flag ( reverting feature) and fix accept header middleware (#15433)
* removing ssz-only flag

* gaz

* reverting other uses of sszonly

* gaz

* adding kasey and radek's suggestions

* update changelog

* adding test

* radek advice with new headers and tests

* adding logs and fixing comments

* adding logs and fixing comments

* gaz

* Update validator/client/beacon-api/rest_handler_client.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/apiutil/header.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/apiutil/header.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* radek's comments

* adding another failing case based on radek's suggestion

* another unit test

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-07-22 16:06:51 +00:00
Kasey
fc78ad7c5b initialize genesis data asap at node start 2025-07-22 10:46:27 -05:00
Manu NALEPA
34a1bf835a Merge branch 'develop' into peerDAS 2025-07-22 17:42:04 +02:00
Manu NALEPA
b0bceac9c0 Implement validator custody with "go up only" according to the latest specification. (#15518)
* Simplify validator custody due to the latest spec.
(Go up only)

* Fix sync.
2025-07-22 17:41:15 +02:00
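A tiny Go sketch of the "go up only" rule as described in this commit, with illustrative names rather than Prysm's: the custody group count is only ever raised to meet the new validator-derived requirement, never lowered.

package main

import "fmt"

// updateCustodyGroupCount applies the "go up only" rule: custody may grow, never shrink.
func updateCustodyGroupCount(current, required uint64) uint64 {
    if required > current {
        return required
    }
    return current
}

func main() {
    cgc := uint64(8)
    cgc = updateCustodyGroupCount(cgc, 16) // more validators attached: raise custody
    cgc = updateCustodyGroupCount(cgc, 4)  // fewer validators: keep custody as-is
    fmt.Println(cgc)                       // 16
}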
kasey
c21fae239f don't use gzip handler for sse (#15517)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2025-07-22 13:54:25 +00:00
Bastin
deb3ba7f21 Remove unused parameter from LC functions (#15514)
* remove unused parameter

* format code
2025-07-21 13:50:21 +00:00
Manu NALEPA
f288a3c0e1 Workaround TestHostIsResolved by using "more reliable" DNS resolver. (#15515) 2025-07-21 13:37:05 +00:00
Bastin
a4ca6355d0 Abstract away LC update validation rules into the LC store (#15508)
* abstract update validation into IsBetter funcs

* Update beacon-chain/core/light-client/store.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update beacon-chain/core/light-client/store.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* clean up

* clean up

* clean up again

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-21 09:37:02 +00:00
Manu NALEPA
e95d1c54cf reconstructSaveBroadcastDataColumnSidecars: Ensure a unique reconstruction. 2025-07-18 23:48:11 +02:00
Manu NALEPA
4af3763013 Merge branch 'develop' into peerDAS 2025-07-18 22:39:57 +02:00
Manu NALEPA
cd0821d026 Subnets subscription: Avoid dynamic subscribing blocking in case not enough peers per subnets are found. (#15471)
* Subnets subscription: Avoid dynamic subscribing blocking in case not enough peers per subnets are found.

* `subscribeWithParameters`: Use struct to avoid too many function parameters (no functional changes).

* Optimise subnets search.

Currently, when we are looking for peers in, say, data column sidecar subnets 3, 6 and 7, we first look for peers in subnet 3.
If, during the crawling, we meet some peers advertising subnet 6, we discard them (because we are exclusively looking for peers in subnet 3).
Once subnet 3 is satisfied, we start over with subnet 6, and so on.

This commit optimizes that by looking for peers which satisfy our constraints in a single pass (see the sketch after this commit entry).

* Fix James' comment.

* Fix James' comment.

* Fix James' comment.

* Fix James' comment.

* Fix James' comment.

* Fix James' comment.

* Fix James's comment.

* Simplify following James' comment.

* Fix James' comment.

* Update beacon-chain/sync/rpc_goodbye.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Update config/params/config.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Update beacon-chain/sync/subscriber.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Fix Preston's comment.

* Fix Preston's comment.

* `TestService_BroadcastDataColumn`: Re-add sleep 50 ms.

* Fix Preston's comment.

* Update beacon-chain/p2p/subnets.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2025-07-18 17:19:15 +00:00
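A minimal sketch, with stand-in types, of the single-pass search described in the "Optimise subnets search" bullet above: while crawling, accept any discovered peer that covers one of the still-needed subnets, instead of crawling once per subnet.

package main

import "fmt"

type peer struct {
    id      string
    subnets map[uint64]bool // subnets advertised in the peer's ENR
}

// findPeersForSubnets returns, for each wanted subnet, up to `need` peer IDs,
// filling all subnets from a single walk over the discovered peers.
func findPeersForSubnets(discovered []peer, wanted []uint64, need int) map[uint64][]string {
    found := make(map[uint64][]string, len(wanted))
    for _, p := range discovered {
        for _, subnet := range wanted {
            if len(found[subnet]) >= need || !p.subnets[subnet] {
                continue
            }
            found[subnet] = append(found[subnet], p.id)
        }
    }
    return found
}

func main() {
    peers := []peer{
        {id: "A", subnets: map[uint64]bool{3: true}},
        {id: "B", subnets: map[uint64]bool{6: true, 7: true}},
        {id: "C", subnets: map[uint64]bool{3: true, 7: true}},
    }
    fmt.Println(findPeersForSubnets(peers, []uint64{3, 6, 7}, 1))
}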
Bastin
8b53887891 Save LC Bootstraps only on finalized checkpoints (#15497)
* Unify LC API (updates)

* Remove unused fields in LC beacon API server

* bootstraps only on checkpoints

* Update beacon-chain/blockchain/receive_block.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* move tests

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-07-18 15:53:31 +00:00
Radosław Kapka
8623a144d9 Write Content-Encoding header in the response properly when gzip encoding is requested (#15499)
* Write gzip header properly

* changelog

* regression test
2025-07-17 19:14:48 +00:00
Bastin
f3314d2d24 Abstract validation logic for saving LC updates into the store function (#15504)
* Unify LC API (updates)

* Remove unused fields in LC beacon API server

* refactor lc logic into core

* fix tests
2025-07-17 14:58:18 +00:00
Bastin
bcd65e7a4d Remove extra lc server fields (#15493)
* Unify LC API (updates)

* Remove unused fields in LC beacon API server
2025-07-17 11:50:16 +00:00
Bastin
dce89a1627 Unify LC API 2/2 (updates) (#15488)
* Unify LC API (updates)

* Update beacon-chain/core/light-client/store.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-07-17 10:59:51 +00:00
Manu NALEPA
a520db7276 Merge branch 'develop' into peerDAS 2025-07-17 10:04:04 +02:00
Manu NALEPA
09485c2062 Add bundle v2 support for submit blind block (#15503)
* Add bundle v2 support for submit blind block

* add tests

---------

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2025-07-17 07:51:28 +00:00
332 changed files with 7886 additions and 6749 deletions

View File

@@ -2,18 +2,28 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["common.go"],
srcs = [
"common.go",
"header.go",
],
importpath = "github.com/OffchainLabs/prysm/v6/api/apiutil",
visibility = ["//visibility:public"],
deps = ["//consensus-types/primitives:go_default_library"],
deps = [
"//consensus-types/primitives:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["common_test.go"],
srcs = [
"common_test.go",
"header_test.go",
],
embed = [":go_default_library"],
deps = [
"//consensus-types/primitives:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
)

api/apiutil/header.go Normal file
View File

@@ -0,0 +1,122 @@
package apiutil
import (
"mime"
"sort"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
)
type mediaRange struct {
mt string // canonicalised mediatype, e.g. "application/json"
q float64 // quality factor (0-1)
raw string // original string, useful for logging/debugging
spec int // 2=exact, 1=type/*, 0=*/*
}
func parseMediaRange(field string) (mediaRange, bool) {
field = strings.TrimSpace(field)
mt, params, err := mime.ParseMediaType(field)
if err != nil {
log.WithError(err).Debug("Failed to parse header field")
return mediaRange{}, false
}
r := mediaRange{mt: mt, q: 1, spec: 2, raw: field}
if qs, ok := params["q"]; ok {
v, err := strconv.ParseFloat(qs, 64)
if err != nil || v < 0 || v > 1 {
log.WithField("q", qs).Debug("Invalid quality factor (01)")
return mediaRange{}, false // skip invalid entry
}
r.q = v
}
switch {
case mt == "*/*":
r.spec = 0
case strings.HasSuffix(mt, "/*"):
r.spec = 1
}
return r, true
}
func hasExplicitQ(r mediaRange) bool {
return strings.Contains(strings.ToLower(r.raw), ";q=")
}
// ParseAccept returns media ranges sorted by q (desc) then specificity.
func ParseAccept(header string) []mediaRange {
if header == "" {
return []mediaRange{{mt: "*/*", q: 1, spec: 0, raw: "*/*"}}
}
var out []mediaRange
for _, field := range strings.Split(header, ",") {
if r, ok := parseMediaRange(field); ok {
out = append(out, r)
}
}
sort.SliceStable(out, func(i, j int) bool {
ei, ej := hasExplicitQ(out[i]), hasExplicitQ(out[j])
if ei != ej {
return ei // explicit beats implicit
}
if out[i].q != out[j].q {
return out[i].q > out[j].q
}
return out[i].spec > out[j].spec
})
return out
}
// Matches reports whether content type is acceptable per the header.
func Matches(header, ct string) bool {
for _, r := range ParseAccept(header) {
switch {
case r.q == 0:
continue
case r.mt == "*/*":
return true
case strings.HasSuffix(r.mt, "/*"):
if strings.HasPrefix(ct, r.mt[:len(r.mt)-1]) {
return true
}
case r.mt == ct:
return true
}
}
return false
}
// Negotiate selects the best server type according to the header.
// Returns the chosen type and true, or "", false when nothing matches.
func Negotiate(header string, serverTypes []string) (string, bool) {
for _, r := range ParseAccept(header) {
if r.q == 0 {
continue
}
for _, s := range serverTypes {
if Matches(r.mt, s) {
return s, true
}
}
}
return "", false
}
// PrimaryAcceptMatches only checks if the first accept matches
func PrimaryAcceptMatches(header, produced string) bool {
for _, r := range ParseAccept(header) {
if r.q == 0 {
continue // explicitly unacceptable, skip
}
return Matches(r.mt, produced)
}
return false
}

api/apiutil/header_test.go Normal file
View File

@@ -0,0 +1,174 @@
package apiutil
import (
"testing"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
func TestParseAccept(t *testing.T) {
type want struct {
mt string
q float64
spec int
}
cases := []struct {
name string
header string
want []want
}{
{
name: "empty header becomes */*;q=1",
header: "",
want: []want{{mt: "*/*", q: 1, spec: 0}},
},
{
name: "quality ordering then specificity",
header: "application/json;q=0.2, */*;q=0.1, application/xml;q=0.5, text/*;q=0.5",
want: []want{
{mt: "application/xml", q: 0.5, spec: 2},
{mt: "text/*", q: 0.5, spec: 1},
{mt: "application/json", q: 0.2, spec: 2},
{mt: "*/*", q: 0.1, spec: 0},
},
},
{
name: "invalid pieces are skipped",
header: "text/plain; q=boom, application/json",
want: []want{{mt: "application/json", q: 1, spec: 2}},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
got := ParseAccept(tc.header)
gotProjected := make([]want, len(got))
for i, g := range got {
gotProjected[i] = want{mt: g.mt, q: g.q, spec: g.spec}
}
require.DeepEqual(t, gotProjected, tc.want)
})
}
}
func TestMatches(t *testing.T) {
cases := []struct {
name string
accept string
ct string
matches bool
}{
{"exact match", "application/json", "application/json", true},
{"type wildcard", "application/*;q=0.8", "application/xml", true},
{"global wildcard", "*/*;q=0.1", "image/png", true},
{"explicitly unacceptable (q=0)", "text/*;q=0", "text/plain", false},
{"no match", "image/png", "application/json", false},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
got := Matches(tc.accept, tc.ct)
require.Equal(t, tc.matches, got)
})
}
}
func TestNegotiate(t *testing.T) {
cases := []struct {
name string
accept string
serverTypes []string
wantType string
ok bool
}{
{
name: "highest quality wins",
accept: "application/json;q=0.8,application/xml;q=0.9",
serverTypes: []string{"application/json", "application/xml"},
wantType: "application/xml",
ok: true,
},
{
name: "wildcard matches first server type",
accept: "*/*;q=0.5",
serverTypes: []string{"application/octet-stream", "application/json"},
wantType: "application/octet-stream",
ok: true,
},
{
name: "no acceptable type",
accept: "image/png",
serverTypes: []string{"application/json"},
wantType: "",
ok: false,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
got, ok := Negotiate(tc.accept, tc.serverTypes)
require.Equal(t, tc.ok, ok)
require.Equal(t, tc.wantType, got)
})
}
}
func TestPrimaryAcceptMatches(t *testing.T) {
tests := []struct {
name string
accept string
produced string
expect bool
}{
{
name: "prefers json",
accept: "application/json;q=0.9,application/xml",
produced: "application/json",
expect: true,
},
{
name: "wildcard application beats other wildcard",
accept: "application/*;q=0.2,*/*;q=0.1",
produced: "application/xml",
expect: true,
},
{
name: "json wins",
accept: "application/xml;q=0.8,application/json;q=0.9",
produced: "application/json",
expect: true,
},
{
name: "json loses",
accept: "application/xml;q=0.8,application/json;q=0.9,application/octet-stream;q=0.99",
produced: "application/json",
expect: false,
},
{
name: "json wins with non q option",
accept: "application/xml;q=0.8,image/png,application/json;q=0.9",
produced: "application/json",
expect: true,
},
{
name: "json not primary",
accept: "image/png,application/json",
produced: "application/json",
expect: false,
},
{
name: "absent header",
accept: "",
produced: "text/plain",
expect: true,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
got := PrimaryAcceptMatches(tc.accept, tc.produced)
require.Equal(t, got, tc.expect)
})
}
}

View File

@@ -16,7 +16,6 @@ go_library(
"//api/server/structs:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -9,7 +9,6 @@ import (
"net/url"
"path"
"regexp"
"sort"
"strconv"
"github.com/OffchainLabs/prysm/v6/api/client"
@@ -17,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
@@ -137,24 +135,6 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
return fr.ToConsensus()
}
// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
body, err := c.Get(ctx, getForkSchedulePath)
if err != nil {
return nil, errors.Wrap(err, "error requesting fork schedule")
}
fsr := &forkScheduleResponse{}
err = json.Unmarshal(body, fsr)
if err != nil {
return nil, err
}
ofs, err := fsr.OrderedForkSchedule()
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
}
return ofs, nil
}
// GetConfigSpec retrieve the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*structs.GetSpecResponse, error) {
body, err := c.Get(ctx, getConfigSpecPath)
@@ -334,31 +314,3 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToEx
}
return poolResponse, nil
}
type forkScheduleResponse struct {
Data []structs.Fork
}
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
ofs := make(forks.OrderedSchedule, 0)
for _, d := range fsr.Data {
epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
}
vSlice, err := hexutil.Decode(d.CurrentVersion)
if err != nil {
return nil, err
}
if len(vSlice) != 4 {
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
}
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: primitives.Epoch(epoch),
})
}
sort.Sort(ofs)
return ofs, nil
}

View File

@@ -50,6 +50,7 @@ go_test(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",

View File

@@ -2,6 +2,7 @@ package builder
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
@@ -13,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
@@ -1573,3 +1575,166 @@ func TestRequestLogger(t *testing.T) {
err = c.Status(ctx)
require.NoError(t, err)
}
func TestGetVersionsBlockToPayload(t *testing.T) {
tests := []struct {
name string
blockVersion int
expectedVersion int
expectedError bool
}{
{
name: "Fulu version",
blockVersion: 6, // version.Fulu
expectedVersion: 6,
expectedError: false,
},
{
name: "Deneb version",
blockVersion: 4, // version.Deneb
expectedVersion: 4,
expectedError: false,
},
{
name: "Capella version",
blockVersion: 3, // version.Capella
expectedVersion: 3,
expectedError: false,
},
{
name: "Bellatrix version",
blockVersion: 2, // version.Bellatrix
expectedVersion: 2,
expectedError: false,
},
{
name: "Unsupported version",
blockVersion: 0,
expectedError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
version, err := getVersionsBlockToPayload(tt.blockVersion)
if tt.expectedError {
assert.NotNil(t, err)
} else {
assert.NoError(t, err)
assert.Equal(t, tt.expectedVersion, version)
}
})
}
}
func TestParseBlindedBlockResponseSSZ_WithBlobsBundleV2(t *testing.T) {
c := &Client{sszEnabled: true}
// Create test payload
payload := &v1.ExecutionPayloadDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
BlockNumber: 123456,
GasLimit: 30000000,
GasUsed: 21000,
Timestamp: 1234567890,
ExtraData: []byte("test-extra-data"),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: [][]byte{},
Withdrawals: []*v1.Withdrawal{},
BlobGasUsed: 1024,
ExcessBlobGas: 2048,
}
// Create test BlobsBundleV2
bundleV2 := &v1.BlobsBundleV2{
KzgCommitments: [][]byte{make([]byte, 48), make([]byte, 48)},
Proofs: [][]byte{make([]byte, 48), make([]byte, 48)},
Blobs: [][]byte{make([]byte, 131072), make([]byte, 131072)},
}
// Test Fulu version (should use ExecutionPayloadDenebAndBlobsBundleV2)
t.Run("Fulu version with BlobsBundleV2", func(t *testing.T) {
payloadAndBlobsV2 := &v1.ExecutionPayloadDenebAndBlobsBundleV2{
Payload: payload,
BlobsBundle: bundleV2,
}
respBytes, err := payloadAndBlobsV2.MarshalSSZ()
require.NoError(t, err)
ed, bundle, err := c.parseBlindedBlockResponseSSZ(respBytes, 6) // version.Fulu
require.NoError(t, err)
require.NotNil(t, ed)
require.NotNil(t, bundle)
// Verify the bundle is BlobsBundleV2
bundleV2Result, ok := bundle.(*v1.BlobsBundleV2)
assert.Equal(t, true, ok, "Expected BlobsBundleV2 type")
require.Equal(t, len(bundleV2.KzgCommitments), len(bundleV2Result.KzgCommitments))
require.Equal(t, len(bundleV2.Proofs), len(bundleV2Result.Proofs))
require.Equal(t, len(bundleV2.Blobs), len(bundleV2Result.Blobs))
})
// Test Deneb version (should use regular BlobsBundle)
t.Run("Deneb version with regular BlobsBundle", func(t *testing.T) {
regularBundle := &v1.BlobsBundle{
KzgCommitments: bundleV2.KzgCommitments,
Proofs: bundleV2.Proofs,
Blobs: bundleV2.Blobs,
}
payloadAndBlobs := &v1.ExecutionPayloadDenebAndBlobsBundle{
Payload: payload,
BlobsBundle: regularBundle,
}
respBytes, err := payloadAndBlobs.MarshalSSZ()
require.NoError(t, err)
ed, bundle, err := c.parseBlindedBlockResponseSSZ(respBytes, 4) // version.Deneb
require.NoError(t, err)
require.NotNil(t, ed)
require.NotNil(t, bundle)
// Verify the bundle is regular BlobsBundle
regularBundleResult, ok := bundle.(*v1.BlobsBundle)
assert.Equal(t, true, ok, "Expected BlobsBundle type")
require.Equal(t, len(regularBundle.KzgCommitments), len(regularBundleResult.KzgCommitments))
})
// Test invalid SSZ data
t.Run("Invalid SSZ data", func(t *testing.T) {
invalidBytes := []byte("invalid-ssz-data")
ed, bundle, err := c.parseBlindedBlockResponseSSZ(invalidBytes, 6)
assert.NotNil(t, err)
assert.Equal(t, true, ed == nil)
assert.Equal(t, true, bundle == nil)
})
}
func TestSubmitBlindedBlock_BlobsBundlerInterface(t *testing.T) {
// Note: The full integration test is complex due to version detection logic
// The key functionality is tested in the parseBlindedBlockResponseSSZ tests above
// and in the mock service tests which verify the interface changes work correctly
t.Run("Interface signature verification", func(t *testing.T) {
// This test verifies that the SubmitBlindedBlock method signature
// has been updated to return BlobsBundler interface
client := &Client{}
// Verify the method exists with the correct signature
// by using reflection or by checking it compiles with the interface
var _ func(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error) = client.SubmitBlindedBlock
// This test passes if the signature is correct
assert.Equal(t, true, true)
})
}

View File

@@ -8,7 +8,12 @@ go_library(
],
importpath = "github.com/OffchainLabs/prysm/v6/api/server/middleware",
visibility = ["//visibility:public"],
deps = ["@com_github_rs_cors//:go_default_library"],
deps = [
"//api:go_default_library",
"//api/apiutil:go_default_library",
"@com_github_rs_cors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
@@ -22,5 +27,6 @@ go_test(
"//api:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -1,11 +1,15 @@
package middleware
import (
"compress/gzip"
"fmt"
"net/http"
"strings"
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/apiutil"
"github.com/rs/cors"
log "github.com/sirupsen/logrus"
)
type Middleware func(http.Handler) http.Handler
@@ -71,47 +75,64 @@ func ContentTypeHandler(acceptedMediaTypes []string) Middleware {
func AcceptHeaderHandler(serverAcceptedTypes []string) Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
acceptHeader := r.Header.Get("Accept")
// header is optional and should skip if not provided
if acceptHeader == "" {
if _, ok := apiutil.Negotiate(r.Header.Get("Accept"), serverAcceptedTypes); !ok {
http.Error(w, "Not Acceptable", http.StatusNotAcceptable)
return
}
next.ServeHTTP(w, r)
})
}
}
// AcceptEncodingHeaderHandler compresses the response before sending it back to the client, if gzip is supported.
func AcceptEncodingHeaderHandler() Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
next.ServeHTTP(w, r)
return
}
accepted := false
acceptTypes := strings.Split(acceptHeader, ",")
// follows rules defined in https://datatracker.ietf.org/doc/html/rfc2616#section-14.1
for _, acceptType := range acceptTypes {
acceptType = strings.TrimSpace(acceptType)
if acceptType == "*/*" {
accepted = true
break
gz := gzip.NewWriter(w)
gzipRW := &gzipResponseWriter{gz: gz, ResponseWriter: w}
defer func() {
if !gzipRW.zip {
return
}
for _, serverAcceptedType := range serverAcceptedTypes {
if strings.HasPrefix(acceptType, serverAcceptedType) {
accepted = true
break
}
if acceptType != "/*" && strings.HasSuffix(acceptType, "/*") && strings.HasPrefix(serverAcceptedType, acceptType[:len(acceptType)-2]) {
accepted = true
break
}
if err := gz.Close(); err != nil {
log.WithError(err).Error("Failed to close gzip writer")
}
if accepted {
break
}
}
}()
if !accepted {
http.Error(w, fmt.Sprintf("Not Acceptable: %s", acceptHeader), http.StatusNotAcceptable)
return
}
next.ServeHTTP(w, r)
next.ServeHTTP(gzipRW, r)
})
}
}
type gzipResponseWriter struct {
gz *gzip.Writer
http.ResponseWriter
zip bool
}
func (g *gzipResponseWriter) WriteHeader(statusCode int) {
if strings.Contains(g.Header().Get("Content-Type"), api.JsonMediaType) {
// Removing the current Content-Length because zipping will change it.
g.Header().Del("Content-Length")
g.Header().Set("Content-Encoding", "gzip")
g.zip = true
}
g.ResponseWriter.WriteHeader(statusCode)
}
func (g *gzipResponseWriter) Write(b []byte) (int, error) {
if g.zip {
return g.gz.Write(b)
}
return g.ResponseWriter.Write(b)
}
func MiddlewareChain(h http.Handler, mw []Middleware) http.Handler {
if len(mw) < 1 {
return h

View File

@@ -1,14 +1,35 @@
package middleware
import (
"bytes"
"compress/gzip"
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/testing/require"
log "github.com/sirupsen/logrus"
)
// frozenHeaderRecorder allows asserting that response headers were not modified
// after the call to WriteHeader.
//
// Its purpose is to have a regression test for https://github.com/OffchainLabs/prysm/pull/15499.
type frozenHeaderRecorder struct {
*httptest.ResponseRecorder
frozenHeader http.Header
}
func (r *frozenHeaderRecorder) WriteHeader(code int) {
if r.frozenHeader != nil {
return
}
r.ResponseRecorder.WriteHeader(code)
r.frozenHeader = r.ResponseRecorder.Header().Clone()
}
func TestNormalizeQueryValuesHandler(t *testing.T) {
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write([]byte("next handler"))
@@ -124,6 +145,90 @@ func TestContentTypeHandler(t *testing.T) {
}
}
func TestAcceptEncodingHeaderHandler(t *testing.T) {
dummyContent := "Test gzip middleware content"
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", r.Header.Get("Accept"))
w.WriteHeader(http.StatusOK)
_, err := w.Write([]byte(dummyContent))
require.NoError(t, err)
})
handler := AcceptEncodingHeaderHandler()(nextHandler)
tests := []struct {
name string
accept string
acceptEncoding string
expectCompressed bool
}{
{
name: "Accept gzip",
accept: api.JsonMediaType,
acceptEncoding: "gzip",
expectCompressed: true,
},
{
name: "Accept multiple encodings",
accept: api.JsonMediaType,
acceptEncoding: "deflate, gzip",
expectCompressed: true,
},
{
name: "Accept unsupported encoding",
accept: api.JsonMediaType,
acceptEncoding: "deflate",
expectCompressed: false,
},
{
name: "No accept encoding header",
accept: api.JsonMediaType,
acceptEncoding: "",
expectCompressed: false,
},
{
name: "SSZ",
accept: api.OctetStreamMediaType,
acceptEncoding: "gzip",
expectCompressed: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
req := httptest.NewRequest("GET", "/", nil)
req.Header.Set("Accept", tt.accept)
if tt.acceptEncoding != "" {
req.Header.Set("Accept-Encoding", tt.acceptEncoding)
}
rr := &frozenHeaderRecorder{ResponseRecorder: httptest.NewRecorder()}
handler.ServeHTTP(rr, req)
if tt.expectCompressed {
require.Equal(t, "gzip", rr.frozenHeader.Get("Content-Encoding"), "Expected Content-Encoding header to be 'gzip'")
compressedBody := rr.Body.Bytes()
require.NotEqual(t, dummyContent, string(compressedBody), "Response body should be compressed and differ from the original")
gzReader, err := gzip.NewReader(bytes.NewReader(compressedBody))
require.NoError(t, err, "Failed to create gzipReader")
defer func() {
if err := gzReader.Close(); err != nil {
log.WithError(err).Error("Failed to close gzip reader")
}
}()
decompressedBody, err := io.ReadAll(gzReader)
require.NoError(t, err, "Failed to decompress response body")
require.Equal(t, dummyContent, string(decompressedBody), "Decompressed content should match the original")
} else {
require.Equal(t, dummyContent, rr.Body.String(), "Response body should be uncompressed and match the original")
}
})
}
}
func TestAcceptHeaderHandler(t *testing.T) {
acceptedTypes := []string{"application/json", "application/octet-stream"}

View File

@@ -36,6 +36,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//container/slice:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",

View File

@@ -10,6 +10,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/consensus-types/validator"
"github.com/OffchainLabs/prysm/v6/container/slice"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
@@ -699,6 +700,11 @@ func (m *SyncCommitteeMessage) ToConsensus() (*eth.SyncCommitteeMessage, error)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
// Add validation to check if the signature is valid BLS format
_, err = bls.SignatureFromBytes(sig)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.SyncCommitteeMessage{
Slot: primitives.Slot(slot),

View File

@@ -283,3 +283,10 @@ type GetPendingPartialWithdrawalsResponse struct {
Finalized bool `json:"finalized"`
Data []*PendingPartialWithdrawal `json:"data"`
}
type GetProposerLookaheadResponse struct {
Version string `json:"version"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data []string `json:"data"` // validator indexes
}

View File

@@ -25,10 +25,10 @@ type Identity struct {
}
type Metadata struct {
SeqNumber string `json:"seq_number"`
Attnets string `json:"attnets"`
Syncnets string `json:"syncnets"`
CustodyGroupCount string `json:"custody_group_count"`
SeqNumber string `json:"seq_number"`
Attnets string `json:"attnets"`
Syncnets string `json:"syncnets,omitempty"`
Cgc string `json:"custody_group_count,omitempty"`
}
type GetPeerResponse struct {

View File

@@ -73,6 +73,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -181,6 +182,7 @@ go_test(
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//genesis:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -194,6 +196,7 @@ go_test(
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -624,6 +625,7 @@ func Test_hashForGenesisRoot(t *testing.T) {
ctx := t.Context()
c := setupBeaconChain(t, beaconDB)
st, _ := util.DeterministicGenesisStateElectra(t, 10)
genesis.StoreDuringTest(t, genesis.GenesisData{State: st})
require.NoError(t, c.cfg.BeaconDB.SaveGenesisData(ctx, st))
root, err := beaconDB.GenesisBlockRoot(ctx)
require.NoError(t, err)

View File

@@ -174,6 +174,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
"payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
}).Info("Forkchoice updated with payload attributes for proposal")
s.cfg.PayloadIDCache.Set(nextSlot, arg.headRoot, pId)
go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), arg.headBlock, arg.headRoot, nextSlot)
} else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),

View File

@@ -19,6 +19,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -309,6 +310,7 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
block: wba,
}
genesis.StoreStateDuringTest(t, st)
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
a := &fcuConfig{
@@ -403,6 +405,7 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
bState, _ := util.DeterministicGenesisState(t, 10)
genesis.StoreStateDuringTest(t, bState)
require.NoError(t, beaconDB.SaveState(ctx, bState, bra))
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)

View File

@@ -102,8 +102,6 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
log.WithError(err).Error("Could not save head")
}
go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), args.headBlock, args.headRoot, s.CurrentSlot()+1)
// Only need to prune attestations from pool if the head has changed.
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
return nil

View File

@@ -11,7 +11,7 @@ import (
)
var (
// https://github.com/ethereum/consensus-specs/blob/dev/presets/mainnet/trusted_setups/trusted_setup_4096.json
// https://github.com/ethereum/consensus-specs/blob/master/presets/mainnet/trusted_setups/trusted_setup_4096.json
//go:embed trusted_setup_4096.json
embeddedTrustedSetup []byte // 1.2Mb
kzgContext *GoKZG.Context

View File

@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
@@ -235,14 +234,6 @@ func WithSyncChecker(checker Checker) Option {
}
}
// WithCustodyInfo sets the custody info for the blockchain service.
func WithCustodyInfo(custodyInfo *peerdas.CustodyInfo) Option {
return func(s *Service) error {
s.cfg.CustodyInfo = custodyInfo
return nil
}
}
// WithSlasherEnabled sets whether the slasher is enabled or not.
func WithSlasherEnabled(enabled bool) Option {
return func(s *Service) error {

View File

@@ -666,10 +666,11 @@ func (s *Service) areDataColumnsAvailable(
root [fieldparams.RootLength]byte,
block interfaces.ReadOnlyBeaconBlock,
) error {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
return nil
}
@@ -693,13 +694,15 @@ func (s *Service) areDataColumnsAvailable(
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
nodeID := s.cfg.P2P.NodeID()
// Prevent custody group count to change during the rest of the function.
s.cfg.CustodyInfo.Mut.RLock()
defer s.cfg.CustodyInfo.Mut.RUnlock()
// Get the custody group sampling size for the node.
custodyGroupSamplingSize := s.cfg.CustodyInfo.CustodyGroupSamplingSize(peerdas.Actual)
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupSamplingSize)
custodyGroupCount := s.cfg.P2P.CustodyGroupCount()
// Compute the sampling size.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#custody-sampling
samplingSize := max(samplesPerSlot, custodyGroupCount)
// Get the peer info for the node.
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
if err != nil {
return errors.Wrap(err, "peer info")
}

View File

@@ -134,9 +134,6 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
if err := s.processLightClientUpdate(cfg); err != nil {
log.WithError(err).Error("Failed to process light client update")
}
if err := s.processLightClientBootstrap(cfg); err != nil {
log.WithError(err).Error("Failed to process light client bootstrap")
}
if err := s.processLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
log.WithError(err).Error("Failed to process light client optimistic update")
}
@@ -174,62 +171,14 @@ func (s *Service) processLightClientUpdate(cfg *postBlockProcessConfig) error {
return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
}
update, err := lightclient.NewLightClientUpdateFromBeaconState(
cfg.ctx,
s.CurrentSlot(),
cfg.postState,
cfg.roblock,
attestedState,
attestedBlock,
finalizedBlock,
)
update, err := lightclient.NewLightClientUpdateFromBeaconState(cfg.ctx, cfg.postState, cfg.roblock, attestedState, attestedBlock, finalizedBlock)
if err != nil {
return errors.Wrapf(err, "could not create light client update")
}
period := slots.SyncCommitteePeriod(slots.ToEpoch(attestedState.Slot()))
oldUpdate, err := s.cfg.BeaconDB.LightClientUpdate(cfg.ctx, period)
if err != nil {
return errors.Wrapf(err, "could not get current light client update")
}
if oldUpdate == nil {
if err := s.cfg.BeaconDB.SaveLightClientUpdate(cfg.ctx, period, update); err != nil {
return errors.Wrapf(err, "could not save light client update")
}
log.WithField("period", period).Debug("Saved new light client update")
return nil
}
isNewUpdateBetter, err := lightclient.IsBetterUpdate(update, oldUpdate)
if err != nil {
return errors.Wrapf(err, "could not compare light client updates")
}
if isNewUpdateBetter {
if err := s.cfg.BeaconDB.SaveLightClientUpdate(cfg.ctx, period, update); err != nil {
return errors.Wrapf(err, "could not save light client update")
}
log.WithField("period", period).Debug("Saved new light client update")
return nil
}
log.WithField("period", period).Debug("New light client update is not better than the current one, skipping save")
return nil
}
// processLightClientBootstrap saves a light client bootstrap for this block
// when feature flag is enabled.
func (s *Service) processLightClientBootstrap(cfg *postBlockProcessConfig) error {
blockRoot := cfg.roblock.Root()
bootstrap, err := lightclient.NewLightClientBootstrapFromBeaconState(cfg.ctx, s.CurrentSlot(), cfg.postState, cfg.roblock)
if err != nil {
return errors.Wrapf(err, "could not create light client bootstrap")
}
if err := s.lcStore.SaveLightClientBootstrap(cfg.ctx, blockRoot, bootstrap); err != nil {
return errors.Wrapf(err, "could not save light client bootstrap")
}
return nil
return s.lcStore.SaveLightClientUpdate(cfg.ctx, period, update)
}
func (s *Service) processLightClientFinalityUpdate(
@@ -264,40 +213,17 @@ func (s *Service) processLightClientFinalityUpdate(
return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
}
newUpdate, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(
ctx,
postState.Slot(),
postState,
signed,
attestedState,
attestedBlock,
finalizedBlock,
)
newUpdate, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, postState, signed, attestedState, attestedBlock, finalizedBlock)
if err != nil {
return errors.Wrap(err, "could not create light client finality update")
}
lastUpdate := s.lcStore.LastFinalityUpdate()
if lastUpdate != nil {
// The finalized_header.beacon.slot is greater than that of all previously forwarded finality_updates,
// or it matches the highest previously forwarded slot and also has a sync_aggregate indicating supermajority (> 2/3)
// sync committee participation while the previously forwarded finality_update for that slot did not indicate supermajority
newUpdateSlot := newUpdate.FinalizedHeader().Beacon().Slot
newHasSupermajority := lightclient.UpdateHasSupermajority(newUpdate.SyncAggregate())
lastUpdateSlot := lastUpdate.FinalizedHeader().Beacon().Slot
lastHasSupermajority := lightclient.UpdateHasSupermajority(lastUpdate.SyncAggregate())
if newUpdateSlot < lastUpdateSlot {
log.Debug("Skip saving light client finality newUpdate: Older than local newUpdate")
return nil
}
if newUpdateSlot == lastUpdateSlot && (lastHasSupermajority || !newHasSupermajority) {
log.Debug("Skip saving light client finality update: No supermajority advantage")
return nil
}
if !lightclient.IsBetterFinalityUpdate(newUpdate, s.lcStore.LastFinalityUpdate()) {
log.Debug("Skip saving light client finality update: current update is better")
return nil
}
log.Debug("Saving new light client finality update")
s.lcStore.SetLastFinalityUpdate(newUpdate)
@@ -325,14 +251,7 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
}
newUpdate, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(
ctx,
postState.Slot(),
postState,
signed,
attestedState,
attestedBlock,
)
newUpdate, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, postState, signed, attestedState, attestedBlock)
if err != nil {
if strings.Contains(err.Error(), lightclient.ErrNotEnoughSyncCommitteeBits) {
@@ -342,13 +261,9 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
return errors.Wrap(err, "could not create light client optimistic update")
}
lastUpdate := s.lcStore.LastOptimisticUpdate()
if lastUpdate != nil {
// The attested_header.beacon.slot is greater than that of all previously forwarded optimistic updates
if newUpdate.AttestedHeader().Beacon().Slot <= lastUpdate.AttestedHeader().Beacon().Slot {
log.Debug("Skip saving light client optimistic update: Older than local update")
return nil
}
if !lightclient.IsBetterOptimisticUpdate(newUpdate, s.lcStore.LastOptimisticUpdate()) {
log.Debug("Skip saving light client optimistic update: current update is better")
return nil
}
log.Debug("Saving new light client optimistic update")

View File

@@ -35,6 +35,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -1980,14 +1981,15 @@ func TestNoViableHead_Reboot(t *testing.T) {
genesisState, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := genesisState.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
gb := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(gb)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
genesisRoot, err := gb.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
require.NoError(t, service.saveGenesisData(ctx, genesisState))
genesis.StoreStateDuringTest(t, genesisState)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, genesisRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, genesisRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
@@ -2713,492 +2715,6 @@ func TestProcessLightClientUpdate(t *testing.T) {
featCfg := &features.Flags{}
featCfg.EnableLightClient = true
reset := features.InitWithReset(featCfg)
s, tr := minimalTestService(t)
ctx := tr.ctx
t.Run("Altair", func(t *testing.T) {
t.Run("No old update", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Altair)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
require.NoError(t, s.processLightClientUpdate(cfg))
// Check that the light client update is saved
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), version.Altair)
})
t.Run("New update is better", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Altair)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
require.NoError(t, err)
require.NoError(t, s.processLightClientUpdate(cfg))
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), version.Altair)
})
t.Run("Old update is better", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Altair)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
require.NoError(t, err)
scb := make([]byte, 64)
for i := 0; i < 5; i++ {
scb[i] = 0x01
}
oldUpdate.SetSyncAggregate(&ethpb.SyncAggregate{
SyncCommitteeBits: scb,
SyncCommitteeSignature: make([]byte, 96),
})
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
require.NoError(t, err)
require.NoError(t, s.processLightClientUpdate(cfg))
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
require.DeepEqual(t, oldUpdate, u)
require.Equal(t, u.Version(), version.Altair)
})
})
t.Run("Capella", func(t *testing.T) {
t.Run("No old update", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Capella)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
require.NoError(t, s.processLightClientUpdate(cfg))
// Check that the light client update is saved
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), version.Capella)
})
t.Run("New update is better", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Capella)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
require.NoError(t, err)
require.NoError(t, s.processLightClientUpdate(cfg))
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), version.Capella)
})
t.Run("Old update is better", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Capella)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
require.NoError(t, err)
scb := make([]byte, 64)
for i := 0; i < 5; i++ {
scb[i] = 0x01
}
oldUpdate.SetSyncAggregate(&ethpb.SyncAggregate{
SyncCommitteeBits: scb,
SyncCommitteeSignature: make([]byte, 96),
})
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
require.NoError(t, err)
require.NoError(t, s.processLightClientUpdate(cfg))
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
require.DeepEqual(t, oldUpdate, u)
require.Equal(t, u.Version(), version.Capella)
})
})
t.Run("Deneb", func(t *testing.T) {
t.Run("No old update", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Deneb)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
require.NoError(t, s.processLightClientUpdate(cfg))
// Check that the light client update is saved
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), version.Deneb)
})
t.Run("New update is better", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Deneb)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
require.NoError(t, err)
require.NoError(t, s.processLightClientUpdate(cfg))
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), version.Deneb)
})
t.Run("Old update is better", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Deneb)
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
postState: l.State,
isValidPayload: true,
}
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
require.NoError(t, err)
scb := make([]byte, 64)
for i := 0; i < 5; i++ {
scb[i] = 0x01
}
oldUpdate.SetSyncAggregate(&ethpb.SyncAggregate{
SyncCommitteeBits: scb,
SyncCommitteeSignature: make([]byte, 96),
})
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
require.NoError(t, err)
require.NoError(t, s.processLightClientUpdate(cfg))
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
require.DeepEqual(t, oldUpdate, u)
require.Equal(t, u.Version(), version.Deneb)
})
})
reset()
}
func TestProcessLightClientBootstrap(t *testing.T) {
featCfg := &features.Flags{}
featCfg.EnableLightClient = true
reset := features.InitWithReset(featCfg)
defer reset()
s, tr := minimalTestService(t, WithLCStore())
@@ -3210,6 +2726,13 @@ func TestProcessLightClientBootstrap(t *testing.T) {
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().VersionToForkEpochMap()[testVersion])*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
require.NoError(t, err)
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
require.NoError(t, err)
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
@@ -3220,6 +2743,9 @@ func TestProcessLightClientBootstrap(t *testing.T) {
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
@@ -3227,17 +2753,66 @@ func TestProcessLightClientBootstrap(t *testing.T) {
isValidPayload: true,
}
require.NoError(t, s.processLightClientBootstrap(cfg))
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// Check that the light client bootstrap is saved
b, err := s.lcStore.LightClientBootstrap(ctx, currentBlockRoot)
require.NoError(t, err)
require.NotNil(t, b)
t.Run("no old update", func(t *testing.T) {
require.NoError(t, s.processLightClientUpdate(cfg))
stateRoot, err := l.State.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
require.Equal(t, b.Version(), testVersion)
// Check that the light client update is saved
u, err := s.lcStore.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), testVersion)
})
t.Run("new update is better", func(t *testing.T) {
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
require.NoError(t, err)
err = s.lcStore.SaveLightClientUpdate(ctx, period, oldUpdate)
require.NoError(t, err)
require.NoError(t, s.processLightClientUpdate(cfg))
u, err := s.lcStore.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
require.Equal(t, u.Version(), testVersion)
})
t.Run("old update is better", func(t *testing.T) {
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
require.NoError(t, err)
// set a better sync aggregate
scb := make([]byte, 64)
for i := 0; i < 5; i++ {
scb[i] = 0x01
}
oldUpdate.SetSyncAggregate(&ethpb.SyncAggregate{
SyncCommitteeBits: scb,
SyncCommitteeSignature: make([]byte, 96),
})
err = s.lcStore.SaveLightClientUpdate(ctx, period, oldUpdate)
require.NoError(t, err)
require.NoError(t, s.processLightClientUpdate(cfg))
u, err := s.lcStore.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.NotNil(t, u)
require.DeepEqual(t, oldUpdate, u)
require.Equal(t, u.Version(), testVersion)
})
})
}
}
@@ -3321,7 +2896,6 @@ func TestIsDataAvailable(t *testing.T) {
}
params := testIsAvailableParams{
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
columnsToSave: indices,
blobKzgCommitmentsCount: 3,
}
@@ -3334,7 +2908,6 @@ func TestIsDataAvailable(t *testing.T) {
t.Run("Fulu - no missing data columns", func(t *testing.T) {
params := testIsAvailableParams{
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
columnsToSave: []uint64{1, 17, 19, 42, 75, 87, 102, 117, 119}, // 119 is not needed
blobKzgCommitmentsCount: 3,
}
@@ -3349,7 +2922,7 @@ func TestIsDataAvailable(t *testing.T) {
startWaiting := make(chan bool)
testParams := testIsAvailableParams{
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{}), WithStartWaitingDataColumnSidecars(startWaiting)},
options: []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
columnsToSave: []uint64{1, 17, 19, 75, 102, 117, 119}, // 119 is not needed, 42 and 87 are missing
blobKzgCommitmentsCount: 3,
@@ -3386,6 +2959,9 @@ func TestIsDataAvailable(t *testing.T) {
require.NoError(t, err)
}()
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
defer cancel()
err = service.isDataAvailable(ctx, root, signed)
require.NoError(t, err)
})
@@ -3398,10 +2974,6 @@ func TestIsDataAvailable(t *testing.T) {
startWaiting := make(chan bool)
var custodyInfo peerdas.CustodyInfo
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(cgc)
custodyInfo.ToAdvertiseGroupCount.Set(cgc)
minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct-missingColumns)
@@ -3410,12 +2982,14 @@ func TestIsDataAvailable(t *testing.T) {
}
testParams := testIsAvailableParams{
options: []Option{WithCustodyInfo(&custodyInfo), WithStartWaitingDataColumnSidecars(startWaiting)},
options: []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
columnsToSave: indices,
blobKzgCommitmentsCount: 3,
}
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
_, _, err := service.cfg.P2P.UpdateCustodyInfo(0, cgc)
require.NoError(t, err)
block := signed.Block()
slot := block.Slot()
proposerIndex := block.ProposerIndex()
@@ -3447,6 +3021,9 @@ func TestIsDataAvailable(t *testing.T) {
require.NoError(t, err)
}()
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
defer cancel()
err = service.isDataAvailable(ctx, root, signed)
require.NoError(t, err)
})
@@ -3455,7 +3032,7 @@ func TestIsDataAvailable(t *testing.T) {
startWaiting := make(chan bool)
params := testIsAvailableParams{
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{}), WithStartWaitingDataColumnSidecars(startWaiting)},
options: []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
blobKzgCommitmentsCount: 3,
}
@@ -3606,14 +3183,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
lOld, cfgOld := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.oldOptions...)
require.NoError(t, s.processLightClientOptimisticUpdate(cfgOld.ctx, cfgOld.roblock, cfgOld.postState))
oldActualUpdate, err = lightClient.NewLightClientOptimisticUpdateFromBeaconState(
lOld.Ctx,
lOld.State.Slot(),
lOld.State,
lOld.Block,
lOld.AttestedState,
lOld.AttestedBlock,
)
oldActualUpdate, err = lightClient.NewLightClientOptimisticUpdateFromBeaconState(lOld.Ctx, lOld.State, lOld.Block, lOld.AttestedState, lOld.AttestedBlock)
require.NoError(t, err)
// check that the old update is saved
@@ -3627,14 +3197,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
lNew, cfgNew := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.newOptions...)
require.NoError(t, s.processLightClientOptimisticUpdate(cfgNew.ctx, cfgNew.roblock, cfgNew.postState))
newActualUpdate, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(
lNew.Ctx,
lNew.State.Slot(),
lNew.State,
lNew.Block,
lNew.AttestedState,
lNew.AttestedBlock,
)
newActualUpdate, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lNew.Ctx, lNew.State, lNew.Block, lNew.AttestedState, lNew.AttestedBlock)
require.NoError(t, err)
require.DeepNotEqual(t, newActualUpdate, oldActualUpdate, "new update should not be equal to old update")
@@ -3762,15 +3325,7 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
require.NoError(t, s.processLightClientFinalityUpdate(cfgOld.ctx, cfgOld.roblock, cfgOld.postState))
// check that the old update is saved
actualOldUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(
ctx,
cfgOld.postState.Slot(),
cfgOld.postState,
cfgOld.roblock,
lOld.AttestedState,
lOld.AttestedBlock,
lOld.FinalizedBlock,
)
actualOldUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(ctx, cfgOld.postState, cfgOld.roblock, lOld.AttestedState, lOld.AttestedBlock, lOld.FinalizedBlock)
require.NoError(t, err)
oldUpdate := s.lcStore.LastFinalityUpdate()
require.DeepEqual(t, actualOldUpdate, oldUpdate)
@@ -3781,15 +3336,7 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
require.NoError(t, s.processLightClientFinalityUpdate(cfgNew.ctx, cfgNew.roblock, cfgNew.postState))
// check that the actual old update and the actual new update are different
actualNewUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(
ctx,
cfgNew.postState.Slot(),
cfgNew.postState,
cfgNew.roblock,
lNew.AttestedState,
lNew.AttestedBlock,
lNew.FinalizedBlock,
)
actualNewUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(ctx, cfgNew.postState, cfgNew.roblock, lNew.AttestedState, lNew.AttestedBlock, lNew.FinalizedBlock)
require.NoError(t, err)
require.DeepNotEqual(t, actualOldUpdate, actualNewUpdate)

View File

@@ -177,7 +177,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
for _, a := range atts {
// Based on the spec, don't process the attestation until the subsequent slot.
// This delays consideration in the fork choice until their slot is in the past.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/fork-choice.md#validate_on_attestation
nextSlot := a.GetData().Slot + 1
if err := slots.VerifyTime(s.genesisTime, nextSlot, disparity); err != nil {
continue

View File

@@ -300,15 +300,30 @@ func (s *Service) reportPostBlockProcessing(
func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
// Send finalization event
go func() {
s.sendNewFinalizedEvent(ctx, finalizedState)
}()
// Insert finalized deposits into finalized deposit trie
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
s.insertFinalizedDepositsAndPrune(depCtx, finalized.Root)
cancel()
}()
if features.Get().EnableLightClient {
// Save a light client bootstrap for the finalized checkpoint
go func() {
err := s.lcStore.SaveLightClientBootstrap(s.ctx, finalized.Root)
if err != nil {
log.WithError(err).Error("Could not save light client bootstrap by block root")
} else {
log.Debugf("Saved light client bootstrap for finalized root %#x", finalized.Root)
}
}()
}
}
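
The deposit task above is given its own background context with a deadline, so it is not cut short if the caller's context is cancelled. A minimal, hedged sketch of that fire-and-forget-with-deadline pattern follows; depositDeadline and the task body here are stand-ins, not the real values.

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	depositDeadline := 20 * time.Second // hypothetical deadline; the real constant lives elsewhere

	// Derive the timeout from context.Background(), not from the request context,
	// so cancelling the caller does not abort the background task.
	depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
	go func() {
		defer cancel()
		// Stand-in for insertFinalizedDepositsAndPrune(depCtx, root).
		select {
		case <-depCtx.Done():
			fmt.Println("task stopped:", depCtx.Err())
		case <-time.After(10 * time.Millisecond):
			fmt.Println("task finished")
		}
	}()

	time.Sleep(50 * time.Millisecond) // give the goroutine time to run in this sketch
}
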
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning

View File

@@ -9,7 +9,9 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/voluntaryexits"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -19,6 +21,7 @@ import (
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpbv1 "github.com/OffchainLabs/prysm/v6/proto/eth/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -564,3 +567,48 @@ func Test_executePostFinalizationTasks(t *testing.T) {
})
}
func TestProcessLightClientBootstrap(t *testing.T) {
featCfg := &features.Flags{}
featCfg.EnableLightClient = true
reset := features.InitWithReset(featCfg)
defer reset()
s, tr := minimalTestService(t, WithLCStore())
ctx := tr.ctx
for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
t.Run(version.String(testVersion), func(t *testing.T) {
l := util.NewTestLightClient(t, testVersion)
require.NoError(t, s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock))
finalizedBlockRoot, err := l.FinalizedBlock.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.cfg.BeaconDB.SaveState(ctx, l.FinalizedState, finalizedBlockRoot))
cp := l.AttestedState.FinalizedCheckpoint()
require.DeepSSZEqual(t, finalizedBlockRoot, [32]byte(cp.Root))
require.NoError(t, s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: cp.Epoch, Root: [32]byte(cp.Root)}))
sss, err := s.cfg.BeaconDB.State(ctx, finalizedBlockRoot)
require.NoError(t, err)
require.NotNil(t, sss)
s.executePostFinalizationTasks(s.ctx, l.FinalizedState)
// wait for the goroutine to finish processing
time.Sleep(1 * time.Second)
// Check that the light client bootstrap is saved
b, err := s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
require.NoError(t, err)
require.NotNil(t, b)
btst, err := lightClient.NewLightClientBootstrapFromBeaconState(ctx, l.FinalizedState.Slot(), l.FinalizedState, l.FinalizedBlock)
require.NoError(t, err)
require.DeepEqual(t, btst, b)
require.Equal(t, b.Version(), testVersion)
})
}
}

View File

@@ -15,7 +15,6 @@ func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataC
}
// ReceiveDataColumn receives a single data column.
// (It is only a wrapper around ReceiveDataColumns.)
func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
return errors.Wrap(err, "save data column sidecars")

View File

@@ -12,11 +12,9 @@ import (
"github.com/OffchainLabs/prysm/v6/async/event"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
@@ -31,6 +29,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -98,7 +97,6 @@ type config struct {
FinalizedStateAtStartUp state.BeaconState
ExecutionEngineCaller execution.EngineCaller
SyncChecker Checker
CustodyInfo *peerdas.CustodyInfo
}
// Checker is an interface used to determine if a node is in initial sync
@@ -220,17 +218,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
// Start a blockchain service's main event loop.
func (s *Service) Start() {
saved := s.cfg.FinalizedStateAtStartUp
defer s.removeStartupState()
if saved != nil && !saved.IsNil() {
if err := s.StartFromSavedState(saved); err != nil {
log.Fatal(err)
}
} else {
if err := s.startFromExecutionChain(); err != nil {
log.Fatal(err)
}
if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
log.Fatal(err)
}
s.spawnProcessAttestationsRoutine()
go s.runLateBlockTasks()
@@ -279,6 +269,9 @@ func (s *Service) Status() error {
// StartFromSavedState initializes the blockchain using a previously saved finalized checkpoint.
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if state.IsNil(saved) {
return errors.New("Last finalized state at startup is nil")
}
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = saved.GenesisTime()
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())
@@ -308,6 +301,16 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
return errors.Wrap(err, "failed to initialize blockchain service")
}
earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(saved.Slot())
if err != nil {
return errors.Wrap(err, "could not get and save custody group count")
}
if _, _, err := s.cfg.P2P.UpdateCustodyInfo(earliestAvailableSlot, custodySubnetCount); err != nil {
return errors.Wrap(err, "update custody info")
}
return nil
}
@@ -370,62 +373,6 @@ func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) erro
return nil
}
func (s *Service) startFromExecutionChain() error {
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
if s.cfg.ChainStartFetcher == nil {
return errors.New("not configured execution chain")
}
go func() {
stateChannel := make(chan *feed.Event, 1)
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
for {
select {
case e := <-stateChannel:
if e.Type == statefeed.ChainStarted {
data, ok := e.Data.(*statefeed.ChainStartedData)
if !ok {
log.Error("Event data is not type *statefeed.ChainStartedData")
return
}
log.WithField("startTime", data.StartTime).Debug("Received chain start event")
s.onExecutionChainStart(s.ctx, data.StartTime)
return
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
case err := <-stateSub.Err():
log.WithError(err).Error("Subscription to state forRoot failed")
return
}
}
}()
return nil
}
// onExecutionChainStart initializes a series of deposits from the ChainStart deposits in the eth1
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Time) {
preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
if err != nil {
log.WithError(err).Fatal("Could not initialize beacon chain")
}
// We start a counter to genesis, if needed.
gRoot, err := initializedState.HashTreeRoot(s.ctx)
if err != nil {
log.WithError(err).Fatal("Could not hash tree root genesis state")
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
log.WithError(err).Fatal("Failed to initialize blockchain service from execution start event")
}
}
// initializes the state and genesis block of the beacon chain to persistent storage
// based on a genesis timestamp value obtained from the ChainStart event emitted
// by the ETH1.0 Deposit Contract and the POWChain service of the node.
@@ -528,6 +475,57 @@ func (s *Service) removeStartupState() {
s.cfg.FinalizedStateAtStartUp = nil
}
// updateCustodyInfoInDB updates the custody information in the database.
// It returns the earliest available slot and the (potentially updated) custody group count.
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
beaconConfig := params.BeaconConfig()
custodyRequirement := beaconConfig.CustodyRequirement
// Check if the node was previously subscribed to all data subnets, and if so,
// store the new status accordingly.
wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
if err != nil {
log.WithError(err).Error("Could not update subscription status to all data subnets")
}
// Warn the user if the node was previously subscribed to all data subnets and is not any more.
if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
log.Warnf(
"Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
flags.SubscribeAllDataSubnets.Name,
)
}
// Compute the custody group count.
custodyGroupCount := custodyRequirement
if isSubscribedToAllDataSubnets {
custodyGroupCount = beaconConfig.NumberOfColumns
}
// Safely compute the fulu fork slot.
fuluForkSlot, err := fuluForkSlot()
if err != nil {
return 0, 0, errors.Wrap(err, "fulu fork slot")
}
// If slot is before the fulu fork slot, then use the earliest stored slot as the reference slot.
if slot < fuluForkSlot {
slot, err = s.cfg.BeaconDB.EarliestSlot(s.ctx)
if err != nil {
return 0, 0, errors.Wrap(err, "earliest slot")
}
}
custodyGroupCount, earliestAvailableSlot, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, custodyGroupCount, slot)
if err != nil {
return 0, 0, errors.Wrap(err, "update custody info")
}
return earliestAvailableSlot, custodyGroupCount, nil
}
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
currentTime := prysmTime.Now()
if currentTime.After(genesisTime) {
@@ -544,3 +542,19 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
}
func fuluForkSlot() (primitives.Slot, error) {
beaconConfig := params.BeaconConfig()
fuluForkEpoch := beaconConfig.FuluForkEpoch
if fuluForkEpoch == beaconConfig.FarFutureEpoch {
return beaconConfig.FarFutureSlot, nil
}
forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
if err != nil {
return 0, errors.Wrap(err, "epoch start")
}
return forkFuluSlot, nil
}
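
For orientation, a minimal, self-contained sketch of the same epoch-to-slot conversion with the unscheduled-fork guard, using hypothetical values (SLOTS_PER_EPOCH = 32 and an illustrative fork epoch); the real code goes through params.BeaconConfig() and slots.EpochStart as shown above, which also handles overflow.

package main

import (
	"fmt"
	"math"
)

func main() {
	const slotsPerEpoch = 32
	const farFutureEpoch uint64 = math.MaxUint64 // sentinel for "fork not scheduled"

	forkEpoch := uint64(512) // hypothetical Fulu fork epoch

	var forkSlot uint64
	if forkEpoch == farFutureEpoch {
		forkSlot = math.MaxUint64 // treat as a far-future slot
	} else {
		forkSlot = forkEpoch * slotsPerEpoch // EpochStart(epoch) = epoch * SLOTS_PER_EPOCH
	}
	fmt.Println(forkSlot) // 16384
}
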

View File

@@ -31,6 +31,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/trie"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -51,6 +52,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
srv.Stop()
})
bState, _ := util.DeterministicGenesisState(t, 10)
genesis.StoreStateDuringTest(t, bState)
pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProtoUnsafe())
require.NoError(t, err)
mockTrie, err := trie.NewTrie(0)
@@ -71,20 +73,22 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
DepositContainers: []*ethpb.DepositContainer{},
})
require.NoError(t, err)
depositCache, err := depositsnapshot.New()
require.NoError(t, err)
web3Service, err = execution.NewService(
ctx,
execution.WithDatabase(beaconDB),
execution.WithHttpEndpoint(endpoint),
execution.WithDepositContractAddress(common.Address{}),
execution.WithDepositCache(depositCache),
)
require.NoError(t, err, "Unable to set up web3 service")
attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
require.NoError(t, err)
depositCache, err := depositsnapshot.New()
require.NoError(t, err)
fc := doublylinkedtree.New()
stateGen := stategen.New(beaconDB, fc)
// Save a state in stategen for the purposes of testing a service stop / shutdown.
@@ -396,24 +400,6 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(s.ctx, r))
}
func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
ctx := t.Context()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
mgs := &MockClockSetter{}
service.clockSetter = mgs
gt := time.Now()
service.onExecutionChainStart(t.Context(), gt)
gs, err := beaconDB.GenesisState(ctx)
require.NoError(t, err)
require.NotEqual(t, nil, gs)
require.Equal(t, 32, len(gs.GenesisValidatorsRoot()))
var zero [32]byte
require.DeepNotEqual(t, gs.GenesisValidatorsRoot(), zero[:])
require.Equal(t, gt, mgs.G.GenesisTime())
require.Equal(t, bytesutil.ToBytes32(gs.GenesisValidatorsRoot()), mgs.G.GenesisValidatorsRoot())
}
func BenchmarkHasBlockDB(b *testing.B) {
ctx := b.Context()
s := testServiceWithDB(b)

View File

@@ -30,6 +30,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/libp2p/go-libp2p/core/peer"
"google.golang.org/protobuf/proto"
)
@@ -54,6 +55,7 @@ type mockBroadcaster struct {
type mockAccessor struct {
mockBroadcaster
mockDataColumnsHandler
p2pTesting.MockPeerManager
}
@@ -87,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}
func (mb *mockBroadcaster) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar, _ ...chan<- bool) error {
func (mb *mockBroadcaster) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
mb.broadcastCalled = true
return nil
}
@@ -97,6 +99,43 @@ func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.Sig
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
// mockDataColumnsHandler is a mock implementation of p2p.DataColumnsHandler
type mockDataColumnsHandler struct {
mut sync.RWMutex
earliestAvailableSlot primitives.Slot
custodyGroupCount uint64
}
func (dch *mockDataColumnsHandler) EarliestAvailableSlot() primitives.Slot {
dch.mut.RLock()
defer dch.mut.RUnlock()
return dch.earliestAvailableSlot
}
func (dch *mockDataColumnsHandler) CustodyGroupCount() uint64 {
dch.mut.RLock()
defer dch.mut.RUnlock()
return dch.custodyGroupCount
}
func (dch *mockDataColumnsHandler) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
dch.mut.Lock()
defer dch.mut.Unlock()
dch.earliestAvailableSlot = earliestAvailableSlot
dch.custodyGroupCount = custodyGroupCount
return earliestAvailableSlot, custodyGroupCount, nil
}
func (dch *mockDataColumnsHandler) CustodyGroupCountFromPeer(peer.ID) uint64 {
return 0
}
var _ p2p.DataColumnsHandler = (*mockDataColumnsHandler)(nil)
type testServiceRequirements struct {
ctx context.Context
db db.Database

View File

@@ -68,7 +68,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
log.WithError(err).Error("Failed to check builder status")
} else {
log.WithField("endpoint", s.c.NodeURL()).Info("Builder has been configured")
log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
"Builder-constructed blocks or fallback blocks may get orphaned. Use at your own risk!")
}
}

View File

@@ -29,6 +29,7 @@ type MockBuilderService struct {
PayloadCapella *v1.ExecutionPayloadCapella
PayloadDeneb *v1.ExecutionPayloadDeneb
BlobBundle *v1.BlobsBundle
BlobBundleV2 *v1.BlobsBundleV2
ErrSubmitBlindedBlock error
Bid *ethpb.SignedBuilderBid
BidCapella *ethpb.SignedBuilderBidCapella
@@ -66,6 +67,16 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
}
return w, s.BlobBundle, s.ErrSubmitBlindedBlock
case version.Fulu:
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb)
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap deneb payload for fulu")
}
// For Fulu, return BlobsBundleV2 if available, otherwise regular BlobsBundle
if s.BlobBundleV2 != nil {
return w, s.BlobBundleV2, s.ErrSubmitBlindedBlock
}
return w, s.BlobBundle, s.ErrSubmitBlindedBlock
default:
return nil, nil, errors.New("unknown block version for mocking")
}

View File

@@ -41,7 +41,6 @@ go_library(
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",

View File

@@ -11,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -98,7 +97,7 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
currentEpoch := slots.ToEpoch(header.Header.Slot)
fork, err := forks.Fork(currentEpoch)
fork, err := params.Fork(currentEpoch)
if err != nil {
return err
}
@@ -119,7 +118,7 @@ func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, h
// via the respective epoch.
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
currentEpoch := slots.ToEpoch(blk.Block().Slot())
fork, err := forks.Fork(currentEpoch)
fork, err := params.Fork(currentEpoch)
if err != nil {
return err
}

View File

@@ -206,9 +206,9 @@ func ParseWeakSubjectivityInputString(wsCheckpointString string) (*v1alpha1.Chec
// MinEpochsForBlockRequests computes the number of epochs of block history that we need to maintain,
// relative to the current epoch, per the p2p specs. This is used to compute the slot where backfill is complete.
// value defined:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#configuration
// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#configuration
// MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33024, ~5 months)
// detailed rationale: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
// detailed rationale: https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
func MinEpochsForBlockRequests() primitives.Epoch {
return params.BeaconConfig().MinValidatorWithdrawabilityDelay +
primitives.Epoch(params.BeaconConfig().ChurnLimitQuotient/2)
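
As a sanity check on the formula above, a minimal sketch reproduces the 33024-epoch figure, assuming the mainnet values MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256 and CHURN_LIMIT_QUOTIENT = 65536 (not shown in this diff).

package main

import "fmt"

func main() {
	// Hypothetical mainnet constants; the real values come from params.BeaconConfig().
	const minValidatorWithdrawabilityDelay = 256
	const churnLimitQuotient = 65536

	// MIN_EPOCHS_FOR_BLOCK_REQUESTS = MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2
	minEpochs := minValidatorWithdrawabilityDelay + churnLimitQuotient/2
	fmt.Println(minEpochs) // 33024

	// Rough duration: 33024 epochs * 32 slots * 12 s, in days.
	fmt.Println(minEpochs * 32 * 12 / 86400) // 146 days, i.e. ~5 months
}
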

View File

@@ -292,7 +292,7 @@ func TestMinEpochsForBlockRequests(t *testing.T) {
params.SetActiveTestCleanup(t, params.MainnetConfig())
var expected primitives.Epoch = 33024
// expected value of 33024 via spec commentary:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
// MIN_EPOCHS_FOR_BLOCK_REQUESTS is calculated using the arithmetic from compute_weak_subjectivity_period found in the weak subjectivity guide. Specifically to find this max epoch range, we use the worst case event of a very large validator size (>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT).
//
// MIN_EPOCHS_FOR_BLOCK_REQUESTS = (

View File

@@ -26,6 +26,7 @@ go_library(
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -26,14 +26,12 @@ const ErrNotEnoughSyncCommitteeBits = "sync committee bits count is less than re
func NewLightClientFinalityUpdateFromBeaconState(
ctx context.Context,
currentSlot primitives.Slot,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState,
attestedBlock interfaces.ReadOnlySignedBeaconBlock,
finalizedBlock interfaces.ReadOnlySignedBeaconBlock,
) (interfaces.LightClientFinalityUpdate, error) {
update, err := NewLightClientUpdateFromBeaconState(ctx, currentSlot, state, block, attestedState, attestedBlock, finalizedBlock)
finalizedBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.LightClientFinalityUpdate, error) {
update, err := NewLightClientUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock, finalizedBlock)
if err != nil {
return nil, err
}
@@ -43,13 +41,11 @@ func NewLightClientFinalityUpdateFromBeaconState(
func NewLightClientOptimisticUpdateFromBeaconState(
ctx context.Context,
currentSlot primitives.Slot,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState,
attestedBlock interfaces.ReadOnlySignedBeaconBlock,
) (interfaces.LightClientOptimisticUpdate, error) {
update, err := NewLightClientUpdateFromBeaconState(ctx, currentSlot, state, block, attestedState, attestedBlock, nil)
attestedBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.LightClientOptimisticUpdate, error) {
update, err := NewLightClientUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock, nil)
if err != nil {
return nil, err
}
@@ -66,7 +62,6 @@ func NewLightClientOptimisticUpdateFromBeaconState(
// if locally available (may be unavailable, e.g., when using checkpoint sync, or if it was pruned locally)
func NewLightClientUpdateFromBeaconState(
ctx context.Context,
currentSlot primitives.Slot,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState,
@@ -521,8 +516,7 @@ func ComputeWithdrawalsRoot(payload interfaces.ExecutionData) ([]byte, error) {
func BlockToLightClientHeader(
ctx context.Context,
attestedBlockVersion int, // this is the version that the light client header should be in, based on the attested block.
block interfaces.ReadOnlySignedBeaconBlock, // this block is either the attested block, or the finalized block.
// in case of the latter, we might need to upgrade it to the attested block's version.
block interfaces.ReadOnlySignedBeaconBlock, // this block is either the attested block, or the finalized block. In case of the latter, we might need to upgrade it to the attested block's version.
) (interfaces.LightClientHeader, error) {
if block.Version() > attestedBlockVersion {
return nil, errors.Errorf("block version %s is greater than attested block version %s", version.String(block.Version()), version.String(attestedBlockVersion))
@@ -755,3 +749,33 @@ func UpdateHasSupermajority(syncAggregate *pb.SyncAggregate) bool {
numActiveParticipants := syncAggregate.SyncCommitteeBits.Count()
return numActiveParticipants*3 >= maxActiveParticipants*2
}
func IsBetterFinalityUpdate(newUpdate, oldUpdate interfaces.LightClientFinalityUpdate) bool {
if oldUpdate == nil {
return true
}
// The finalized_header.beacon.slot is greater than that of all previously forwarded finality_updates,
// or it matches the highest previously forwarded slot and also has a sync_aggregate indicating supermajority (> 2/3)
// sync committee participation while the previously forwarded finality_update for that slot did not indicate supermajority
newUpdateSlot := newUpdate.FinalizedHeader().Beacon().Slot
newHasSupermajority := UpdateHasSupermajority(newUpdate.SyncAggregate())
lastUpdateSlot := oldUpdate.FinalizedHeader().Beacon().Slot
lastHasSupermajority := UpdateHasSupermajority(oldUpdate.SyncAggregate())
if newUpdateSlot < lastUpdateSlot {
return false
}
if newUpdateSlot == lastUpdateSlot && (lastHasSupermajority || !newHasSupermajority) {
return false
}
return true
}
func IsBetterOptimisticUpdate(newUpdate, oldUpdate interfaces.LightClientOptimisticUpdate) bool {
if oldUpdate == nil {
return true
}
// The attested_header.beacon.slot is greater than that of all previously forwarded optimistic updates
return newUpdate.AttestedHeader().Beacon().Slot > oldUpdate.AttestedHeader().Beacon().Slot
}
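
The two gossip-forwarding rules above are easy to exercise in isolation. A minimal sketch follows, using plain integers for slots and hypothetical participation counts rather than the production interfaces, so it illustrates the rules only, not the real types.

package main

import "fmt"

// hasSupermajority mirrors UpdateHasSupermajority: > 2/3 of the committee participated.
func hasSupermajority(active, committeeSize uint64) bool {
	return active*3 >= committeeSize*2
}

// isBetterFinality mirrors IsBetterFinalityUpdate on plain values:
// a higher finalized slot always wins; an equal slot wins only if it newly reaches supermajority.
func isBetterFinality(newSlot, oldSlot uint64, newSuper, oldSuper bool) bool {
	if newSlot < oldSlot {
		return false
	}
	if newSlot == oldSlot && (oldSuper || !newSuper) {
		return false
	}
	return true
}

func main() {
	const committee = 512 // hypothetical sync committee size

	fmt.Println(hasSupermajority(342, committee)) // true: 342*3 = 1026 >= 1024
	fmt.Println(hasSupermajority(341, committee)) // false: 341*3 = 1023 < 1024

	fmt.Println(isBetterFinality(101, 100, false, true))  // true: strictly newer finalized slot
	fmt.Println(isBetterFinality(100, 100, true, false))  // true: same slot, newly reaches supermajority
	fmt.Println(isBetterFinality(100, 100, false, false)) // false: same slot, still no supermajority
}
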

View File

@@ -34,7 +34,7 @@ func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T)
t.Run("Altair", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Altair)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
require.Equal(t, l.Block.Block().Slot(), update.SignatureSlot(), "Signature slot is not equal")
@@ -46,7 +46,7 @@ func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T)
t.Run("Capella", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Capella)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -59,7 +59,7 @@ func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T)
t.Run("Deneb", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Deneb)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -72,7 +72,7 @@ func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T)
t.Run("Electra", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Electra)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -97,7 +97,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
l := util.NewTestLightClient(t, version.Altair)
t.Run("FinalizedBlock Not Nil", func(t *testing.T) {
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -132,7 +132,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
t.Run("FinalizedBlock Not Nil", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Capella)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -206,7 +206,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
t.Run("FinalizedBlock In Previous Fork", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Capella, util.WithFinalizedCheckpointInPrevFork())
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -240,7 +240,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
t.Run("FinalizedBlock Not Nil", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Deneb)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -315,7 +315,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
t.Run("FinalizedBlock In Previous Fork", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Deneb, util.WithFinalizedCheckpointInPrevFork())
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -392,7 +392,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
t.Run("FinalizedBlock Not Nil", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Electra)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
@@ -467,7 +467,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
t.Run("FinalizedBlock In Previous Fork", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Electra, util.WithFinalizedCheckpointInPrevFork())
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")

View File

@@ -7,6 +7,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
var ErrLightClientBootstrapNotFound = errors.New("light client bootstrap not found")
@@ -41,10 +42,31 @@ func (s *Store) LightClientBootstrap(ctx context.Context, blockRoot [32]byte) (i
return bootstrap, nil
}
func (s *Store) SaveLightClientBootstrap(ctx context.Context, blockRoot [32]byte, bootstrap interfaces.LightClientBootstrap) error {
func (s *Store) SaveLightClientBootstrap(ctx context.Context, blockRoot [32]byte) error {
s.mu.Lock()
defer s.mu.Unlock()
blk, err := s.beaconDB.Block(ctx, blockRoot)
if err != nil {
return errors.Wrapf(err, "failed to fetch block for root %x", blockRoot)
}
if blk == nil {
return errors.Errorf("failed to fetch block for root %x", blockRoot)
}
state, err := s.beaconDB.State(ctx, blockRoot)
if err != nil {
return errors.Wrapf(err, "failed to fetch state for block root %x", blockRoot)
}
if state == nil {
return errors.Errorf("failed to fetch state for block root %x", blockRoot)
}
bootstrap, err := NewLightClientBootstrapFromBeaconState(ctx, state.Slot(), state, blk)
if err != nil {
return errors.Wrapf(err, "failed to create light client bootstrap for block root %x", blockRoot)
}
// Save the light client bootstrap to the database
if err := s.beaconDB.SaveLightClientBootstrap(ctx, blockRoot[:], bootstrap); err != nil {
return err
@@ -52,6 +74,75 @@ func (s *Store) SaveLightClientBootstrap(ctx context.Context, blockRoot [32]byte
return nil
}
func (s *Store) LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64) ([]interfaces.LightClientUpdate, error) {
s.mu.RLock()
defer s.mu.RUnlock()
// Fetch the light client updates map from the database
updatesMap, err := s.beaconDB.LightClientUpdates(ctx, startPeriod, endPeriod)
if err != nil {
return nil, err
}
var updates []interfaces.LightClientUpdate
for i := startPeriod; i <= endPeriod; i++ {
update, ok := updatesMap[i]
if !ok {
// Only return the first contiguous range of updates
break
}
updates = append(updates, update)
}
return updates, nil
}
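
To make the "first contiguous range" behaviour above concrete, here is a minimal, hypothetical Go sketch: interfaces.LightClientUpdate is replaced by a string placeholder, and a gap in the stored periods truncates the result exactly as the loop above does.

package main

import "fmt"

// firstContiguous mirrors the loop in LightClientUpdates: it walks periods
// startPeriod..endPeriod and stops at the first missing entry.
func firstContiguous(updates map[uint64]string, startPeriod, endPeriod uint64) []string {
	var out []string
	for p := startPeriod; p <= endPeriod; p++ {
		u, ok := updates[p]
		if !ok {
			break // only the first contiguous range is returned
		}
		out = append(out, u)
	}
	return out
}

func main() {
	stored := map[uint64]string{5: "u5", 6: "u6", 8: "u8"} // period 7 is missing
	fmt.Println(firstContiguous(stored, 5, 9))             // [u5 u6] -- stops at the gap
}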
func (s *Store) LightClientUpdate(ctx context.Context, period uint64) (interfaces.LightClientUpdate, error) {
s.mu.RLock()
defer s.mu.RUnlock()
// Fetch the light client update for the given period from the database
update, err := s.beaconDB.LightClientUpdate(ctx, period)
if err != nil {
return nil, err
}
return update, nil
}
func (s *Store) SaveLightClientUpdate(ctx context.Context, period uint64, update interfaces.LightClientUpdate) error {
s.mu.Lock()
defer s.mu.Unlock()
oldUpdate, err := s.beaconDB.LightClientUpdate(ctx, period)
if err != nil {
return errors.Wrapf(err, "could not get current light client update")
}
if oldUpdate == nil {
if err := s.beaconDB.SaveLightClientUpdate(ctx, period, update); err != nil {
return errors.Wrapf(err, "could not save light client update")
}
log.WithField("period", period).Debug("Saved new light client update")
return nil
}
isNewUpdateBetter, err := IsBetterUpdate(update, oldUpdate)
if err != nil {
return errors.Wrapf(err, "could not compare light client updates")
}
if isNewUpdateBetter {
if err := s.beaconDB.SaveLightClientUpdate(ctx, period, update); err != nil {
return errors.Wrapf(err, "could not save light client update")
}
log.WithField("period", period).Debug("Saved new light client update")
return nil
}
log.WithField("period", period).Debug("New light client update is not better than the current one, skipping save")
return nil
}
func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate) {
s.mu.Lock()
defer s.mu.Unlock()

View File

@@ -25,18 +25,18 @@ func TestLightClientStore(t *testing.T) {
// Create test light client updates for Capella and Deneb
lCapella := util.NewTestLightClient(t, version.Capella)
opUpdateCapella, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lCapella.Ctx, lCapella.State.Slot(), lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock)
opUpdateCapella, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lCapella.Ctx, lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, opUpdateCapella, "OptimisticUpdateCapella is nil")
finUpdateCapella, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(lCapella.Ctx, lCapella.State.Slot(), lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock, lCapella.FinalizedBlock)
finUpdateCapella, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(lCapella.Ctx, lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock, lCapella.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, finUpdateCapella, "FinalityUpdateCapella is nil")
lDeneb := util.NewTestLightClient(t, version.Deneb)
opUpdateDeneb, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State.Slot(), lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock)
opUpdateDeneb, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, opUpdateDeneb, "OptimisticUpdateDeneb is nil")
finUpdateDeneb, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State.Slot(), lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock, lDeneb.FinalizedBlock)
finUpdateDeneb, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock, lDeneb.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, finUpdateDeneb, "FinalityUpdateDeneb is nil")

View File

@@ -16,7 +16,6 @@ go_library(
deps = [
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/state:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -53,7 +52,6 @@ go_test(
":go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -41,17 +41,17 @@ const (
// CustodyGroups computes the custody groups the node should participate in for custody.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#get_custody_groups
func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error) {
numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
// Check if the custody group count is larger than the number of custody groups.
if custodyGroupCount > numberOfCustodyGroup {
if custodyGroupCount > numberOfCustodyGroups {
return nil, ErrCustodyGroupCountTooLarge
}
// Shortcut if all custody groups are needed.
if custodyGroupCount == numberOfCustodyGroup {
custodyGroups := make([]uint64, 0, numberOfCustodyGroup)
for i := range numberOfCustodyGroup {
if custodyGroupCount == numberOfCustodyGroups {
custodyGroups := make([]uint64, 0, numberOfCustodyGroups)
for i := range numberOfCustodyGroups {
custodyGroups = append(custodyGroups, i)
}
@@ -73,7 +73,7 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error)
hashedCurrentId := hash.Hash(currentIdBytesLittleEndian)
// Get the custody group ID.
custodyGroup := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % numberOfCustodyGroup
custodyGroup := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % numberOfCustodyGroups
// Add the custody group to the map.
if !custodyGroupsMap[custodyGroup] {
@@ -88,9 +88,6 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error)
// Increment the current ID.
currentId.Add(currentId, one)
}
// Sort the custody groups.
slices.Sort[[]uint64](custodyGroups)
}
// Final check.
@@ -98,6 +95,9 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error)
return nil, errWrongComputedCustodyGroupCount
}
// Sort the custody groups.
slices.Sort[[]uint64](custodyGroups)
return custodyGroups, nil
}
@@ -105,19 +105,19 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error)
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#compute_columns_for_custody_group
func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
beaconConfig := params.BeaconConfig()
numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups
numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
if custodyGroup >= numberOfCustodyGroup {
if custodyGroup >= numberOfCustodyGroups {
return nil, ErrCustodyGroupTooLarge
}
numberOfColumns := beaconConfig.NumberOfColumns
columnsPerGroup := numberOfColumns / numberOfCustodyGroup
columnsPerGroup := numberOfColumns / numberOfCustodyGroups
columns := make([]uint64, 0, columnsPerGroup)
for i := range columnsPerGroup {
column := numberOfCustodyGroup*i + custodyGroup
column := numberOfCustodyGroups*i + custodyGroup
columns = append(columns, column)
}
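
As a sketch of the column-assignment formula above (column = numberOfCustodyGroups*i + custodyGroup): the 64-group configuration below is hypothetical, chosen so that each group owns two columns; with the Fulu values of 128 groups and 128 columns, each group owns exactly one column.

package main

import "fmt"

// computeColumnsForCustodyGroup mirrors the loop above: custody group g owns
// columns {numberOfGroups*i + g} for i in [0, columnsPerGroup).
func computeColumnsForCustodyGroup(custodyGroup, numberOfGroups, numberOfColumns uint64) []uint64 {
	columnsPerGroup := numberOfColumns / numberOfGroups
	columns := make([]uint64, 0, columnsPerGroup)
	for i := uint64(0); i < columnsPerGroup; i++ {
		columns = append(columns, numberOfGroups*i+custodyGroup)
	}
	return columns
}

func main() {
	fmt.Println(computeColumnsForCustodyGroup(5, 64, 128))  // [5 69] -- hypothetical 64-group config
	fmt.Println(computeColumnsForCustodyGroup(5, 128, 128)) // [5]    -- Fulu: one column per group
}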
@@ -127,7 +127,7 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
// DataColumnSidecars computes the data column sidecars from the signed block, cells and cell proofs.
// The returned value contains pointers to function parameters.
// (If the caller alters `cellsAndProofs` afterwards, the returned value will be modified as well.)
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/das-core.md#get_data_column_sidecars
// https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/validator.md#get_data_column_sidecars_from_block
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsAndProofs []kzg.CellsAndProofs) ([]*ethpb.DataColumnSidecar, error) {
if signedBlock == nil || signedBlock.IsNil() || len(cellsAndProofs) == 0 {
return nil, nil
@@ -151,7 +151,7 @@ func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsA
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
if err != nil {
return nil, errors.Wrap(err, "merkle proof ZKG commitments")
return nil, errors.Wrap(err, "merkle proof KZG commitments")
}
dataColumnSidecars, err := dataColumnsSidecars(signedBlockHeader, blobKzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
@@ -176,19 +176,6 @@ func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
return columnIndex % numberOfCustodyGroups, nil
}
// CustodyGroupSamplingSize returns the number of custody groups the node should sample from.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#custody-sampling
func (custodyInfo *CustodyInfo) CustodyGroupSamplingSize(ct CustodyType) uint64 {
custodyGroupCount := custodyInfo.TargetGroupCount.Get()
if ct == Actual {
custodyGroupCount = custodyInfo.ActualGroupCount()
}
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
return max(samplesPerSlot, custodyGroupCount)
}
// CustodyColumns computes the custody columns from the custody groups.
func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
@@ -219,6 +206,7 @@ func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {
// the KZG commitment inclusion proofs and cells and cell proofs.
// The returned value contains pointers to function parameters.
// (If the caller alters input parameters afterwards, the returned value will be modified as well.)
// https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/validator.md#get_data_column_sidecars
func dataColumnsSidecars(
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
blobKzgCommitments [][]byte,

View File

@@ -17,8 +17,8 @@ func TestCustodyGroups(t *testing.T) {
// --------------------------------------------
// The happy path is unit tested in spec tests.
// --------------------------------------------
numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups
_, err := peerdas.CustodyGroups(enode.ID{}, numberOfCustodyGroup+1)
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
_, err := peerdas.CustodyGroups(enode.ID{}, numberOfCustodyGroups+1)
require.ErrorIs(t, err, peerdas.ErrCustodyGroupCountTooLarge)
}
@@ -26,8 +26,8 @@ func TestComputeColumnsForCustodyGroup(t *testing.T) {
// --------------------------------------------
// The happy path is unit tested in spec tests.
// --------------------------------------------
numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups
_, err := peerdas.ComputeColumnsForCustodyGroup(numberOfCustodyGroup)
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
_, err := peerdas.ComputeColumnsForCustodyGroup(numberOfCustodyGroups)
require.ErrorIs(t, err, peerdas.ErrCustodyGroupTooLarge)
}
@@ -104,62 +104,6 @@ func TestComputeCustodyGroupForColumn(t *testing.T) {
})
}
func TestCustodyGroupSamplingSize(t *testing.T) {
testCases := []struct {
name string
custodyType peerdas.CustodyType
validatorsCustodyRequirement uint64
toAdvertiseCustodyGroupCount uint64
expected uint64
}{
{
name: "target, lower than samples per slot",
custodyType: peerdas.Target,
validatorsCustodyRequirement: 2,
expected: 8,
},
{
name: "target, higher than samples per slot",
custodyType: peerdas.Target,
validatorsCustodyRequirement: 100,
expected: 100,
},
{
name: "actual, lower than samples per slot",
custodyType: peerdas.Actual,
validatorsCustodyRequirement: 3,
toAdvertiseCustodyGroupCount: 4,
expected: 8,
},
{
name: "actual, higher than samples per slot",
custodyType: peerdas.Actual,
validatorsCustodyRequirement: 100,
toAdvertiseCustodyGroupCount: 101,
expected: 100,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Create a custody info.
custodyInfo := peerdas.CustodyInfo{}
// Set the validators custody requirement for target custody group count.
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)
// Set the to advertise custody group count.
custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount)
// Compute the custody group sampling size.
actual := custodyInfo.CustodyGroupSamplingSize(tc.custodyType)
// Check the result.
require.Equal(t, tc.expected, actual)
})
}
}
func TestCustodyColumns(t *testing.T) {
t.Run("group too large", func(t *testing.T) {
_, err := peerdas.CustodyColumns([]uint64{1_000_000})

View File

@@ -4,45 +4,17 @@ import (
"encoding/binary"
"sync"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/ethereum/go-ethereum/p2p/enode"
lru "github.com/hashicorp/golang-lru"
"github.com/pkg/errors"
)
// info contains all useful peerDAS related information regarding a peer.
type (
info struct {
CustodyGroups map[uint64]bool
CustodyColumns map[uint64]bool
DataColumnsSubnets map[uint64]bool
}
targetCustodyGroupCount struct {
mut sync.RWMutex
validatorsCustodyRequirement uint64
}
toAdverstiseCustodyGroupCount struct {
mut sync.RWMutex
value uint64
}
CustodyInfo struct {
// Mut is a mutex to be used by caller to ensure neither
// TargetCustodyGroupCount nor ToAdvertiseCustodyGroupCount are being modified.
// (This is not necessary to use this mutex for any data protection.)
Mut sync.RWMutex
// TargetGroupCount represents the target number of custody groups we should custody
// regarding the validators we are tracking.
TargetGroupCount targetCustodyGroupCount
// ToAdvertiseGroupCount represents the number of custody groups to advertise to the network.
ToAdvertiseGroupCount toAdverstiseCustodyGroupCount
}
)
type info struct {
CustodyGroups map[uint64]bool
CustodyColumns map[uint64]bool
DataColumnsSubnets map[uint64]bool
}
const (
nodeInfoCacheSize = 200
@@ -109,61 +81,6 @@ func Info(nodeID enode.ID, custodyGroupCount uint64) (*info, bool, error) {
return result, false, nil
}
// ActualGroupCount returns the actual custody group count.
func (custodyInfo *CustodyInfo) ActualGroupCount() uint64 {
return min(custodyInfo.TargetGroupCount.Get(), custodyInfo.ToAdvertiseGroupCount.Get())
}
// CustodyGroupCount returns the number of groups we should participate in for custody.
func (tcgc *targetCustodyGroupCount) Get() uint64 {
// If subscribed to all subnets, return the number of custody groups.
if flags.Get().SubscribeAllDataSubnets {
return params.BeaconConfig().NumberOfCustodyGroups
}
tcgc.mut.RLock()
defer tcgc.mut.RUnlock()
// If no validators are tracked, return the default custody requirement.
if tcgc.validatorsCustodyRequirement == 0 {
return params.BeaconConfig().CustodyRequirement
}
// Return the validators custody requirement.
return tcgc.validatorsCustodyRequirement
}
// setValidatorsCustodyRequirement sets the validators custody requirement.
func (tcgc *targetCustodyGroupCount) SetValidatorsCustodyRequirement(value uint64) {
tcgc.mut.Lock()
defer tcgc.mut.Unlock()
tcgc.validatorsCustodyRequirement = value
}
// Get returns the to advertise custody group count.
func (tacgc *toAdverstiseCustodyGroupCount) Get() uint64 {
// If subscribed to all subnets, return the number of custody groups.
if flags.Get().SubscribeAllDataSubnets {
return params.BeaconConfig().NumberOfCustodyGroups
}
custodyRequirement := params.BeaconConfig().CustodyRequirement
tacgc.mut.RLock()
defer tacgc.mut.RUnlock()
return max(tacgc.value, custodyRequirement)
}
// Set sets the to advertise custody group count.
func (tacgc *toAdverstiseCustodyGroupCount) Set(value uint64) {
tacgc.mut.Lock()
defer tacgc.mut.Unlock()
tacgc.value = value
}
// createInfoCacheIfNeeded creates a new cache if it doesn't exist.
func createInfoCacheIfNeeded() error {
nodeInfoCacheMut.Lock()

View File

@@ -4,7 +4,6 @@ import (
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/ethereum/go-ethereum/p2p/enode"
)
@@ -26,108 +25,3 @@ func TestInfo(t *testing.T) {
require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets)
}
}
func TestTargetCustodyGroupCount(t *testing.T) {
testCases := []struct {
name string
subscribeToAllColumns bool
validatorsCustodyRequirement uint64
expected uint64
}{
{
name: "subscribed to all data subnets",
subscribeToAllColumns: true,
validatorsCustodyRequirement: 100,
expected: 128,
},
{
name: "no validators attached",
subscribeToAllColumns: false,
validatorsCustodyRequirement: 0,
expected: 4,
},
{
name: "some validators attached",
subscribeToAllColumns: false,
validatorsCustodyRequirement: 100,
expected: 100,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Subscribe to all subnets if needed.
if tc.subscribeToAllColumns {
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SubscribeAllDataSubnets = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
}
var custodyInfo peerdas.CustodyInfo
// Set the validators custody requirement.
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)
// Get the target custody group count.
actual := custodyInfo.TargetGroupCount.Get()
// Compare the expected and actual values.
require.Equal(t, tc.expected, actual)
})
}
}
func TestToAdvertiseCustodyGroupCount(t *testing.T) {
testCases := []struct {
name string
subscribeToAllColumns bool
toAdvertiseCustodyGroupCount uint64
expected uint64
}{
{
name: "subscribed to all subnets",
subscribeToAllColumns: true,
toAdvertiseCustodyGroupCount: 100,
expected: 128,
},
{
name: "higher than custody requirement",
subscribeToAllColumns: false,
toAdvertiseCustodyGroupCount: 100,
expected: 100,
},
{
name: "lower than custody requirement",
subscribeToAllColumns: false,
toAdvertiseCustodyGroupCount: 1,
expected: 4,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Subscribe to all subnets if needed.
if tc.subscribeToAllColumns {
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SubscribeAllDataSubnets = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
}
// Create a custody info.
var custodyInfo peerdas.CustodyInfo
// Set the to advertise custody group count.
custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount)
// Get the to advertise custody group count.
actual := custodyInfo.ToAdvertiseGroupCount.Get()
// Compare the expected and actual values.
require.Equal(t, tc.expected, actual)
})
}
}

View File

@@ -123,7 +123,7 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
// ConstructDataColumnSidecars constructs data column sidecars from a block, (un-extended) blobs and
// cell proofs corresponding the extended blobs. The main purpose of this function is to
// construct data columns sidecars from data obtained from the execution client via:
// construct data column sidecars from data obtained from the execution client via:
// - `engine_getBlobsV2` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getblobsv2, or
// - `engine_getPayloadV5` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getpayloadv5
// Note: In this function, to stick with the `BlobsBundleV2` format returned by the execution client in `engine_getPayloadV5`,
@@ -222,8 +222,8 @@ func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.
// Check if the data column sidecars are aligned with the block.
dataColumnSidecars := make([]blocks.RODataColumn, 0, len(verifiedDataColumnSidecars))
for _, verifiedDataColumnSidecar := range verifiedDataColumnSidecars {
dataColumnSicecar := verifiedDataColumnSidecar.RODataColumn
dataColumnSidecars = append(dataColumnSidecars, dataColumnSicecar)
dataColumnSidecar := verifiedDataColumnSidecar.RODataColumn
dataColumnSidecars = append(dataColumnSidecars, dataColumnSidecar)
}
if err := DataColumnsAlignWithBlock(block, dataColumnSidecars); err != nil {
@@ -241,7 +241,7 @@ func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.
return blobSidecars, nil
}
// We need to reconstruct the blobs.
// We need to reconstruct the data column sidecars.
reconstructedDataColumnSidecars, err := ReconstructDataColumnSidecars(verifiedDataColumnSidecars)
if err != nil {
return nil, errors.Wrap(err, "reconstruct data column sidecars")

View File

@@ -196,6 +196,26 @@ func TestReconstructBlobs(t *testing.T) {
require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
})
t.Run("consecutive duplicates", func(t *testing.T) {
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
// [0, 1, 1, 3, 4, ...]
verifiedRoSidecars[2] = verifiedRoSidecars[1]
_, err := peerdas.ReconstructBlobs(emptyBlock, verifiedRoSidecars, []int{0})
require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
})
t.Run("non-consecutive duplicates", func(t *testing.T) {
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
// [0, 1, 2, 1, 4, ...]
verifiedRoSidecars[3] = verifiedRoSidecars[1]
_, err := peerdas.ReconstructBlobs(emptyBlock, verifiedRoSidecars, []int{0})
require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
})
t.Run("not enough columns", func(t *testing.T) {
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)

View File

@@ -21,10 +21,10 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
}
beaconConfig := params.BeaconConfig()
numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups
numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
validatorCustodyRequirement := beaconConfig.ValidatorCustodyRequirement
balancePerAdditionalCustodyGroup := beaconConfig.BalancePerAdditionalCustodyGroup
count := totalNodeBalance / balancePerAdditionalCustodyGroup
return min(max(count, validatorCustodyRequirement), numberOfCustodyGroup), nil
return min(max(count, validatorCustodyRequirement), numberOfCustodyGroups), nil
}
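
A worked example of the clamp above, min(max(balance/balancePerGroup, validatorCustodyRequirement), numberOfCustodyGroups). The constants below (requirement of 8, 32 ETH per additional group, 128 groups) are assumptions for illustration; the real values come from params.BeaconConfig().

package main

import "fmt"

// validatorsCustodyRequirement mirrors the clamp above: the count scales with total
// balance, bounded below by the validator custody requirement and above by the
// total number of custody groups.
func validatorsCustodyRequirement(totalBalanceGwei, balancePerGroupGwei, requirement, numberOfGroups uint64) uint64 {
	count := totalBalanceGwei / balancePerGroupGwei
	return min(max(count, requirement), numberOfGroups)
}

func main() {
	const gwei = uint64(1_000_000_000)
	perGroup := 32 * gwei // assumed: 32 ETH per additional custody group
	fmt.Println(validatorsCustodyRequirement(1_024*gwei, perGroup, 8, 128))   // 32  -- scales with balance
	fmt.Println(validatorsCustodyRequirement(64*gwei, perGroup, 8, 128))      // 8   -- clamped up to the requirement
	fmt.Println(validatorsCustodyRequirement(100_000*gwei, perGroup, 8, 128)) // 128 -- clamped down to all groups
}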

View File

@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"domain.go",
"signature.go",
"signing_root.go",
],
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing",
@@ -25,7 +24,6 @@ go_test(
name = "go_default_test",
srcs = [
"domain_test.go",
"signature_test.go",
"signing_root_test.go",
],
embed = [":go_default_library"],

View File

@@ -1,34 +0,0 @@
package signing
import (
"github.com/OffchainLabs/prysm/v6/config/params"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
var ErrNilRegistration = errors.New("nil signed registration")
// VerifyRegistrationSignature verifies the signature of a validator's registration.
func VerifyRegistrationSignature(
sr *ethpb.SignedValidatorRegistrationV1,
) error {
if sr == nil || sr.Message == nil {
return ErrNilRegistration
}
d := params.BeaconConfig().DomainApplicationBuilder
// Per spec, we want the fork version and genesis validator to be nil.
// Which is genesis value and zero by default.
sd, err := ComputeDomain(
d,
nil, /* fork version */
nil /* genesis val root */)
if err != nil {
return err
}
if err := VerifySigningRoot(sr.Message, sr.Message.Pubkey, sr.Signature, sd); err != nil {
return ErrSigFailedToVerify
}
return nil
}

View File

@@ -1,42 +0,0 @@
package signing_test
import (
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
func TestVerifyRegistrationSignature(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
reg := &ethpb.ValidatorRegistrationV1{
FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
GasLimit: 123456,
Timestamp: uint64(time.Now().Unix()),
Pubkey: sk.PublicKey().Marshal(),
}
d := params.BeaconConfig().DomainApplicationBuilder
domain, err := signing.ComputeDomain(d, nil, nil)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(reg, domain)
require.NoError(t, err)
sk.Sign(sr[:]).Marshal()
sReg := &ethpb.SignedValidatorRegistrationV1{
Message: reg,
Signature: sk.Sign(sr[:]).Marshal(),
}
require.NoError(t, signing.VerifyRegistrationSignature(sReg))
sReg.Signature = []byte("bad")
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrSigFailedToVerify)
sReg.Message = nil
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrNilRegistration)
}

View File

@@ -39,10 +39,8 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -22,8 +22,8 @@ type LazilyPersistentStoreColumn struct {
store *filesystem.DataColumnStorage
nodeID enode.ID
cache *dataColumnCache
custodyInfo *peerdas.CustodyInfo
newDataColumnsVerifier verification.NewDataColumnsVerifier
custodyGroupCount uint64
}
var _ AvailabilityStore = &LazilyPersistentStoreColumn{}
@@ -38,13 +38,18 @@ type DataColumnsVerifier interface {
// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
func NewLazilyPersistentStoreColumn(store *filesystem.DataColumnStorage, nodeID enode.ID, newDataColumnsVerifier verification.NewDataColumnsVerifier, custodyInfo *peerdas.CustodyInfo) *LazilyPersistentStoreColumn {
func NewLazilyPersistentStoreColumn(
store *filesystem.DataColumnStorage,
nodeID enode.ID,
newDataColumnsVerifier verification.NewDataColumnsVerifier,
custodyGroupCount uint64,
) *LazilyPersistentStoreColumn {
return &LazilyPersistentStoreColumn{
store: store,
nodeID: nodeID,
cache: newDataColumnCache(),
custodyInfo: custodyInfo,
newDataColumnsVerifier: newDataColumnsVerifier,
custodyGroupCount: custodyGroupCount,
}
}
@@ -126,11 +131,11 @@ func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, curre
return errors.Wrap(err, "entry filter")
}
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
verifier := s.newDataColumnsVerifier(roDataColumns, verification.ByRangeRequestDataColumnSidecarRequirements)
if err := verifier.ValidFields(); err != nil {
return errors.Wrap(err, "valid")
return errors.Wrap(err, "valid fields")
}
if err := verifier.SidecarInclusionProven(); err != nil {
@@ -155,6 +160,8 @@ func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, curre
// fullCommitmentsToCheck returns the commitments to check for a given block.
func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
// Return early for blocks that are pre-Fulu.
if block.Version() < version.Fulu {
return &safeCommitmentsArray{}, nil
@@ -164,7 +171,7 @@ func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, bl
blockSlot := block.Block().Slot()
blockEpoch := slots.ToEpoch(blockSlot)
// Compute the current spoch.
// Compute the current epoch.
currentEpoch := slots.ToEpoch(currentSlot)
// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
@@ -183,11 +190,9 @@ func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, bl
return &safeCommitmentsArray{}, nil
}
// Retrieve the groups count.
custodyGroupCount := s.custodyInfo.ActualGroupCount()
// Retrieve peer info.
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
samplingSize := max(s.custodyGroupCount, samplesPerSlot)
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
if err != nil {
return nil, errors.Wrap(err, "peer info")
}
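
The refactor above replaces the removed CustodyGroupSamplingSize helper with an inline max(custodyGroupCount, samplesPerSlot). A tiny sketch of that rule, assuming the Fulu SAMPLES_PER_SLOT value of 8 (the configured value lives in params.BeaconConfig().SamplesPerSlot):

package main

import "fmt"

// samplingSize mirrors the inline rule above: a node samples at least samplesPerSlot
// groups, and more if it custodies more than that.
func samplingSize(custodyGroupCount, samplesPerSlot uint64) uint64 {
	return max(custodyGroupCount, samplesPerSlot)
}

func main() {
	const samplesPerSlot = 8 // assumed Fulu SAMPLES_PER_SLOT
	fmt.Println(samplingSize(4, samplesPerSlot))   // 8   -- custody below the sampling floor
	fmt.Println(samplingSize(128, samplesPerSlot)) // 128 -- a supernode samples everything it custodies
}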

View File

@@ -4,10 +4,8 @@ import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -29,7 +27,7 @@ var commitments = [][]byte{
func TestPersist(t *testing.T) {
t.Run("no sidecars", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(0)
require.NoError(t, err)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
@@ -44,7 +42,7 @@ func TestPersist(t *testing.T) {
}
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
require.ErrorIs(t, err, errMixedRoots)
@@ -59,7 +57,7 @@ func TestPersist(t *testing.T) {
}
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
require.NoError(t, err)
@@ -76,7 +74,7 @@ func TestPersist(t *testing.T) {
}
roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(slot, roSidecars...)
require.NoError(t, err)
@@ -114,7 +112,7 @@ func TestIsDataAvailable(t *testing.T) {
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, &peerdas.CustodyInfo{})
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
require.NoError(t, err)
@@ -135,9 +133,9 @@ func TestIsDataAvailable(t *testing.T) {
root := signedRoBlock.Root()
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, &peerdas.CustodyInfo{})
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
indices := [...]uint64{1, 17, 87, 102}
indices := [...]uint64{1, 17, 19, 42, 75, 87, 102, 117}
dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
for _, index := range indices {
dataColumnParams := util.DataColumnParam{
@@ -221,14 +219,10 @@ func TestFullCommitmentsToCheck(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SubscribeAllDataSubnets = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
numberOfColumns := params.BeaconConfig().NumberOfColumns
b := tc.block(t)
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, &peerdas.CustodyInfo{})
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, numberOfColumns)
commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
require.NoError(t, err)

View File

@@ -13,6 +13,7 @@ go_library(
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/beacon-chain:__subpackages__",
"//genesis:__subpackages__",
"//testing/slasher/simulator:__pkg__",
"//tools:__subpackages__",
],

View File

@@ -251,7 +251,7 @@ func (dcs *DataColumnStorage) Summary(root [fieldparams.RootLength]byte) DataCol
}
// Save saves data column sidecars into the database and asynchronously performs pruning.
// The returned chanel is closed when the pruning is complete.
// The returned channel is closed when the pruning is complete.
func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
startTime := time.Now()
@@ -266,8 +266,7 @@ func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataCol
return errWrongNumberOfColumns
}
highestEpoch := primitives.Epoch(0)
dataColumnSidecarsbyRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
dataColumnSidecarsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
// Group data column sidecars by root.
for _, dataColumnSidecar := range dataColumnSidecars {
@@ -278,23 +277,20 @@ func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataCol
// Group data column sidecars by root.
root := dataColumnSidecar.BlockRoot()
dataColumnSidecarsbyRoot[root] = append(dataColumnSidecarsbyRoot[root], dataColumnSidecar)
dataColumnSidecarsByRoot[root] = append(dataColumnSidecarsByRoot[root], dataColumnSidecar)
}
for root, dataColumnSidecars := range dataColumnSidecarsbyRoot {
for root, dataColumnSidecars := range dataColumnSidecarsByRoot {
// Safety check all data column sidecars for this root are from the same slot.
firstSlot := dataColumnSidecars[0].SignedBlockHeader.Header.Slot
slot := dataColumnSidecars[0].Slot()
for _, dataColumnSidecar := range dataColumnSidecars[1:] {
if dataColumnSidecar.SignedBlockHeader.Header.Slot != firstSlot {
if dataColumnSidecar.Slot() != slot {
return errDataColumnSidecarsFromDifferentSlots
}
}
// Set the highest epoch.
epoch := slots.ToEpoch(dataColumnSidecars[0].Slot())
highestEpoch = max(highestEpoch, epoch)
// Save data columns in the filesystem.
epoch := slots.ToEpoch(slot)
if err := dcs.saveFilesystem(root, epoch, dataColumnSidecars); err != nil {
return errors.Wrap(err, "save filesystem")
}
@@ -306,7 +302,7 @@ func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataCol
}
// Compute the data columns ident.
dataColumnsIdent := DataColumnsIdent{Root: root, Epoch: slots.ToEpoch(dataColumnSidecars[0].Slot()), Indices: indices}
dataColumnsIdent := DataColumnsIdent{Root: root, Epoch: epoch, Indices: indices}
// Set data columns in the cache.
if err := dcs.cache.set(dataColumnsIdent); err != nil {

View File

@@ -20,7 +20,7 @@ File organisation
The remaining 7 bits (from 0 to 127) represent the index of the data column.
This sentinel bit is needed to distinguish between the column with index 0 and no column.
Example: If the column with index 5 is at position 3 in the file, then indices[5] = 0x80 + 0x03 = 0x83.
- The rest of the file is a repeat of the SSZ encoded data columns sidecars.
- The rest of the file is a repeat of the SSZ encoded data column sidecars.
|------------------------------------------|------------------------------------------------------------------------------------|
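
A minimal sketch of the sentinel-bit encoding in the example above: the high bit distinguishes "present" from "absent", and the low 7 bits carry the stored value (0x03 in the indices[5] example).

package main

import "fmt"

// encodeIndex sets the sentinel bit (0x80) and stores the value in the low 7 bits.
func encodeIndex(value byte) byte {
	return 0x80 | value
}

// decodeIndex returns the stored value and whether the entry is present at all.
func decodeIndex(b byte) (value byte, present bool) {
	return b & 0x7F, b&0x80 != 0
}

func main() {
	b := encodeIndex(0x03)
	fmt.Printf("%#x\n", b)         // 0x83, matching the indices[5] example above
	fmt.Println(decodeIndex(b))    // 3 true
	fmt.Println(decodeIndex(0x00)) // 0 false -- an all-zero byte means "no entry"
}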
@@ -75,7 +75,7 @@ data-columns
Computation of the maximum size of a DataColumnSidecar
------------------------------------------------------
https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#datacolumnsidecar
https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#datacolumnsidecar
class DataColumnSidecar(Container):

View File

@@ -33,6 +33,7 @@ type ReadOnlyDatabase interface {
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
HighestRootsBelowSlot(ctx context.Context, slot primitives.Slot) (primitives.Slot, [][32]byte, error)
EarliestSlot(ctx context.Context) (primitives.Slot, error)
// State related methods.
State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
StateOrError(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
@@ -56,14 +57,16 @@ type ReadOnlyDatabase interface {
// Fee recipients operations.
FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
// light client operations
// Light client operations
LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64) (map[uint64]interfaces.LightClientUpdate, error)
LightClientUpdate(ctx context.Context, period uint64) (interfaces.LightClientUpdate, error)
LightClientBootstrap(ctx context.Context, blockRoot []byte) (interfaces.LightClientBootstrap, error)
// origin checkpoint sync support
// Origin checkpoint sync support
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
// Custody operations.
CustodyInfo(ctx context.Context) (uint64, uint64, error)
SubscribedToAllDataSubnets(ctx context.Context) (bool, error)
}
// NoHeadAccessDatabase defines a struct without access to chain head data.
@@ -102,6 +105,12 @@ type NoHeadAccessDatabase interface {
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)
// Custody operations.
SaveCustodyGroupCount(ctx context.Context, custodyGroupCount uint64) error
SaveSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) error
UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
UpdateCustodyInfo(ctx context.Context, custodyGroupCount uint64, earliestAvailableSlot primitives.Slot) (uint64, primitives.Slot, error)
}
// HeadAccessDatabase defines a struct with access to reading chain head data.

View File

@@ -8,6 +8,7 @@ go_library(
"backup.go",
"blocks.go",
"checkpoint.go",
"custody.go",
"deposit_contract.go",
"encoding.go",
"error.go",
@@ -38,7 +39,6 @@ go_library(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
@@ -50,6 +50,7 @@ go_library(
"//container/slice:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//genesis:go_default_library",
"//io/file:go_default_library",
"//monitoring/progress:go_default_library",
"//monitoring/tracing:go_default_library",
@@ -106,7 +107,6 @@ go_test(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
@@ -116,6 +116,8 @@ go_test(
"//consensus-types/light-client:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//genesis:go_default_library",
"//genesis/embedded:go_default_library",
"//proto/dbval:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -860,6 +860,47 @@ func (s *Store) SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primi
})
}
// EarliestSlot returns the earliest available slot stored in the database.
func (s *Store) EarliestSlot(ctx context.Context) (primitives.Slot, error) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
_, span := trace.StartSpan(ctx, "BeaconDB.EarliestSlot")
defer span.End()
earliestAvailableSlot := primitives.Slot(0)
err := s.db.View(func(tx *bolt.Tx) error {
// Retrieve the root corresponding to the earliest available block.
c := tx.Bucket(blockSlotIndicesBucket).Cursor()
k, v := c.First()
if k == nil || v == nil {
return ErrNotFound
}
slot := bytesutil.BytesToSlotBigEndian(k)
// The genesis block may be indexed in this bucket, even if we started from a checkpoint.
// Because of this, we check the next block. If the next block is still in the genesis epoch,
// then we assume the whole chain is available.
if slot != 0 {
// The first indexed block is not the genesis block, so it is the earliest available one.
earliestAvailableSlot = slot
return nil
}
k, v = c.Next()
if k == nil || v == nil {
// Only the genesis block is available.
return nil
}
slot = bytesutil.BytesToSlotBigEndian(k)
if slot < slotsPerEpoch {
// We are still in the genesis epoch, so we assume the whole chain is available.
return nil
}
earliestAvailableSlot = slot
return nil
})
return earliestAvailableSlot, err
}
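
A self-contained sketch of the decision EarliestSlot makes over the sorted block-slot index, assuming 32 slots per epoch: a lone genesis entry followed by a large gap indicates checkpoint sync and should not be reported as slot 0.

package main

import "fmt"

// earliestSlot mirrors the logic above on a sorted list of indexed block slots: a
// non-genesis first entry is the earliest available block; otherwise the second
// entry decides whether the chain really starts at genesis or at a checkpoint.
func earliestSlot(sortedSlots []uint64, slotsPerEpoch uint64) uint64 {
	if len(sortedSlots) == 0 {
		return 0 // the real code returns ErrNotFound here
	}
	if sortedSlots[0] != 0 {
		return sortedSlots[0]
	}
	if len(sortedSlots) == 1 || sortedSlots[1] < slotsPerEpoch {
		return 0 // genesis, or still inside the genesis epoch: the whole chain is present
	}
	return sortedSlots[1] // genesis block plus a gap: the node started from a checkpoint
}

func main() {
	const slotsPerEpoch = 32 // assumed mainnet value
	fmt.Println(earliestSlot([]uint64{0, 5, 6}, slotsPerEpoch))          // 0
	fmt.Println(earliestSlot([]uint64{0, 500_000}, slotsPerEpoch))       // 500000 -- checkpoint sync, genesis indexed
	fmt.Println(earliestSlot([]uint64{123_456, 123_457}, slotsPerEpoch)) // 123456
}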
type slotRoot struct {
slot primitives.Slot
root [32]byte
@@ -883,7 +924,7 @@ func (s *Store) slotRootsInRange(ctx context.Context, start, end primitives.Slot
c := bkt.Cursor()
for k, v := c.Seek(key); ; /* rely on internal checks to exit */ k, v = c.Prev() {
if len(k) == 0 && len(v) == 0 {
// The `edge`` variable and this `if` deal with 2 edge cases:
// The `edge` variable and this `if` deal with 2 edge cases:
// - Seeking past the end of the bucket (the `end` param is higher than the highest slot).
// - Seeking before the beginning of the bucket (the `start` param is lower than the lowest slot).
// In both of these cases k,v will be nil and we can handle the same way using `edge` to

View File

@@ -0,0 +1,231 @@
package kv
import (
"context"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/pkg/errors"
bolt "go.etcd.io/bbolt"
)
// CustodyInfo returns the custody group count and the earliest available slot in the database.
func (s *Store) CustodyInfo(ctx context.Context) (uint64, uint64, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.CustodyInfo")
defer span.End()
groupCount, earliestAvailableSlot := uint64(0), uint64(0)
err := s.db.View(func(tx *bolt.Tx) error {
// Retrieve the custody bucket.
bucket := tx.Bucket(custodyBucket)
if bucket == nil {
return nil
}
// Retrieve the group count.
bytes := bucket.Get(groupCountKey)
if len(bytes) != 0 {
groupCount = bytesutil.BytesToUint64BigEndian(bytes)
}
// Retrieve the earliest available slot.
earliestSlotBytes := bucket.Get(earliestAvailableSlotKey)
if len(earliestSlotBytes) != 0 {
earliestAvailableSlot = bytesutil.BytesToUint64BigEndian(earliestSlotBytes)
}
return nil
})
return groupCount, earliestAvailableSlot, err
}
// UpdateCustodyInfo atomically updates the custody group count, but only if it is greater than the stored one.
// In this case, it also updates the earliest available slot with the provided value.
// It returns the stored custody group count and earliest available slot.
func (s *Store) UpdateCustodyInfo(ctx context.Context, custodyGroupCount uint64, earliestAvailableSlot primitives.Slot) (uint64, primitives.Slot, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.UpdateCustodyInfo")
defer span.End()
storedGroupCount, storedEarliestAvailableSlot := uint64(0), primitives.Slot(0)
if err := s.db.Update(func(tx *bolt.Tx) error {
// Retrieve the custody bucket.
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
if err != nil {
return errors.Wrap(err, "create custody bucket")
}
// Retrieve the stored custody group count.
storedGroupCountBytes := bucket.Get(groupCountKey)
if len(storedGroupCountBytes) != 0 {
storedGroupCount = bytesutil.BytesToUint64BigEndian(storedGroupCountBytes)
}
// Retrieve the stored earliest available slot.
storedEarliestAvailableSlotBytes := bucket.Get(earliestAvailableSlotKey)
if len(storedEarliestAvailableSlotBytes) != 0 {
storedEarliestAvailableSlot = primitives.Slot(bytesutil.BytesToUint64BigEndian(storedEarliestAvailableSlotBytes))
}
// Exit early if the new custody group count is lower than or equal to the stored one.
if custodyGroupCount <= storedGroupCount {
return nil
}
storedGroupCount, storedEarliestAvailableSlot = custodyGroupCount, earliestAvailableSlot
// Store the earliest available slot.
bytes := bytesutil.Uint64ToBytesBigEndian(uint64(earliestAvailableSlot))
if err := bucket.Put(earliestAvailableSlotKey, bytes); err != nil {
return errors.Wrap(err, "put earliest available slot")
}
// Store the custody group count.
bytes = bytesutil.Uint64ToBytesBigEndian(custodyGroupCount)
if err := bucket.Put(groupCountKey, bytes); err != nil {
return errors.Wrap(err, "put custody group count")
}
return nil
}); err != nil {
return 0, 0, err
}
return storedGroupCount, storedEarliestAvailableSlot, nil
}
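
A minimal sketch of the monotonic rule implemented above: the stored custody group count only ever grows, the earliest available slot is only replaced together with it, and the values left in storage are what the call returns.

package main

import "fmt"

// custodyRecord stands in for the two keys kept in the custody bucket.
type custodyRecord struct {
	groupCount   uint64
	earliestSlot uint64
}

// update mirrors UpdateCustodyInfo: the new count wins only if it is strictly greater.
func (r *custodyRecord) update(groupCount, earliestSlot uint64) (uint64, uint64) {
	if groupCount > r.groupCount {
		r.groupCount, r.earliestSlot = groupCount, earliestSlot
	}
	return r.groupCount, r.earliestSlot
}

func main() {
	var r custodyRecord
	fmt.Println(r.update(4, 100)) // 4 100 -- first write
	fmt.Println(r.update(8, 200)) // 8 200 -- larger count replaces both values
	fmt.Println(r.update(6, 300)) // 8 200 -- smaller count is ignored
}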
// SaveCustodyGroupCount saves the custody group count to the database.
func (s *Store) SaveCustodyGroupCount(ctx context.Context, custodyGroupCount uint64) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SetCustodyGroupCount")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
// Retrieve the custody bucket.
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
if err != nil {
return errors.Wrap(err, "create custody bucket")
}
// Store the custody group count.
custodyGroupCountBytes := bytesutil.Uint64ToBytesBigEndian(custodyGroupCount)
if err := bucket.Put(groupCountKey, custodyGroupCountBytes); err != nil {
return errors.Wrap(err, "put custody group count")
}
return nil
})
}
// SubscribedToAllDataSubnets checks in the database if the node is subscribed to all data subnets.
func (s *Store) SubscribedToAllDataSubnets(ctx context.Context) (bool, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.SubscribedToAllDataSubnets")
defer span.End()
result := false
err := s.db.View(func(tx *bolt.Tx) error {
// Retrieve the custody bucket.
bucket := tx.Bucket(custodyBucket)
if bucket == nil {
return nil
}
// Retrieve the subscribe all data subnets flag.
bytes := bucket.Get(subscribeAllDataSubnetsKey)
if len(bytes) == 0 {
return nil
}
if bytes[0] == 1 {
result = true
}
return nil
})
return result, err
}
// SaveSubscribedToAllDataSubnets saves the subscription status to all data subnets in the database.
func (s *Store) SaveSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveSubscribedToAllDataSubnets")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
// Retrieve the custody bucket.
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
if err != nil {
return errors.Wrap(err, "create custody bucket")
}
// Store the subscription status.
value := byte(0)
if subscribed {
value = 1
}
if err := bucket.Put(subscribeAllDataSubnetsKey, []byte{value}); err != nil {
return errors.Wrap(err, "put subscribe all data subnets")
}
return nil
})
}
// UpdateSubscribedToAllDataSubnets updates the "subscribed to all data subnets" status in the database
// only if `subscribed` is `true`.
// It returns the previous subscription status.
func (s *Store) UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.UpdateSubscribedToAllDataSubnets")
defer span.End()
result := false
if !subscribed {
if err := s.db.View(func(tx *bolt.Tx) error {
// Retrieve the custody bucket.
bucket := tx.Bucket(custodyBucket)
if bucket == nil {
return nil
}
// Retrieve the subscribe all data subnets flag.
bytes := bucket.Get(subscribeAllDataSubnetsKey)
if len(bytes) == 0 {
return nil
}
if bytes[0] == 1 {
result = true
}
return nil
}); err != nil {
return false, err
}
return result, nil
}
if err := s.db.Update(func(tx *bolt.Tx) error {
// Retrieve the custody bucket.
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
if err != nil {
return errors.Wrap(err, "create custody bucket")
}
bytes := bucket.Get(subscribeAllDataSubnetsKey)
if len(bytes) != 0 && bytes[0] == 1 {
result = true
}
if err := bucket.Put(subscribeAllDataSubnetsKey, []byte{1}); err != nil {
return errors.Wrap(err, "put subscribe all data subnets")
}
return nil
}); err != nil {
return false, err
}
return result, nil
}
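
A short sketch of the sticky-flag behaviour above: passing false only reads the stored status, passing true records it, and the previous value is returned either way.

package main

import "fmt"

// stickyFlag mirrors UpdateSubscribedToAllDataSubnets: the flag can only be turned on
// through this method, and the caller always learns what was stored before the call.
type stickyFlag struct{ stored bool }

func (f *stickyFlag) update(subscribed bool) bool {
	previous := f.stored
	if subscribed {
		f.stored = true
	}
	return previous
}

func main() {
	var f stickyFlag
	fmt.Println(f.update(false)) // false -- read only, nothing stored yet
	fmt.Println(f.update(true))  // false -- previous value; the flag is now recorded
	fmt.Println(f.update(false)) // true  -- the flag stays on
}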

View File

@@ -8,6 +8,7 @@ import (
dbIface "github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/encoding/ssz/detect"
"github.com/OffchainLabs/prysm/v6/genesis"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
@@ -97,8 +98,22 @@ func (s *Store) EnsureEmbeddedGenesis(ctx context.Context) error {
if err != nil {
return err
}
if gs != nil && !gs.IsNil() {
if !state.IsNil(gs) {
return s.SaveGenesisData(ctx, gs)
}
return nil
}
type LegacyGenesisProvider struct {
store *Store
}
func NewLegacyGenesisProvider(store *Store) *LegacyGenesisProvider {
return &LegacyGenesisProvider{store: store}
}
var _ genesis.Provider = &LegacyGenesisProvider{}
func (p *LegacyGenesisProvider) Genesis(ctx context.Context) (state.BeaconState, error) {
return p.store.LegacyGenesisState(ctx)
}
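
LegacyGenesisProvider above adapts the kv store to what looks like a new genesis.Provider interface. A hedged sketch of that adapter pattern in isolation; the interface shape and types below are assumptions based on the method set shown, with state.BeaconState replaced by a string placeholder.

package main

import (
	"context"
	"fmt"
)

// provider is assumed to mirror the shape of genesis.Provider used above.
type provider interface {
	Genesis(ctx context.Context) (string, error)
}

// legacyStore stands in for *kv.Store and its LegacyGenesisState method.
type legacyStore struct{}

func (s *legacyStore) LegacyGenesisState(_ context.Context) (string, error) {
	return "genesis-state-from-db", nil
}

// legacyGenesisProvider adapts the store to the provider interface, as in the hunk above.
type legacyGenesisProvider struct{ store *legacyStore }

func (p *legacyGenesisProvider) Genesis(ctx context.Context) (string, error) {
	return p.store.LegacyGenesisState(ctx)
}

func main() {
	var p provider = &legacyGenesisProvider{store: &legacyStore{}}
	st, err := p.Genesis(context.Background())
	fmt.Println(st, err)
}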

View File

@@ -8,6 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -152,6 +153,7 @@ func TestEnsureEmbeddedGenesis(t *testing.T) {
require.NoError(t, undo())
}()
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
ctx := t.Context()
db := setupDB(t)

View File

@@ -123,6 +123,7 @@ var Buckets = [][]byte{
feeRecipientBucket,
registrationBucket,
custodyBucket,
}
// KVStoreOption is a functional option that modifies a kv.Store.

View File

@@ -70,4 +70,10 @@ var (
// Migrations
migrationsBucket = []byte("migrations")
// Custody
custodyBucket = []byte("custody")
groupCountKey = []byte("group-count")
earliestAvailableSlotKey = []byte("earliest-available-slot")
subscribeAllDataSubnetsKey = []byte("subscribe-all-data-subnets")
)

View File

@@ -6,14 +6,12 @@ import (
"fmt"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/genesis"
statenative "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -65,21 +63,21 @@ func (s *Store) StateOrError(ctx context.Context, blockRoot [32]byte) (state.Bea
return st, nil
}
// GenesisState returns the genesis state in beacon chain.
func (s *Store) GenesisState(ctx context.Context) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisState")
st, err := genesis.State()
if errors.Is(err, genesis.ErrGenesisStateNotInitialized) {
log.WithError(err).Error("genesis state not initialized, returning nil state. this should only happen in tests")
return nil, nil
}
return st, err
}
// GenesisState returns the genesis state in beacon chain.
func (s *Store) LegacyGenesisState(ctx context.Context) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LegacyGenesisState")
defer span.End()
cached, err := genesis.State(params.BeaconConfig().ConfigName)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
span.SetAttributes(trace.BoolAttribute("cache_hit", cached != nil))
if cached != nil {
return cached, nil
}
var err error
var st state.BeaconState
err = s.db.View(func(tx *bolt.Tx) error {
// Retrieve genesis block's signing root from blocks bucket,

View File

@@ -15,6 +15,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -488,7 +489,7 @@ func TestGenesisState_CanSaveRetrieve(t *testing.T) {
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), headRoot))
require.NoError(t, db.SaveState(t.Context(), st, headRoot))
genesis.StoreStateDuringTest(t, st)
savedGenesisS, err := db.GenesisState(t.Context())
require.NoError(t, err)
@@ -661,7 +662,7 @@ func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) {
require.NoError(t, err)
genesisRoot := [32]byte{'a'}
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))
require.NoError(t, db.SaveState(t.Context(), genesisState, genesisRoot))
genesis.StoreStateDuringTest(t, genesisState)
b := util.NewBeaconBlock()
b.Block.Slot = 1

View File

@@ -3,9 +3,9 @@ package kv
import (
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/genesis"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/genesis/embedded"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
)
@@ -18,7 +18,7 @@ func TestSaveOrigin(t *testing.T) {
ctx := t.Context()
db := setupDB(t)
st, err := genesis.State(params.MainnetName)
st, err := embedded.ByName(params.MainnetName)
require.NoError(t, err)
sb, err := st.MarshalSSZ()

View File

@@ -125,6 +125,7 @@ go_test(
"//contracts/deposit/mock:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//genesis:go_default_library",
"//monitoring/clientstats:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -22,6 +22,7 @@ import (
contracts "github.com/OffchainLabs/prysm/v6/contracts/deposit"
"github.com/OffchainLabs/prysm/v6/contracts/deposit/mock"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/monitoring/clientstats"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -381,6 +382,7 @@ func TestInitDepositCache_OK(t *testing.T) {
require.NoError(t, err)
require.NoError(t, s.cfg.beaconDB.SaveGenesisBlockRoot(t.Context(), blockRootA))
require.NoError(t, s.cfg.beaconDB.SaveState(t.Context(), emptyState, blockRootA))
genesis.StoreStateDuringTest(t, emptyState)
s.chainStartData.Chainstarted = true
require.NoError(t, s.initDepositCaches(t.Context(), ctrs))
require.Equal(t, 3, len(s.cfg.depositCache.PendingContainers(t.Context(), nil)))
@@ -446,6 +448,7 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {
require.NoError(t, s.cfg.beaconDB.SaveGenesisBlockRoot(t.Context(), headRoot))
require.NoError(t, s.cfg.beaconDB.SaveState(t.Context(), emptyState, headRoot))
require.NoError(t, stateGen.SaveState(t.Context(), headRoot, emptyState))
genesis.StoreStateDuringTest(t, emptyState)
s.cfg.stateGen = stateGen
require.NoError(t, emptyState.SetEth1DepositIndex(3))
@@ -594,6 +597,7 @@ func TestService_EnsureConsistentPowchainData(t *testing.T) {
require.NoError(t, err)
assert.NoError(t, genState.SetSlot(1000))
genesis.StoreStateDuringTest(t, genState)
require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(t.Context(), genState))
_, err = s1.validPowchainData(t.Context())
require.NoError(t, err)
@@ -655,6 +659,7 @@ func TestService_EnsureValidPowchainData(t *testing.T) {
require.NoError(t, err)
assert.NoError(t, genState.SetSlot(1000))
genesis.StoreStateDuringTest(t, genState)
require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(t.Context(), genState))
err = s1.cfg.beaconDB.SaveExecutionChainData(t.Context(), &ethpb.ETH1ChainData{

View File

@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"clear_db.go",
"config.go",
"log.go",
"node.go",
@@ -23,7 +24,6 @@ go_library(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/db/kv:go_default_library",
@@ -50,7 +50,6 @@ go_library(
"//beacon-chain/sync/backfill:go_default_library",
"//beacon-chain/sync/backfill/coverage:go_default_library",
"//beacon-chain/sync/checkpoint:go_default_library",
"//beacon-chain/sync/genesis:go_default_library",
"//beacon-chain/sync/initial-sync:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd:go_default_library",
@@ -60,6 +59,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//encoding/bytesutil:go_default_library",
"//genesis:go_default_library",
"//monitoring/prometheus:go_default_library",
"//monitoring/tracing:go_default_library",
"//runtime:go_default_library",

View File

@@ -0,0 +1,101 @@
package node
import (
"context"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/slasherkv"
"github.com/OffchainLabs/prysm/v6/cmd"
"github.com/pkg/errors"
"github.com/urfave/cli/v2"
)
type dbClearer struct {
shouldClear bool
force bool
confirmed bool
}
const (
clearConfirmation = "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
clearDeclined = "Database will not be deleted. No changes have been made."
)
func (c *dbClearer) clearKV(ctx context.Context, db *kv.Store) (*kv.Store, error) {
if !c.shouldProceed() {
return db, nil
}
log.Warning("Removing database")
if err := db.ClearDB(); err != nil {
return nil, errors.Wrap(err, "could not clear database")
}
return kv.NewKVStore(ctx, db.DatabasePath())
}
func (c *dbClearer) clearBlobs(bs *filesystem.BlobStorage) error {
if !c.shouldProceed() {
return nil
}
log.Warning("Removing blob storage")
if err := bs.Clear(); err != nil {
return errors.Wrap(err, "could not clear blob storage")
}
return nil
}
func (c *dbClearer) clearColumns(cs *filesystem.DataColumnStorage) error {
if !c.shouldProceed() {
return nil
}
log.Warning("Removing data columns storage")
if err := cs.Clear(); err != nil {
return errors.Wrap(err, "could not clear data columns storage")
}
return nil
}
func (c *dbClearer) clearSlasher(ctx context.Context, db *slasherkv.Store) (*slasherkv.Store, error) {
if !c.shouldProceed() {
return db, nil
}
log.Warning("Removing slasher database")
if err := db.ClearDB(); err != nil {
return nil, errors.Wrap(err, "could not clear slasher database")
}
return slasherkv.NewKVStore(ctx, db.DatabasePath())
}
func (c *dbClearer) shouldProceed() bool {
if !c.shouldClear {
return false
}
if c.force {
return true
}
if !c.confirmed {
confirmed, err := cmd.ConfirmAction(clearConfirmation, clearDeclined)
if err != nil {
log.WithError(err).Error("Not clearing db due to confirmation error")
return false
}
c.confirmed = confirmed
}
return c.confirmed
}
func newDbClearer(cliCtx *cli.Context) *dbClearer {
force := cliCtx.Bool(cmd.ForceClearDB.Name)
return &dbClearer{
shouldClear: cliCtx.Bool(cmd.ClearDB.Name) || force,
force: force,
}
}
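For orientation, a minimal editorial sketch (not part of the changeset) of how this clearer composes with the KV store open path. The helper name openBeaconDB is hypothetical; newDbClearer, clearKV, kv.NewKVStore, and kv.BeaconNodeDbDirName are used exactly as they appear in this diff, and the sketch assumes it lives in the same node package so those identifiers and imports are in scope.

// openBeaconDB is a hypothetical helper mirroring the wiring added to node.go
// further down in this diff: open the store first, then let the clearer decide
// whether to wipe and reopen it.
func openBeaconDB(ctx context.Context, cliCtx *cli.Context, dataDir string) (*kv.Store, error) {
	clearer := newDbClearer(cliCtx) // honors cmd.ClearDB / cmd.ForceClearDB
	dbPath := filepath.Join(dataDir, kv.BeaconNodeDbDirName)
	db, err := kv.NewKVStore(ctx, dbPath)
	if err != nil {
		return nil, errors.Wrapf(err, "could not create database at %s", dbPath)
	}
	// No-op unless clearing was requested (and confirmed, when not forced).
	return clearer.clearKV(ctx, db)
}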

View File

@@ -26,7 +26,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
@@ -53,7 +52,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/backfill"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/backfill/coverage"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/checkpoint"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/genesis"
initialsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/cmd"
@@ -63,6 +61,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/slice"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/monitoring/prometheus"
"github.com/OffchainLabs/prysm/v6/runtime"
"github.com/OffchainLabs/prysm/v6/runtime/prereqs"
@@ -114,7 +113,7 @@ type BeaconNode struct {
slasherAttestationsFeed *event.Feed
finalizedStateAtStartUp state.BeaconState
serviceFlagOpts *serviceFlagOpts
GenesisInitializer genesis.Initializer
GenesisProviders []genesis.Provider
CheckpointInitializer checkpoint.Initializer
forkChoicer forkchoice.ForkChoicer
clockWaiter startup.ClockWaiter
@@ -124,11 +123,11 @@ type BeaconNode struct {
BlobStorageOptions []filesystem.BlobStorageOption
DataColumnStorage *filesystem.DataColumnStorage
DataColumnStorageOptions []filesystem.DataColumnStorageOption
custodyInfo *peerdas.CustodyInfo
verifyInitWaiter *verification.InitializerWaiter
syncChecker *initialsync.SyncChecker
slasherEnabled bool
lcStore *lightclient.Store
ConfigOptions []params.Option
}
// New creates a new node instance, sets up configuration options, and registers
@@ -137,18 +136,13 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
if err := configureBeacon(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not set beacon configuration options")
}
// Initializes any forks here.
params.BeaconConfig().InitializeForkSchedule()
registry := runtime.NewServiceRegistry()
ctx := cliCtx.Context
beacon := &BeaconNode{
cliCtx: cliCtx,
ctx: ctx,
cancel: cancel,
services: registry,
services: runtime.NewServiceRegistry(),
stop: make(chan struct{}),
stateFeed: new(event.Feed),
blockFeed: new(event.Feed),
@@ -166,7 +160,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
serviceFlagOpts: &serviceFlagOpts{},
initialSyncComplete: make(chan struct{}),
syncChecker: &initialsync.SyncChecker{},
custodyInfo: &peerdas.CustodyInfo{},
slasherEnabled: cliCtx.Bool(flags.SlasherFlag.Name),
}
@@ -176,6 +169,25 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
}
}
dbClearer := newDbClearer(cliCtx)
dataDir := cliCtx.String(cmd.DataDirFlag.Name)
boltFname := filepath.Join(dataDir, kv.BeaconNodeDbDirName)
kvdb, err := openDB(ctx, boltFname, dbClearer)
if err != nil {
return nil, errors.Wrap(err, "could not open database")
}
beacon.db = kvdb
providers := append(beacon.GenesisProviders, kv.NewLegacyGenesisProvider(kvdb))
if err := genesis.Initialize(ctx, dataDir, providers...); err != nil {
return nil, errors.Wrap(err, "could not initialize genesis state")
}
beacon.ConfigOptions = append([]params.Option{params.WithGenesisValidatorsRoot(genesis.ValidatorsRoot())}, beacon.ConfigOptions...)
params.BeaconConfig().ApplyOptions(beacon.ConfigOptions...)
params.BeaconConfig().InitializeForkSchedule()
params.LogDigests(params.BeaconConfig())
synchronizer := startup.NewClockSynchronizer()
beacon.clockWaiter = synchronizer
beacon.forkChoicer = doublylinkedtree.New()
@@ -194,6 +206,9 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
}
beacon.BlobStorage = blobs
}
if err := dbClearer.clearBlobs(beacon.BlobStorage); err != nil {
return nil, errors.Wrap(err, "could not clear blob storage")
}
if beacon.DataColumnStorage == nil {
dataColumnStorage, err := filesystem.NewDataColumnStorage(cliCtx.Context, beacon.DataColumnStorageOptions...)
@@ -203,8 +218,11 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
beacon.DataColumnStorage = dataColumnStorage
}
if err := dbClearer.clearColumns(beacon.DataColumnStorage); err != nil {
return nil, errors.Wrap(err, "could not clear data column storage")
}
bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
bfs, err := startBaseServices(cliCtx, beacon, depositAddress, dbClearer)
if err != nil {
return nil, errors.Wrap(err, "could not start modules")
}
@@ -292,7 +310,7 @@ func configureBeacon(cliCtx *cli.Context) error {
return nil
}
func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string) (*backfill.Store, error) {
func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string, clearer *dbClearer) (*backfill.Store, error) {
ctx := cliCtx.Context
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
@@ -303,7 +321,7 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
beacon.DataColumnStorage.WarmCache()
log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
if err := beacon.startSlasherDB(cliCtx, clearer); err != nil {
return nil, errors.Wrap(err, "could not start slashing DB")
}
@@ -483,47 +501,6 @@ func (b *BeaconNode) Close() {
close(b.stop)
}
func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath string) (*kv.Store, error) {
var err error
clearDBConfirmed := false
if clearDB && !forceClearDB {
const (
actionText = "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText = "Database will not be deleted. No changes have been made."
)
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return nil, errors.Wrapf(err, "could not confirm action")
}
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return nil, errors.Wrap(err, "could not clear database")
}
if err := b.BlobStorage.Clear(); err != nil {
return nil, errors.Wrap(err, "could not clear blob storage")
}
if err := b.DataColumnStorage.Clear(); err != nil {
return nil, errors.Wrap(err, "could not clear data column storage")
}
d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return nil, errors.Wrap(err, "could not create new database")
}
}
return d, nil
}
func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
@@ -547,60 +524,36 @@ func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
return nil
}
func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
var depositCache cache.DepositCache
baseDir := cliCtx.String(cmd.DataDirFlag.Name)
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDBRequired := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDBRequired := cliCtx.Bool(cmd.ForceClearDB.Name)
func openDB(ctx context.Context, dbPath string, clearer *dbClearer) (*kv.Store, error) {
log.WithField("databasePath", dbPath).Info("Checking DB")
d, err := kv.NewKVStore(b.ctx, dbPath)
d, err := kv.NewKVStore(ctx, dbPath)
if err != nil {
return errors.Wrapf(err, "could not create database at %s", dbPath)
return nil, errors.Wrapf(err, "could not create database at %s", dbPath)
}
if clearDBRequired || forceClearDBRequired {
d, err = b.clearDB(clearDBRequired, forceClearDBRequired, d, dbPath)
if err != nil {
return errors.Wrap(err, "could not clear database")
}
d, err = clearer.clearKV(ctx, d)
if err != nil {
return nil, errors.Wrap(err, "could not clear database")
}
if err := d.RunMigrations(b.ctx); err != nil {
return err
}
return d, d.RunMigrations(ctx)
}
b.db = d
depositCache, err = depositsnapshot.New()
func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
depositCache, err := depositsnapshot.New()
if err != nil {
return errors.Wrap(err, "could not create deposit cache")
}
b.depositCache = depositCache
if b.GenesisInitializer != nil {
if err := b.GenesisInitializer.Initialize(b.ctx, d); err != nil {
if errors.Is(err, db.ErrExistingGenesisState) {
return errors.Errorf("Genesis state flag specified but a genesis state "+
"exists already. Run again with --%s and/or ensure you are using the "+
"appropriate testnet flag to load the given genesis state.", cmd.ClearDB.Name)
}
return errors.Wrap(err, "could not load genesis from file")
}
}
if err := b.db.EnsureEmbeddedGenesis(b.ctx); err != nil {
return errors.Wrap(err, "could not ensure embedded genesis")
}
if b.CheckpointInitializer != nil {
log.Info("Checkpoint sync - Downloading origin state and block")
if err := b.CheckpointInitializer.Initialize(b.ctx, d); err != nil {
if err := b.CheckpointInitializer.Initialize(b.ctx, b.db); err != nil {
return err
}
}
@@ -612,49 +565,25 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
log.WithField("address", depositAddress).Info("Deposit contract")
return nil
}
func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context, clearer *dbClearer) error {
if !b.slasherEnabled {
return nil
}
baseDir := cliCtx.String(cmd.DataDirFlag.Name)
if cliCtx.IsSet(flags.SlasherDirFlag.Name) {
baseDir = cliCtx.String(flags.SlasherDirFlag.Name)
}
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)
log.WithField("databasePath", dbPath).Info("Checking DB")
d, err := slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
return err
}
clearDBConfirmed := false
if clearDB && !forceClearDB {
actionText := "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText := "Database will not be deleted. No changes have been made."
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return err
}
d, err = clearer.clearSlasher(b.ctx, d)
if err != nil {
return errors.Wrap(err, "could not clear slasher database")
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return errors.Wrap(err, "could not clear database")
}
d, err = slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrap(err, "could not create new database")
}
}
b.slasherDB = d
return nil
}
@@ -704,6 +633,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
Discv5BootStrapAddrs: p2p.ParseBootStrapAddrs(bootstrapNodeAddrs),
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
DataDir: dataDir,
DiscoveryDir: filepath.Join(dataDir, "discovery"),
LocalIP: cliCtx.String(cmd.P2PIP.Name),
HostAddress: cliCtx.String(cmd.P2PHost.Name),
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
@@ -721,7 +651,6 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
CustodyInfo: b.custodyInfo,
})
if err != nil {
return err
@@ -804,7 +733,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
blockchain.WithPayloadIDCache(b.payloadIDCache),
blockchain.WithSyncChecker(b.syncChecker),
blockchain.WithCustodyInfo(b.custodyInfo),
blockchain.WithSlasherEnabled(b.slasherEnabled),
blockchain.WithLightClientStore(b.lcStore),
)
@@ -893,7 +821,6 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
regularsync.WithAvailableBlocker(bFillStore),
regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
regularsync.WithCustodyInfo(b.custodyInfo),
regularsync.WithSlasherEnabled(b.slasherEnabled),
regularsync.WithLightClientStore(b.lcStore),
regularsync.WithBatchVerifierLimit(b.cliCtx.Int(flags.BatchVerifierLimit.Name)),
@@ -921,7 +848,6 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
InitialSyncComplete: complete,
BlobStorage: b.BlobStorage,
DataColumnStorage: b.DataColumnStorage,
CustodyInfo: b.custodyInfo,
}, opts...)
return b.services.RegisterService(is)
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/builder"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
"github.com/OffchainLabs/prysm/v6/config/params"
)
// Option for beacon node configuration.
@@ -51,6 +52,13 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
}
}
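// WithConfigOptions registers params options that are applied to the beacon chain
// configuration while the node is being constructed.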
func WithConfigOptions(opt ...params.Option) Option {
return func(bn *BeaconNode) error {
bn.ConfigOptions = append(bn.ConfigOptions, opt...)
return nil
}
}
// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode
func WithDataColumnStorage(bs *filesystem.DataColumnStorage) Option {
return func(bn *BeaconNode) error {

View File

@@ -71,7 +71,6 @@ go_library(
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
"//runtime:go_default_library",
@@ -104,6 +103,7 @@ go_library(
"@com_github_libp2p_go_mplex//:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_patrickmn_go_cache//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -168,7 +168,6 @@ go_test(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network:go_default_library",
"//network/forks:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
@@ -179,6 +178,7 @@ go_test(
"//testing/util:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",

View File

@@ -15,7 +15,6 @@ import (
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
@@ -24,6 +23,8 @@ import (
"google.golang.org/protobuf/proto"
)
const minimumPeersPerSubnetForBroadcast = 1
// ErrMessageNotMapped occurs on a Broadcast attempt when a message has not been defined in the
// GossipTypeMapping.
var ErrMessageNotMapped = errors.New("message type is not mapped to a PubSub topic")
@@ -124,15 +125,13 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
if err := func() error {
s.subnetLocker(subnet).Lock()
defer s.subnetLocker(subnet).Unlock()
ok, err := s.FindPeersWithSubnet(ctx, attestationToTopic(subnet, forkDigest), subnet, 1)
if err != nil {
return err
if err := s.FindAndDialPeersWithSubnets(ctx, AttestationSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
return errors.Wrap(err, "find peers with subnets")
}
if ok {
savedAttestationBroadcasts.Inc()
return nil
}
return errors.New("failed to find peers for subnet")
savedAttestationBroadcasts.Inc()
return nil
}(); err != nil {
log.WithError(err).Error("Failed to find peers")
tracing.AnnotateError(span, err)
@@ -183,15 +182,12 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
if err := func() error {
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()
ok, err := s.FindPeersWithSubnet(ctx, syncCommitteeToTopic(subnet, forkDigest), subnet, 1)
if err != nil {
return err
if err := s.FindAndDialPeersWithSubnets(ctx, SyncCommitteeSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
return errors.Wrap(err, "find peers with subnets")
}
if ok {
savedSyncCommitteeBroadcasts.Inc()
return nil
}
return errors.New("failed to find peers for subnet")
savedSyncCommitteeBroadcasts.Inc()
return nil
}(); err != nil {
log.WithError(err).Error("Failed to find peers")
tracing.AnnotateError(span, err)
@@ -250,15 +246,13 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
if err := func() error {
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()
ok, err := s.FindPeersWithSubnet(ctx, blobSubnetToTopic(subnet, forkDigest), subnet, 1)
if err != nil {
return err
if err := s.FindAndDialPeersWithSubnets(ctx, BlobSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
return errors.Wrap(err, "find peers with subnets")
}
if ok {
blobSidecarBroadcasts.Inc()
return nil
}
return errors.New("failed to find peers for subnet")
blobSidecarBroadcasts.Inc()
return nil
}(); err != nil {
log.WithError(err).Error("Failed to find peers")
tracing.AnnotateError(span, err)
@@ -279,14 +273,8 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
return errors.New("attempted to broadcast nil light client optimistic update")
}
forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest")
tracing.AnnotateError(span, err)
return err
}
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(forkDigest)); err != nil {
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client optimistic update")
err := errors.Wrap(err, "could not publish message")
tracing.AnnotateError(span, err)
@@ -305,13 +293,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
return errors.New("attempted to broadcast nil light client finality update")
}
forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest")
tracing.AnnotateError(span, err)
return err
}
forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client finality update")
err := errors.Wrap(err, "could not publish message")
@@ -329,7 +311,6 @@ func (s *Service) BroadcastDataColumn(
root [fieldparams.RootLength]byte,
dataColumnSubnet uint64,
dataColumnSidecar *ethpb.DataColumnSidecar,
peersCheckedChans ...chan<- bool, // Used for testing purposes to signal when peers are checked.
) error {
// Add tracing to the function.
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumn")
@@ -349,7 +330,7 @@ func (s *Service) BroadcastDataColumn(
}
// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
go s.internalBroadcastDataColumn(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest, peersCheckedChans)
go s.internalBroadcastDataColumn(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest)
return nil
}
@@ -360,7 +341,6 @@ func (s *Service) internalBroadcastDataColumn(
columnSubnet uint64,
dataColumnSidecar *ethpb.DataColumnSidecar,
forkDigest [fieldparams.VersionLength]byte,
peersCheckedChans []chan<- bool, // Used for testing purposes to signal when peers are checked.
) {
// Add tracing to the function.
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
@@ -382,7 +362,7 @@ func (s *Service) internalBroadcastDataColumn(
wrappedSubIdx := columnSubnet + dataColumnSubnetVal
// Find peers if needed.
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, topic, columnSubnet, peersCheckedChans); err != nil {
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, columnSubnet); err != nil {
log.WithError(err).Error("Failed to find peers for data column subnet")
tracing.AnnotateError(span, err)
}
@@ -416,35 +396,19 @@ func (s *Service) internalBroadcastDataColumn(
func (s *Service) findPeersIfNeeded(
ctx context.Context,
wrappedSubIdx uint64,
topic string,
topicFormat string,
forkDigest [fieldparams.VersionLength]byte,
subnet uint64,
peersCheckedChans []chan<- bool, // Used for testing purposes to signal when peers are checked.
) error {
// Sending a data column sidecar to only one peer is not ideal,
// but it ensures at least one peer receives it.
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()
// Sending a data column sidecar to only one peer is not ideal,
// but it ensures at least one peer receives it.
const peerCount = 1
if s.hasPeerWithSubnet(topic) {
// Exit early if we already have peers with this subnet.
return nil
}
// Used for testing purposes.
if len(peersCheckedChans) > 0 {
peersCheckedChans[0] <- true
}
// No peers found, attempt to find peers with this subnet.
ok, err := s.FindPeersWithSubnet(ctx, topic, subnet, peerCount)
if err != nil {
if err := s.FindAndDialPeersWithSubnets(ctx, topicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
return errors.Wrap(err, "find peers with subnet")
}
if !ok {
return errors.Errorf("failed to find peers for topic %s with subnet %d", topic, subnet)
}
return nil
}
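The attestation, sync committee, and blob broadcast paths above now share the same lock-then-discover shape. A condensed editorial sketch of that shape follows; the helper name ensureSubnetPeersForBroadcast is hypothetical, while subnetLocker, FindAndDialPeersWithSubnets, and minimumPeersPerSubnetForBroadcast are the identifiers used or introduced in this diff.

// ensureSubnetPeersForBroadcast is a hypothetical helper capturing the pattern
// used by internalBroadcastAttestation, broadcastSyncCommittee, and
// internalBroadcastBlob above: take the subnet lock, then dial at least one
// peer on the target subnet before publishing.
func (s *Service) ensureSubnetPeersForBroadcast(
	ctx context.Context,
	lockIdx uint64, // subnet index, possibly wrapped for sync-committee/blob subnets
	topicFormat string,
	forkDigest [fieldparams.VersionLength]byte,
	subnet uint64,
) error {
	s.subnetLocker(lockIdx).Lock()
	defer s.subnetLocker(lockIdx).Unlock()
	return s.FindAndDialPeersWithSubnets(ctx, topicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true})
}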

View File

@@ -16,12 +16,13 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
testpb "github.com/OffchainLabs/prysm/v6/proto/testing"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -61,6 +62,7 @@ func TestService_Broadcast(t *testing.T) {
topic := "/eth2/%x/testing"
// Set a test gossip mapping for testpb.TestSimpleMessage.
GossipTypeMapping[reflect.TypeOf(msg)] = topic
p.clock = startup.NewClock(p.genesisTime, bytesutil.ToBytes32(p.genesisValidatorsRoot))
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic = fmt.Sprintf(topic, digest)
@@ -216,9 +218,10 @@ func TestService_BroadcastAttestation(t *testing.T) {
}
func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
const port = uint(2000)
// Setup bootnode.
cfg := &Config{PingInterval: testPingInterval}
port := 2000
cfg.UDPPort = uint(port)
_, pkey := createAddrAndPrivKey(t)
ipAddr := net.ParseIP("127.0.0.1")
@@ -245,7 +248,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
PingInterval: testPingInterval,
}
// Setup 2 different hosts
for i := 1; i <= 2; i++ {
for i := uint(1); i <= 2; i++ {
h, pkey, ipAddr := createHost(t, port+i)
cfg.UDPPort = uint(port + i)
cfg.TCPPort = uint(port + i)
@@ -546,13 +549,11 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
}
l := util.NewTestLightClient(t, version.Altair)
msg, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
msg, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientOptimisticUpdateTopicFormat
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
require.NoError(t, err)
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, digest)
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
@@ -613,13 +614,11 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
}
l := util.NewTestLightClient(t, version.Altair)
msg, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
msg, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientFinalityUpdateTopicFormat
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
require.NoError(t, err)
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, digest)
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
@@ -687,7 +686,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
// Create a host.
_, pkey, ipAddr := createHost(t, port)
p := &Service{
service := &Service{
ctx: t.Context(),
host: p1.BHost,
pubsub: p1.PubSub(),
@@ -701,56 +700,44 @@ func TestService_BroadcastDataColumn(t *testing.T) {
}
// Create a listener.
listener, err := p.startDiscoveryV5(ipAddr, pkey)
listener, err := service.startDiscoveryV5(ipAddr, pkey)
require.NoError(t, err)
p.dv5Listener = listener
service.dv5Listener = listener
digest, err := p.currentForkDigest()
digest, err := service.currentForkDigest()
require.NoError(t, err)
subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
topic := fmt.Sprintf(topicFormat, digest, subnet)
topic := fmt.Sprintf(topicFormat, digest, subnet) + service.Encoding().ProtocolSuffix()
roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
sidecar := roSidecars[0].DataColumnSidecar
// Async listen for the pubsub, must be before the broadcast.
var wg sync.WaitGroup
wg.Add(1)
peersChecked := make(chan bool, 0)
go func(tt *testing.T) {
defer wg.Done()
ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
defer cancel()
// Wait for the peers to be checked.
<-peersChecked
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(tt, err)
msg, err := sub.Next(ctx)
require.NoError(tt, err)
var result ethpb.DataColumnSidecar
require.NoError(tt, p.Encoding().DecodeGossip(msg.Data, &result))
require.DeepEqual(tt, &result, sidecar)
}(t)
var emptyRoot [fieldparams.RootLength]byte
// Attempt to broadcast nil object should fail.
err = p.BroadcastDataColumn(emptyRoot, subnet, nil)
var emptyRoot [fieldparams.RootLength]byte
err = service.BroadcastDataColumn(emptyRoot, subnet, nil)
require.ErrorContains(t, "attempted to broadcast nil", err)
// Broadcast to peers and wait.
err = p.BroadcastDataColumn(emptyRoot, subnet, sidecar, peersChecked)
// Subscribe to the topic.
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)
require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Minute), "Failed to receive pubsub within 1s")
// libp2p fails without this delay
time.Sleep(50 * time.Millisecond)
// Broadcast to peers and wait.
err = service.BroadcastDataColumn(emptyRoot, subnet, sidecar)
require.NoError(t, err)
// Receive the message.
ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
defer cancel()
msg, err := sub.Next(ctx)
require.NoError(t, err)
var result ethpb.DataColumnSidecar
require.NoError(t, service.Encoding().DecodeGossip(msg.Data, &result))
require.DeepEqual(t, &result, sidecar)
}

View File

@@ -4,7 +4,6 @@ import (
"time"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
)
@@ -27,6 +26,7 @@ type Config struct {
HostDNS string
PrivateKey string
DataDir string
DiscoveryDir string
MetaDataDir string
QUICPort uint
TCPPort uint
@@ -39,7 +39,6 @@ type Config struct {
StateNotifier statefeed.Notifier
DB db.ReadOnlyDatabase
ClockWaiter startup.ClockWaiter
CustodyInfo *peerdas.CustodyInfo
}
// validateConfig validates whether the values provided are accurate and will set

View File

@@ -19,8 +19,7 @@ const (
// Burst limit for inbound dials.
ipBurst = 8
// High watermark buffer signifies the buffer till which
// we will handle inbound requests.
// High watermark buffer signifies the buffer till which we will handle inbound requests.
highWatermarkBuffer = 20
)
@@ -53,7 +52,7 @@ func (s *Service) InterceptAccept(n network.ConnMultiaddrs) (allow bool) {
"reason": "exceeded dial limit"}).Trace("Not accepting inbound dial from ip address")
return false
}
if s.isPeerAtLimit(true /* inbound */) {
if s.isPeerAtLimit(inbound) {
log.WithFields(logrus.Fields{"peer": n.RemoteMultiaddr(),
"reason": "at peer limit"}).Trace("Not accepting inbound dial")
return false

View File

@@ -3,12 +3,68 @@ package p2p
import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var _ DataColumnsHandler = (*Service)(nil)
// EarliestAvailableSlot returns the earliest available slot.
func (s *Service) EarliestAvailableSlot() primitives.Slot {
s.custodyInfoMut.RLock()
defer s.custodyInfoMut.RUnlock()
return s.earliestAvailableSlot
}
// CustodyGroupCount returns the custody group count.
func (s *Service) CustodyGroupCount() uint64 {
s.custodyInfoMut.Lock()
defer s.custodyInfoMut.Unlock()
return s.custodyGroupCount
}
// UpdateCustodyInfo updates the custody group count and earliest available slot
// if the new custody group count is greater than the stored one.
// It returns the (potentially updated) earliest available slot and custody group count.
func (s *Service) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
s.custodyInfoMut.Lock()
defer s.custodyInfoMut.Unlock()
if custodyGroupCount <= s.custodyGroupCount {
return s.earliestAvailableSlot, s.custodyGroupCount, nil
}
if earliestAvailableSlot < s.earliestAvailableSlot {
return 0, 0, errors.Errorf(
"earliest available slot %d is less than the current one %d. (custody group count: %d, current one: %d)",
earliestAvailableSlot, s.earliestAvailableSlot, custodyGroupCount, s.custodyGroupCount,
)
}
s.custodyGroupCount = custodyGroupCount
fuluForkSlot, err := fuluForkSlot()
if err != nil {
return 0, 0, errors.Wrap(err, "fulu fork slot")
}
if earliestAvailableSlot >= fuluForkSlot {
s.earliestAvailableSlot = earliestAvailableSlot
}
log.WithFields(logrus.Fields{
"earliestAvailableSlot": s.earliestAvailableSlot,
"custodyGroupCount": s.custodyGroupCount,
}).Debug("Custody info updated")
return s.earliestAvailableSlot, s.custodyGroupCount, nil
}
// CustodyGroupCountFromPeer retrieves custody group count from a peer.
// It first tries to get the custody group count from the peer's metadata,
// then falls back to the ENR value if the metadata is not available, then
@@ -72,3 +128,19 @@ func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
return custodyGroupCount
}
func fuluForkSlot() (primitives.Slot, error) {
beaconConfig := params.BeaconConfig()
fuluForkEpoch := beaconConfig.FuluForkEpoch
if fuluForkEpoch == beaconConfig.FarFutureEpoch {
return beaconConfig.FarFutureSlot, nil
}
forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
if err != nil {
return 0, errors.Wrap(err, "epoch start")
}
return forkFuluSlot, nil
}
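A short editorial sketch of the update rule implemented above, assuming a package-internal caller and an assumed starting state of 8 custody groups from slot 100; the function exampleCustodyInfoUpdate is hypothetical.

// exampleCustodyInfoUpdate illustrates UpdateCustodyInfo's monotonic behavior:
// a lower or equal custody group count returns the stored values unchanged,
// while a higher count with a non-regressing earliest available slot is accepted.
func exampleCustodyInfoUpdate(s *Service) error {
	// Lower count than the stored one (assumed 8): nothing changes, no error.
	if _, _, err := s.UpdateCustodyInfo(200, 4); err != nil {
		return err
	}
	// Higher count: accepted. The earliest available slot is only raised when
	// it is at or past the Fulu fork slot.
	_, _, err := s.UpdateCustodyInfo(200, 16)
	return err
}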

View File

@@ -2,13 +2,16 @@ package p2p
import (
"bytes"
"context"
"crypto/ecdsa"
"math"
"net"
"sync"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -26,42 +29,54 @@ import (
"github.com/sirupsen/logrus"
)
type ListenerRebooter interface {
Listener
RebootListener() error
}
type (
// ListenerRebooter is an interface that extends the Listener interface
// with the `RebootListener` method.
ListenerRebooter interface {
Listener
RebootListener() error
}
// Listener defines the discovery V5 network interface that is used
// to communicate with other peers.
type Listener interface {
Self() *enode.Node
Close()
Lookup(enode.ID) []*enode.Node
Resolve(*enode.Node) *enode.Node
RandomNodes() enode.Iterator
Ping(*enode.Node) error
RequestENR(*enode.Node) (*enode.Node, error)
LocalNode() *enode.LocalNode
}
// Listener defines the discovery V5 network interface that is used
// to communicate with other peers.
Listener interface {
Self() *enode.Node
Close()
Lookup(enode.ID) []*enode.Node
Resolve(*enode.Node) *enode.Node
RandomNodes() enode.Iterator
Ping(*enode.Node) error
RequestENR(*enode.Node) (*enode.Node, error)
LocalNode() *enode.LocalNode
}
const (
udp4 = iota
udp6
quicProtocol uint16
listenerWrapper struct {
mu sync.RWMutex
listener *discover.UDPv5
listenerCreator func() (*discover.UDPv5, error)
}
connectivityDirection int
udpVersion int
)
const quickProtocolEnrKey = "quic"
type quicProtocol uint16
const (
udp4 udpVersion = iota
udp6
)
const (
inbound connectivityDirection = iota
all
)
// quicProtocol is the "quic" key, which holds the QUIC port of the node.
func (quicProtocol) ENRKey() string { return quickProtocolEnrKey }
type listenerWrapper struct {
mu sync.RWMutex
listener *discover.UDPv5
listenerCreator func() (*discover.UDPv5, error)
}
func newListener(listenerCreator func() (*discover.UDPv5, error)) (*listenerWrapper, error) {
rawListener, err := listenerCreator()
if err != nil {
@@ -227,9 +242,6 @@ func (s *Service) RefreshPersistentSubnets() {
// Compare current epoch with the Fulu fork epoch.
fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
// Get the current custody group count.
custodyGroupCount := s.cfg.CustodyInfo.ActualGroupCount()
// Get the custody group count we store in our record.
inRecordCustodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record)
if err != nil {
@@ -237,6 +249,8 @@ func (s *Service) RefreshPersistentSubnets() {
return
}
custodyGroupCount := s.CustodyGroupCount()
// We add `1` to the current epoch because we want to prepare one epoch before the Fulu fork.
if currentEpoch+1 < fuluForkEpoch {
// Is our custody group count record up to date?
@@ -278,29 +292,10 @@ func (s *Service) RefreshPersistentSubnets() {
// listenForNewNodes watches for new nodes in the network and adds them to the peerstore.
func (s *Service) listenForNewNodes() {
const (
minLogInterval = 1 * time.Minute
thresholdLimit = 5
searchPeriod = 20 * time.Second
)
peersSummary := func(threshold uint) (uint, uint) {
// Retrieve how many active peers we have.
activePeers := s.Peers().Active()
activePeerCount := uint(len(activePeers))
// Compute how many peers we are missing to reach the threshold.
if activePeerCount >= threshold {
return activePeerCount, 0
}
missingPeerCount := threshold - activePeerCount
return activePeerCount, missingPeerCount
}
var lastLogTime time.Time
iterator := s.dv5Listener.RandomNodes()
defer iterator.Close()
connectivityTicker := time.NewTicker(1 * time.Minute)
thresholdCount := 0
@@ -332,74 +327,148 @@ func (s *Service) listenForNewNodes() {
continue
}
iterator = s.dv5Listener.RandomNodes()
thresholdCount = 0
}
default:
if s.isPeerAtLimit(false /* inbound */) {
// Pause the main loop for a period to stop looking
// for new peers.
if s.isPeerAtLimit(all) {
// Pause the main loop for a period to stop looking for new peers.
log.Trace("Not looking for peers, at peer limit")
time.Sleep(pollingPeriod)
continue
}
// Compute the number of new peers we want to dial.
activePeerCount, missingPeerCount := peersSummary(s.cfg.MaxPeers)
fields := logrus.Fields{
"currentPeerCount": activePeerCount,
"targetPeerCount": s.cfg.MaxPeers,
// Return early if the discovery listener isn't set.
if s.dv5Listener == nil {
return
}
if missingPeerCount == 0 {
log.Trace("Not looking for peers, at peer limit")
time.Sleep(pollingPeriod)
continue
}
func() {
ctx, cancel := context.WithTimeout(s.ctx, searchPeriod)
defer cancel()
if time.Since(lastLogTime) > minLogInterval {
lastLogTime = time.Now()
log.WithFields(fields).Debug("Searching for new active peers")
}
// Restrict dials if limit is applied.
if flags.MaxDialIsActive() {
maxConcurrentDials := uint(flags.Get().MaxConcurrentDials)
missingPeerCount = min(missingPeerCount, maxConcurrentDials)
}
// Search for new peers.
wantedNodes := searchForPeers(iterator, batchPeriod, missingPeerCount, s.filterPeer)
wg := new(sync.WaitGroup)
for i := 0; i < len(wantedNodes); i++ {
node := wantedNodes[i]
peerInfo, _, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Error("Could not convert to peer info")
continue
if err := s.findAndDialPeers(ctx); err != nil && !errors.Is(err, context.DeadlineExceeded) {
log.WithError(err).Error("Failed to find and dial peers")
}
if peerInfo == nil {
continue
}
// Make sure that peer is not dialed too often, for each connection attempt there's a backoff period.
s.Peers().RandomizeBackOff(peerInfo.ID)
wg.Add(1)
go func(info *peer.AddrInfo) {
if err := s.connectWithPeer(s.ctx, *info); err != nil {
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
}
wg.Done()
}(peerInfo)
}
wg.Wait()
}()
}
}
}
// findAndDialPeers ensures that our node is connected to enough peers.
// If the threshold is met, this function returns immediately.
// Otherwise, it searches for new peers and dials them.
// If `ctx` is canceled while searching for peers, the search is stopped, but newly found peers are still dialed.
// In this case, the function returns an error.
func (s *Service) findAndDialPeers(ctx context.Context) error {
// Restrict dials if limit is applied.
maxConcurrentDials := math.MaxInt
if flags.MaxDialIsActive() {
maxConcurrentDials = flags.Get().MaxConcurrentDials
}
missingPeerCount := s.missingPeerCount(s.cfg.MaxPeers)
for missingPeerCount > 0 {
// Stop the search/dialing loop if the context is canceled.
if err := ctx.Err(); err != nil {
return err
}
peersToDial, err := func() ([]*enode.Node, error) {
ctx, cancel := context.WithTimeout(ctx, batchPeriod)
defer cancel()
peersToDial, err := s.findPeers(ctx, missingPeerCount)
if err != nil && !errors.Is(err, context.DeadlineExceeded) {
return nil, errors.Wrap(err, "find peers")
}
return peersToDial, nil
}()
if err != nil {
return err
}
dialedPeerCount := s.dialPeers(s.ctx, maxConcurrentDials, peersToDial)
if dialedPeerCount > missingPeerCount {
missingPeerCount = 0
continue
}
missingPeerCount -= dialedPeerCount
}
return nil
}
// findPeers searches for new peers until `missingPeerCount` candidates are found, the caller's
// `batchPeriod` deadline expires, the peers iterator is exhausted, or the context is canceled.
func (s *Service) findPeers(ctx context.Context, missingPeerCount uint) ([]*enode.Node, error) {
// Create a discovery iterator to find new peers.
iterator := s.dv5Listener.RandomNodes()
// `iterator.Next` can block indefinitely. `iterator.Close` unblocks it.
// So it is important to close the iterator when the context is done to ensure
// that the search does not hang indefinitely.
go func() {
<-ctx.Done()
iterator.Close()
}()
// Crawl the network for new candidate peers.
nodeByNodeID := make(map[enode.ID]*enode.Node)
for missingPeerCount > 0 && iterator.Next() {
if ctx.Err() != nil {
peersToDial := make([]*enode.Node, 0, len(nodeByNodeID))
for _, node := range nodeByNodeID {
peersToDial = append(peersToDial, node)
}
return peersToDial, ctx.Err()
}
// Skip peer not matching the filter.
node := iterator.Node()
if !s.filterPeer(node) {
continue
}
// Remove duplicates, keeping the node with higher seq.
existing, ok := nodeByNodeID[node.ID()]
if ok && existing.Seq() > node.Seq() {
continue
}
nodeByNodeID[node.ID()] = node
// We found a new peer. Decrease the missing peer count.
missingPeerCount--
}
// Convert the map to a slice.
peersToDial := make([]*enode.Node, 0, len(nodeByNodeID))
for _, node := range nodeByNodeID {
peersToDial = append(peersToDial, node)
}
return peersToDial, nil
}
// missingPeerCount computes how many peers we are missing to reach the target peer count.
func (s *Service) missingPeerCount(targetCount uint) uint {
// Retrieve how many active peers we have.
activePeers := s.Peers().Active()
activePeerCount := uint(len(activePeers))
// Compute how many peers we are missing to reach the threshold.
missingPeerCount := uint(0)
if targetCount > activePeerCount {
missingPeerCount = targetCount - activePeerCount
}
return missingPeerCount
}
func (s *Service) createListener(
ipAddr net.IP,
privKey *ecdsa.PrivateKey,
@@ -478,7 +547,7 @@ func (s *Service) createLocalNode(
ipAddr net.IP,
udpPort, tcpPort, quicPort int,
) (*enode.LocalNode, error) {
db, err := enode.OpenDB("")
db, err := enode.OpenDB(s.cfg.DiscoveryDir)
if err != nil {
return nil, errors.Wrap(err, "could not open node's peer database")
}
@@ -498,22 +567,23 @@ func (s *Service) createLocalNode(
localNode.Set(quicEntry)
}
if params.FuluEnabled() {
custodyGroupCount := s.cfg.CustodyInfo.ActualGroupCount()
localNode.Set(peerdas.Cgc(custodyGroupCount))
}
localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)
localNode, err = addForkEntry(localNode, s.genesisTime, s.genesisValidatorsRoot)
if err != nil {
clock := startup.NewClock(s.genesisTime, [32]byte(s.genesisValidatorsRoot))
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
if err := updateENR(localNode, current, next); err != nil {
return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
}
localNode = initializeAttSubnets(localNode)
localNode = initializeSyncCommSubnets(localNode)
custodyGroupCount := s.CustodyGroupCount()
custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
localNode.Set(custodyGroupCountEntry)
if s.cfg != nil && s.cfg.HostAddress != "" {
hostIP := net.ParseIP(s.cfg.HostAddress)
if hostIP.To4() == nil && hostIP.To16() == nil {
@@ -537,7 +607,10 @@ func (s *Service) createLocalNode(
localNode.SetFallbackIP(firstIP)
}
}
log.WithFields(logrus.Fields{
"seq": localNode.Seq(),
"id": localNode.ID(),
}).Debug("Local node created")
return localNode, nil
}
@@ -553,7 +626,11 @@ func (s *Service) startDiscoveryV5(
return nil, errors.Wrap(err, "could not create listener")
}
record := wrappedListener.Self()
log.WithField("ENR", record.String()).Info("Started discovery v5")
log.WithFields(logrus.Fields{
"ENR": record.String(),
"seq": record.Seq(),
}).Info("Started discovery v5")
return wrappedListener, nil
}
@@ -564,8 +641,7 @@ func (s *Service) startDiscoveryV5(
// 2. Peer hasn't been marked as 'bad'.
// 3. Peer is not currently active or connected.
// 4. Peer is ready to receive incoming connections.
// 5. Peer's fork digest in their ENR matches that of
// our localnodes.
// 5. Peer's fork digest in their ENR matches that of our localnodes.
func (s *Service) filterPeer(node *enode.Node) bool {
// Ignore nil node entries passed in.
if node == nil {
@@ -612,7 +688,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {
// Ignore nodes that don't match our fork digest.
nodeENR := node.Record()
if s.genesisValidatorsRoot != nil {
if err := s.compareForkENR(nodeENR); err != nil {
if err := compareForkENR(s.dv5Listener.LocalNode().Node().Record(), nodeENR); err != nil {
log.WithError(err).Trace("Fork ENR mismatches between peer and local node")
return false
}
@@ -630,22 +706,24 @@ func (s *Service) filterPeer(node *enode.Node) bool {
// This checks our set max peers in our config, and
// determines whether our currently connected and
// active peers are above our set max peer limit.
func (s *Service) isPeerAtLimit(inbound bool) bool {
numOfConns := len(s.host.Network().Peers())
func (s *Service) isPeerAtLimit(direction connectivityDirection) bool {
maxPeers := int(s.cfg.MaxPeers)
// If we are measuring the limit for inbound peers
// we apply the high watermark buffer.
if inbound {
// If we are measuring the limit for inbound peers we apply the high watermark buffer.
if direction == inbound {
maxPeers += highWatermarkBuffer
maxInbound := s.peers.InboundLimit() + highWatermarkBuffer
currInbound := len(s.peers.InboundConnected())
// Exit early if we are at the inbound limit.
if currInbound >= maxInbound {
inboundCount := len(s.peers.InboundConnected())
// Return early if we are at the inbound limit.
if inboundCount >= maxInbound {
return true
}
}
activePeers := len(s.Peers().Active())
return activePeers >= maxPeers || numOfConns >= maxPeers
peerCount := len(s.host.Network().Peers())
activePeerCount := len(s.Peers().Active())
return activePeerCount >= maxPeers || peerCount >= maxPeers
}
// isBelowOutboundPeerThreshold checks if the number of outbound peers that
@@ -903,7 +981,7 @@ func multiAddrFromString(address string) (ma.Multiaddr, error) {
return ma.NewMultiaddr(address)
}
func udpVersionFromIP(ipAddr net.IP) int {
func udpVersionFromIP(ipAddr net.IP) udpVersion {
if ipAddr.To4() != nil {
return udp4
}

View File

@@ -16,7 +16,6 @@ import (
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
@@ -157,27 +156,27 @@ func TestCreateLocalNode(t *testing.T) {
}{
{
name: "valid config",
cfg: &Config{CustodyInfo: &peerdas.CustodyInfo{}},
cfg: &Config{},
expectedError: false,
},
{
name: "invalid host address",
cfg: &Config{HostAddress: "invalid", CustodyInfo: &peerdas.CustodyInfo{}},
cfg: &Config{HostAddress: "invalid"},
expectedError: true,
},
{
name: "valid host address",
cfg: &Config{HostAddress: "192.168.0.1", CustodyInfo: &peerdas.CustodyInfo{}},
cfg: &Config{HostAddress: "192.168.0.1"},
expectedError: false,
},
{
name: "invalid host DNS",
cfg: &Config{HostDNS: "invalid", CustodyInfo: &peerdas.CustodyInfo{}},
cfg: &Config{HostDNS: "invalid"},
expectedError: true,
},
{
name: "valid host DNS",
cfg: &Config{HostDNS: "www.google.com", CustodyInfo: &peerdas.CustodyInfo{}},
cfg: &Config{HostDNS: "www.google.com"},
expectedError: false,
},
}
@@ -199,6 +198,7 @@ func TestCreateLocalNode(t *testing.T) {
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: tt.cfg,
custodyGroupCount: params.BeaconConfig().CustodyRequirement,
}
localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort, quicPort)
@@ -210,7 +210,7 @@ func TestCreateLocalNode(t *testing.T) {
require.NoError(t, err)
expectedAddress := address
if tt.cfg.HostAddress != "" {
if tt.cfg != nil && tt.cfg.HostAddress != "" {
expectedAddress = net.ParseIP(tt.cfg.HostAddress)
}
@@ -264,6 +264,7 @@ func TestRebootDiscoveryListener(t *testing.T) {
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: &Config{UDPPort: uint(port)},
}
createListener := func() (*discover.UDPv5, error) {
return s.createListener(ipAddr, pkey)
}
@@ -293,6 +294,7 @@ func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
s := &Service{
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
cfg: &Config{},
}
node, err := s.createLocalNode(pkey, addr, 0, 0, 0)
require.NoError(t, err)
@@ -323,16 +325,16 @@ func TestMultiAddrConversion_OK(t *testing.T) {
}
func TestStaticPeering_PeersAreAdded(t *testing.T) {
const port = uint(6000)
cs := startup.NewClockSynchronizer()
cfg := &Config{
MaxPeers: 30,
ClockWaiter: cs,
}
port := 6000
var staticPeers []string
var hosts []host.Host
// setup other nodes
for i := 1; i <= 5; i++ {
for i := uint(1); i <= 5; i++ {
h, _, ipaddr := createHost(t, port+i)
staticPeers = append(staticPeers, fmt.Sprintf("/ip4/%s/tcp/%d/p2p/%s", ipaddr, port+i, h.ID()))
hosts = append(hosts, h)
@@ -370,14 +372,17 @@ func TestStaticPeering_PeersAreAdded(t *testing.T) {
}
func TestHostIsResolved(t *testing.T) {
// ip.addr.tools - construct domain names that resolve to any given IP address
// ex: 192-0-2-1.ip.addr.tools resolves to 192.0.2.1.
exampleHost := "96-7-129-13.ip.addr.tools"
exampleIP := "96.7.129.13"
host := "dns.google"
ips := map[string]bool{
"8.8.8.8": true,
"8.8.4.4": true,
"2001:4860:4860::8888": true,
"2001:4860:4860::8844": true,
}
s := &Service{
cfg: &Config{
HostDNS: exampleHost,
HostDNS: host,
},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
@@ -387,7 +392,7 @@ func TestHostIsResolved(t *testing.T) {
require.NoError(t, err)
newIP := list.Self().IP()
assert.Equal(t, exampleIP, newIP.String(), "Did not resolve to expected IP")
assert.Equal(t, true, ips[newIP.String()], "Did not resolve to expected IP")
}
func TestInboundPeerLimit(t *testing.T) {
@@ -406,14 +411,14 @@ func TestInboundPeerLimit(t *testing.T) {
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
}
require.Equal(t, true, s.isPeerAtLimit(false), "not at limit for outbound peers")
require.Equal(t, false, s.isPeerAtLimit(true), "at limit for inbound peers")
require.Equal(t, true, s.isPeerAtLimit(all), "not at limit for outbound peers")
require.Equal(t, false, s.isPeerAtLimit(inbound), "at limit for inbound peers")
for i := 0; i < highWatermarkBuffer; i++ {
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
}
require.Equal(t, true, s.isPeerAtLimit(true), "not at limit for inbound peers")
require.Equal(t, true, s.isPeerAtLimit(inbound), "not at limit for inbound peers")
}
func TestOutboundPeerThreshold(t *testing.T) {
@@ -492,6 +497,35 @@ func TestMultipleDiscoveryAddresses(t *testing.T) {
assert.Equal(t, true, ipv6Found, "IPv6 discovery address not found")
}
func TestDiscoveryV5_SeqNumber(t *testing.T) {
db, err := enode.OpenDB(t.TempDir())
require.NoError(t, err)
_, key := createAddrAndPrivKey(t)
node := enode.NewLocalNode(db, key)
node.Set(enr.IPv4{127, 0, 0, 1})
currentSeq := node.Seq()
s := &Service{dv5Listener: mockListener{localNode: node}}
_, err = s.DiscoveryAddresses()
require.NoError(t, err)
newSeq := node.Seq()
require.Equal(t, currentSeq+1, newSeq) // node seq should increase when discovery starts
// see that the keys changing, will change the node seq
_, keyTwo := createAddrAndPrivKey(t)
nodeTwo := enode.NewLocalNode(db, keyTwo) // use the same db with different key
nodeTwo.Set(enr.IPv6{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68})
seqTwo := nodeTwo.Seq()
assert.NotEqual(t, seqTwo, newSeq)
sTwo := &Service{dv5Listener: mockListener{localNode: nodeTwo}}
_, err = sTwo.DiscoveryAddresses()
require.NoError(t, err)
assert.Equal(t, seqTwo+1, nodeTwo.Seq())
// see that reloading the same node with same key and db results in same seq number
nodeThree := enode.NewLocalNode(db, key)
assert.Equal(t, node.Seq(), nodeThree.Seq())
}
func TestCorrectUDPVersion(t *testing.T) {
assert.Equal(t, udp4, udpVersionFromIP(net.IPv4zero), "incorrect network version")
assert.Equal(t, udp6, udpVersionFromIP(net.IPv6zero), "incorrect network version")
@@ -784,10 +818,11 @@ func TestRefreshPersistentSubnets(t *testing.T) {
actualPingCount++
return nil
},
cfg: &Config{UDPPort: 2000, CustodyInfo: &peerdas.CustodyInfo{}},
cfg: &Config{UDPPort: 2000},
peers: p2p.Peers(),
genesisTime: time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
custodyGroupCount: params.BeaconConfig().CustodyRequirement,
}
// Set the listener and the metadata.

View File

@@ -1,7 +1,7 @@
/*
Package p2p implements the Ethereum consensus networking specification.
Canonical spec reference: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md
Canonical spec reference: https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md
Prysm specific implementation design docs
- Networking Design Doc: https://docs.google.com/document/d/1VyhobQRkEjEkEPxmmdWvaHfKWn0j6dEae_wLZlrFtfU/view

View File

@@ -3,19 +3,18 @@ package p2p
import (
"bytes"
"fmt"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var errEth2ENRDigestMismatch = errors.New("fork digest of peer does not match local value")
// ENR key used for Ethereum consensus-related fork data.
var eth2ENRKey = params.BeaconNetworkConfig().ETH2Key
@@ -25,29 +24,30 @@ func (s *Service) currentForkDigest() ([4]byte, error) {
if !s.isInitialized() {
return [4]byte{}, errors.New("state is not initialized")
}
return forks.CreateForkDigest(s.genesisTime, s.genesisValidatorsRoot)
clock := startup.NewClock(s.genesisTime, [32]byte(s.genesisValidatorsRoot))
return params.ForkDigest(clock.CurrentEpoch()), nil
}
// Compares fork ENRs between an incoming peer's record and our node's
// local record values for current and next fork version/epoch.
func (s *Service) compareForkENR(record *enr.Record) error {
currentRecord := s.dv5Listener.LocalNode().Node().Record()
peerForkENR, err := forkEntry(record)
func compareForkENR(self, peer *enr.Record) error {
peerForkENR, err := forkEntry(peer)
if err != nil {
return err
}
currentForkENR, err := forkEntry(currentRecord)
currentForkENR, err := forkEntry(self)
if err != nil {
return err
}
enrString, err := SerializeENR(record)
enrString, err := SerializeENR(peer)
if err != nil {
return err
}
// Clients SHOULD connect to peers with current_fork_digest, next_fork_version,
// and next_fork_epoch that match local values.
if !bytes.Equal(peerForkENR.CurrentForkDigest, currentForkENR.CurrentForkDigest) {
return fmt.Errorf(
return errors.Wrapf(errEth2ENRDigestMismatch,
"fork digest of peer with ENR %s: %v, does not match local value: %v",
enrString,
peerForkENR.CurrentForkDigest,
@@ -74,41 +74,36 @@ func (s *Service) compareForkENR(record *enr.Record) error {
return nil
}
// Adds a fork entry as an ENR record under the Ethereum consensus EnrKey for
// the local node. The fork entry is an ssz-encoded enrForkID type
// which takes into account the current fork version from the current
// epoch to create a fork digest, the next fork version,
// and the next fork epoch.
func addForkEntry(
node *enode.LocalNode,
genesisTime time.Time,
genesisValidatorsRoot []byte,
) (*enode.LocalNode, error) {
digest, err := forks.CreateForkDigest(genesisTime, genesisValidatorsRoot)
if err != nil {
return nil, err
}
currentSlot := slots.CurrentSlot(genesisTime)
currentEpoch := slots.ToEpoch(currentSlot)
if prysmTime.Now().Before(genesisTime) {
currentEpoch = 0
}
nextForkVersion, nextForkEpoch, err := forks.NextForkData(currentEpoch)
if err != nil {
return nil, err
}
func updateENR(node *enode.LocalNode, entry, next params.NetworkScheduleEntry) error {
enrForkID := &pb.ENRForkID{
CurrentForkDigest: digest[:],
NextForkVersion: nextForkVersion[:],
NextForkEpoch: nextForkEpoch,
CurrentForkDigest: entry.ForkDigest[:],
NextForkVersion: next.ForkVersion[:],
NextForkEpoch: next.Epoch,
}
if entry.Epoch == next.Epoch {
enrForkID.NextForkEpoch = params.BeaconConfig().FarFutureEpoch
}
logFields := logrus.Fields{
"CurrentForkDigest": fmt.Sprintf("%#x", enrForkID.CurrentForkDigest),
"NextForkVersion": fmt.Sprintf("%#x", enrForkID.NextForkVersion),
"NextForkEpoch": fmt.Sprintf("%d", enrForkID.NextForkEpoch),
}
if params.BeaconConfig().FuluForkEpoch != params.BeaconConfig().FarFutureEpoch {
if entry.ForkDigest == next.ForkDigest {
node.Set(enr.WithEntry(nfdEnrKey, make([]byte, len(next.ForkDigest))))
} else {
node.Set(enr.WithEntry(nfdEnrKey, next.ForkDigest[:]))
}
logFields[nfdEnrKey] = fmt.Sprintf("%#x", next.ForkDigest)
}
log.WithFields(logFields).Info("Updating ENR Fork ID")
enc, err := enrForkID.MarshalSSZ()
if err != nil {
return nil, err
return err
}
forkEntry := enr.WithEntry(eth2ENRKey, enc)
node.Set(forkEntry)
return node, nil
return nil
}
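
For orientation, the new updateENR helper is driven the same way everywhere in this diff: look up the schedule entry for the current epoch, look up the next one, and pass both in. A minimal sketch of that call pattern, assuming the package's existing imports (params, enode, primitives); the wrapper name refreshForkENR is illustrative and not part of the change:

// Illustrative wrapper around updateENR for a given epoch.
func refreshForkENR(node *enode.LocalNode, epoch primitives.Epoch) error {
	current := params.GetNetworkScheduleEntry(epoch)
	next := params.NextNetworkScheduleEntry(epoch)
	return updateENR(node, current, next)
}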
// Retrieves an enrForkID from an ENR record by key lookup

View File

@@ -8,242 +8,121 @@ import (
"testing"
"time"
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
ma "github.com/multiformats/go-multiaddr"
"github.com/sirupsen/logrus"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
const port = 2000
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, fieldparams.RootLength)
s := &Service{
cfg: &Config{
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()
// Allow bootnode's table to have its initial refresh. This allows
// inbound nodes to be added in.
time.Sleep(5 * time.Second)
bootNode := bootListener.Self()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
}
var listeners []*listenerWrapper
for i := 1; i <= 5; i++ {
port := 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)
// We give every peer a different genesis validators root, which
// will cause each peer to have a different ForkDigest, preventing
// them from connecting according to our discovery rules for Ethereum consensus.
root := make([]byte, 32)
copy(root, strconv.Itoa(port))
s = &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: root,
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")
listeners = append(listeners, listener)
}
defer func() {
// Close down all peers.
for _, listener := range listeners {
listener.Close()
}
}()
// Wait for the nodes' local routing tables to be populated with the other nodes
time.Sleep(discoveryWaitTime)
lastListener := listeners[len(listeners)-1]
nodes := lastListener.Lookup(bootNode.ID())
if len(nodes) < 4 {
t.Errorf("The node's local table doesn't have the expected number of nodes. "+
"Expected more than or equal to %d but got %d", 4, len(nodes))
}
// Now, we start a new p2p service. It should have no peers aside from the
// bootnode given all nodes provided by discv5 will have different fork digests.
cfg.UDPPort = 14000
cfg.TCPPort = 14001
cfg.MaxPeers = 30
s, err = NewService(t.Context(), cfg)
require.NoError(t, err)
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
addrs := make([]ma.Multiaddr, 0)
for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
require.NoError(t, err)
addrs = append(addrs, nodeAddrs...)
}
}
// We should not have valid peers if the fork digest mismatched.
assert.Equal(t, 0, len(addrs), "Expected 0 valid peers")
require.NoError(t, s.Stop())
}
func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
const port = 2000
func TestCompareForkENR(t *testing.T) {
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
params.BeaconConfig().InitializeForkSchedule()
logrus.SetLevel(logrus.TraceLevel)
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
s := &Service{
cfg: &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()
// Allow bootnode's table to have its initial refresh. This allows
// inbound nodes to be added in.
time.Sleep(5 * time.Second)
db, err := enode.OpenDB("")
assert.NoError(t, err)
_, k := createAddrAndPrivKey(t)
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
self := enode.NewLocalNode(db, k)
require.NoError(t, updateENR(self, current, next))
bootNode := bootListener.Self()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
PingInterval: testPingInterval,
DisableLivenessCheck: true,
cases := []struct {
name string
expectErr error
expectLog string
node func(t *testing.T) *enode.Node
}{
{
name: "match",
node: func(t *testing.T) *enode.Node {
// Create a peer with the same current fork digest and next fork version/epoch.
peer := enode.NewLocalNode(db, k)
require.NoError(t, updateENR(peer, current, next))
return peer.Node()
},
},
{
name: "current digest mismatch",
node: func(t *testing.T) *enode.Node {
// Create a peer whose current fork digest differs from the local value.
peer := enode.NewLocalNode(db, k)
testDigest := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
require.NotEqual(t, current.ForkDigest, testDigest, "ensure test fork digest is unique")
current := current.Copy()
current.ForkDigest = testDigest
require.NoError(t, updateENR(peer, current, next))
return peer.Node()
},
expectErr: errEth2ENRDigestMismatch,
},
{
name: "next fork version mismatch",
node: func(t *testing.T) *enode.Node {
// Create a peer whose next fork version differs from the local value.
peer := enode.NewLocalNode(db, k)
testVersion := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
require.NotEqual(t, next.ForkVersion, testVersion, "ensure test fork version is unique")
next := next.Copy()
next.ForkVersion = testVersion
require.NoError(t, updateENR(peer, current, next))
return peer.Node()
},
expectLog: "Peer matches fork digest but has different next fork version",
},
{
name: "next fork epoch mismatch",
node: func(t *testing.T) *enode.Node {
// Create a peer whose next fork epoch differs from the local value.
peer := enode.NewLocalNode(db, k)
next := next.Copy()
next.Epoch = next.Epoch + 1
require.NoError(t, updateENR(peer, current, next))
return peer.Node()
},
expectLog: "Peer matches fork digest but has different next fork epoch",
},
}
var listeners []*listenerWrapper
for i := 1; i <= 5; i++ {
port := 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)
c := params.BeaconConfig().Copy()
nextForkEpoch := primitives.Epoch(i)
c.ForkVersionSchedule[[4]byte{'A', 'B', 'C', 'D'}] = nextForkEpoch
params.OverrideBeaconConfig(c)
// We give every peer a different genesis validators root, which
// will cause each peer to have a different ForkDigest, preventing
// them from connecting according to our discovery rules for Ethereum consensus.
s = &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")
listeners = append(listeners, listener)
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
hook := logTest.NewGlobal()
peer := c.node(t)
err := compareForkENR(self.Node().Record(), peer.Record())
if c.expectErr != nil {
require.ErrorIs(t, err, c.expectErr, "Expected error to match")
} else {
require.NoError(t, err, "Expected no error comparing fork ENRs")
}
if c.expectLog != "" {
require.LogsContain(t, hook, c.expectLog, "Expected log message not found")
}
})
}
defer func() {
// Close down all peers.
for _, listener := range listeners {
listener.Close()
}
}()
// Wait for the nodes' local routing tables to be populated with the other nodes
time.Sleep(discoveryWaitTime)
lastListener := listeners[len(listeners)-1]
nodes := lastListener.Lookup(bootNode.ID())
if len(nodes) < 4 {
t.Errorf("The node's local table doesn't have the expected number of nodes. "+
"Expected more than or equal to %d but got %d", 4, len(nodes))
}
// Now, we start a new p2p service. It should have no peers aside from the
// bootnode given all nodes provided by discv5 will have different fork digests.
cfg.UDPPort = 14000
cfg.TCPPort = 14001
cfg.MaxPeers = 30
cfg.StateNotifier = &mock.MockStateNotifier{}
s, err = NewService(t.Context(), cfg)
require.NoError(t, err)
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
addrs := make([]ma.Multiaddr, 0, len(nodes))
for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
require.NoError(t, err)
addrs = append(addrs, nodeAddrs...)
}
}
if len(addrs) == 0 {
t.Error("Expected to have valid peers, got 0")
}
require.LogsContain(t, hook, "Peer matches fork digest but has different next fork epoch")
require.NoError(t, s.Stop())
}
func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
c.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): 0,
{0, 0, 0, 1}: 1,
}
nextForkEpoch := primitives.Epoch(1)
nextForkVersion := []byte{0, 0, 0, 1}
params.OverrideBeaconConfig(c)
params.BeaconConfig().InitializeForkSchedule()
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
digest, err := forks.CreateForkDigest(genesisTime, make([]byte, 32))
require.NoError(t, err)
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
enrForkID := &pb.ENRForkID{
CurrentForkDigest: digest[:],
NextForkVersion: nextForkVersion,
NextForkEpoch: nextForkEpoch,
CurrentForkDigest: current.ForkDigest[:],
NextForkVersion: next.ForkVersion[:],
NextForkEpoch: next.Epoch,
}
enc, err := enrForkID.MarshalSSZ()
require.NoError(t, err)
entry := enr.WithEntry(eth2ENRKey, enc)
// In epoch 1 of current time, the fork version should be
// {0, 0, 0, 1} according to the configuration override above.
temp := t.TempDir()
randNum := rand.Int()
tempPath := path.Join(temp, strconv.Itoa(randNum))
@@ -255,18 +134,16 @@ func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
localNode := enode.NewLocalNode(db, pkey)
localNode.Set(entry)
want, err := signing.ComputeForkDigest([]byte{0, 0, 0, 0}, genesisValidatorsRoot)
require.NoError(t, err)
resp, err := forkEntry(localNode.Node().Record())
require.NoError(t, err)
assert.DeepEqual(t, want[:], resp.CurrentForkDigest)
assert.DeepEqual(t, nextForkVersion, resp.NextForkVersion)
assert.Equal(t, nextForkEpoch, resp.NextForkEpoch, "Unexpected next fork epoch")
assert.Equal(t, hexutil.Encode(current.ForkDigest[:]), hexutil.Encode(resp.CurrentForkDigest))
assert.Equal(t, hexutil.Encode(next.ForkVersion[:]), hexutil.Encode(resp.NextForkVersion))
assert.Equal(t, next.Epoch, resp.NextForkEpoch, "Unexpected next fork epoch")
}
func TestAddForkEntry_Genesis(t *testing.T) {
func TestAddForkEntry_NextForkVersion(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
temp := t.TempDir()
randNum := rand.Int()
tempPath := path.Join(temp, strconv.Itoa(randNum))
@@ -276,17 +153,27 @@ func TestAddForkEntry_Genesis(t *testing.T) {
db, err := enode.OpenDB("")
require.NoError(t, err)
bCfg := params.MainnetConfig()
bCfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{}
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)] = bCfg.GenesisEpoch
params.OverrideBeaconConfig(bCfg)
localNode := enode.NewLocalNode(db, pkey)
localNode, err = addForkEntry(localNode, time.Now().Add(10*time.Second), bytesutil.PadTo([]byte{'A', 'B', 'C', 'D'}, 32))
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
// Add the fork entry to the local node's ENR.
require.NoError(t, updateENR(localNode, current, next))
fe, err := forkEntry(localNode.Node().Record())
require.NoError(t, err)
forkEntry, err := forkEntry(localNode.Node().Record())
require.NoError(t, err)
assert.DeepEqual(t,
params.BeaconConfig().GenesisForkVersion, forkEntry.NextForkVersion,
assert.Equal(t,
hexutil.Encode(params.BeaconConfig().AltairForkVersion), hexutil.Encode(fe.NextForkVersion),
"Wanted Next Fork Version to be equal to genesis fork version")
last := params.LastForkEpoch()
current = params.GetNetworkScheduleEntry(last)
next = params.NextNetworkScheduleEntry(last)
require.NoError(t, updateENR(localNode, current, next))
entry := params.NextNetworkScheduleEntry(last)
fe, err = forkEntry(localNode.Node().Record())
require.NoError(t, err)
assert.Equal(t,
hexutil.Encode(entry.ForkVersion[:]), hexutil.Encode(fe.NextForkVersion),
"Wanted Next Fork Version to be equal to last entry in schedule")
}

View File

@@ -10,26 +10,22 @@ import (
// changes.
func (s *Service) forkWatcher() {
slotTicker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
var scheduleEntry params.NetworkScheduleEntry
for {
select {
case currSlot := <-slotTicker.C():
currEpoch := slots.ToEpoch(currSlot)
if currEpoch == params.BeaconConfig().AltairForkEpoch ||
currEpoch == params.BeaconConfig().BellatrixForkEpoch ||
currEpoch == params.BeaconConfig().CapellaForkEpoch ||
currEpoch == params.BeaconConfig().DenebForkEpoch ||
currEpoch == params.BeaconConfig().ElectraForkEpoch ||
currEpoch == params.BeaconConfig().FuluForkEpoch {
// If we are in the fork epoch, we update our enr with
// the updated fork digest. This is done repeatedly
// over the epoch, which might be slightly wasteful
// but is fine nonetheless.
if s.dv5Listener != nil { // make sure it's not a local network
_, err := addForkEntry(s.dv5Listener.LocalNode(), s.genesisTime, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not add fork entry")
}
if s.dv5Listener == nil {
continue // TODO: Should forkWatcher run at all if we're in "local" mode?
}
currentEpoch := slots.ToEpoch(currSlot)
newEntry := params.GetNetworkScheduleEntry(currentEpoch)
if newEntry.ForkDigest != scheduleEntry.ForkDigest {
nextEntry := params.NextNetworkScheduleEntry(currentEpoch)
if err := updateENR(s.dv5Listener.LocalNode(), newEntry, nextEntry); err != nil {
log.WithFields(newEntry.LogFields()).WithError(err).Error("Could not add fork entry")
continue // don't replace scheduleEntry until this succeeds
}
scheduleEntry = newEntry
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")

View File

@@ -13,11 +13,14 @@ import (
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
agentVersionKey = "AgentVersion"
// The time to wait for a status request.
timeForStatus = 10 * time.Second
)
@@ -28,29 +31,16 @@ func peerMultiaddrString(conn network.Conn) string {
return fmt.Sprintf("%s/p2p/%s", remoteMultiaddr, remotePeerID)
}
func peerAgentString(pid peer.ID, host host.Host) string {
const unknownAgent = "unknown"
rawAgent, err := host.Peerstore().Get(pid, "AgentVersion")
if err != nil {
return unknownAgent
}
agent, ok := rawAgent.(string)
if !ok {
return unknownAgent
}
return agent
}
func (s *Service) connectToPeer(conn network.Conn) {
s.peers.SetConnectionState(conn.RemotePeer(), peers.Connected)
remotePeer := conn.RemotePeer()
s.peers.SetConnectionState(remotePeer, peers.Connected)
// Go through the handshake process.
log.WithFields(logrus.Fields{
"direction": conn.Stat().Direction.String(),
"multiAddr": peerMultiaddrString(conn),
"activePeers": len(s.peers.Active()),
"agent": agentString(remotePeer, s.Host()),
}).Debug("Initiate peer connection")
}
@@ -76,9 +66,9 @@ func (s *Service) disconnectFromPeerOnError(
WithError(badPeerErr).
WithFields(logrus.Fields{
"multiaddr": peerMultiaddrString(conn),
"agent": peerAgentString(remotePeerID, s.host),
"direction": conn.Stat().Direction.String(),
"remainingActivePeers": len(s.peers.Active()),
"agent": agentString(remotePeerID, s.Host()),
}).
Debug("Initiate peer disconnection")
@@ -90,8 +80,8 @@ func (s *Service) disconnectFromPeerOnError(
// and validating the response from the peer.
func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Context, id peer.ID) error) {
// Peer map and lock to keep track of current connection attempts.
var peerLock sync.Mutex
peerMap := make(map[peer.ID]bool)
peerLock := new(sync.Mutex)
// This is run at the start of each connection attempt, to ensure
// that there aren't multiple inflight connection requests for the
@@ -119,6 +109,19 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
s.host.Network().Notify(&network.NotifyBundle{
ConnectedF: func(_ network.Network, conn network.Conn) {
remotePeer := conn.RemotePeer()
log := log.WithField("peer", remotePeer)
direction := conn.Stat().Direction
// For some reason, right after a disconnection, this `ConnectedF` callback
// is called. We want to avoid processing this connection if the peer was
// disconnected too recently and we initiated this connection.
// This is most likely a bug in libp2p.
if direction == network.DirOutbound {
if err := s.wasDisconnectedTooRecently(remotePeer); err != nil {
log.WithError(err).Debug("Skipping connection handler")
return
}
}
// Connection handler must be non-blocking as part of libp2p design.
go func() {
@@ -144,53 +147,56 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
return
}
// Do not perform handshake on inbound dials.
if conn.Stat().Direction == network.DirInbound {
_, err := s.peers.ChainState(remotePeer)
peerExists := err == nil
currentTime := prysmTime.Now()
// Wait for peer to initiate handshake
time.Sleep(timeForStatus)
// Exit if we are disconnected from the peer.
if s.host.Network().Connectedness(remotePeer) != network.Connected {
if direction != network.DirInbound {
s.peers.SetConnectionState(conn.RemotePeer(), peers.Connecting)
if err := reqFunc(context.TODO(), conn.RemotePeer()); err != nil && !errors.Is(err, io.EOF) {
s.disconnectFromPeerOnError(conn, goodByeFunc, err)
return
}
// If peer hasn't sent a status request, we disconnect from them
if _, err := s.peers.ChainState(remotePeer); errors.Is(err, peerdata.ErrPeerUnknown) || errors.Is(err, peerdata.ErrNoPeerStatus) {
statusMessageMissing.Inc()
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state"))
return
}
if peerExists {
updated, err := s.peers.ChainStateLastUpdated(remotePeer)
if err != nil {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state last updated"))
return
}
// Exit if we don't receive any current status messages from peer.
if updated.IsZero() {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("is zero"))
return
}
if updated.Before(currentTime) {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("did not update"))
return
}
}
s.connectToPeer(conn)
return
}
s.peers.SetConnectionState(conn.RemotePeer(), peers.Connecting)
if err := reqFunc(context.TODO(), conn.RemotePeer()); err != nil && !errors.Is(err, io.EOF) {
s.disconnectFromPeerOnError(conn, goodByeFunc, err)
// The connection is inbound.
_, err = s.peers.ChainState(remotePeer)
peerExists := err == nil
currentTime := prysmTime.Now()
// Wait for peer to initiate handshake
time.Sleep(timeForStatus)
// Exit if we are disconnected from the peer.
if s.host.Network().Connectedness(remotePeer) != network.Connected {
return
}
// If peer hasn't sent a status request, we disconnect from them
if _, err := s.peers.ChainState(remotePeer); errors.Is(err, peerdata.ErrPeerUnknown) || errors.Is(err, peerdata.ErrNoPeerStatus) {
statusMessageMissing.Inc()
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state"))
return
}
if !peerExists {
s.connectToPeer(conn)
return
}
updated, err := s.peers.ChainStateLastUpdated(remotePeer)
if err != nil {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state last updated"))
return
}
// Exit if we don't receive any current status messages from peer.
if updated.IsZero() {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("is zero"))
return
}
if updated.Before(currentTime) {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("did not update"))
return
}
@@ -207,9 +213,10 @@ func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id p
DisconnectedF: func(net network.Network, conn network.Conn) {
peerID := conn.RemotePeer()
log.WithFields(logrus.Fields{
log := log.WithFields(logrus.Fields{
"multiAddr": peerMultiaddrString(conn),
"direction": conn.Stat().Direction.String(),
"agent": agentString(peerID, s.Host()),
})
// Must be handled in a goroutine as this callback cannot be blocking.
go func() {
@@ -230,6 +237,12 @@ func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id p
}
s.peers.SetConnectionState(peerID, peers.Disconnected)
if err := s.peerDisconnectionTime.Add(peerID.String(), time.Now(), cache.DefaultExpiration); err != nil {
// The `DisconnectedF` function was already called for this peer less than `cache.DefaultExpiration` ago. Skip.
// (Very probably a bug in libp2p.)
log.WithError(err).Trace("Failed to set peer disconnection time")
return
}
// Only log disconnections if we were fully connected.
if priorState == peers.Connected {
@@ -240,3 +253,36 @@ func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id p
},
})
}
// wasDisconnectedTooRecently checks if the peer was disconnected within the last second.
func (s *Service) wasDisconnectedTooRecently(peerID peer.ID) error {
const disconnectionDurationThreshold = 1 * time.Second
peerDisconnectionTimeObj, ok := s.peerDisconnectionTime.Get(peerID.String())
if !ok {
return nil
}
peerDisconnectionTime, ok := peerDisconnectionTimeObj.(time.Time)
if !ok {
return errors.New("invalid peer disconnection time type")
}
timeSinceDisconnection := time.Since(peerDisconnectionTime)
if timeSinceDisconnection < disconnectionDurationThreshold {
return errors.Errorf("peer %s was disconnected too recently: %s", peerID, timeSinceDisconnection)
}
return nil
}
func agentString(pid peer.ID, hst host.Host) string {
rawVersion, storeErr := hst.Peerstore().Get(pid, agentVersionKey)
result, ok := rawVersion.(string)
if storeErr != nil || !ok {
result = ""
}
return result
}
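
The debounce above relies on a property of patrickmn/go-cache: Add only succeeds when the key is absent or already expired, so a repeated disconnect inside the expiration window surfaces as an error. A small self-contained sketch of that behavior (the peer-id string and printed output are illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

func main() {
	// Mirrors the service's peerDisconnectionTime setup: cache.New(1*time.Second, 1*time.Minute).
	c := cache.New(1*time.Second, 1*time.Minute)

	// First disconnect for a peer: Add succeeds and records the timestamp.
	fmt.Println(c.Add("peer-id", time.Now(), cache.DefaultExpiration)) // <nil>

	// A second disconnect within the 1s window: Add returns an error,
	// which the DisconnectedF handler uses to skip duplicate processing.
	fmt.Println(c.Add("peer-id", time.Now(), cache.DefaultExpiration)) // non-nil error
}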

View File

@@ -7,6 +7,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
"github.com/ethereum/go-ethereum/p2p/enode"
@@ -35,10 +36,11 @@ type (
DataColumnsHandler
}
// Accessor provides access to the Broadcaster and PeerManager interfaces.
// Accessor provides access to the Broadcaster, PeerManager and DataColumnsHandler interfaces.
Accessor interface {
Broadcaster
PeerManager
DataColumnsHandler
}
// Broadcaster broadcasts messages to peers over the p2p pubsub protocol.
@@ -49,7 +51,7 @@ type (
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
BroadcastDataColumn(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar, peersChecked ...chan<- bool) error
BroadcastDataColumn(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
}
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
@@ -98,7 +100,7 @@ type (
NodeID() enode.ID
DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
RefreshPersistentSubnets()
FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error
AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
}
@@ -120,6 +122,9 @@ type (
// DataColumnsHandler abstracts some data columns related methods.
DataColumnsHandler interface {
EarliestAvailableSlot() primitives.Slot
CustodyGroupCount() uint64
UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
CustodyGroupCountFromPeer(peer.ID) uint64
}
)

View File

@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
"github.com/OffchainLabs/prysm/v6/network/forks"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
)
@@ -39,7 +38,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
copy(msg, "invalid")
return bytesutil.UnsafeCastToString(msg)
}
_, fEpoch, err := forks.RetrieveForkDataFromDigest(digest, genesisValidatorsRoot)
_, fEpoch, err := params.ForkDataFromDigest(digest)
if err != nil {
// Impossible condition that should
// never be hit.

View File

@@ -7,10 +7,10 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/golang/snappy"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
@@ -18,28 +18,27 @@ import (
func TestMsgID_HashesCorrectly(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
d, err := forks.CreateForkDigest(time.Now(), genesisValidatorsRoot)
assert.NoError(t, err)
clock := startup.NewClock(time.Now(), bytesutil.ToBytes32([]byte{'A'}))
valRoot := clock.GenesisValidatorsRoot()
d := params.ForkDigest(clock.CurrentEpoch())
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
invalidSnappy := [32]byte{'J', 'U', 'N', 'K'}
pMsg := &pubsubpb.Message{Data: invalidSnappy[:], Topic: &tpc}
hashedData := hash.Hash(append(params.BeaconConfig().MessageDomainInvalidSnappy[:], pMsg.Data...))
msgID := string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(valRoot[:], pMsg), "Got incorrect msg id")
validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
enc := snappy.Encode(nil, validObj[:])
nMsg := &pubsubpb.Message{Data: enc, Topic: &tpc}
hashedData = hash.Hash(append(params.BeaconConfig().MessageDomainValidSnappy[:], validObj[:]...))
msgID = string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(valRoot[:], nMsg), "Got incorrect msg id")
}
func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
d, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, genesisValidatorsRoot)
d, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
assert.NoError(t, err)
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
topicLen := uint64(len(tpc))
@@ -52,7 +51,7 @@ func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
combinedObj = append(combinedObj, pMsg.Data...)
hashedData := hash.Hash(combinedObj)
msgID := string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], pMsg), "Got incorrect msg id")
validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
enc := snappy.Encode(nil, validObj[:])
@@ -63,13 +62,12 @@ func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
combinedObj = append(combinedObj, validObj[:]...)
hashedData = hash.Hash(combinedObj)
msgID = string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], nMsg), "Got incorrect msg id")
}
func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
params.SetupTestConfigCleanup(t)
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
d, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, genesisValidatorsRoot)
d, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
assert.NoError(t, err)
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
topicLen := uint64(len(tpc))
@@ -82,7 +80,7 @@ func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
combinedObj = append(combinedObj, pMsg.Data...)
hashedData := hash.Hash(combinedObj)
msgID := string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], pMsg), "Got incorrect msg id")
validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
enc := snappy.Encode(nil, validObj[:])
@@ -93,7 +91,7 @@ func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
combinedObj = append(combinedObj, validObj[:]...)
hashedData = hash.Hash(combinedObj)
msgID = string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], nMsg), "Got incorrect msg id")
}
func TestMsgID_WithNilTopic(t *testing.T) {

View File

@@ -100,21 +100,24 @@ func (s *BadResponsesScorer) countNoLock(pid peer.ID) (int, error) {
// Increment increments the number of bad responses we have received from the given remote peer.
// If the peer ID is empty, this method is a no-op.
func (s *BadResponsesScorer) Increment(pid peer.ID) {
func (s *BadResponsesScorer) Increment(pid peer.ID) int {
if pid == "" {
return
return 0
}
s.store.Lock()
defer s.store.Unlock()
peerData, ok := s.store.PeerData(pid)
if !ok {
s.store.SetPeerData(pid, &peerdata.PeerData{
BadResponses: 1,
})
return
if ok {
peerData.BadResponses++
return peerData.BadResponses
}
peerData.BadResponses++
const badResponses = 1
peerData = &peerdata.PeerData{BadResponses: badResponses}
s.store.SetPeerData(pid, peerData)
return badResponses
}
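
With Increment now returning the updated count, callers can log the new score without a second read of the store. A sketch of such a caller, mirroring the downscorePeer helper that appears later in this diff (the function name is illustrative; the logger and imports are those already used in the p2p package):

// Illustrative caller that reports the post-increment score.
func logBadResponse(scorer *scorers.BadResponsesScorer, pid peer.ID, reason string) {
	newScore := scorer.Increment(pid)
	log.WithFields(logrus.Fields{
		"peerID":   pid,
		"reason":   reason,
		"newScore": newScore,
	}).Debug("Downscore peer")
}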
// IsBadPeer states if the peer is to be considered bad.

View File

@@ -395,7 +395,7 @@ func (p *Status) SetNextValidTime(pid peer.ID, nextTime time.Time) {
peerData.NextValidTime = nextTime
}
// RandomizeBackOff adds extra backoff period during which peer will not be dialed.
// RandomizeBackOff adds extra backoff period during which peer won't be dialed.
func (p *Status) RandomizeBackOff(pid peer.ID) {
p.store.Lock()
defer p.store.Unlock()

View File

@@ -40,7 +40,7 @@ const (
rSubD = 8 // random gossip target
)
var errInvalidTopic = errors.New("invalid topic format")
var ErrInvalidTopic = errors.New("invalid topic format")
// Specifies the fixed size context length.
const digestLength = 4
@@ -219,12 +219,12 @@ func convertTopicScores(topicMap map[string]*pubsub.TopicScoreSnapshot) map[stri
func ExtractGossipDigest(topic string) ([4]byte, error) {
// Ensure the topic prefix is correct.
if len(topic) < len(gossipTopicPrefix)+1 || topic[:len(gossipTopicPrefix)] != gossipTopicPrefix {
return [4]byte{}, errInvalidTopic
return [4]byte{}, ErrInvalidTopic
}
start := len(gossipTopicPrefix)
end := strings.Index(topic[start:], "/")
if end == -1 { // Ensure a topic suffix exists.
return [4]byte{}, errInvalidTopic
return [4]byte{}, ErrInvalidTopic
}
end += start
strDigest := topic[start:end]

View File

@@ -1,12 +1,12 @@
package p2p
import (
"encoding/hex"
"fmt"
"strings"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/network/forks"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
@@ -32,6 +32,14 @@ var _ pubsub.SubscriptionFilter = (*Service)(nil)
// (Note: BlobSidecar is not included in this list since it is superseded by DataColumnSidecar)
const pubsubSubscriptionRequestLimit = 500
func (s *Service) setAllForkDigests() {
entries := params.SortedNetworkScheduleEntries()
s.allForkDigests = make(map[[4]byte]struct{}, len(entries))
for _, entry := range entries {
s.allForkDigests[entry.ForkDigest] = struct{}{}
}
}
// CanSubscribe returns true if the topic is of interest and we could subscribe to it.
func (s *Service) CanSubscribe(topic string) bool {
if !s.isInitialized() {
@@ -48,50 +56,18 @@ func (s *Service) CanSubscribe(topic string) bool {
if parts[1] != "eth2" {
return false
}
phase0ForkDigest, err := s.currentForkDigest()
var digest [4]byte
dl, err := hex.Decode(digest[:], []byte(parts[2]))
if err == nil && dl != 4 {
err = fmt.Errorf("expected 4 bytes, got %d", dl)
}
if err != nil {
log.WithError(err).Error("Could not determine fork digest")
log.WithError(err).WithField("topic", topic).WithField("digest", parts[2]).Error("CanSubscribe failed to parse message")
return false
}
altairForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine altair fork digest")
return false
}
bellatrixForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Bellatrix fork digest")
return false
}
capellaForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Capella fork digest")
return false
}
denebForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Deneb fork digest")
return false
}
electraForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Electra fork digest")
return false
}
fuluForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().FuluForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Fulu fork digest")
return false
}
switch parts[2] {
case fmt.Sprintf("%x", phase0ForkDigest):
case fmt.Sprintf("%x", altairForkDigest):
case fmt.Sprintf("%x", bellatrixForkDigest):
case fmt.Sprintf("%x", capellaForkDigest):
case fmt.Sprintf("%x", denebForkDigest):
case fmt.Sprintf("%x", electraForkDigest):
case fmt.Sprintf("%x", fuluForkDigest):
default:
if _, ok := s.allForkDigests[digest]; !ok {
log.WithField("topic", topic).WithField("digest", fmt.Sprintf("%#x", digest)).Error("CanSubscribe failed to find digest in allForkDigests")
return false
}
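
For reference, the digest that the new membership check validates is the third path segment of a gossip topic. A tiny illustrative sketch of the parsing path (the topic string and digest value here are made up, and the split step is assumed from the surrounding code):

topic := "/eth2/b5303f2a/beacon_block/ssz_snappy" // example only
parts := strings.Split(topic, "/")                // ["", "eth2", "b5303f2a", "beacon_block", "ssz_snappy"]
var digest [4]byte
n, err := hex.Decode(digest[:], []byte(parts[2]))
// A well-formed digest yields n == 4 and err == nil; anything else is rejected
// before the allForkDigests membership check runs.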

View File

@@ -11,8 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
@@ -21,12 +19,11 @@ import (
func TestService_CanSubscribe(t *testing.T) {
params.SetupTestConfigCleanup(t)
currentFork := [4]byte{0x01, 0x02, 0x03, 0x04}
params.BeaconConfig().InitializeForkSchedule()
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
genesisTime := time.Now()
var valRoot [32]byte
digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
assert.NoError(t, err)
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
currentFork := params.GetNetworkScheduleEntry(clock.CurrentEpoch()).ForkDigest
digest := params.ForkDigest(clock.CurrentEpoch())
type test struct {
name string
topic string
@@ -108,12 +105,14 @@ func TestService_CanSubscribe(t *testing.T) {
}
tests = append(tests, tt)
}
valRoot := clock.GenesisValidatorsRoot()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
genesisValidatorsRoot: valRoot[:],
genesisTime: genesisTime,
genesisTime: clock.GenesisTime(),
}
s.setAllForkDigests()
if got := s.CanSubscribe(tt.topic); got != tt.want {
t.Errorf("CanSubscribe(%s) = %v, want %v", tt.topic, got, tt.want)
}
@@ -219,11 +218,10 @@ func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testi
func TestService_FilterIncomingSubscriptions(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
digest := params.ForkDigest(clock.CurrentEpoch())
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
genesisTime := time.Now()
var valRoot [32]byte
digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
assert.NoError(t, err)
type args struct {
id peer.ID
subs []*pubsubpb.RPC_SubOpts
@@ -320,12 +318,14 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
},
},
}
valRoot := clock.GenesisValidatorsRoot()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
genesisValidatorsRoot: valRoot[:],
genesisTime: genesisTime,
genesisTime: clock.GenesisTime(),
}
s.setAllForkDigests()
got, err := s.FilterIncomingSubscriptions(tt.args.id, tt.args.subs)
if (err != nil) != tt.wantErr {
t.Errorf("FilterIncomingSubscriptions() error = %v, wantErr %v", err, tt.wantErr)

View File

@@ -169,7 +169,7 @@ var (
RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),
// DataColumnSidecarsByRoot v1 Message
RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnsByRootIdentifiers),
RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},
}
// Maps all registered protocol prefixes.

View File

@@ -14,8 +14,10 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
prysmnetwork "github.com/OffchainLabs/prysm/v6/network"
@@ -31,30 +33,35 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/multiformats/go-multiaddr"
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var _ runtime.Service = (*Service)(nil)
// In the event that we are at our peer limit, we
// stop looking for new peers and instead poll
// for the current peer limit status for the time period
// defined below.
var pollingPeriod = 6 * time.Second
const (
// When looking for new nodes, if not enough nodes are found,
// we stop after spending this much time.
batchPeriod = 2 * time.Second
// When looking for new nodes, if not enough nodes are found,
// we stop after spending this much time.
var batchPeriod = 2 * time.Second
// maxBadResponses is the maximum number of bad responses from a peer before we stop talking to it.
maxBadResponses = 5
)
// Refresh rate of ENR set at twice per slot.
var refreshRate = slots.DivideSlotBy(2)
var (
// Refresh rate of ENR set at twice per slot.
refreshRate = slots.DivideSlotBy(2)
// maxBadResponses is the maximum number of bad responses from a peer before we stop talking to it.
const maxBadResponses = 5
// maxDialTimeout is the timeout for a single peer dial.
maxDialTimeout = params.BeaconConfig().RespTimeoutDuration()
// maxDialTimeout is the timeout for a single peer dial.
var maxDialTimeout = params.BeaconConfig().RespTimeoutDuration()
// In the event that we are at our peer limit, we
// stop looking for new peers and instead poll
// for the current peer limit status for the time period
// defined below.
pollingPeriod = 6 * time.Second
)
// Service for managing peer to peer (p2p) networking.
type Service struct {
@@ -82,6 +89,12 @@ type Service struct {
genesisTime time.Time
genesisValidatorsRoot []byte
activeValidatorCount uint64
peerDisconnectionTime *cache.Cache
custodyInfoMut sync.RWMutex // Protects custodyGroupCount and earliestAvailableSlot
custodyGroupCount uint64
earliestAvailableSlot primitives.Slot
clock *startup.Clock
allForkDigests map[[4]byte]struct{}
}
// NewService initializes a new p2p service compatible with shared.Service interface. No
@@ -111,16 +124,17 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
ipLimiter := leakybucket.NewCollector(ipLimit, ipBurst, 30*time.Second, true /* deleteEmptyBuckets */)
s := &Service{
ctx: ctx,
cancel: cancel,
cfg: cfg,
addrFilter: addrFilter,
ipLimiter: ipLimiter,
privKey: privKey,
metaData: metaData,
isPreGenesis: true,
joinedTopics: make(map[string]*pubsub.Topic, len(gossipTopicMappings)),
subnetsLock: make(map[uint64]*sync.RWMutex),
ctx: ctx,
cancel: cancel,
cfg: cfg,
addrFilter: addrFilter,
ipLimiter: ipLimiter,
privKey: privKey,
metaData: metaData,
isPreGenesis: true,
joinedTopics: make(map[string]*pubsub.Topic, len(gossipTopicMappings)),
subnetsLock: make(map[uint64]*sync.RWMutex),
peerDisconnectionTime: cache.New(1*time.Second, 1*time.Minute),
}
ipAddr := prysmnetwork.IPAddr()
@@ -185,6 +199,7 @@ func (s *Service) Start() {
// Waits until the state is initialized via an event feed.
// Used for fork-related data when connecting peers.
s.awaitStateInitialized()
s.setAllForkDigests()
s.isPreGenesis = false
var relayNodes []string
@@ -251,6 +266,7 @@ func (s *Service) Start() {
"inboundTCP": inboundTCPCount,
"outboundTCP": outboundTCPCount,
"total": total,
"target": s.cfg.MaxPeers,
}
if features.Get().EnableQUIC {
@@ -403,7 +419,10 @@ func (s *Service) pingPeersAndLogEnr() {
defer s.pingMethodLock.RUnlock()
localENR := s.dv5Listener.Self()
log.WithField("ENR", localENR).Info("New node record")
log.WithFields(logrus.Fields{
"ENR": localENR,
"seq": localENR.Seq(),
}).Info("New node record")
if s.pingMethod == nil {
return
@@ -434,7 +453,7 @@ func (s *Service) awaitStateInitialized() {
s.genesisTime = clock.GenesisTime()
gvr := clock.GenesisValidatorsRoot()
s.genesisValidatorsRoot = gvr[:]
_, err = s.currentForkDigest() // initialize fork digest cache
_, err = s.currentForkDigest()
if err != nil {
log.WithError(err).Error("Could not initialize fork digest")
}
@@ -481,14 +500,17 @@ func (s *Service) connectWithPeer(ctx context.Context, info peer.AddrInfo) error
if info.ID == s.host.ID() {
return nil
}
if err := s.Peers().IsBad(info.ID); err != nil {
return errors.Wrap(err, "refused to connect to bad peer")
return errors.Wrap(err, "bad peer")
}
ctx, cancel := context.WithTimeout(ctx, maxDialTimeout)
defer cancel()
if err := s.host.Connect(ctx, info); err != nil {
s.Peers().Scorers().BadResponsesScorer().Increment(info.ID)
return err
s.downscorePeer(info.ID, "connectionError")
return errors.Wrap(err, "peer connect")
}
return nil
}
@@ -519,3 +541,8 @@ func (s *Service) connectToBootnodes() error {
func (s *Service) isInitialized() bool {
return !s.genesisTime.IsZero() && len(s.genesisValidatorsRoot) == 32
}
func (s *Service) downscorePeer(peerID peer.ID, reason string) {
newScore := s.Peers().Scorers().BadResponsesScorer().Increment(peerID)
log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
}

View File

@@ -11,9 +11,8 @@ import (
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
@@ -70,7 +69,7 @@ func (mockListener) RandomNodes() enode.Iterator {
func (mockListener) RebootListener() error { panic("implement me") }
func createHost(t *testing.T, port int) (host.Host, *ecdsa.PrivateKey, net.IP) {
func createHost(t *testing.T, port uint) (host.Host, *ecdsa.PrivateKey, net.IP) {
_, pkey := createAddrAndPrivKey(t)
ipAddr := net.ParseIP("127.0.0.1")
listen, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
@@ -183,21 +182,33 @@ func TestService_Start_NoDiscoverFlag(t *testing.T) {
}
func TestListenForNewNodes(t *testing.T) {
const (
port = uint(2000)
testPollingPeriod = 1 * time.Second
peerCount = 5
)
params.SetupTestConfigCleanup(t)
// Setup bootnode.
notifier := &mock.MockStateNotifier{}
cfg := &Config{StateNotifier: notifier, PingInterval: testPingInterval, DisableLivenessCheck: true}
port := 2000
cfg.UDPPort = uint(port)
cfg := &Config{
StateNotifier: &mock.MockStateNotifier{},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
UDPPort: port,
}
_, pkey := createAddrAndPrivKey(t)
ipAddr := net.ParseIP("127.0.0.1")
genesisTime := prysmTime.Now()
var gvr [32]byte
var gvr [fieldparams.RootLength]byte
s := &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: gvr[:],
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()
@@ -208,35 +219,40 @@ func TestListenForNewNodes(t *testing.T) {
// Use shorter period for testing.
currentPeriod := pollingPeriod
pollingPeriod = 1 * time.Second
pollingPeriod = testPollingPeriod
defer func() {
pollingPeriod = currentPeriod
}()
bootNode := bootListener.Self()
var listeners []*listenerWrapper
var hosts []host.Host
// setup other nodes.
// Setup other nodes.
cs := startup.NewClockSynchronizer()
cfg = &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
MaxPeers: 30,
ClockWaiter: cs,
}
for i := 1; i <= 5; i++ {
listeners := make([]*listenerWrapper, 0, peerCount)
hosts := make([]host.Host, 0, peerCount)
for i := uint(1); i <= peerCount; i++ {
cfg = &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
MaxPeers: peerCount,
ClockWaiter: cs,
UDPPort: port + i,
TCPPort: port + i,
}
h, pkey, ipAddr := createHost(t, port+i)
cfg.UDPPort = uint(port + i)
cfg.TCPPort = uint(port + i)
s := &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: gvr[:],
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")
require.NoError(t, err, "Could not start discovery for node")
listeners = append(listeners, listener)
hosts = append(hosts, h)
}
@@ -261,19 +277,26 @@ func TestListenForNewNodes(t *testing.T) {
s, err = NewService(t.Context(), cfg)
require.NoError(t, err)
exitRoutine := make(chan bool)
go func() {
s.Start()
<-exitRoutine
}()
time.Sleep(1 * time.Second)
require.NoError(t, cs.SetClock(startup.NewClock(genesisTime, gvr)))
go s.Start()
time.Sleep(4 * time.Second)
assert.Equal(t, 5, len(s.host.Network().Peers()), "Not all peers added to peerstore")
require.NoError(t, s.Stop())
exitRoutine <- true
err = cs.SetClock(startup.NewClock(genesisTime, gvr))
require.NoError(t, err, "Could not set clock in service")
actualPeerCount := len(s.host.Network().Peers())
for range 40 {
if actualPeerCount == peerCount {
break
}
time.Sleep(100 * time.Millisecond)
actualPeerCount = len(s.host.Network().Peers())
}
assert.Equal(t, peerCount, actualPeerCount, "Not all peers added to peerstore")
err = s.Stop()
require.NoError(t, err, "Failed to stop service")
}
func TestPeer_Disconnect(t *testing.T) {
@@ -309,14 +332,16 @@ func TestPeer_Disconnect(t *testing.T) {
func TestService_JoinLeaveTopic(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
defer cancel()
gs := startup.NewClockSynchronizer()
s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}, ClockWaiter: gs})
require.NoError(t, err)
go s.awaitStateInitialized()
fd := initializeStateWithForkDigest(ctx, t, gs)
s.setAllForkDigests()
s.awaitStateInitialized()
assert.Equal(t, 0, len(s.joinedTopics))
@@ -345,18 +370,16 @@ func TestService_JoinLeaveTopic(t *testing.T) {
// digest associated with that genesis event.
func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.ClockSetter) [4]byte {
gt := prysmTime.Now()
gvr := bytesutil.ToBytes32(bytesutil.PadTo([]byte("genesis validators root"), 32))
require.NoError(t, gs.SetClock(startup.NewClock(gt, gvr)))
fd, err := forks.CreateForkDigest(gt, gvr[:])
require.NoError(t, err)
gvr := params.BeaconConfig().GenesisValidatorsRoot
clock := startup.NewClock(gt, gvr)
require.NoError(t, gs.SetClock(clock))
time.Sleep(50 * time.Millisecond) // wait for pubsub filter to initialize.
return fd
return params.ForkDigest(clock.CurrentEpoch())
}
// TODO: Uncomment when out of devnet
// TODO: Uncomment once out of devnet.
// func TestService_connectWithPeer(t *testing.T) {
// params.SetupTestConfigCleanup(t)
// tests := []struct {
@@ -368,7 +391,7 @@ func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.C
// {
// name: "bad peer",
// peers: func() *peers.Status {
// ps := peers.NewStatus(context.Background(), &peers.StatusConfig{
// ps := peers.NewStatus(t.Context(), &peers.StatusConfig{
// ScorerParams: &scorers.Config{},
// })
// for i := 0; i < 10; i++ {
@@ -377,7 +400,7 @@ func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.C
// return ps
// }(),
// info: peer.AddrInfo{ID: "bad"},
// wantErr: "refused to connect to bad peer",
// wantErr: "bad peer",
// },
// }
// for _, tt := range tests {
@@ -388,7 +411,7 @@ func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.C
// t.Fatal(err)
// }
// }()
// ctx := context.Background()
// ctx := t.Context()
// s := &Service{
// host: h,
// peers: tt.peers,
