mirror of https://github.com/OffchainLabs/prysm.git
synced 2026-01-10 13:58:09 -05:00

Compare commits
30 Commits
v6.0.5-rc. ... inconsiste
| Author | SHA1 | Date |
|---|---|---|
| | 328f64f832 | |
| | 46e7555c42 | |
| | 181df3995e | |
| | 149e220b61 | |
| | ae4b982a6c | |
| | f330021785 | |
| | bd6b4ecd5b | |
| | d7d8764a91 | |
| | 9b7f91d947 | |
| | 57e27199bd | |
| | 11ca766ed6 | |
| | cd6cc76d58 | |
| | fc4a1469f0 | |
| | f3dc4c283e | |
| | 6ddf271688 | |
| | af7afba26e | |
| | b740a4ff83 | |
| | 385c2224e8 | |
| | 04b39d1a4d | |
| | 4c40caf7fd | |
| | bc209cadab | |
| | 856742ff68 | |
| | abe16a9cb4 | |
| | 77958022e7 | |
| | c21fae239f | |
| | deb3ba7f21 | |
| | f288a3c0e1 | |
| | a4ca6355d0 | |
| | cd0821d026 | |
| | 8b53887891 | |
@@ -190,7 +190,7 @@ load("@rules_oci//oci:pull.bzl", "oci_pull")
# A multi-arch base image
oci_pull(
    name = "linux_debian11_multiarch_base",  # Debian bullseye
    digest = "sha256:b82f113425c5b5c714151aaacd8039bc141821cdcd3c65202d42bdf9c43ae60b",  # 2023-12-12
    digest = "sha256:55a5e011b2c4246b4c51e01fcc2b452d151e03df052e357465f0392fcd59fddf",
    image = "gcr.io/prysmaticlabs/distroless/cc-debian11",
    platforms = [
        "linux/amd64",

@@ -208,7 +208,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()

go_register_toolchains(
    go_version = "1.24.0",
    go_version = "1.24.5",
    nogo = "@//:nogo",
)
@@ -2,18 +2,28 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["common.go"],
    srcs = [
        "common.go",
        "header.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/api/apiutil",
    visibility = ["//visibility:public"],
    deps = ["//consensus-types/primitives:go_default_library"],
    deps = [
        "//consensus-types/primitives:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["common_test.go"],
    srcs = [
        "common_test.go",
        "header_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//consensus-types/primitives:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
    ],
)
api/apiutil/header.go (new file, 122 lines)

@@ -0,0 +1,122 @@
package apiutil

import (
    "mime"
    "sort"
    "strconv"
    "strings"

    log "github.com/sirupsen/logrus"
)

type mediaRange struct {
    mt   string  // canonicalised media‑type, e.g. "application/json"
    q    float64 // quality factor (0‑1)
    raw  string  // original string – useful for logging/debugging
    spec int     // 2=exact, 1=type/*, 0=*/*
}

func parseMediaRange(field string) (mediaRange, bool) {
    field = strings.TrimSpace(field)

    mt, params, err := mime.ParseMediaType(field)
    if err != nil {
        log.WithError(err).Debug("Failed to parse header field")
        return mediaRange{}, false
    }

    r := mediaRange{mt: mt, q: 1, spec: 2, raw: field}

    if qs, ok := params["q"]; ok {
        v, err := strconv.ParseFloat(qs, 64)
        if err != nil || v < 0 || v > 1 {
            log.WithField("q", qs).Debug("Invalid quality factor (0‑1)")
            return mediaRange{}, false // skip invalid entry
        }
        r.q = v
    }

    switch {
    case mt == "*/*":
        r.spec = 0
    case strings.HasSuffix(mt, "/*"):
        r.spec = 1
    }
    return r, true
}

func hasExplicitQ(r mediaRange) bool {
    return strings.Contains(strings.ToLower(r.raw), ";q=")
}

// ParseAccept returns media ranges sorted by q (desc) then specificity.
func ParseAccept(header string) []mediaRange {
    if header == "" {
        return []mediaRange{{mt: "*/*", q: 1, spec: 0, raw: "*/*"}}
    }

    var out []mediaRange
    for _, field := range strings.Split(header, ",") {
        if r, ok := parseMediaRange(field); ok {
            out = append(out, r)
        }
    }

    sort.SliceStable(out, func(i, j int) bool {
        ei, ej := hasExplicitQ(out[i]), hasExplicitQ(out[j])
        if ei != ej {
            return ei // explicit beats implicit
        }
        if out[i].q != out[j].q {
            return out[i].q > out[j].q
        }
        return out[i].spec > out[j].spec
    })
    return out
}

// Matches reports whether content type is acceptable per the header.
func Matches(header, ct string) bool {
    for _, r := range ParseAccept(header) {
        switch {
        case r.q == 0:
            continue
        case r.mt == "*/*":
            return true
        case strings.HasSuffix(r.mt, "/*"):
            if strings.HasPrefix(ct, r.mt[:len(r.mt)-1]) {
                return true
            }
        case r.mt == ct:
            return true
        }
    }
    return false
}

// Negotiate selects the best server type according to the header.
// Returns the chosen type and true, or "", false when nothing matches.
func Negotiate(header string, serverTypes []string) (string, bool) {
    for _, r := range ParseAccept(header) {
        if r.q == 0 {
            continue
        }
        for _, s := range serverTypes {
            if Matches(r.mt, s) {
                return s, true
            }
        }
    }
    return "", false
}

// PrimaryAcceptMatches only checks if the first accept matches
func PrimaryAcceptMatches(header, produced string) bool {
    for _, r := range ParseAccept(header) {
        if r.q == 0 {
            continue // explicitly unacceptable – skip
        }
        return Matches(r.mt, produced)
    }
    return false
}
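To make the negotiation behaviour of the new package concrete, here is a small usage sketch. It is not part of the diff: the Accept header value and the server type list are illustrative assumptions; only the exported functions (`ParseAccept`, `Matches`, `Negotiate`, `PrimaryAcceptMatches`) come from the file above.

```go
package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/api/apiutil"
)

func main() {
	// A hypothetical client that prefers SSZ (octet-stream) over JSON.
	accept := "application/octet-stream;q=0.9, application/json;q=0.8, */*;q=0.1"

	// Types this (hypothetical) endpoint can produce.
	serverTypes := []string{"application/json", "application/octet-stream"}

	// Negotiate walks the parsed ranges in preference order and returns the
	// first server type that matches.
	if ct, ok := apiutil.Negotiate(accept, serverTypes); ok {
		fmt.Println("negotiated:", ct) // application/octet-stream
	}

	// PrimaryAcceptMatches consults only the most-preferred range, so JSON is
	// rejected here even though it appears later in the header.
	fmt.Println(apiutil.PrimaryAcceptMatches(accept, "application/json")) // false
}
```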
api/apiutil/header_test.go (new file, 174 lines)

@@ -0,0 +1,174 @@
package apiutil

import (
    "testing"

    "github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestParseAccept(t *testing.T) {
    type want struct {
        mt   string
        q    float64
        spec int
    }

    cases := []struct {
        name   string
        header string
        want   []want
    }{
        {
            name:   "empty header becomes */*;q=1",
            header: "",
            want:   []want{{mt: "*/*", q: 1, spec: 0}},
        },
        {
            name:   "quality ordering then specificity",
            header: "application/json;q=0.2, */*;q=0.1, application/xml;q=0.5, text/*;q=0.5",
            want: []want{
                {mt: "application/xml", q: 0.5, spec: 2},
                {mt: "text/*", q: 0.5, spec: 1},
                {mt: "application/json", q: 0.2, spec: 2},
                {mt: "*/*", q: 0.1, spec: 0},
            },
        },
        {
            name:   "invalid pieces are skipped",
            header: "text/plain; q=boom, application/json",
            want:   []want{{mt: "application/json", q: 1, spec: 2}},
        },
    }

    for _, tc := range cases {
        t.Run(tc.name, func(t *testing.T) {
            got := ParseAccept(tc.header)
            gotProjected := make([]want, len(got))
            for i, g := range got {
                gotProjected[i] = want{mt: g.mt, q: g.q, spec: g.spec}
            }
            require.DeepEqual(t, gotProjected, tc.want)
        })
    }
}

func TestMatches(t *testing.T) {
    cases := []struct {
        name    string
        accept  string
        ct      string
        matches bool
    }{
        {"exact match", "application/json", "application/json", true},
        {"type wildcard", "application/*;q=0.8", "application/xml", true},
        {"global wildcard", "*/*;q=0.1", "image/png", true},
        {"explicitly unacceptable (q=0)", "text/*;q=0", "text/plain", false},
        {"no match", "image/png", "application/json", false},
    }

    for _, tc := range cases {
        t.Run(tc.name, func(t *testing.T) {
            got := Matches(tc.accept, tc.ct)
            require.Equal(t, tc.matches, got)
        })
    }
}

func TestNegotiate(t *testing.T) {
    cases := []struct {
        name        string
        accept      string
        serverTypes []string
        wantType    string
        ok          bool
    }{
        {
            name:        "highest quality wins",
            accept:      "application/json;q=0.8,application/xml;q=0.9",
            serverTypes: []string{"application/json", "application/xml"},
            wantType:    "application/xml",
            ok:          true,
        },
        {
            name:        "wildcard matches first server type",
            accept:      "*/*;q=0.5",
            serverTypes: []string{"application/octet-stream", "application/json"},
            wantType:    "application/octet-stream",
            ok:          true,
        },
        {
            name:        "no acceptable type",
            accept:      "image/png",
            serverTypes: []string{"application/json"},
            wantType:    "",
            ok:          false,
        },
    }

    for _, tc := range cases {
        t.Run(tc.name, func(t *testing.T) {
            got, ok := Negotiate(tc.accept, tc.serverTypes)
            require.Equal(t, tc.ok, ok)
            require.Equal(t, tc.wantType, got)
        })
    }
}

func TestPrimaryAcceptMatches(t *testing.T) {
    tests := []struct {
        name     string
        accept   string
        produced string
        expect   bool
    }{
        {
            name:     "prefers json",
            accept:   "application/json;q=0.9,application/xml",
            produced: "application/json",
            expect:   true,
        },
        {
            name:     "wildcard application beats other wildcard",
            accept:   "application/*;q=0.2,*/*;q=0.1",
            produced: "application/xml",
            expect:   true,
        },
        {
            name:     "json wins",
            accept:   "application/xml;q=0.8,application/json;q=0.9",
            produced: "application/json",
            expect:   true,
        },
        {
            name:     "json loses",
            accept:   "application/xml;q=0.8,application/json;q=0.9,application/octet-stream;q=0.99",
            produced: "application/json",
            expect:   false,
        },
        {
            name:     "json wins with non q option",
            accept:   "application/xml;q=0.8,image/png,application/json;q=0.9",
            produced: "application/json",
            expect:   true,
        },
        {
            name:     "json not primary",
            accept:   "image/png,application/json",
            produced: "application/json",
            expect:   false,
        },
        {
            name:     "absent header",
            accept:   "",
            produced: "text/plain",
            expect:   true,
        },
    }

    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            got := PrimaryAcceptMatches(tc.accept, tc.produced)
            require.Equal(t, got, tc.expect)
        })
    }
}
@@ -10,6 +10,7 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//api:go_default_library",
        "//api/apiutil:go_default_library",
        "@com_github_rs_cors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
@@ -7,6 +7,7 @@ import (
    "strings"

    "github.com/OffchainLabs/prysm/v6/api"
    "github.com/OffchainLabs/prysm/v6/api/apiutil"
    "github.com/rs/cors"
    log "github.com/sirupsen/logrus"
)

@@ -74,42 +75,10 @@ func ContentTypeHandler(acceptedMediaTypes []string) Middleware {
func AcceptHeaderHandler(serverAcceptedTypes []string) Middleware {
    return func(next http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            acceptHeader := r.Header.Get("Accept")
            // header is optional and should skip if not provided
            if acceptHeader == "" {
                next.ServeHTTP(w, r)
            if _, ok := apiutil.Negotiate(r.Header.Get("Accept"), serverAcceptedTypes); !ok {
                http.Error(w, "Not Acceptable", http.StatusNotAcceptable)
                return
            }

            accepted := false
            acceptTypes := strings.Split(acceptHeader, ",")
            // follows rules defined in https://datatracker.ietf.org/doc/html/rfc2616#section-14.1
            for _, acceptType := range acceptTypes {
                acceptType = strings.TrimSpace(acceptType)
                if acceptType == "*/*" {
                    accepted = true
                    break
                }
                for _, serverAcceptedType := range serverAcceptedTypes {
                    if strings.HasPrefix(acceptType, serverAcceptedType) {
                        accepted = true
                        break
                    }
                    if acceptType != "/*" && strings.HasSuffix(acceptType, "/*") && strings.HasPrefix(serverAcceptedType, acceptType[:len(acceptType)-2]) {
                        accepted = true
                        break
                    }
                }
                if accepted {
                    break
                }
            }

            if !accepted {
                http.Error(w, fmt.Sprintf("Not Acceptable: %s", acceptHeader), http.StatusNotAcceptable)
                return
            }

            next.ServeHTTP(w, r)
        })
    }
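Read together, the removed and added lines suggest the handler collapses to a single negotiation call. The sketch below is an illustrative reconstruction, not the exact post-change file: the `Middleware` type is re-declared locally so the snippet stands alone, and the function is lower-cased to make clear it is not the library's exported symbol.

```go
package main

import (
	"net/http"

	"github.com/OffchainLabs/prysm/v6/api/apiutil"
)

// Middleware wraps an http.Handler; re-declared here only for self-containment.
type Middleware func(http.Handler) http.Handler

// acceptHeaderHandler sketches what the handler reduces to once Accept parsing
// is delegated to apiutil.Negotiate. An absent Accept header parses as "*/*"
// and therefore negotiates successfully, preserving the old optional-header
// behaviour without a special case.
func acceptHeaderHandler(serverAcceptedTypes []string) Middleware {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if _, ok := apiutil.Negotiate(r.Header.Get("Accept"), serverAcceptedTypes); !ok {
				http.Error(w, "Not Acceptable", http.StatusNotAcceptable)
				return
			}
			next.ServeHTTP(w, r)
		})
	}
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})
	wrapped := acceptHeaderHandler([]string{"application/json"})(mux)
	_ = http.ListenAndServe("127.0.0.1:8080", wrapped) // illustrative address
}
```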
@@ -36,6 +36,7 @@ go_library(
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/validator:go_default_library",
        "//container/slice:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//proto/engine/v1:go_default_library",
@@ -10,6 +10,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/consensus-types/validator"
    "github.com/OffchainLabs/prysm/v6/container/slice"
    "github.com/OffchainLabs/prysm/v6/crypto/bls"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/math"
    enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"

@@ -699,6 +700,11 @@ func (m *SyncCommitteeMessage) ToConsensus() (*eth.SyncCommitteeMessage, error)
    if err != nil {
        return nil, server.NewDecodeError(err, "Signature")
    }
    // Add validation to check if the signature is valid BLS format
    _, err = bls.SignatureFromBytes(sig)
    if err != nil {
        return nil, server.NewDecodeError(err, "Signature")
    }

    return &eth.SyncCommitteeMessage{
        Slot: primitives.Slot(slot),
@@ -283,3 +283,10 @@ type GetPendingPartialWithdrawalsResponse struct {
    Finalized bool                        `json:"finalized"`
    Data      []*PendingPartialWithdrawal `json:"data"`
}

type GetProposerLookaheadResponse struct {
    Version             string   `json:"version"`
    ExecutionOptimistic bool     `json:"execution_optimistic"`
    Finalized           bool     `json:"finalized"`
    Data                []string `json:"data"` // validator indexes
}
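For orientation, the sketch below shows the JSON shape that the new `GetProposerLookaheadResponse` struct marshals to. It is illustrative only: the struct is re-declared locally (the diff does not show its package path), and the version string and validator indexes are made-up sample values.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Re-declaration of the response type from the diff, for illustration only.
type GetProposerLookaheadResponse struct {
	Version             string   `json:"version"`
	ExecutionOptimistic bool     `json:"execution_optimistic"`
	Finalized           bool     `json:"finalized"`
	Data                []string `json:"data"` // validator indexes
}

func main() {
	resp := GetProposerLookaheadResponse{
		Version:             "fulu", // assumed value
		ExecutionOptimistic: false,
		Finalized:           true,
		Data:                []string{"10212", "887", "4521"}, // sample validator indexes
	}
	out, err := json.MarshalIndent(resp, "", "  ")
	if err != nil {
		panic(err)
	}
	// Prints the indented JSON object with version, execution_optimistic,
	// finalized, and data (an array of decimal index strings).
	fmt.Println(string(out))
}
```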
@@ -27,6 +27,8 @@ type Identity struct {
type Metadata struct {
    SeqNumber string `json:"seq_number"`
    Attnets   string `json:"attnets"`
    Syncnets  string `json:"syncnets,omitempty"`
    Cgc       string `json:"custody_group_count,omitempty"`
}

type GetPeerResponse struct {
@@ -73,6 +73,7 @@ go_library(
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",

@@ -194,6 +195,7 @@ go_test(
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
@@ -174,6 +174,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
            "payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
        }).Info("Forkchoice updated with payload attributes for proposal")
        s.cfg.PayloadIDCache.Set(nextSlot, arg.headRoot, pId)
        go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), arg.headBlock, arg.headRoot, nextSlot)
    } else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
        log.WithFields(logrus.Fields{
            "blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
@@ -102,8 +102,6 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
        log.WithError(err).Error("Could not save head")
    }

    go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), args.headBlock, args.headRoot, s.CurrentSlot()+1)

    // Only need to prune attestations from pool if the head has changed.
    s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
    return nil
@@ -11,7 +11,7 @@ import (
)

var (
    // https://github.com/ethereum/consensus-specs/blob/dev/presets/mainnet/trusted_setups/trusted_setup_4096.json
    // https://github.com/ethereum/consensus-specs/blob/master/presets/mainnet/trusted_setups/trusted_setup_4096.json
    //go:embed trusted_setup_4096.json
    embeddedTrustedSetup []byte // 1.2Mb
    kzgContext           *GoKZG.Context
@@ -7,7 +7,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
    statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
    lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/execution"

@@ -36,7 +35,7 @@ func WithMaxGoroutines(x int) Option {
// WithLCStore for light client store access.
func WithLCStore() Option {
    return func(s *Service) error {
        s.lcStore = lightclient.NewLightClientStore(s.cfg.BeaconDB)
        s.lcStore = lightclient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())
        return nil
    }
}

@@ -235,14 +234,6 @@ func WithSyncChecker(checker Checker) Option {
    }
}

// WithCustodyInfo sets the custody info for the blockchain service.
func WithCustodyInfo(custodyInfo *peerdas.CustodyInfo) Option {
    return func(s *Service) error {
        s.cfg.CustodyInfo = custodyInfo
        return nil
    }
}

// WithSlasherEnabled sets whether the slasher is enabled or not.
func WithSlasherEnabled(enabled bool) Option {
    return func(s *Service) error {
@@ -666,10 +666,11 @@ func (s *Service) areDataColumnsAvailable(
    root [fieldparams.RootLength]byte,
    block interfaces.ReadOnlyBeaconBlock,
) error {
    // We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
    samplesPerSlot := params.BeaconConfig().SamplesPerSlot

    // We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
    blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
    blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)

    if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
        return nil
    }

@@ -690,16 +691,20 @@ func (s *Service) areDataColumnsAvailable(
    }

    // All columns to sample need to be available for the block to be considered available.
    // https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
    nodeID := s.cfg.P2P.NodeID()

    // Prevent custody group count to change during the rest of the function.
    s.cfg.CustodyInfo.Mut.RLock()
    defer s.cfg.CustodyInfo.Mut.RUnlock()

    // Get the custody group sampling size for the node.
    custodyGroupSamplingSize := s.cfg.CustodyInfo.CustodyGroupSamplingSize(peerdas.Actual)
    peerInfo, _, err := peerdas.Info(nodeID, custodyGroupSamplingSize)
    custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
    if err != nil {
        return errors.Wrap(err, "custody group count")
    }

    // Compute the sampling size.
    // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
    samplingSize := max(samplesPerSlot, custodyGroupCount)

    // Get the peer info for the node.
    peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
    if err != nil {
        return errors.Wrap(err, "peer info")
    }
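The sampling-size rule introduced in this hunk is compact enough to state on its own. The sketch below is illustrative only (the constant values are assumptions, not mainnet parameters pulled from the diff): a node samples at least `SamplesPerSlot` custody groups, and at least as many as it custodies.

```go
package main

import "fmt"

// samplingSize mirrors the rule above: the effective number of custody groups
// to sample is the larger of the per-slot sampling floor and the node's own
// custody group count.
func samplingSize(samplesPerSlot, custodyGroupCount uint64) uint64 {
	return max(samplesPerSlot, custodyGroupCount)
}

func main() {
	// Illustrative values only; the real ones come from params.BeaconConfig()
	// and the node's custody configuration.
	const samplesPerSlot = 8

	fmt.Println(samplingSize(samplesPerSlot, 4))   // 8: light custody, the sampling floor applies
	fmt.Println(samplingSize(samplesPerSlot, 128)) // 128: a node custodying many groups samples them all
}
```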
@@ -1,7 +1,6 @@
package blockchain

import (
    "bytes"
    "context"
    "fmt"
    "strings"

@@ -134,9 +133,6 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
    if err := s.processLightClientUpdate(cfg); err != nil {
        log.WithError(err).Error("Failed to process light client update")
    }
    if err := s.processLightClientBootstrap(cfg); err != nil {
        log.WithError(err).Error("Failed to process light client bootstrap")
    }
    if err := s.processLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
        log.WithError(err).Error("Failed to process light client optimistic update")
    }

@@ -174,15 +170,7 @@ func (s *Service) processLightClientUpdate(cfg *postBlockProcessConfig) error {
        return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
    }

    update, err := lightclient.NewLightClientUpdateFromBeaconState(
        cfg.ctx,
        s.CurrentSlot(),
        cfg.postState,
        cfg.roblock,
        attestedState,
        attestedBlock,
        finalizedBlock,
    )
    update, err := lightclient.NewLightClientUpdateFromBeaconState(cfg.ctx, cfg.postState, cfg.roblock, attestedState, attestedBlock, finalizedBlock)
    if err != nil {
        return errors.Wrapf(err, "could not create light client update")
    }

@@ -192,20 +180,6 @@ func (s *Service) processLightClientUpdate(cfg *postBlockProcessConfig) error {
    return s.lcStore.SaveLightClientUpdate(cfg.ctx, period, update)
}

// processLightClientBootstrap saves a light client bootstrap for this block
// when feature flag is enabled.
func (s *Service) processLightClientBootstrap(cfg *postBlockProcessConfig) error {
    blockRoot := cfg.roblock.Root()
    bootstrap, err := lightclient.NewLightClientBootstrapFromBeaconState(cfg.ctx, s.CurrentSlot(), cfg.postState, cfg.roblock)
    if err != nil {
        return errors.Wrapf(err, "could not create light client bootstrap")
    }
    if err := s.lcStore.SaveLightClientBootstrap(cfg.ctx, blockRoot, bootstrap); err != nil {
        return errors.Wrapf(err, "could not save light client bootstrap")
    }
    return nil
}

func (s *Service) processLightClientFinalityUpdate(
    ctx context.Context,
    signed interfaces.ReadOnlySignedBeaconBlock,

@@ -223,8 +197,7 @@ func (s *Service) processLightClientFinalityUpdate(

    finalizedCheckpoint := attestedState.FinalizedCheckpoint()

    // Check if the finalized checkpoint has changed
    if finalizedCheckpoint == nil || bytes.Equal(finalizedCheckpoint.GetRoot(), postState.FinalizedCheckpoint().Root) {
    if finalizedCheckpoint == nil {
        return nil
    }

@@ -238,51 +211,18 @@ func (s *Service) processLightClientFinalityUpdate(
        return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
    }

    newUpdate, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(
        ctx,
        postState.Slot(),
        postState,
        signed,
        attestedState,
        attestedBlock,
        finalizedBlock,
    )
    newUpdate, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, postState, signed, attestedState, attestedBlock, finalizedBlock)

    if err != nil {
        return errors.Wrap(err, "could not create light client finality update")
    }

    lastUpdate := s.lcStore.LastFinalityUpdate()
    if lastUpdate != nil {
        // The finalized_header.beacon.lastUpdateSlot is greater than that of all previously forwarded finality_updates,
        // or it matches the highest previously forwarded lastUpdateSlot and also has a sync_aggregate indicating supermajority (> 2/3)
        // sync committee participation while the previously forwarded finality_update for that lastUpdateSlot did not indicate supermajority
        newUpdateSlot := newUpdate.FinalizedHeader().Beacon().Slot
        newHasSupermajority := lightclient.UpdateHasSupermajority(newUpdate.SyncAggregate())

        lastUpdateSlot := lastUpdate.FinalizedHeader().Beacon().Slot
        lastHasSupermajority := lightclient.UpdateHasSupermajority(lastUpdate.SyncAggregate())

        if newUpdateSlot < lastUpdateSlot {
            log.Debug("Skip saving light client finality newUpdate: Older than local newUpdate")
            return nil
        }
        if newUpdateSlot == lastUpdateSlot && (lastHasSupermajority || !newHasSupermajority) {
            log.Debug("Skip saving light client finality update: No supermajority advantage")
            return nil
        }
    if !lightclient.IsBetterFinalityUpdate(newUpdate, s.lcStore.LastFinalityUpdate()) {
        log.Debug("Skip saving light client finality update: current update is better")
        return nil
    }
    log.Debug("Saving new light client finality update")
    s.lcStore.SetLastFinalityUpdate(newUpdate)

    s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
        Type: statefeed.LightClientFinalityUpdate,
        Data: newUpdate,
    })

    if err = s.cfg.P2P.BroadcastLightClientFinalityUpdate(ctx, newUpdate); err != nil {
        return errors.Wrap(err, "could not broadcast light client finality update")
    }
    s.lcStore.SetLastFinalityUpdate(newUpdate, true)

    return nil
}

@@ -299,14 +239,7 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
        return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
    }

    newUpdate, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(
        ctx,
        postState.Slot(),
        postState,
        signed,
        attestedState,
        attestedBlock,
    )
    newUpdate, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, postState, signed, attestedState, attestedBlock)

    if err != nil {
        if strings.Contains(err.Error(), lightclient.ErrNotEnoughSyncCommitteeBits) {

@@ -316,26 +249,12 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
        return errors.Wrap(err, "could not create light client optimistic update")
    }

    lastUpdate := s.lcStore.LastOptimisticUpdate()
    if lastUpdate != nil {
        // The attested_header.beacon.slot is greater than that of all previously forwarded optimistic updates
        if newUpdate.AttestedHeader().Beacon().Slot <= lastUpdate.AttestedHeader().Beacon().Slot {
            log.Debug("Skip saving light client optimistic update: Older than local update")
            return nil
        }
    if !lightclient.IsBetterOptimisticUpdate(newUpdate, s.lcStore.LastOptimisticUpdate()) {
        log.Debug("Skip saving light client optimistic update: current update is better")
        return nil
    }

    log.Debug("Saving new light client optimistic update")
    s.lcStore.SetLastOptimisticUpdate(newUpdate)

    s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
        Type: statefeed.LightClientOptimisticUpdate,
        Data: newUpdate,
    })

    if err = s.cfg.P2P.BroadcastLightClientOptimisticUpdate(ctx, newUpdate); err != nil {
        return errors.Wrap(err, "could not broadcast light client optimistic update")
    }
    s.lcStore.SetLastOptimisticUpdate(newUpdate, true)

    return nil
}
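The slot and supermajority bookkeeping the old code did inline is now delegated to `lightclient.IsBetterFinalityUpdate` and `IsBetterOptimisticUpdate`. As a rough mental model only — this is not Prysm's implementation, and the ordering criteria are inferred from the renamed test cases further down in this diff ("finalized slot is higher", "attested slot is higher", "signature slot is higher") — such a comparison can be sketched as a "strictly newer wins" check:

```go
package main

import "fmt"

// lcUpdateSummary is a simplified stand-in for a light client finality update,
// used only to illustrate the comparison; it is not Prysm's type.
type lcUpdateSummary struct {
	FinalizedSlot uint64
	AttestedSlot  uint64
	SignatureSlot uint64
}

// isBetter sketches the ordering exercised by the updated tests: a candidate
// replaces the stored update only if it is strictly ahead on finalized,
// attested, or signature slot, checked in that order. The real library also
// weighs sync-committee participation; that is omitted here.
func isBetter(candidate, current lcUpdateSummary) bool {
	if candidate.FinalizedSlot != current.FinalizedSlot {
		return candidate.FinalizedSlot > current.FinalizedSlot
	}
	if candidate.AttestedSlot != current.AttestedSlot {
		return candidate.AttestedSlot > current.AttestedSlot
	}
	return candidate.SignatureSlot > current.SignatureSlot
}

func main() {
	old := lcUpdateSummary{FinalizedSlot: 100, AttestedSlot: 110, SignatureSlot: 111}
	newer := lcUpdateSummary{FinalizedSlot: 100, AttestedSlot: 111, SignatureSlot: 112}
	fmt.Println(isBetter(newer, old)) // true: higher attested slot wins
	fmt.Println(isBetter(old, newer)) // false: the stored update is kept
}
```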
@@ -2815,53 +2815,6 @@ func TestProcessLightClientUpdate(t *testing.T) {
    }
}

func TestProcessLightClientBootstrap(t *testing.T) {
    featCfg := &features.Flags{}
    featCfg.EnableLightClient = true
    reset := features.InitWithReset(featCfg)
    defer reset()

    s, tr := minimalTestService(t, WithLCStore())
    ctx := tr.ctx

    for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
        t.Run(version.String(testVersion), func(t *testing.T) {
            l := util.NewTestLightClient(t, testVersion)

            s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().VersionToForkEpochMap()[testVersion])*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)

            currentBlockRoot, err := l.Block.Block().HashTreeRoot()
            require.NoError(t, err)
            roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
            require.NoError(t, err)

            err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
            require.NoError(t, err)
            err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
            require.NoError(t, err)

            cfg := &postBlockProcessConfig{
                ctx:            ctx,
                roblock:        roblock,
                postState:      l.State,
                isValidPayload: true,
            }

            require.NoError(t, s.processLightClientBootstrap(cfg))

            // Check that the light client bootstrap is saved
            b, err := s.lcStore.LightClientBootstrap(ctx, currentBlockRoot)
            require.NoError(t, err)
            require.NotNil(t, b)

            stateRoot, err := l.State.HashTreeRoot(ctx)
            require.NoError(t, err)
            require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
            require.Equal(t, b.Version(), testVersion)
        })
    }
}

type testIsAvailableParams struct {
    options                 []Option
    blobKzgCommitmentsCount uint64

@@ -2941,7 +2894,6 @@ func TestIsDataAvailable(t *testing.T) {
    }

    params := testIsAvailableParams{
        options:                 []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
        columnsToSave:           indices,
        blobKzgCommitmentsCount: 3,
    }

@@ -2954,7 +2906,6 @@ func TestIsDataAvailable(t *testing.T) {

    t.Run("Fulu - no missing data columns", func(t *testing.T) {
        params := testIsAvailableParams{
            options:                 []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
            columnsToSave:           []uint64{1, 17, 19, 42, 75, 87, 102, 117, 119}, // 119 is not needed
            blobKzgCommitmentsCount: 3,
        }

@@ -2969,7 +2920,7 @@ func TestIsDataAvailable(t *testing.T) {
        startWaiting := make(chan bool)

        testParams := testIsAvailableParams{
            options:       []Option{WithCustodyInfo(&peerdas.CustodyInfo{}), WithStartWaitingDataColumnSidecars(startWaiting)},
            options:       []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
            columnsToSave: []uint64{1, 17, 19, 75, 102, 117, 119}, // 119 is not needed, 42 and 87 are missing

            blobKzgCommitmentsCount: 3,

@@ -3006,6 +2957,9 @@ func TestIsDataAvailable(t *testing.T) {
            require.NoError(t, err)
        }()

        ctx, cancel := context.WithTimeout(ctx, time.Second*2)
        defer cancel()

        err = service.isDataAvailable(ctx, root, signed)
        require.NoError(t, err)
    })

@@ -3018,10 +2972,6 @@ func TestIsDataAvailable(t *testing.T) {

        startWaiting := make(chan bool)

        var custodyInfo peerdas.CustodyInfo
        custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(cgc)
        custodyInfo.ToAdvertiseGroupCount.Set(cgc)

        minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
        indices := make([]uint64, 0, minimumColumnsCountToReconstruct-missingColumns)

@@ -3030,12 +2980,14 @@ func TestIsDataAvailable(t *testing.T) {
        }

        testParams := testIsAvailableParams{
            options:                 []Option{WithCustodyInfo(&custodyInfo), WithStartWaitingDataColumnSidecars(startWaiting)},
            options:                 []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
            columnsToSave:           indices,
            blobKzgCommitmentsCount: 3,
        }

        ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
        _, _, err := service.cfg.P2P.UpdateCustodyInfo(0, cgc)
        require.NoError(t, err)
        block := signed.Block()
        slot := block.Slot()
        proposerIndex := block.ProposerIndex()

@@ -3067,6 +3019,9 @@ func TestIsDataAvailable(t *testing.T) {
            require.NoError(t, err)
        }()

        ctx, cancel := context.WithTimeout(ctx, time.Second*2)
        defer cancel()

        err = service.isDataAvailable(ctx, root, signed)
        require.NoError(t, err)
    })

@@ -3075,7 +3030,7 @@ func TestIsDataAvailable(t *testing.T) {
        startWaiting := make(chan bool)

        params := testIsAvailableParams{
            options:                 []Option{WithCustodyInfo(&peerdas.CustodyInfo{}), WithStartWaitingDataColumnSidecars(startWaiting)},
            options:                 []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
            blobKzgCommitmentsCount: 3,
        }

@@ -3217,7 +3172,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {

    t.Run(version.String(testVersion)+"_"+tc.name, func(t *testing.T) {
        s.genesisTime = time.Unix(time.Now().Unix()-(int64(forkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
        s.lcStore = &lightClient.Store{}
        s.lcStore = lightClient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())

        var oldActualUpdate interfaces.LightClientOptimisticUpdate
        var err error

@@ -3226,14 +3181,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
        lOld, cfgOld := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.oldOptions...)
        require.NoError(t, s.processLightClientOptimisticUpdate(cfgOld.ctx, cfgOld.roblock, cfgOld.postState))

        oldActualUpdate, err = lightClient.NewLightClientOptimisticUpdateFromBeaconState(
            lOld.Ctx,
            lOld.State.Slot(),
            lOld.State,
            lOld.Block,
            lOld.AttestedState,
            lOld.AttestedBlock,
        )
        oldActualUpdate, err = lightClient.NewLightClientOptimisticUpdateFromBeaconState(lOld.Ctx, lOld.State, lOld.Block, lOld.AttestedState, lOld.AttestedBlock)
        require.NoError(t, err)

        // check that the old update is saved

@@ -3247,14 +3195,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
        lNew, cfgNew := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.newOptions...)
        require.NoError(t, s.processLightClientOptimisticUpdate(cfgNew.ctx, cfgNew.roblock, cfgNew.postState))

        newActualUpdate, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(
            lNew.Ctx,
            lNew.State.Slot(),
            lNew.State,
            lNew.Block,
            lNew.AttestedState,
            lNew.AttestedBlock,
        )
        newActualUpdate, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lNew.Ctx, lNew.State, lNew.Block, lNew.AttestedState, lNew.AttestedBlock)
        require.NoError(t, err)

        require.DeepNotEqual(t, newActualUpdate, oldActualUpdate, "new update should not be equal to old update")

@@ -3307,39 +3248,39 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
            expectReplace: true,
        },
        {
            name:          "Old update is better - age - no supermajority",
            name:          "Old update is better - finalized slot is higher",
            oldOptions:    []util.LightClientOption{util.WithIncreasedFinalizedSlot(1)},
            newOptions:    []util.LightClientOption{},
            expectReplace: false,
        },
        {
            name:          "Old update is better - age - both supermajority",
            oldOptions:    []util.LightClientOption{util.WithIncreasedFinalizedSlot(1), util.WithSupermajority()},
            newOptions:    []util.LightClientOption{util.WithSupermajority()},
            expectReplace: false,
        },
        {
            name:          "Old update is better - supermajority",
            oldOptions:    []util.LightClientOption{util.WithSupermajority()},
            name:          "Old update is better - attested slot is higher",
            oldOptions:    []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
            newOptions:    []util.LightClientOption{},
            expectReplace: false,
        },
        {
            name:          "New update is better - age - both supermajority",
            oldOptions:    []util.LightClientOption{util.WithSupermajority()},
            newOptions:    []util.LightClientOption{util.WithIncreasedFinalizedSlot(1), util.WithSupermajority()},
            name:          "Old update is better - signature slot is higher",
            oldOptions:    []util.LightClientOption{util.WithIncreasedSignatureSlot(1)},
            newOptions:    []util.LightClientOption{},
            expectReplace: false,
        },
        {
            name:          "New update is better - finalized slot is higher",
            oldOptions:    []util.LightClientOption{},
            newOptions:    []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
            expectReplace: true,
        },
        {
            name:          "New update is better - age - no supermajority",
            name:          "New update is better - attested slot is higher",
            oldOptions:    []util.LightClientOption{},
            newOptions:    []util.LightClientOption{util.WithIncreasedFinalizedSlot(1)},
            newOptions:    []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
            expectReplace: true,
        },
        {
            name:          "New update is better - supermajority",
            name:          "New update is better - signature slot is higher",
            oldOptions:    []util.LightClientOption{},
            newOptions:    []util.LightClientOption{util.WithSupermajority()},
            newOptions:    []util.LightClientOption{util.WithIncreasedSignatureSlot(1)},
            expectReplace: true,
        },
    }

@@ -3371,7 +3312,7 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {

    t.Run(version.String(testVersion)+"_"+tc.name, func(t *testing.T) {
        s.genesisTime = time.Unix(time.Now().Unix()-(int64(forkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
        s.lcStore = &lightClient.Store{}
        s.lcStore = lightClient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())

        var actualOldUpdate, actualNewUpdate interfaces.LightClientFinalityUpdate
        var err error

@@ -3382,15 +3323,7 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
        require.NoError(t, s.processLightClientFinalityUpdate(cfgOld.ctx, cfgOld.roblock, cfgOld.postState))

        // check that the old update is saved
        actualOldUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(
            ctx,
            cfgOld.postState.Slot(),
            cfgOld.postState,
            cfgOld.roblock,
            lOld.AttestedState,
            lOld.AttestedBlock,
            lOld.FinalizedBlock,
        )
        actualOldUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(ctx, cfgOld.postState, cfgOld.roblock, lOld.AttestedState, lOld.AttestedBlock, lOld.FinalizedBlock)
        require.NoError(t, err)
        oldUpdate := s.lcStore.LastFinalityUpdate()
        require.DeepEqual(t, actualOldUpdate, oldUpdate)

@@ -3401,15 +3334,7 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
        require.NoError(t, s.processLightClientFinalityUpdate(cfgNew.ctx, cfgNew.roblock, cfgNew.postState))

        // check that the actual old update and the actual new update are different
        actualNewUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(
            ctx,
            cfgNew.postState.Slot(),
            cfgNew.postState,
            cfgNew.roblock,
            lNew.AttestedState,
            lNew.AttestedBlock,
            lNew.FinalizedBlock,
        )
        actualNewUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(ctx, cfgNew.postState, cfgNew.roblock, lNew.AttestedState, lNew.AttestedBlock, lNew.FinalizedBlock)
        require.NoError(t, err)
        require.DeepNotEqual(t, actualOldUpdate, actualNewUpdate)
@@ -177,7 +177,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
    for _, a := range atts {
        // Based on the spec, don't process the attestation until the subsequent slot.
        // This delays consideration in the fork choice until their slot is in the past.
        // https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
        // https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/fork-choice.md#validate_on_attestation
        nextSlot := a.GetData().Slot + 1
        if err := slots.VerifyTime(s.genesisTime, nextSlot, disparity); err != nil {
            continue
@@ -300,15 +300,30 @@ func (s *Service) reportPostBlockProcessing(

func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {
    finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()

    // Send finalization event
    go func() {
        s.sendNewFinalizedEvent(ctx, finalizedState)
    }()

    // Insert finalized deposits into finalized deposit trie
    depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
    go func() {
        s.insertFinalizedDepositsAndPrune(depCtx, finalized.Root)
        cancel()
    }()

    if features.Get().EnableLightClient {
        // Save a light client bootstrap for the finalized checkpoint
        go func() {
            err := s.lcStore.SaveLightClientBootstrap(s.ctx, finalized.Root)
            if err != nil {
                log.WithError(err).Error("Could not save light client bootstrap by block root")
            } else {
                log.Debugf("Saved light client bootstrap for finalized root %#x", finalized.Root)
            }
        }()
    }
}

// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear ,transitioning
@@ -9,7 +9,9 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
    statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/das"
    forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/operations/voluntaryexits"
    "github.com/OffchainLabs/prysm/v6/config/features"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"

@@ -19,6 +21,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    ethpbv1 "github.com/OffchainLabs/prysm/v6/proto/eth/v1"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/runtime/version"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"

@@ -564,3 +567,48 @@ func Test_executePostFinalizationTasks(t *testing.T) {
    })

}

func TestProcessLightClientBootstrap(t *testing.T) {
    featCfg := &features.Flags{}
    featCfg.EnableLightClient = true
    reset := features.InitWithReset(featCfg)
    defer reset()

    s, tr := minimalTestService(t, WithLCStore())
    ctx := tr.ctx

    for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
        t.Run(version.String(testVersion), func(t *testing.T) {
            l := util.NewTestLightClient(t, testVersion)

            require.NoError(t, s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock))
            finalizedBlockRoot, err := l.FinalizedBlock.Block().HashTreeRoot()
            require.NoError(t, err)
            require.NoError(t, s.cfg.BeaconDB.SaveState(ctx, l.FinalizedState, finalizedBlockRoot))

            cp := l.AttestedState.FinalizedCheckpoint()
            require.DeepSSZEqual(t, finalizedBlockRoot, [32]byte(cp.Root))

            require.NoError(t, s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: cp.Epoch, Root: [32]byte(cp.Root)}))

            sss, err := s.cfg.BeaconDB.State(ctx, finalizedBlockRoot)
            require.NoError(t, err)
            require.NotNil(t, sss)

            s.executePostFinalizationTasks(s.ctx, l.FinalizedState)

            // wait for the goroutine to finish processing
            time.Sleep(1 * time.Second)

            // Check that the light client bootstrap is saved
            b, err := s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
            require.NoError(t, err)
            require.NotNil(t, b)

            btst, err := lightClient.NewLightClientBootstrapFromBeaconState(ctx, l.FinalizedState.Slot(), l.FinalizedState, l.FinalizedBlock)
            require.NoError(t, err)
            require.DeepEqual(t, btst, b)
            require.Equal(t, b.Version(), testVersion)
        })
    }
}
@@ -15,7 +15,6 @@ func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataC
}

// ReceiveDataColumn receives a single data column.
// (It is only a wrapper around ReceiveDataColumns.)
func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
    if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
        return errors.Wrap(err, "save data column sidecars")
@@ -16,7 +16,6 @@ import (
    statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db"

@@ -31,6 +30,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
    "github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"

@@ -97,7 +97,6 @@ type config struct {
    FinalizedStateAtStartUp state.BeaconState
    ExecutionEngineCaller   execution.EngineCaller
    SyncChecker             Checker
    CustodyInfo             *peerdas.CustodyInfo
}

// Checker is an interface used to determine if a node is in initial sync

@@ -296,6 +295,20 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
    if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
        return errors.Wrap(err, "failed to initialize blockchain service")
    }

    if !params.FuluEnabled() {
        return nil
    }

    earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(saved.Slot())
    if err != nil {
        return errors.Wrap(err, "could not get and save custody group count")
    }

    if _, _, err := s.cfg.P2P.UpdateCustodyInfo(earliestAvailableSlot, custodySubnetCount); err != nil {
        return errors.Wrap(err, "update custody info")
    }

    return nil
}

@@ -516,6 +529,57 @@ func (s *Service) removeStartupState() {
    s.cfg.FinalizedStateAtStartUp = nil
}

// UpdateCustodyInfoInDB updates the custody information in the database.
// It returns the (potentially updated) custody group count and the earliest available slot.
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
    isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets

    beaconConfig := params.BeaconConfig()
    custodyRequirement := beaconConfig.CustodyRequirement

    // Check if the node was previously subscribed to all data subnets, and if so,
    // store the new status accordingly.
    wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
    if err != nil {
        log.WithError(err).Error("Could not update subscription status to all data subnets")
    }

    // Warn the user if the node was previously subscribed to all data subnets and is not any more.
    if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
        log.Warnf(
            "Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
            flags.SubscribeAllDataSubnets.Name,
        )
    }

    // Compute the custody group count.
    custodyGroupCount := custodyRequirement
    if isSubscribedToAllDataSubnets {
        custodyGroupCount = beaconConfig.NumberOfColumns
    }

    // Safely compute the fulu fork slot.
    fuluForkSlot, err := fuluForkSlot()
    if err != nil {
        return 0, 0, errors.Wrap(err, "fulu fork slot")
    }

    // If slot is before the fulu fork slot, then use the earliest stored slot as the reference slot.
    if slot < fuluForkSlot {
        slot, err = s.cfg.BeaconDB.EarliestSlot(s.ctx)
        if err != nil {
            return 0, 0, errors.Wrap(err, "earliest slot")
        }
    }

    earliestAvailableSlot, custodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, custodyGroupCount)
    if err != nil {
        return 0, 0, errors.Wrap(err, "update custody info")
    }

    return earliestAvailableSlot, custodyGroupCount, nil
}

func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
    currentTime := prysmTime.Now()
    if currentTime.After(genesisTime) {

@@ -532,3 +596,19 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
    }
    go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
}

func fuluForkSlot() (primitives.Slot, error) {
    beaconConfig := params.BeaconConfig()

    fuluForkEpoch := beaconConfig.FuluForkEpoch
    if fuluForkEpoch == beaconConfig.FarFutureEpoch {
        return beaconConfig.FarFutureSlot, nil
    }

    forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
    if err != nil {
        return 0, errors.Wrap(err, "epoch start")
    }

    return forkFuluSlot, nil
}
@@ -30,6 +30,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/libp2p/go-libp2p/core/peer"
    "google.golang.org/protobuf/proto"
)

@@ -54,6 +55,7 @@ type mockBroadcaster struct {

type mockAccessor struct {
    mockBroadcaster
    mockCustodyManager
    p2pTesting.MockPeerManager
}

@@ -87,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
    return nil
}

func (mb *mockBroadcaster) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar, _ ...chan<- bool) error {
func (mb *mockBroadcaster) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
    mb.broadcastCalled = true
    return nil
}

@@ -97,6 +99,43 @@ func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.Sig

var _ p2p.Broadcaster = (*mockBroadcaster)(nil)

// mockCustodyManager is a mock implementation of p2p.CustodyManager
type mockCustodyManager struct {
    mut                   sync.RWMutex
    earliestAvailableSlot primitives.Slot
    custodyGroupCount     uint64
}

func (dch *mockCustodyManager) EarliestAvailableSlot() (primitives.Slot, error) {
    dch.mut.RLock()
    defer dch.mut.RUnlock()

    return dch.earliestAvailableSlot, nil
}

func (dch *mockCustodyManager) CustodyGroupCount() (uint64, error) {
    dch.mut.RLock()
    defer dch.mut.RUnlock()

    return dch.custodyGroupCount, nil
}

func (dch *mockCustodyManager) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
    dch.mut.Lock()
    defer dch.mut.Unlock()

    dch.earliestAvailableSlot = earliestAvailableSlot
    dch.custodyGroupCount = custodyGroupCount

    return earliestAvailableSlot, custodyGroupCount, nil
}

func (dch *mockCustodyManager) CustodyGroupCountFromPeer(peer.ID) uint64 {
    return 0
}

var _ p2p.CustodyManager = (*mockCustodyManager)(nil)

type testServiceRequirements struct {
    ctx context.Context
    db  db.Database
@@ -68,7 +68,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
        log.WithError(err).Error("Failed to check builder status")
    } else {
        log.WithField("endpoint", s.c.NodeURL()).Info("Builder has been configured")
        log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
        log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
            "Builder-constructed blocks or fallback blocks may get orphaned. Use at your own risk!")
    }
}
@@ -102,13 +102,13 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
            return nil, err
        } else if n == params.BeaconConfig().PendingPartialWithdrawalsLimit && !isFullExitRequest {
            // if the PendingPartialWithdrawalsLimit is met, the user would have paid for a partial withdrawal that's not included
            log.Debugln("Skipping execution layer withdrawal request, PendingPartialWithdrawalsLimit reached")
            log.Debug("Skipping execution layer withdrawal request, PendingPartialWithdrawalsLimit reached")
            continue
        }

        vIdx, exists := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(wr.ValidatorPubkey))
        if !exists {
            log.Debugf("Skipping execution layer withdrawal request, validator index for %s not found\n", hexutil.Encode(wr.ValidatorPubkey))
            log.WithField("validator", hexutil.Encode(wr.ValidatorPubkey)).Debug("Skipping execution layer withdrawal request, validator index not found")
            continue
        }
        validator, err := st.ValidatorAtIndexReadOnly(vIdx)

@@ -120,23 +120,23 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
        wc := validator.GetWithdrawalCredentials()
        isCorrectSourceAddress := bytes.Equal(wc[12:], wr.SourceAddress)
        if !hasCorrectCredential || !isCorrectSourceAddress {
            log.Debugln("Skipping execution layer withdrawal request, wrong withdrawal credentials")
            log.Debug("Skipping execution layer withdrawal request, wrong withdrawal credentials")
            continue
        }

        // Verify the validator is active.
        if !helpers.IsActiveValidatorUsingTrie(validator, currentEpoch) {
            log.Debugln("Skipping execution layer withdrawal request, validator not active")
            log.Debug("Skipping execution layer withdrawal request, validator not active")
            continue
        }
        // Verify the validator has not yet submitted an exit.
        if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
            log.Debugln("Skipping execution layer withdrawal request, validator has submitted an exit already")
            log.Debug("Skipping execution layer withdrawal request, validator has submitted an exit already")
            continue
        }
        // Verify the validator has been active long enough.
        if currentEpoch < validator.ActivationEpoch().AddEpoch(params.BeaconConfig().ShardCommitteePeriod) {
            log.Debugln("Skipping execution layer withdrawal request, validator has not been active long enough")
            log.Debug("Skipping execution layer withdrawal request, validator has not been active long enough")
            continue
        }
@@ -15,7 +15,7 @@ import (
|
||||
)
|
||||
|
||||
// UpgradeToFulu updates inputs a generic state to return the version Fulu state.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/fork.md#upgrading-the-state
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/fork.md#upgrading-the-state
|
||||
func UpgradeToFulu(ctx context.Context, beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
|
||||
@@ -206,9 +206,9 @@ func ParseWeakSubjectivityInputString(wsCheckpointString string) (*v1alpha1.Chec
|
||||
// MinEpochsForBlockRequests computes the number of epochs of block history that we need to maintain,
|
||||
// relative to the current epoch, per the p2p specs. This is used to compute the slot where backfill is complete.
|
||||
// value defined:
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#configuration
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#configuration
|
||||
// MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33024, ~5 months)
|
||||
// detailed rationale: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
|
||||
// detailed rationale: https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
|
||||
func MinEpochsForBlockRequests() primitives.Epoch {
|
||||
return params.BeaconConfig().MinValidatorWithdrawabilityDelay +
|
||||
primitives.Epoch(params.BeaconConfig().ChurnLimitQuotient/2)
|
||||
|
||||
@@ -292,7 +292,7 @@ func TestMinEpochsForBlockRequests(t *testing.T) {
|
||||
params.SetActiveTestCleanup(t, params.MainnetConfig())
|
||||
var expected primitives.Epoch = 33024
|
||||
// expected value of 33024 via spec commentary:
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
|
||||
// MIN_EPOCHS_FOR_BLOCK_REQUESTS is calculated using the arithmetic from compute_weak_subjectivity_period found in the weak subjectivity guide. Specifically to find this max epoch range, we use the worst case event of a very large validator size (>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT).
|
||||
//
|
||||
// MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
|
||||
|
||||
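As a quick sanity check on the 33024 figure cited in the hunks above, the arithmetic can be reproduced directly; this is an illustrative sketch only, assuming the mainnet constants MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256 epochs and CHURN_LIMIT_QUOTIENT = 65536 (values not shown in the diff itself):

// Illustrative sketch only (not part of the diff): reproduces the
// MIN_EPOCHS_FOR_BLOCK_REQUESTS arithmetic with assumed mainnet constants.
package main

import "fmt"

func main() {
	const minValidatorWithdrawabilityDelay = 256 // epochs, assumed mainnet value
	const churnLimitQuotient = 65536             // assumed mainnet value

	minEpochs := minValidatorWithdrawabilityDelay + churnLimitQuotient/2
	fmt.Println(minEpochs) // 33024

	days := minEpochs * 32 * 12 / 86400 // 32 slots per epoch, 12s per slot (assumed mainnet timing)
	fmt.Println(days) // 146, i.e. roughly five months
}
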
@@ -10,8 +10,12 @@ go_library(
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client",
visibility = ["//visibility:public"],
deps = [
"//async/event:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -39,6 +43,9 @@ go_test(
],
deps = [
":go_default_library",
"//async/event:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",

@@ -26,14 +26,12 @@ const ErrNotEnoughSyncCommitteeBits = "sync committee bits count is less than re

func NewLightClientFinalityUpdateFromBeaconState(
ctx context.Context,
currentSlot primitives.Slot,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState,
attestedBlock interfaces.ReadOnlySignedBeaconBlock,
finalizedBlock interfaces.ReadOnlySignedBeaconBlock,
) (interfaces.LightClientFinalityUpdate, error) {
update, err := NewLightClientUpdateFromBeaconState(ctx, currentSlot, state, block, attestedState, attestedBlock, finalizedBlock)
finalizedBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.LightClientFinalityUpdate, error) {
update, err := NewLightClientUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock, finalizedBlock)
if err != nil {
return nil, err
}
@@ -43,13 +41,11 @@ func NewLightClientFinalityUpdateFromBeaconState(

func NewLightClientOptimisticUpdateFromBeaconState(
ctx context.Context,
currentSlot primitives.Slot,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState,
attestedBlock interfaces.ReadOnlySignedBeaconBlock,
) (interfaces.LightClientOptimisticUpdate, error) {
update, err := NewLightClientUpdateFromBeaconState(ctx, currentSlot, state, block, attestedState, attestedBlock, nil)
attestedBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.LightClientOptimisticUpdate, error) {
update, err := NewLightClientUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock, nil)
if err != nil {
return nil, err
}
@@ -66,7 +62,6 @@ func NewLightClientOptimisticUpdateFromBeaconState(
// if locally available (may be unavailable, e.g., when using checkpoint sync, or if it was pruned locally)
func NewLightClientUpdateFromBeaconState(
ctx context.Context,
currentSlot primitives.Slot,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState,
@@ -521,8 +516,7 @@ func ComputeWithdrawalsRoot(payload interfaces.ExecutionData) ([]byte, error) {
func BlockToLightClientHeader(
ctx context.Context,
attestedBlockVersion int, // this is the version that the light client header should be in, based on the attested block.
block interfaces.ReadOnlySignedBeaconBlock, // this block is either the attested block, or the finalized block.
// in case of the latter, we might need to upgrade it to the attested block's version.
block interfaces.ReadOnlySignedBeaconBlock, // this block is either the attested block, or the finalized block. in case of the latter, we might need to upgrade it to the attested block's version.
) (interfaces.LightClientHeader, error) {
if block.Version() > attestedBlockVersion {
return nil, errors.Errorf("block version %s is greater than attested block version %s", version.String(block.Version()), version.String(attestedBlockVersion))
@@ -755,3 +749,64 @@ func UpdateHasSupermajority(syncAggregate *pb.SyncAggregate) bool {
numActiveParticipants := syncAggregate.SyncCommitteeBits.Count()
return numActiveParticipants*3 >= maxActiveParticipants*2
}

// IsFinalityUpdateValidForBroadcast checks if a finality update needs to be broadcasted.
// It is also used to check if an incoming gossiped finality update is valid for forwarding and saving.
func IsFinalityUpdateValidForBroadcast(newUpdate, oldUpdate interfaces.LightClientFinalityUpdate) bool {
if oldUpdate == nil {
return true
}
// The finalized_header.beacon.slot is greater than that of all previously forwarded finality_updates,
// or it matches the highest previously forwarded slot and also has a sync_aggregate indicating supermajority (> 2/3)
// sync committee participation while the previously forwarded finality_update for that slot did not indicate supermajority
newUpdateSlot := newUpdate.FinalizedHeader().Beacon().Slot
newHasSupermajority := UpdateHasSupermajority(newUpdate.SyncAggregate())

lastUpdateSlot := oldUpdate.FinalizedHeader().Beacon().Slot
lastHasSupermajority := UpdateHasSupermajority(oldUpdate.SyncAggregate())

if newUpdateSlot < lastUpdateSlot {
return false
}
if newUpdateSlot == lastUpdateSlot && (lastHasSupermajority || !newHasSupermajority) {
return false
}
return true
}

// IsBetterFinalityUpdate checks if the new finality update is better than the old one for saving.
// This does not concern broadcasting, but rather the decision of whether to save the new update.
// For broadcasting checks, use IsFinalityUpdateValidForBroadcast.
func IsBetterFinalityUpdate(newUpdate, oldUpdate interfaces.LightClientFinalityUpdate) bool {
if oldUpdate == nil {
return true
}

// Full nodes SHOULD provide the LightClientFinalityUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot)
newFinalizedSlot := newUpdate.FinalizedHeader().Beacon().Slot
newAttestedSlot := newUpdate.AttestedHeader().Beacon().Slot

oldFinalizedSlot := oldUpdate.FinalizedHeader().Beacon().Slot
oldAttestedSlot := oldUpdate.AttestedHeader().Beacon().Slot

if newFinalizedSlot < oldFinalizedSlot {
return false
}
if newFinalizedSlot == oldFinalizedSlot {
if newAttestedSlot < oldAttestedSlot {
return false
}
if newAttestedSlot == oldAttestedSlot && newUpdate.SignatureSlot() <= oldUpdate.SignatureSlot() {
return false
}
}
return true
}

func IsBetterOptimisticUpdate(newUpdate, oldUpdate interfaces.LightClientOptimisticUpdate) bool {
if oldUpdate == nil {
return true
}
// The attested_header.beacon.slot is greater than that of all previously forwarded optimistic updates
return newUpdate.AttestedHeader().Beacon().Slot > oldUpdate.AttestedHeader().Beacon().Slot
}

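For intuition on UpdateHasSupermajority above: the comparison numActiveParticipants*3 >= maxActiveParticipants*2 is a two-thirds-or-more participation test. As a hedged illustration (assuming a sync committee size of 512, which is the mainnet SYNC_COMMITTEE_SIZE and is not stated in the diff), the smallest participation count that passes is 342, since 342*3 = 1026 >= 1024 while 341*3 = 1023 falls short:

// Illustrative sketch only: smallest sync committee participation that would
// satisfy the UpdateHasSupermajority comparison, assuming a committee size of 512.
package main

import "fmt"

func main() {
	const maxActiveParticipants = 512 // assumed mainnet SYNC_COMMITTEE_SIZE
	for n := 0; n <= maxActiveParticipants; n++ {
		if n*3 >= maxActiveParticipants*2 { // same comparison as UpdateHasSupermajority
			fmt.Println(n) // 342
			return
		}
	}
}
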
@@ -34,7 +34,7 @@ func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T)
t.Run("Altair", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Altair)

update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
require.Equal(t, l.Block.Block().Slot(), update.SignatureSlot(), "Signature slot is not equal")
@@ -46,7 +46,7 @@ func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T)
t.Run("Capella", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Capella)

update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")

@@ -59,7 +59,7 @@ func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T)
t.Run("Deneb", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Deneb)

update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")

@@ -72,7 +72,7 @@ func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T)
t.Run("Electra", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Electra)

update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")

@@ -97,7 +97,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
l := util.NewTestLightClient(t, version.Altair)

t.Run("FinalizedBlock Not Nil", func(t *testing.T) {
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")

@@ -132,7 +132,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {

t.Run("FinalizedBlock Not Nil", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Capella)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")

@@ -206,7 +206,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {

t.Run("FinalizedBlock In Previous Fork", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Capella, util.WithFinalizedCheckpointInPrevFork())
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")

@@ -240,7 +240,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
t.Run("FinalizedBlock Not Nil", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Deneb)

update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")

@@ -315,7 +315,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
t.Run("FinalizedBlock In Previous Fork", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Deneb, util.WithFinalizedCheckpointInPrevFork())

update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")

@@ -392,7 +392,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
t.Run("FinalizedBlock Not Nil", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Electra)

update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")

@@ -467,7 +467,7 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
t.Run("FinalizedBlock In Previous Fork", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Electra, util.WithFinalizedCheckpointInPrevFork())

update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")


@@ -4,7 +4,11 @@ import (
"context"
"sync"

"github.com/OffchainLabs/prysm/v6/async/event"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
@@ -16,13 +20,17 @@ type Store struct {
mu sync.RWMutex

beaconDB iface.HeadAccessDatabase
lastFinalityUpdate interfaces.LightClientFinalityUpdate
lastOptimisticUpdate interfaces.LightClientOptimisticUpdate
lastFinalityUpdate interfaces.LightClientFinalityUpdate // tracks the best finality update seen so far
lastOptimisticUpdate interfaces.LightClientOptimisticUpdate // tracks the best optimistic update seen so far
p2p p2p.Accessor
stateFeed event.SubscriberSender
}

func NewLightClientStore(db iface.HeadAccessDatabase) *Store {
func NewLightClientStore(db iface.HeadAccessDatabase, p p2p.Accessor, e event.SubscriberSender) *Store {
return &Store{
beaconDB: db,
beaconDB: db,
p2p: p,
stateFeed: e,
}
}

@@ -42,10 +50,31 @@ func (s *Store) LightClientBootstrap(ctx context.Context, blockRoot [32]byte) (i
return bootstrap, nil
}

func (s *Store) SaveLightClientBootstrap(ctx context.Context, blockRoot [32]byte, bootstrap interfaces.LightClientBootstrap) error {
func (s *Store) SaveLightClientBootstrap(ctx context.Context, blockRoot [32]byte) error {
s.mu.Lock()
defer s.mu.Unlock()

blk, err := s.beaconDB.Block(ctx, blockRoot)
if err != nil {
return errors.Wrapf(err, "failed to fetch block for root %x", blockRoot)
}
if blk == nil {
return errors.Errorf("failed to fetch block for root %x", blockRoot)
}

state, err := s.beaconDB.State(ctx, blockRoot)
if err != nil {
return errors.Wrapf(err, "failed to fetch state for block root %x", blockRoot)
}
if state == nil {
return errors.Errorf("failed to fetch state for block root %x", blockRoot)
}

bootstrap, err := NewLightClientBootstrapFromBeaconState(ctx, state.Slot(), state, blk)
if err != nil {
return errors.Wrapf(err, "failed to create light client bootstrap for block root %x", blockRoot)
}

// Save the light client bootstrap to the database
if err := s.beaconDB.SaveLightClientBootstrap(ctx, blockRoot[:], bootstrap); err != nil {
return err
@@ -122,10 +151,23 @@ func (s *Store) SaveLightClientUpdate(ctx context.Context, period uint64, update
return nil
}

func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate) {
func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
s.mu.Lock()
defer s.mu.Unlock()

if broadcast && IsFinalityUpdateValidForBroadcast(update, s.lastFinalityUpdate) {
if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
log.WithError(err).Error("Could not broadcast light client finality update")
}
}

s.lastFinalityUpdate = update
log.Debug("Saved new light client finality update")

s.stateFeed.Send(&feed.Event{
Type: statefeed.LightClientFinalityUpdate,
Data: update,
})
}

func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
@@ -134,10 +176,23 @@ func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
return s.lastFinalityUpdate
}

func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate) {
func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
s.mu.Lock()
defer s.mu.Unlock()

if broadcast {
if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
log.WithError(err).Error("Could not broadcast light client optimistic update")
}
}

s.lastOptimisticUpdate = update
log.Debug("Saved new light client optimistic update")

s.stateFeed.Send(&feed.Event{
Type: statefeed.LightClientOptimisticUpdate,
Data: update,
})
}

func (s *Store) LastOptimisticUpdate() interfaces.LightClientOptimisticUpdate {

@@ -3,7 +3,10 @@ package light_client_test
import (
"testing"

"github.com/OffchainLabs/prysm/v6/async/event"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -21,22 +24,22 @@ func TestLightClientStore(t *testing.T) {
params.OverrideBeaconConfig(cfg)

// Initialize the light client store
lcStore := &lightClient.Store{}
lcStore := lightClient.NewLightClientStore(testDB.SetupDB(t), &p2pTesting.FakeP2P{}, new(event.Feed))

// Create test light client updates for Capella and Deneb
lCapella := util.NewTestLightClient(t, version.Capella)
opUpdateCapella, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lCapella.Ctx, lCapella.State.Slot(), lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock)
opUpdateCapella, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lCapella.Ctx, lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, opUpdateCapella, "OptimisticUpdateCapella is nil")
finUpdateCapella, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(lCapella.Ctx, lCapella.State.Slot(), lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock, lCapella.FinalizedBlock)
finUpdateCapella, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(lCapella.Ctx, lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock, lCapella.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, finUpdateCapella, "FinalityUpdateCapella is nil")

lDeneb := util.NewTestLightClient(t, version.Deneb)
opUpdateDeneb, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State.Slot(), lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock)
opUpdateDeneb, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock)
require.NoError(t, err)
require.NotNil(t, opUpdateDeneb, "OptimisticUpdateDeneb is nil")
finUpdateDeneb, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State.Slot(), lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock, lDeneb.FinalizedBlock)
finUpdateDeneb, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock, lDeneb.FinalizedBlock)
require.NoError(t, err)
require.NotNil(t, finUpdateDeneb, "FinalityUpdateDeneb is nil")

@@ -45,24 +48,118 @@ func TestLightClientStore(t *testing.T) {
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")

// Set and get finality with Capella update. Optimistic update should be nil
lcStore.SetLastFinalityUpdate(finUpdateCapella)
lcStore.SetLastFinalityUpdate(finUpdateCapella, false)
require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")

// Set and get optimistic with Capella update. Finality update should be Capella
lcStore.SetLastOptimisticUpdate(opUpdateCapella)
lcStore.SetLastOptimisticUpdate(opUpdateCapella, false)
require.Equal(t, opUpdateCapella, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")

// Set and get finality and optimistic with Deneb update
lcStore.SetLastFinalityUpdate(finUpdateDeneb)
lcStore.SetLastOptimisticUpdate(opUpdateDeneb)
lcStore.SetLastFinalityUpdate(finUpdateDeneb, false)
lcStore.SetLastOptimisticUpdate(opUpdateDeneb, false)
require.Equal(t, finUpdateDeneb, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
require.Equal(t, opUpdateDeneb, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")

// Set and get finality and optimistic with nil update
lcStore.SetLastFinalityUpdate(nil)
lcStore.SetLastOptimisticUpdate(nil)
require.IsNil(t, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should be nil")
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")
}

func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
p2p := p2pTesting.NewTestP2P(t)
lcStore := lightClient.NewLightClientStore(testDB.SetupDB(t), p2p, new(event.Feed))

// update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
l0 := util.NewTestLightClient(t, version.Altair)
update0, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update0, lcStore.LastFinalityUpdate()), "update0 should be better than nil")
// update0 should be valid for broadcast - meaning it should be broadcasted
require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update0, lcStore.LastFinalityUpdate()), "update0 should be valid for broadcast")

lcStore.SetLastFinalityUpdate(update0, true)
require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
p2p.BroadcastCalled.Store(false) // Reset for next test

// update 1 with same finality slot, increased attested slot, and no supermajority - should save but not broadcast
l1 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1))
update1, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l1.Ctx, l1.State, l1.Block, l1.AttestedState, l1.AttestedBlock, l1.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update1, update0), "update1 should be better than update0")
// update1 should not be valid for broadcast - meaning it should not be broadcasted
require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update1, lcStore.LastFinalityUpdate()), "update1 should not be valid for broadcast")

lcStore.SetLastFinalityUpdate(update1, true)
require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
p2p.BroadcastCalled.Store(false) // Reset for next test

// update 2 with same finality slot, increased attested slot, and supermajority - should save and broadcast
l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(2), util.WithSupermajority())
update2, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update2, update1), "update2 should be better than update1")
// update2 should be valid for broadcast - meaning it should be broadcasted
require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update2, lcStore.LastFinalityUpdate()), "update2 should be valid for broadcast")

lcStore.SetLastFinalityUpdate(update2, true)
require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
p2p.BroadcastCalled.Store(false) // Reset for next test

// update 3 with same finality slot, increased attested slot, and supermajority - should save but not broadcast
l3 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(3), util.WithSupermajority())
update3, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l3.Ctx, l3.State, l3.Block, l3.AttestedState, l3.AttestedBlock, l3.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update3, update2), "update3 should be better than update2")
// update3 should not be valid for broadcast - meaning it should not be broadcasted
require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update3, lcStore.LastFinalityUpdate()), "update3 should not be valid for broadcast")

lcStore.SetLastFinalityUpdate(update3, true)
require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been when previous was already broadcast")

// update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
l4 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(1), util.WithSupermajority())
update4, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l4.Ctx, l4.State, l4.Block, l4.AttestedState, l4.AttestedBlock, l4.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update4, update3), "update4 should be better than update3")
// update4 should be valid for broadcast - meaning it should be broadcasted
require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update4, lcStore.LastFinalityUpdate()), "update4 should be valid for broadcast")

lcStore.SetLastFinalityUpdate(update4, true)
require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
p2p.BroadcastCalled.Store(false) // Reset for next test

// update 5 with the same new finality slot, increased attested slot, and supermajority - should save but not broadcast
l5 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(2), util.WithSupermajority())
update5, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l5.Ctx, l5.State, l5.Block, l5.AttestedState, l5.AttestedBlock, l5.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update5, update4), "update5 should be better than update4")
// update5 should not be valid for broadcast - meaning it should not be broadcasted
require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update5, lcStore.LastFinalityUpdate()), "update5 should not be valid for broadcast")

lcStore.SetLastFinalityUpdate(update5, true)
require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")

// update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
l6 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(3))
update6, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l6.Ctx, l6.State, l6.Block, l6.AttestedState, l6.AttestedBlock, l6.FinalizedBlock)
require.NoError(t, err, "Failed to create light client finality update")

require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update6, update5), "update6 should be better than update5")
// update6 should not be valid for broadcast - meaning it should not be broadcasted
require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update6, lcStore.LastFinalityUpdate()), "update6 should not be valid for broadcast")

lcStore.SetLastFinalityUpdate(update6, true)
require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
}

@@ -16,7 +16,6 @@ go_library(
deps = [
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/state:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -53,7 +52,6 @@ go_test(
":go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

@@ -31,27 +31,20 @@ var (
maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
)

type CustodyType int

const (
Target CustodyType = iota
Actual
)

// CustodyGroups computes the custody groups the node should participate in for custody.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#get_custody_groups
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#get_custody_groups
func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error) {
numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups

// Check if the custody group count is larger than the number of custody groups.
if custodyGroupCount > numberOfCustodyGroup {
if custodyGroupCount > numberOfCustodyGroups {
return nil, ErrCustodyGroupCountTooLarge
}

// Shortcut if all custody groups are needed.
if custodyGroupCount == numberOfCustodyGroup {
custodyGroups := make([]uint64, 0, numberOfCustodyGroup)
for i := range numberOfCustodyGroup {
if custodyGroupCount == numberOfCustodyGroups {
custodyGroups := make([]uint64, 0, numberOfCustodyGroups)
for i := range numberOfCustodyGroups {
custodyGroups = append(custodyGroups, i)
}

@@ -73,7 +66,7 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error)
hashedCurrentId := hash.Hash(currentIdBytesLittleEndian)

// Get the custody group ID.
custodyGroup := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % numberOfCustodyGroup
custodyGroup := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % numberOfCustodyGroups

// Add the custody group to the map.
if !custodyGroupsMap[custodyGroup] {
@@ -88,9 +81,6 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error)
// Increment the current ID.
currentId.Add(currentId, one)
}

// Sort the custody groups.
slices.Sort[[]uint64](custodyGroups)
}

// Final check.
@@ -98,26 +88,29 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error)
return nil, errWrongComputedCustodyGroupCount
}

// Sort the custody groups.
slices.Sort[[]uint64](custodyGroups)

return custodyGroups, nil
}

// ComputeColumnsForCustodyGroup computes the columns for a given custody group.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#compute_columns_for_custody_group
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#compute_columns_for_custody_group
func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
beaconConfig := params.BeaconConfig()
numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups
numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups

if custodyGroup >= numberOfCustodyGroup {
if custodyGroup >= numberOfCustodyGroups {
return nil, ErrCustodyGroupTooLarge
}

numberOfColumns := beaconConfig.NumberOfColumns

columnsPerGroup := numberOfColumns / numberOfCustodyGroup
columnsPerGroup := numberOfColumns / numberOfCustodyGroups

columns := make([]uint64, 0, columnsPerGroup)
for i := range columnsPerGroup {
column := numberOfCustodyGroup*i + custodyGroup
column := numberOfCustodyGroups*i + custodyGroup
columns = append(columns, column)
}

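For intuition on the column mapping above, here is a small sketch assuming the Fulu-style preset values NUMBER_OF_CUSTODY_GROUPS = 128 and NUMBER_OF_COLUMNS = 128 (these constants are assumptions for illustration and are not taken from the diff): with columnsPerGroup = 128/128 = 1, custody group g custodies exactly column g, and more generally group g would get columns g, g+128, g+256, ... if the column count were a larger multiple of the group count.

// Illustrative sketch only: column assignment for a single custody group,
// assuming NUMBER_OF_CUSTODY_GROUPS = 128 and NUMBER_OF_COLUMNS = 128.
package main

import "fmt"

func main() {
	const numberOfCustodyGroups, numberOfColumns uint64 = 128, 128
	custodyGroup := uint64(7)

	columnsPerGroup := numberOfColumns / numberOfCustodyGroups // 1
	columns := make([]uint64, 0, columnsPerGroup)
	for i := uint64(0); i < columnsPerGroup; i++ {
		columns = append(columns, numberOfCustodyGroups*i+custodyGroup)
	}
	fmt.Println(columns) // [7]
}
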
@@ -127,7 +120,7 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
|
||||
// DataColumnSidecars computes the data column sidecars from the signed block, cells and cell proofs.
|
||||
// The returned value contains pointers to function parameters.
|
||||
// (If the caller alterates `cellsAndProofs` afterwards, the returned value will be modified as well.)
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/das-core.md#get_data_column_sidecars
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block
|
||||
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsAndProofs []kzg.CellsAndProofs) ([]*ethpb.DataColumnSidecar, error) {
|
||||
if signedBlock == nil || signedBlock.IsNil() || len(cellsAndProofs) == 0 {
|
||||
return nil, nil
|
||||
@@ -151,7 +144,7 @@ func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsA
|
||||
|
||||
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "merkle proof ZKG commitments")
|
||||
return nil, errors.Wrap(err, "merkle proof KZG commitments")
|
||||
}
|
||||
|
||||
dataColumnSidecars, err := dataColumnsSidecars(signedBlockHeader, blobKzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
|
||||
@@ -176,19 +169,6 @@ func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
|
||||
return columnIndex % numberOfCustodyGroups, nil
|
||||
}
|
||||
|
||||
// CustodyGroupSamplingSize returns the number of custody groups the node should sample from.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#custody-sampling
|
||||
func (custodyInfo *CustodyInfo) CustodyGroupSamplingSize(ct CustodyType) uint64 {
|
||||
custodyGroupCount := custodyInfo.TargetGroupCount.Get()
|
||||
|
||||
if ct == Actual {
|
||||
custodyGroupCount = custodyInfo.ActualGroupCount()
|
||||
}
|
||||
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
return max(samplesPerSlot, custodyGroupCount)
|
||||
}
|
||||
|
||||
// CustodyColumns computes the custody columns from the custody groups.
|
||||
func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {
|
||||
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
|
||||
@@ -219,6 +199,7 @@ func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {
|
||||
// the KZG commitment includion proofs and cells and cell proofs.
|
||||
// The returned value contains pointers to function parameters.
|
||||
// (If the caller alterates input parameters afterwards, the returned value will be modified as well.)
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars
|
||||
func dataColumnsSidecars(
|
||||
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
|
||||
blobKzgCommitments [][]byte,
|
||||
|
||||
@@ -17,8 +17,8 @@ func TestCustodyGroups(t *testing.T) {
|
||||
// --------------------------------------------
|
||||
// The happy path is unit tested in spec tests.
|
||||
// --------------------------------------------
|
||||
numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups
|
||||
_, err := peerdas.CustodyGroups(enode.ID{}, numberOfCustodyGroup+1)
|
||||
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
|
||||
_, err := peerdas.CustodyGroups(enode.ID{}, numberOfCustodyGroups+1)
|
||||
require.ErrorIs(t, err, peerdas.ErrCustodyGroupCountTooLarge)
|
||||
}
|
||||
|
||||
@@ -26,8 +26,8 @@ func TestComputeColumnsForCustodyGroup(t *testing.T) {
|
||||
// --------------------------------------------
|
||||
// The happy path is unit tested in spec tests.
|
||||
// --------------------------------------------
|
||||
numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups
|
||||
_, err := peerdas.ComputeColumnsForCustodyGroup(numberOfCustodyGroup)
|
||||
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
|
||||
_, err := peerdas.ComputeColumnsForCustodyGroup(numberOfCustodyGroups)
|
||||
require.ErrorIs(t, err, peerdas.ErrCustodyGroupTooLarge)
|
||||
}
|
||||
|
||||
@@ -104,62 +104,6 @@ func TestComputeCustodyGroupForColumn(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestCustodyGroupSamplingSize(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
custodyType peerdas.CustodyType
|
||||
validatorsCustodyRequirement uint64
|
||||
toAdvertiseCustodyGroupCount uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "target, lower than samples per slot",
|
||||
custodyType: peerdas.Target,
|
||||
validatorsCustodyRequirement: 2,
|
||||
expected: 8,
|
||||
},
|
||||
{
|
||||
name: "target, higher than samples per slot",
|
||||
custodyType: peerdas.Target,
|
||||
validatorsCustodyRequirement: 100,
|
||||
expected: 100,
|
||||
},
|
||||
{
|
||||
name: "actual, lower than samples per slot",
|
||||
custodyType: peerdas.Actual,
|
||||
validatorsCustodyRequirement: 3,
|
||||
toAdvertiseCustodyGroupCount: 4,
|
||||
expected: 8,
|
||||
},
|
||||
{
|
||||
name: "actual, higher than samples per slot",
|
||||
custodyType: peerdas.Actual,
|
||||
validatorsCustodyRequirement: 100,
|
||||
toAdvertiseCustodyGroupCount: 101,
|
||||
expected: 100,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create a custody info.
|
||||
custodyInfo := peerdas.CustodyInfo{}
|
||||
|
||||
// Set the validators custody requirement for target custody group count.
|
||||
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)
|
||||
|
||||
// Set the to advertise custody group count.
|
||||
custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount)
|
||||
|
||||
// Compute the custody group sampling size.
|
||||
actual := custodyInfo.CustodyGroupSamplingSize(tc.custodyType)
|
||||
|
||||
// Check the result.
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustodyColumns(t *testing.T) {
|
||||
t.Run("group too large", func(t *testing.T) {
|
||||
_, err := peerdas.CustodyColumns([]uint64{1_000_000})
|
||||
|
||||
@@ -4,45 +4,17 @@ import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// info contains all useful peerDAS related information regarding a peer.
|
||||
type (
|
||||
info struct {
|
||||
CustodyGroups map[uint64]bool
|
||||
CustodyColumns map[uint64]bool
|
||||
DataColumnsSubnets map[uint64]bool
|
||||
}
|
||||
|
||||
targetCustodyGroupCount struct {
|
||||
mut sync.RWMutex
|
||||
validatorsCustodyRequirement uint64
|
||||
}
|
||||
|
||||
toAdverstiseCustodyGroupCount struct {
|
||||
mut sync.RWMutex
|
||||
value uint64
|
||||
}
|
||||
|
||||
CustodyInfo struct {
|
||||
// Mut is a mutex to be used by caller to ensure neither
|
||||
// TargetCustodyGroupCount nor ToAdvertiseCustodyGroupCount are being modified.
|
||||
// (This is not necessary to use this mutex for any data protection.)
|
||||
Mut sync.RWMutex
|
||||
|
||||
// TargetGroupCount represents the target number of custody groups we should custody
|
||||
// regarding the validators we are tracking.
|
||||
TargetGroupCount targetCustodyGroupCount
|
||||
|
||||
// ToAdvertiseGroupCount represents the number of custody groups to advertise to the network.
|
||||
ToAdvertiseGroupCount toAdverstiseCustodyGroupCount
|
||||
}
|
||||
)
|
||||
type info struct {
|
||||
CustodyGroups map[uint64]bool
|
||||
CustodyColumns map[uint64]bool
|
||||
DataColumnsSubnets map[uint64]bool
|
||||
}
|
||||
|
||||
const (
|
||||
nodeInfoCacheSize = 200
|
||||
@@ -109,61 +81,6 @@ func Info(nodeID enode.ID, custodyGroupCount uint64) (*info, bool, error) {
|
||||
return result, false, nil
|
||||
}
|
||||
|
||||
// ActualGroupCount returns the actual custody group count.
|
||||
func (custodyInfo *CustodyInfo) ActualGroupCount() uint64 {
|
||||
return min(custodyInfo.TargetGroupCount.Get(), custodyInfo.ToAdvertiseGroupCount.Get())
|
||||
}
|
||||
|
||||
// CustodyGroupCount returns the number of groups we should participate in for custody.
|
||||
func (tcgc *targetCustodyGroupCount) Get() uint64 {
|
||||
// If subscribed to all subnets, return the number of custody groups.
|
||||
if flags.Get().SubscribeAllDataSubnets {
|
||||
return params.BeaconConfig().NumberOfCustodyGroups
|
||||
}
|
||||
|
||||
tcgc.mut.RLock()
|
||||
defer tcgc.mut.RUnlock()
|
||||
|
||||
// If no validators are tracked, return the default custody requirement.
|
||||
if tcgc.validatorsCustodyRequirement == 0 {
|
||||
return params.BeaconConfig().CustodyRequirement
|
||||
}
|
||||
|
||||
// Return the validators custody requirement.
|
||||
return tcgc.validatorsCustodyRequirement
|
||||
}
|
||||
|
||||
// setValidatorsCustodyRequirement sets the validators custody requirement.
|
||||
func (tcgc *targetCustodyGroupCount) SetValidatorsCustodyRequirement(value uint64) {
|
||||
tcgc.mut.Lock()
|
||||
defer tcgc.mut.Unlock()
|
||||
|
||||
tcgc.validatorsCustodyRequirement = value
|
||||
}
|
||||
|
||||
// Get returns the to advertise custody group count.
|
||||
func (tacgc *toAdverstiseCustodyGroupCount) Get() uint64 {
|
||||
// If subscribed to all subnets, return the number of custody groups.
|
||||
if flags.Get().SubscribeAllDataSubnets {
|
||||
return params.BeaconConfig().NumberOfCustodyGroups
|
||||
}
|
||||
|
||||
custodyRequirement := params.BeaconConfig().CustodyRequirement
|
||||
|
||||
tacgc.mut.RLock()
|
||||
defer tacgc.mut.RUnlock()
|
||||
|
||||
return max(tacgc.value, custodyRequirement)
|
||||
}
|
||||
|
||||
// Set sets the to advertise custody group count.
|
||||
func (tacgc *toAdverstiseCustodyGroupCount) Set(value uint64) {
|
||||
tacgc.mut.Lock()
|
||||
defer tacgc.mut.Unlock()
|
||||
|
||||
tacgc.value = value
|
||||
}
|
||||
|
||||
// createInfoCacheIfNeeded creates a new cache if it doesn't exist.
|
||||
func createInfoCacheIfNeeded() error {
|
||||
nodeInfoCacheMut.Lock()
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
@@ -26,108 +25,3 @@ func TestInfo(t *testing.T) {
|
||||
require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTargetCustodyGroupCount(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
subscribeToAllColumns bool
|
||||
validatorsCustodyRequirement uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "subscribed to all data subnets",
|
||||
subscribeToAllColumns: true,
|
||||
validatorsCustodyRequirement: 100,
|
||||
expected: 128,
|
||||
},
|
||||
{
|
||||
name: "no validators attached",
|
||||
subscribeToAllColumns: false,
|
||||
validatorsCustodyRequirement: 0,
|
||||
expected: 4,
|
||||
},
|
||||
{
|
||||
name: "some validators attached",
|
||||
subscribeToAllColumns: false,
|
||||
validatorsCustodyRequirement: 100,
|
||||
expected: 100,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Subscribe to all subnets if needed.
|
||||
if tc.subscribeToAllColumns {
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeAllDataSubnets = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
}
|
||||
|
||||
var custodyInfo peerdas.CustodyInfo
|
||||
|
||||
// Set the validators custody requirement.
|
||||
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)
|
||||
|
||||
// Get the target custody group count.
|
||||
actual := custodyInfo.TargetGroupCount.Get()
|
||||
|
||||
// Compare the expected and actual values.
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestToAdvertiseCustodyGroupCount(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
subscribeToAllColumns bool
|
||||
toAdvertiseCustodyGroupCount uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "subscribed to all subnets",
|
||||
subscribeToAllColumns: true,
|
||||
toAdvertiseCustodyGroupCount: 100,
|
||||
expected: 128,
|
||||
},
|
||||
{
|
||||
name: "higher than custody requirement",
|
||||
subscribeToAllColumns: false,
|
||||
toAdvertiseCustodyGroupCount: 100,
|
||||
expected: 100,
|
||||
},
|
||||
{
|
||||
name: "lower than custody requirement",
|
||||
subscribeToAllColumns: false,
|
||||
toAdvertiseCustodyGroupCount: 1,
|
||||
expected: 4,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Subscribe to all subnets if needed.
|
||||
if tc.subscribeToAllColumns {
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeAllDataSubnets = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
}
|
||||
|
||||
// Create a custody info.
|
||||
var custodyInfo peerdas.CustodyInfo
|
||||
|
||||
// Set the to advertise custody group count.
|
||||
custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount)
|
||||
|
||||
// Get the to advertise custody group count.
|
||||
actual := custodyInfo.ToAdvertiseGroupCount.Get()
|
||||
|
||||
// Compare the expected and actual values.
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,10 +10,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
CustodyGroupCountEnrKey = "cgc"
|
||||
kzgPosition = 11 // The index of the KZG commitment list in the Body
|
||||
)
|
||||
const kzgPosition = 11 // The index of the KZG commitment list in the Body
|
||||
|
||||
var (
|
||||
ErrIndexTooLarge = errors.New("column index is larger than the specified columns count")
|
||||
@@ -27,13 +24,13 @@ var (
|
||||
ErrCannotLoadCustodyGroupCount = errors.New("cannot load the custody group count from peer")
|
||||
)
|
||||
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#custody-group-count
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#custody-group-count
|
||||
type Cgc uint64
|
||||
|
||||
func (Cgc) ENRKey() string { return CustodyGroupCountEnrKey }
|
||||
func (Cgc) ENRKey() string { return params.BeaconNetworkConfig().CustodyGroupCountKey }
|
||||
|
||||
// VerifyDataColumnSidecar verifies if the data column sidecar is valid.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar
|
||||
func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
|
||||
// The sidecar index must be within the valid range.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
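For reference, the spec's verify_data_column_sidecar reduces to a few structural checks before any cryptography is done: index in range, at least one commitment, and equal lengths for cells, commitments and proofs. A rough Go paraphrase (the parameter names and error wording here are illustrative, not Prysm's exact code):

package sketch

import "errors"

// verifyDataColumnSidecarSketch paraphrases the structural checks of the
// consensus-spec verify_data_column_sidecar function.
func verifyDataColumnSidecarSketch(index uint64, cells, commitments, proofs [][]byte, numberOfColumns uint64) error {
	// The sidecar index must be within the valid range.
	if index >= numberOfColumns {
		return errors.New("column index is larger than the specified columns count")
	}
	// A sidecar for zero blobs is invalid.
	if len(commitments) == 0 {
		return errors.New("sidecar carries no KZG commitments")
	}
	// The column must hold one cell and one proof per commitment.
	if len(cells) != len(commitments) || len(proofs) != len(commitments) {
		return errors.New("mismatched cells, commitments and proofs lengths")
	}
	return nil
}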
@@ -60,7 +57,7 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
|
||||
// while we are verifying all the KZG proofs from multiple sidecars in a batch.
|
||||
// This is done to improve performance since the internal KZG library is way more
|
||||
// efficient when verifying in batch.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
|
||||
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
|
||||
// Compute the total count.
|
||||
count := 0
|
||||
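The batching described above amounts to flattening every (commitment, column index, cell, proof) tuple from all sidecars into one slice and handing it to the KZG library in a single call. A sketch of the flattening step, with the actual KZG entry point abstracted behind a hypothetical verifyBatch callback:

package sketch

// cellRef carries everything the batch verifier needs for one cell.
type cellRef struct {
	commitment  []byte // commitment of the blob the cell belongs to
	columnIndex uint64 // index of the column, and of the cell within the extended blob
	cell        []byte
	proof       []byte
}

// sidecarView is a hypothetical, minimal view of a data column sidecar.
type sidecarView struct {
	index       uint64
	cells       [][]byte
	commitments [][]byte
	proofs      [][]byte
}

// flattenForBatchVerification gathers every cell of every sidecar into one
// slice so all KZG proofs can be checked in a single batched call, which is
// much cheaper than verifying sidecar by sidecar.
func flattenForBatchVerification(sidecars []sidecarView, verifyBatch func([]cellRef) error) error {
	count := 0
	for _, sc := range sidecars {
		count += len(sc.cells)
	}
	refs := make([]cellRef, 0, count)
	for _, sc := range sidecars {
		for i, cell := range sc.cells {
			refs = append(refs, cellRef{
				commitment:  sc.commitments[i],
				columnIndex: sc.index,
				cell:        cell,
				proof:       sc.proofs[i],
			})
		}
	}
	return verifyBatch(refs)
}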
@@ -96,7 +93,7 @@ func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
|
||||
}
|
||||
|
||||
// VerifyDataColumnSidecarInclusionProof verifies if the given KZG commitments are included in the given beacon block.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
|
||||
func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
|
||||
if sidecar.SignedBlockHeader == nil || sidecar.SignedBlockHeader.Header == nil {
|
||||
return ErrNilBlockHeader
|
||||
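The inclusion proof itself is a plain SSZ Merkle-branch check: the hash tree root of the sidecar's KZG commitments list, folded up through the proof, must land on the header's body root, with kzgPosition (11) as the leaf index. A generic sketch of that branch check; the depth of 4 mentioned in the usage comment is the assumed Fulu value, and the hashing below is standard SSZ folding, not Prysm's implementation:

package sketch

import "crypto/sha256"

// isValidMerkleBranch folds a leaf up a Merkle branch and compares the result
// with the expected root, as in the SSZ spec's is_valid_merkle_branch.
func isValidMerkleBranch(leaf [32]byte, branch [][32]byte, depth, index uint64, root [32]byte) bool {
	if uint64(len(branch)) < depth {
		return false
	}
	value := leaf
	for i := uint64(0); i < depth; i++ {
		var buf [64]byte
		if (index>>i)&1 == 1 {
			copy(buf[:32], branch[i][:])
			copy(buf[32:], value[:])
		} else {
			copy(buf[:32], value[:])
			copy(buf[32:], branch[i][:])
		}
		value = sha256.Sum256(buf[:])
	}
	return value == root
}

// Usage (assumed Fulu parameters): depth 4 and leaf index kzgPosition (11),
// with leaf = hash_tree_root(sidecar.KzgCommitments) and root = header.BodyRoot.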
@@ -128,7 +125,7 @@ func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
|
||||
}
|
||||
|
||||
// ComputeSubnetForDataColumnSidecar computes the subnet for a data column sidecar.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
|
||||
func ComputeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
return columnIndex % dataColumnSidecarSubnetCount
|
||||
|
||||
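With DATA_COLUMN_SIDECAR_SUBNET_COUNT assumed to be 128 (matching the number of columns), the modulo gives each column its own subnet; a smaller subnet count would make several columns share one. A one-line usage sketch:

package main

import "fmt"

const dataColumnSidecarSubnetCount = 128 // assumed mainnet Fulu value

func computeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {
	return columnIndex % dataColumnSidecarSubnetCount
}

func main() {
	fmt.Println(computeSubnetForDataColumnSidecar(5))   // 5
	fmt.Println(computeSubnetForDataColumnSidecar(127)) // 127
}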
@@ -123,7 +123,7 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
|
||||
|
||||
// ConstructDataColumnSidecars constructs data column sidecars from a block, (un-extended) blobs and
|
||||
// cell proofs corresponding to the extended blobs. The main purpose of this function is to
|
||||
// construct data columns sidecars from data obtained from the execution client via:
|
||||
// construct data column sidecars from data obtained from the execution client via:
|
||||
// - `engine_getBlobsV2` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getblobsv2, or
|
||||
// - `engine_getPayloadV5` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getpayloadv5
|
||||
// Note: In this function, to stick with the `BlobsBundleV2` format returned by the execution client in `engine_getPayloadV5`,
|
||||
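As the comment notes, BlobsBundleV2 returns the cell proofs as one flat, blob-major list of blobCount * CELLS_PER_EXT_BLOB entries, while a column sidecar needs cell j and proof j of every blob. A sketch of that regrouping, with a hypothetical computeCells helper standing in for the KZG blob-extension step:

package sketch

import "fmt"

const cellsPerExtBlob = 128 // CELLS_PER_EXT_BLOB, assumed mainnet Fulu value

// columnSlice holds the per-blob cells and proofs belonging to one column index.
type columnSlice struct {
	index  uint64
	cells  [][]byte
	proofs [][]byte
}

// regroupByColumn turns blob-major cells/proofs (the BlobsBundleV2 layout) into
// column-major slices, one per future data column sidecar. computeCells is a
// hypothetical stand-in for extending a blob into its cells.
func regroupByColumn(blobs [][]byte, flatProofs [][]byte, computeCells func(blob []byte) [][]byte) ([]columnSlice, error) {
	if len(flatProofs) != len(blobs)*cellsPerExtBlob {
		return nil, fmt.Errorf("expected %d cell proofs, got %d", len(blobs)*cellsPerExtBlob, len(flatProofs))
	}
	columns := make([]columnSlice, cellsPerExtBlob)
	for j := range columns {
		columns[j].index = uint64(j)
	}
	for b, blob := range blobs {
		cells := computeCells(blob) // one cell per column for this blob
		for j := 0; j < cellsPerExtBlob; j++ {
			columns[j].cells = append(columns[j].cells, cells[j])
			columns[j].proofs = append(columns[j].proofs, flatProofs[b*cellsPerExtBlob+j])
		}
	}
	return columns, nil
}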
@@ -222,8 +222,8 @@ func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.
|
||||
// Check if the data column sidecars are aligned with the block.
|
||||
dataColumnSidecars := make([]blocks.RODataColumn, 0, len(verifiedDataColumnSidecars))
|
||||
for _, verifiedDataColumnSidecar := range verifiedDataColumnSidecars {
|
||||
dataColumnSicecar := verifiedDataColumnSidecar.RODataColumn
|
||||
dataColumnSidecars = append(dataColumnSidecars, dataColumnSicecar)
|
||||
dataColumnSidecar := verifiedDataColumnSidecar.RODataColumn
|
||||
dataColumnSidecars = append(dataColumnSidecars, dataColumnSidecar)
|
||||
}
|
||||
|
||||
if err := DataColumnsAlignWithBlock(block, dataColumnSidecars); err != nil {
|
||||
@@ -241,7 +241,7 @@ func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.
|
||||
return blobSidecars, nil
|
||||
}
|
||||
|
||||
// We need to reconstruct the blobs.
|
||||
// We need to reconstruct the data column sidecars.
|
||||
reconstructedDataColumnSidecars, err := ReconstructDataColumnSidecars(verifiedDataColumnSidecars)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "reconstruct data column sidecars")
|
||||
|
||||
@@ -196,6 +196,26 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
|
||||
})
|
||||
|
||||
t.Run("consecutive duplicates", func(t *testing.T) {
|
||||
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
|
||||
|
||||
// [0, 1, 1, 3, 4, ...]
|
||||
verifiedRoSidecars[2] = verifiedRoSidecars[1]
|
||||
|
||||
_, err := peerdas.ReconstructBlobs(emptyBlock, verifiedRoSidecars, []int{0})
|
||||
require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
|
||||
})
|
||||
|
||||
t.Run("non-consecutive duplicates", func(t *testing.T) {
|
||||
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
|
||||
|
||||
// [0, 1, 2, 1, 4, ...]
|
||||
verifiedRoSidecars[3] = verifiedRoSidecars[1]
|
||||
|
||||
_, err := peerdas.ReconstructBlobs(emptyBlock, verifiedRoSidecars, []int{0})
|
||||
require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
|
||||
})
|
||||
|
||||
t.Run("not enough columns", func(t *testing.T) {
|
||||
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
)
|
||||
|
||||
// ValidatorsCustodyRequirement returns the number of custody groups required, based on the validator indices attached to the beacon node.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/validator.md#validator-custody
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#validator-custody
|
||||
func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
|
||||
totalNodeBalance := uint64(0)
|
||||
for index := range validatorsIndex {
|
||||
@@ -21,10 +21,10 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
|
||||
}
|
||||
|
||||
beaconConfig := params.BeaconConfig()
|
||||
numberOfCustodyGroup := beaconConfig.NumberOfCustodyGroups
|
||||
numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
|
||||
validatorCustodyRequirement := beaconConfig.ValidatorCustodyRequirement
|
||||
balancePerAdditionalCustodyGroup := beaconConfig.BalancePerAdditionalCustodyGroup
|
||||
|
||||
count := totalNodeBalance / balancePerAdditionalCustodyGroup
|
||||
return min(max(count, validatorCustodyRequirement), numberOfCustodyGroup), nil
|
||||
return min(max(count, validatorCustodyRequirement), numberOfCustodyGroups), nil
|
||||
}
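The requirement works out to clamping totalNodeBalance / BALANCE_PER_ADDITIONAL_CUSTODY_GROUP between VALIDATOR_CUSTODY_REQUIREMENT and NUMBER_OF_CUSTODY_GROUPS. A standalone arithmetic sketch, using what are assumed here to be the mainnet Fulu values (8, 32 ETH and 128 respectively):

package main

import "fmt"

// Assumed mainnet Fulu values, for illustration only.
const (
	gwei                             = uint64(1_000_000_000)
	validatorCustodyRequirement      = uint64(8)
	balancePerAdditionalCustodyGroup = 32 * gwei // 32 ETH
	numberOfCustodyGroups            = uint64(128)
)

// validatorsCustodyRequirement mirrors the clamp above:
// min(max(balance/32 ETH, VALIDATOR_CUSTODY_REQUIREMENT), NUMBER_OF_CUSTODY_GROUPS).
func validatorsCustodyRequirement(totalNodeBalanceGwei uint64) uint64 {
	count := totalNodeBalanceGwei / balancePerAdditionalCustodyGroup
	return min(max(count, validatorCustodyRequirement), numberOfCustodyGroups)
}

func main() {
	fmt.Println(validatorsCustodyRequirement(32 * gwei))        // 8   (floored at the requirement)
	fmt.Println(validatorsCustodyRequirement(100 * 32 * gwei))  // 100
	fmt.Println(validatorsCustodyRequirement(5000 * 32 * gwei)) // 128 (capped at all groups)
}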
|
||||
|
||||
@@ -39,10 +39,8 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
|
||||
@@ -22,8 +22,8 @@ type LazilyPersistentStoreColumn struct {
|
||||
store *filesystem.DataColumnStorage
|
||||
nodeID enode.ID
|
||||
cache *dataColumnCache
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
newDataColumnsVerifier verification.NewDataColumnsVerifier
|
||||
custodyGroupCount uint64
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &LazilyPersistentStoreColumn{}
|
||||
@@ -38,13 +38,18 @@ type DataColumnsVerifier interface {
|
||||
|
||||
// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
|
||||
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
|
||||
func NewLazilyPersistentStoreColumn(store *filesystem.DataColumnStorage, nodeID enode.ID, newDataColumnsVerifier verification.NewDataColumnsVerifier, custodyInfo *peerdas.CustodyInfo) *LazilyPersistentStoreColumn {
|
||||
func NewLazilyPersistentStoreColumn(
|
||||
store *filesystem.DataColumnStorage,
|
||||
nodeID enode.ID,
|
||||
newDataColumnsVerifier verification.NewDataColumnsVerifier,
|
||||
custodyGroupCount uint64,
|
||||
) *LazilyPersistentStoreColumn {
|
||||
return &LazilyPersistentStoreColumn{
|
||||
store: store,
|
||||
nodeID: nodeID,
|
||||
cache: newDataColumnCache(),
|
||||
custodyInfo: custodyInfo,
|
||||
newDataColumnsVerifier: newDataColumnsVerifier,
|
||||
custodyGroupCount: custodyGroupCount,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -126,11 +131,11 @@ func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, curre
|
||||
return errors.Wrap(err, "entry filter")
|
||||
}
|
||||
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
|
||||
verifier := s.newDataColumnsVerifier(roDataColumns, verification.ByRangeRequestDataColumnSidecarRequirements)
|
||||
|
||||
if err := verifier.ValidFields(); err != nil {
|
||||
return errors.Wrap(err, "valid")
|
||||
return errors.Wrap(err, "valid fields")
|
||||
}
|
||||
|
||||
if err := verifier.SidecarInclusionProven(); err != nil {
|
||||
@@ -155,6 +160,8 @@ func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, curre
|
||||
|
||||
// fullCommitmentsToCheck returns the commitments to check for a given block.
|
||||
func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
|
||||
// Return early for blocks that are pre-Fulu.
|
||||
if block.Version() < version.Fulu {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
@@ -164,7 +171,7 @@ func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, bl
|
||||
blockSlot := block.Block().Slot()
|
||||
blockEpoch := slots.ToEpoch(blockSlot)
|
||||
|
||||
// Compute the current spoch.
|
||||
// Compute the current epoch.
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
|
||||
@@ -183,11 +190,9 @@ func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, bl
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Retrieve the groups count.
|
||||
custodyGroupCount := s.custodyInfo.ActualGroupCount()
|
||||
|
||||
// Retrieve peer info.
|
||||
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
samplingSize := max(s.custodyGroupCount, samplesPerSlot)
|
||||
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peer info")
|
||||
}
|
||||
|
||||
@@ -4,10 +4,8 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
@@ -29,7 +27,7 @@ var commitments = [][]byte{
|
||||
func TestPersist(t *testing.T) {
|
||||
t.Run("no sidecars", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
|
||||
err := lazilyPersistentStoreColumns.Persist(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
@@ -44,7 +42,7 @@ func TestPersist(t *testing.T) {
|
||||
}
|
||||
|
||||
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
|
||||
require.ErrorIs(t, err, errMixedRoots)
|
||||
@@ -59,7 +57,7 @@ func TestPersist(t *testing.T) {
|
||||
}
|
||||
|
||||
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
@@ -76,7 +74,7 @@ func TestPersist(t *testing.T) {
|
||||
}
|
||||
|
||||
roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(slot, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
@@ -114,7 +112,7 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, &peerdas.CustodyInfo{})
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
|
||||
|
||||
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
@@ -135,9 +133,9 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
root := signedRoBlock.Root()
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, &peerdas.CustodyInfo{})
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
|
||||
|
||||
indices := [...]uint64{1, 17, 87, 102}
|
||||
indices := [...]uint64{1, 17, 19, 42, 75, 87, 102, 117}
|
||||
dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
|
||||
for _, index := range indices {
|
||||
dataColumnParams := util.DataColumnParam{
|
||||
@@ -221,14 +219,10 @@ func TestFullCommitmentsToCheck(t *testing.T) {
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeAllDataSubnets = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
b := tc.block(t)
|
||||
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, &peerdas.CustodyInfo{})
|
||||
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, numberOfColumns)
|
||||
|
||||
commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -251,7 +251,7 @@ func (dcs *DataColumnStorage) Summary(root [fieldparams.RootLength]byte) DataCol
|
||||
}
|
||||
|
||||
// Save saves data column sidecars into the database and asynchronously performs pruning.
|
||||
// The returned chanel is closed when the pruning is complete.
|
||||
// The returned channel is closed when the pruning is complete.
|
||||
func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
|
||||
startTime := time.Now()
|
||||
|
||||
@@ -266,8 +266,7 @@ func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataCol
|
||||
return errWrongNumberOfColumns
|
||||
}
|
||||
|
||||
highestEpoch := primitives.Epoch(0)
|
||||
dataColumnSidecarsbyRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
dataColumnSidecarsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
|
||||
// Group data column sidecars by root.
|
||||
for _, dataColumnSidecar := range dataColumnSidecars {
|
||||
@@ -278,23 +277,20 @@ func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataCol
|
||||
|
||||
// Group data column sidecars by root.
|
||||
root := dataColumnSidecar.BlockRoot()
|
||||
dataColumnSidecarsbyRoot[root] = append(dataColumnSidecarsbyRoot[root], dataColumnSidecar)
|
||||
dataColumnSidecarsByRoot[root] = append(dataColumnSidecarsByRoot[root], dataColumnSidecar)
|
||||
}
|
||||
|
||||
for root, dataColumnSidecars := range dataColumnSidecarsbyRoot {
|
||||
for root, dataColumnSidecars := range dataColumnSidecarsByRoot {
|
||||
// Safety check: all data column sidecars for this root must be from the same slot.
|
||||
firstSlot := dataColumnSidecars[0].SignedBlockHeader.Header.Slot
|
||||
slot := dataColumnSidecars[0].Slot()
|
||||
for _, dataColumnSidecar := range dataColumnSidecars[1:] {
|
||||
if dataColumnSidecar.SignedBlockHeader.Header.Slot != firstSlot {
|
||||
if dataColumnSidecar.Slot() != slot {
|
||||
return errDataColumnSidecarsFromDifferentSlots
|
||||
}
|
||||
}
|
||||
|
||||
// Set the highest epoch.
|
||||
epoch := slots.ToEpoch(dataColumnSidecars[0].Slot())
|
||||
highestEpoch = max(highestEpoch, epoch)
|
||||
|
||||
// Save data columns in the filesystem.
|
||||
epoch := slots.ToEpoch(slot)
|
||||
if err := dcs.saveFilesystem(root, epoch, dataColumnSidecars); err != nil {
|
||||
return errors.Wrap(err, "save filesystem")
|
||||
}
|
||||
@@ -306,7 +302,7 @@ func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataCol
|
||||
}
|
||||
|
||||
// Compute the data columns ident.
|
||||
dataColumnsIdent := DataColumnsIdent{Root: root, Epoch: slots.ToEpoch(dataColumnSidecars[0].Slot()), Indices: indices}
|
||||
dataColumnsIdent := DataColumnsIdent{Root: root, Epoch: epoch, Indices: indices}
|
||||
|
||||
// Set data columns in the cache.
|
||||
if err := dcs.cache.set(dataColumnsIdent); err != nil {
|
||||
|
||||
@@ -20,7 +20,7 @@ File organisation
|
||||
The remaining 7 bits (from 0 to 127) represent the index of the data column.
|
||||
This sentinel bit is needed to distinguish between the column with index 0 and no column.
|
||||
Example: If the column with index 5 is stored at position 3 in the file, then indices[5] = 0x80 + 0x03 = 0x83.
|
||||
- The rest of the file is a repeat of the SSZ encoded data columns sidecars.
|
||||
- The rest of the file is a repeat of the SSZ encoded data column sidecars.
|
||||
|
||||
|
||||
|------------------------------------------|------------------------------------------------------------------------------------|
|
||||
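A tiny sketch of the index-byte encoding described above: the high bit marks "present" and the low 7 bits store the position of that column inside the file (names here are illustrative):

package main

import "fmt"

const presentBit = 0x80 // sentinel: distinguishes position 0 from "no column"

// encodePosition returns the index byte for a column stored at the given
// position (0-127) in the file.
func encodePosition(position byte) byte { return presentBit | (position & 0x7F) }

// decodePosition reports whether the column is present and, if so, its position.
func decodePosition(b byte) (position byte, present bool) {
	return b & 0x7F, b&presentBit != 0
}

func main() {
	b := encodePosition(3)
	fmt.Printf("indices[5] = %#x\n", b) // 0x83, as in the example above
	pos, ok := decodePosition(b)
	fmt.Println(pos, ok) // 3 true
}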
@@ -75,7 +75,7 @@ data-columns
|
||||
|
||||
Computation of the maximum size of a DataColumnSidecar
|
||||
------------------------------------------------------
|
||||
https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#datacolumnsidecar
|
||||
https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#datacolumnsidecar
|
||||
|
||||
|
||||
class DataColumnSidecar(Container):
|
||||
|
||||
@@ -33,6 +33,7 @@ type ReadOnlyDatabase interface {
|
||||
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
|
||||
FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
HighestRootsBelowSlot(ctx context.Context, slot primitives.Slot) (primitives.Slot, [][32]byte, error)
|
||||
EarliestSlot(ctx context.Context) (primitives.Slot, error)
|
||||
// State related methods.
|
||||
State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
|
||||
StateOrError(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
|
||||
@@ -56,12 +57,11 @@ type ReadOnlyDatabase interface {
|
||||
// Fee recipients operations.
|
||||
FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
|
||||
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
||||
// light client operations
|
||||
// Light client operations
|
||||
LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64) (map[uint64]interfaces.LightClientUpdate, error)
|
||||
LightClientUpdate(ctx context.Context, period uint64) (interfaces.LightClientUpdate, error)
|
||||
LightClientBootstrap(ctx context.Context, blockRoot []byte) (interfaces.LightClientBootstrap, error)
|
||||
|
||||
// origin checkpoint sync support
|
||||
// Origin checkpoint sync support
|
||||
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
|
||||
}
|
||||
@@ -102,6 +102,10 @@ type NoHeadAccessDatabase interface {
|
||||
|
||||
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
|
||||
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)
|
||||
|
||||
// Custody operations.
|
||||
UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
|
||||
UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
|
||||
}
|
||||
|
||||
// HeadAccessDatabase defines a struct with access to reading chain head data.
|
||||
|
||||
@@ -8,6 +8,7 @@ go_library(
|
||||
"backup.go",
|
||||
"blocks.go",
|
||||
"checkpoint.go",
|
||||
"custody.go",
|
||||
"deposit_contract.go",
|
||||
"encoding.go",
|
||||
"error.go",
|
||||
@@ -83,6 +84,7 @@ go_test(
|
||||
"backup_test.go",
|
||||
"blocks_test.go",
|
||||
"checkpoint_test.go",
|
||||
"custody_test.go",
|
||||
"deposit_contract_test.go",
|
||||
"encoding_test.go",
|
||||
"execution_chain_test.go",
|
||||
|
||||
@@ -860,6 +860,47 @@ func (s *Store) SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primi
|
||||
})
|
||||
}
|
||||
|
||||
// EarliestSlot returns the earliest available slot in the database.
|
||||
func (s *Store) EarliestSlot(ctx context.Context) (primitives.Slot, error) {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.EarliestSlot")
|
||||
defer span.End()
|
||||
|
||||
earliestAvailableSlot := primitives.Slot(0)
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
// Retrieve the root corresponding to the earliest available block.
|
||||
c := tx.Bucket(blockSlotIndicesBucket).Cursor()
|
||||
k, v := c.First()
|
||||
if k == nil || v == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
slot := bytesutil.BytesToSlotBigEndian(k)
|
||||
|
||||
// The genesis block may be indexed in this bucket, even if we started from a checkpoint.
|
||||
// Because of this, we check the next block. If the next block is still in the genesis epoch,
|
||||
// then we consider we have the whole chain.
|
||||
if slot != 0 {
|
||||
earliestAvailableSlot = slot
|
||||
}
|
||||
|
||||
k, v = c.Next()
|
||||
if k == nil || v == nil {
|
||||
// Only the genesis block is available.
|
||||
return nil
|
||||
}
|
||||
slot = bytesutil.BytesToSlotBigEndian(k)
|
||||
if slot < slotsPerEpoch {
|
||||
// We are still in the genesis epoch, so we consider we have the whole chain.
|
||||
return nil
|
||||
}
|
||||
|
||||
earliestAvailableSlot = slot
|
||||
return nil
|
||||
})
|
||||
|
||||
return earliestAvailableSlot, err
|
||||
}
|
||||
|
||||
type slotRoot struct {
|
||||
slot primitives.Slot
|
||||
root [32]byte
|
||||
@@ -883,7 +924,7 @@ func (s *Store) slotRootsInRange(ctx context.Context, start, end primitives.Slot
|
||||
c := bkt.Cursor()
|
||||
for k, v := c.Seek(key); ; /* rely on internal checks to exit */ k, v = c.Prev() {
|
||||
if len(k) == 0 && len(v) == 0 {
|
||||
// The `edge`` variable and this `if` deal with 2 edge cases:
|
||||
// The `edge` variable and this `if` deal with 2 edge cases:
|
||||
// - Seeking past the end of the bucket (the `end` param is higher than the highest slot).
|
||||
// - Seeking before the beginning of the bucket (the `start` param is lower than the lowest slot).
|
||||
// In both of these cases k,v will be nil and we can handle the same way using `edge` to
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -1327,3 +1328,86 @@ func TestStore_RegistrationsByValidatorID(t *testing.T) {
|
||||
want := errors.Wrap(ErrNotFoundFeeRecipient, "validator id 3")
|
||||
require.Equal(t, want.Error(), err.Error())
|
||||
}
|
||||
|
||||
// createAndSaveBlock creates a phase0 beacon block at the specified slot and saves it to the database.
|
||||
func createAndSaveBlock(t *testing.T, ctx context.Context, db *Store, slot primitives.Slot) {
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = slot
|
||||
|
||||
wrappedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wrappedBlock))
|
||||
}
|
||||
|
||||
func TestStore_EarliestSlot(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
t.Run("empty database returns ErrNotFound", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
slot, err := db.EarliestSlot(ctx)
|
||||
require.ErrorIs(t, err, ErrNotFound)
|
||||
assert.Equal(t, primitives.Slot(0), slot)
|
||||
})
|
||||
|
||||
t.Run("database with only genesis block", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
// Create and save genesis block (slot 0)
|
||||
createAndSaveBlock(t, ctx, db, 0)
|
||||
|
||||
slot, err := db.EarliestSlot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(0), slot)
|
||||
})
|
||||
|
||||
t.Run("database with genesis and blocks in genesis epoch", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// Create and save genesis block (slot 0)
|
||||
createAndSaveBlock(t, ctx, db, 0)
|
||||
|
||||
// Create and save a block in the genesis epoch
|
||||
createAndSaveBlock(t, ctx, db, primitives.Slot(slotsPerEpoch-1))
|
||||
|
||||
slot, err := db.EarliestSlot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(0), slot)
|
||||
})
|
||||
|
||||
t.Run("database with genesis and blocks beyond genesis epoch", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// Create and save genesis block (slot 0)
|
||||
createAndSaveBlock(t, ctx, db, 0)
|
||||
|
||||
// Create and save a block beyond the genesis epoch
|
||||
nextEpochSlot := primitives.Slot(slotsPerEpoch)
|
||||
createAndSaveBlock(t, ctx, db, nextEpochSlot)
|
||||
|
||||
slot, err := db.EarliestSlot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, nextEpochSlot, slot)
|
||||
})
|
||||
|
||||
t.Run("database starting from checkpoint (non-zero earliest slot)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// Simulate starting from a checkpoint by creating blocks starting from a later slot
|
||||
checkpointSlot := primitives.Slot(slotsPerEpoch * 10) // 10 epochs later
|
||||
nextEpochSlot := checkpointSlot + slotsPerEpoch
|
||||
|
||||
// Create and save first block at checkpoint slot
|
||||
createAndSaveBlock(t, ctx, db, checkpointSlot)
|
||||
|
||||
// Create and save another block in the next epoch
|
||||
createAndSaveBlock(t, ctx, db, nextEpochSlot)
|
||||
|
||||
slot, err := db.EarliestSlot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, nextEpochSlot, slot)
|
||||
})
|
||||
}
|
||||
|
||||
129
beacon-chain/db/kv/custody.go
Normal file
@@ -0,0 +1,129 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// UpdateCustodyInfo atomically updates the custody group count, but only if it is greater than the stored one.
|
||||
// In this case, it also updates the earliest available slot with the provided value.
|
||||
// It returns the (potentially updated) earliest available slot and custody group count.
|
||||
func (s *Store) UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.UpdateCustodyInfo")
|
||||
defer span.End()
|
||||
|
||||
storedGroupCount, storedEarliestAvailableSlot := uint64(0), primitives.Slot(0)
|
||||
if err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "create custody bucket")
|
||||
}
|
||||
|
||||
// Retrieve the stored custody group count.
|
||||
storedGroupCountBytes := bucket.Get(groupCountKey)
|
||||
if len(storedGroupCountBytes) != 0 {
|
||||
storedGroupCount = bytesutil.BytesToUint64BigEndian(storedGroupCountBytes)
|
||||
}
|
||||
|
||||
// Retrieve the stored earliest available slot.
|
||||
storedEarliestAvailableSlotBytes := bucket.Get(earliestAvailableSlotKey)
|
||||
if len(storedEarliestAvailableSlotBytes) != 0 {
|
||||
storedEarliestAvailableSlot = primitives.Slot(bytesutil.BytesToUint64BigEndian(storedEarliestAvailableSlotBytes))
|
||||
}
|
||||
|
||||
// Exit early if the new custody group count is lower than or equal to the stored one.
|
||||
if custodyGroupCount <= storedGroupCount {
|
||||
return nil
|
||||
}
|
||||
|
||||
storedGroupCount, storedEarliestAvailableSlot = custodyGroupCount, earliestAvailableSlot
|
||||
|
||||
// Store the earliest available slot.
|
||||
bytes := bytesutil.Uint64ToBytesBigEndian(uint64(earliestAvailableSlot))
|
||||
if err := bucket.Put(earliestAvailableSlotKey, bytes); err != nil {
|
||||
return errors.Wrap(err, "put earliest available slot")
|
||||
}
|
||||
|
||||
// Store the custody group count.
|
||||
bytes = bytesutil.Uint64ToBytesBigEndian(custodyGroupCount)
|
||||
if err := bucket.Put(groupCountKey, bytes); err != nil {
|
||||
return errors.Wrap(err, "put custody group count")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"earliestAvailableSlot": storedEarliestAvailableSlot,
|
||||
"groupCount": storedGroupCount,
|
||||
}).Debug("Custody info")
|
||||
|
||||
return storedEarliestAvailableSlot, storedGroupCount, nil
|
||||
}
|
||||
|
||||
// UpdateSubscribedToAllDataSubnets updates the "subscribed to all data subnets" status in the database
|
||||
// only if `subscribed` is `true`.
|
||||
// It returns the previous subscription status.
|
||||
func (s *Store) UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.UpdateSubscribedToAllDataSubnets")
|
||||
defer span.End()
|
||||
|
||||
result := false
|
||||
if !subscribed {
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket := tx.Bucket(custodyBucket)
|
||||
if bucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve the subscribe all data subnets flag.
|
||||
bytes := bucket.Get(subscribeAllDataSubnetsKey)
|
||||
if len(bytes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if bytes[0] == 1 {
|
||||
result = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
if err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "create custody bucket")
|
||||
}
|
||||
|
||||
bytes := bucket.Get(subscribeAllDataSubnetsKey)
|
||||
if len(bytes) != 0 && bytes[0] == 1 {
|
||||
result = true
|
||||
}
|
||||
|
||||
if err := bucket.Put(subscribeAllDataSubnetsKey, []byte{1}); err != nil {
|
||||
return errors.Wrap(err, "put subscribe all data subnets")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
176
beacon-chain/db/kv/custody_test.go
Normal file
@@ -0,0 +1,176 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// getCustodyInfoFromDB reads the custody info directly from the database for testing purposes.
|
||||
func getCustodyInfoFromDB(t *testing.T, db *Store) (primitives.Slot, uint64) {
|
||||
t.Helper()
|
||||
var earliestSlot primitives.Slot
|
||||
var groupCount uint64
|
||||
|
||||
err := db.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(custodyBucket)
|
||||
if bucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read group count
|
||||
groupCountBytes := bucket.Get(groupCountKey)
|
||||
if len(groupCountBytes) != 0 {
|
||||
groupCount = bytesutil.BytesToUint64BigEndian(groupCountBytes)
|
||||
}
|
||||
|
||||
// Read earliest available slot
|
||||
earliestSlotBytes := bucket.Get(earliestAvailableSlotKey)
|
||||
if len(earliestSlotBytes) != 0 {
|
||||
earliestSlot = primitives.Slot(bytesutil.BytesToUint64BigEndian(earliestSlotBytes))
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
return earliestSlot, groupCount
|
||||
}
|
||||
|
||||
// getSubscriptionStatusFromDB reads the subscription status directly from the database for testing purposes.
|
||||
func getSubscriptionStatusFromDB(t *testing.T, db *Store) bool {
|
||||
t.Helper()
|
||||
var subscribed bool
|
||||
|
||||
err := db.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(custodyBucket)
|
||||
if bucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
bytes := bucket.Get(subscribeAllDataSubnetsKey)
|
||||
if len(bytes) != 0 && bytes[0] == 1 {
|
||||
subscribed = true
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
return subscribed
|
||||
}
|
||||
|
||||
func TestUpdateCustodyInfo(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
t.Run("initial update with empty database", func(t *testing.T) {
|
||||
const (
|
||||
earliestSlot = primitives.Slot(100)
|
||||
groupCount = uint64(5)
|
||||
)
|
||||
|
||||
db := setupDB(t)
|
||||
|
||||
slot, count, err := db.UpdateCustodyInfo(ctx, earliestSlot, groupCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestSlot, slot)
|
||||
require.Equal(t, groupCount, count)
|
||||
|
||||
storedSlot, storedCount := getCustodyInfoFromDB(t, db)
|
||||
require.Equal(t, earliestSlot, storedSlot)
|
||||
require.Equal(t, groupCount, storedCount)
|
||||
})
|
||||
|
||||
t.Run("update with higher group count", func(t *testing.T) {
|
||||
const (
|
||||
initialSlot = primitives.Slot(100)
|
||||
initialCount = uint64(5)
|
||||
earliestSlot = primitives.Slot(200)
|
||||
groupCount = uint64(10)
|
||||
)
|
||||
|
||||
db := setupDB(t)
|
||||
|
||||
_, _, err := db.UpdateCustodyInfo(ctx, initialSlot, initialCount)
|
||||
require.NoError(t, err)
|
||||
|
||||
slot, count, err := db.UpdateCustodyInfo(ctx, earliestSlot, groupCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestSlot, slot)
|
||||
require.Equal(t, groupCount, count)
|
||||
|
||||
storedSlot, storedCount := getCustodyInfoFromDB(t, db)
|
||||
require.Equal(t, earliestSlot, storedSlot)
|
||||
require.Equal(t, groupCount, storedCount)
|
||||
})
|
||||
|
||||
t.Run("update with lower group count should not update", func(t *testing.T) {
|
||||
const (
|
||||
initialSlot = primitives.Slot(200)
|
||||
initialCount = uint64(10)
|
||||
earliestSlot = primitives.Slot(300)
|
||||
groupCount = uint64(8)
|
||||
)
|
||||
|
||||
db := setupDB(t)
|
||||
|
||||
_, _, err := db.UpdateCustodyInfo(ctx, initialSlot, initialCount)
|
||||
require.NoError(t, err)
|
||||
|
||||
slot, count, err := db.UpdateCustodyInfo(ctx, earliestSlot, groupCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, initialSlot, slot)
|
||||
require.Equal(t, initialCount, count)
|
||||
|
||||
storedSlot, storedCount := getCustodyInfoFromDB(t, db)
|
||||
require.Equal(t, initialSlot, storedSlot)
|
||||
require.Equal(t, initialCount, storedCount)
|
||||
})
|
||||
}
|
||||
|
||||
func TestUpdateSubscribedToAllDataSubnets(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("initial update with empty database - set to false", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
prev, err := db.UpdateSubscribedToAllDataSubnets(ctx, false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, prev)
|
||||
|
||||
stored := getSubscriptionStatusFromDB(t, db)
|
||||
require.Equal(t, false, stored)
|
||||
})
|
||||
|
||||
t.Run("attempt to update from true to false (should not change)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
_, err := db.UpdateSubscribedToAllDataSubnets(ctx, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
prev, err := db.UpdateSubscribedToAllDataSubnets(ctx, false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, prev)
|
||||
|
||||
stored := getSubscriptionStatusFromDB(t, db)
|
||||
require.Equal(t, true, stored)
|
||||
})
|
||||
|
||||
t.Run("attempt to update from true to false (should not change)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
_, err := db.UpdateSubscribedToAllDataSubnets(ctx, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
prev, err := db.UpdateSubscribedToAllDataSubnets(ctx, true)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, prev)
|
||||
|
||||
stored := getSubscriptionStatusFromDB(t, db)
|
||||
require.Equal(t, true, stored)
|
||||
})
|
||||
}
|
||||
@@ -123,6 +123,7 @@ var Buckets = [][]byte{
|
||||
|
||||
feeRecipientBucket,
|
||||
registrationBucket,
|
||||
custodyBucket,
|
||||
}
|
||||
|
||||
// KVStoreOption is a functional option that modifies a kv.Store.
|
||||
|
||||
@@ -70,4 +70,10 @@ var (
|
||||
|
||||
// Migrations
|
||||
migrationsBucket = []byte("migrations")
|
||||
|
||||
// Custody
|
||||
custodyBucket = []byte("custody")
|
||||
groupCountKey = []byte("group-count")
|
||||
earliestAvailableSlotKey = []byte("earliest-available-slot")
|
||||
subscribeAllDataSubnetsKey = []byte("subscribe-all-data-subnets")
|
||||
)
|
||||
|
||||
@@ -43,6 +43,7 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
return errors.Wrap(err, "failed to initialize origin block w/ bytes + config+fork")
|
||||
}
|
||||
blk := wblk.Block()
|
||||
slot := blk.Slot()
|
||||
|
||||
blockRoot, err := blk.HashTreeRoot()
|
||||
if err != nil {
|
||||
@@ -51,43 +52,43 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
|
||||
pr := blk.ParentRoot()
|
||||
bf := &dbval.BackfillStatus{
|
||||
LowSlot: uint64(wblk.Block().Slot()),
|
||||
LowSlot: uint64(slot),
|
||||
LowRoot: blockRoot[:],
|
||||
LowParentRoot: pr[:],
|
||||
OriginRoot: blockRoot[:],
|
||||
OriginSlot: uint64(wblk.Block().Slot()),
|
||||
OriginSlot: uint64(slot),
|
||||
}
|
||||
|
||||
if err = s.SaveBackfillStatus(ctx, bf); err != nil {
|
||||
return errors.Wrap(err, "unable to save backfill status data to db for checkpoint sync")
|
||||
}
|
||||
|
||||
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Info("Saving checkpoint block to db")
|
||||
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Info("Saving checkpoint data into database")
|
||||
if err := s.SaveBlock(ctx, wblk); err != nil {
|
||||
return errors.Wrap(err, "could not save checkpoint block")
|
||||
return errors.Wrap(err, "save block")
|
||||
}
|
||||
|
||||
// save state
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Info("Calling SaveState")
|
||||
if err = s.SaveState(ctx, state, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save state")
|
||||
return errors.Wrap(err, "save state")
|
||||
}
|
||||
|
||||
if err = s.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Slot: state.Slot(),
|
||||
Root: blockRoot[:],
|
||||
}); err != nil {
|
||||
return errors.Wrap(err, "could not save state summary")
|
||||
return errors.Wrap(err, "save state summary")
|
||||
}
|
||||
|
||||
// mark block as head of chain, so that processing will pick up from this point
|
||||
if err = s.SaveHeadBlockRoot(ctx, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save head block root")
|
||||
return errors.Wrap(err, "save head block root")
|
||||
}
|
||||
|
||||
// save origin block root in a special key, to be used when the canonical
|
||||
// origin (start of chain, ie alternative to genesis) block or state is needed
|
||||
if err = s.SaveOriginCheckpointBlockRoot(ctx, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save origin block root")
|
||||
return errors.Wrap(err, "save origin checkpoint block root")
|
||||
}
|
||||
|
||||
// rebuild the checkpoint from the block
|
||||
@@ -96,15 +97,18 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
chkpt := ðpb.Checkpoint{
|
||||
Epoch: primitives.Epoch(slotEpoch),
|
||||
Root: blockRoot[:],
|
||||
}
|
||||
|
||||
if err = s.SaveJustifiedCheckpoint(ctx, chkpt); err != nil {
|
||||
return errors.Wrap(err, "could not mark checkpoint sync block as justified")
|
||||
return errors.Wrap(err, "save justified checkpoint")
|
||||
}
|
||||
|
||||
if err = s.SaveFinalizedCheckpoint(ctx, chkpt); err != nil {
|
||||
return errors.Wrap(err, "could not mark checkpoint sync block as finalized")
|
||||
return errors.Wrap(err, "save finalized checkpoint")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -23,7 +23,6 @@ go_library(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
|
||||
@@ -26,7 +26,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||
@@ -124,7 +123,6 @@ type BeaconNode struct {
|
||||
BlobStorageOptions []filesystem.BlobStorageOption
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
DataColumnStorageOptions []filesystem.DataColumnStorageOption
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
verifyInitWaiter *verification.InitializerWaiter
|
||||
syncChecker *initialsync.SyncChecker
|
||||
slasherEnabled bool
|
||||
@@ -166,7 +164,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
serviceFlagOpts: &serviceFlagOpts{},
|
||||
initialSyncComplete: make(chan struct{}),
|
||||
syncChecker: &initialsync.SyncChecker{},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
slasherEnabled: cliCtx.Bool(flags.SlasherFlag.Name),
|
||||
}
|
||||
|
||||
@@ -236,7 +233,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
beacon.finalizedStateAtStartUp = nil
|
||||
|
||||
if features.Get().EnableLightClient {
|
||||
beacon.lcStore = lightclient.NewLightClientStore(beacon.db)
|
||||
beacon.lcStore = lightclient.NewLightClientStore(beacon.db, beacon.fetchP2P(), beacon.StateFeed())
|
||||
}
|
||||
|
||||
return beacon, nil
|
||||
@@ -699,6 +696,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
|
||||
Discv5BootStrapAddrs: p2p.ParseBootStrapAddrs(bootstrapNodeAddrs),
|
||||
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
|
||||
DataDir: dataDir,
|
||||
DiscoveryDir: filepath.Join(dataDir, "discovery"),
|
||||
LocalIP: cliCtx.String(cmd.P2PIP.Name),
|
||||
HostAddress: cliCtx.String(cmd.P2PHost.Name),
|
||||
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
|
||||
@@ -716,7 +714,6 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
|
||||
StateNotifier: b,
|
||||
DB: b.db,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
CustodyInfo: b.custodyInfo,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -799,7 +796,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
|
||||
blockchain.WithPayloadIDCache(b.payloadIDCache),
|
||||
blockchain.WithSyncChecker(b.syncChecker),
|
||||
blockchain.WithCustodyInfo(b.custodyInfo),
|
||||
blockchain.WithSlasherEnabled(b.slasherEnabled),
|
||||
blockchain.WithLightClientStore(b.lcStore),
|
||||
)
|
||||
@@ -887,7 +883,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
|
||||
regularsync.WithDataColumnStorage(b.DataColumnStorage),
|
||||
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
|
||||
regularsync.WithAvailableBlocker(bFillStore),
|
||||
regularsync.WithCustodyInfo(b.custodyInfo),
|
||||
regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
|
||||
regularsync.WithSlasherEnabled(b.slasherEnabled),
|
||||
regularsync.WithLightClientStore(b.lcStore),
|
||||
regularsync.WithBatchVerifierLimit(b.cliCtx.Int(flags.BatchVerifierLimit.Name)),
|
||||
|
||||
@@ -104,6 +104,7 @@ go_library(
|
||||
"@com_github_libp2p_go_mplex//:go_default_library",
|
||||
"@com_github_multiformats_go_multiaddr//:go_default_library",
|
||||
"@com_github_multiformats_go_multiaddr//net:go_default_library",
|
||||
"@com_github_patrickmn_go_cache//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
@@ -146,7 +147,6 @@ go_test(
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
@@ -173,7 +173,6 @@ go_test(
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/metadata:go_default_library",
|
||||
"//proto/testing:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
|
||||
@@ -24,6 +24,8 @@ import (
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
const minimumPeersPerSubnetForBroadcast = 1
|
||||
|
||||
// ErrMessageNotMapped occurs on a Broadcast attempt when a message has not been defined in the
|
||||
// GossipTypeMapping.
|
||||
var ErrMessageNotMapped = errors.New("message type is not mapped to a PubSub topic")
|
||||
@@ -124,15 +126,13 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
|
||||
if err := func() error {
|
||||
s.subnetLocker(subnet).Lock()
|
||||
defer s.subnetLocker(subnet).Unlock()
|
||||
ok, err := s.FindPeersWithSubnet(ctx, attestationToTopic(subnet, forkDigest), subnet, 1)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
if err := s.FindAndDialPeersWithSubnets(ctx, AttestationSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
|
||||
return errors.Wrap(err, "find peers with subnets")
|
||||
}
|
||||
if ok {
|
||||
savedAttestationBroadcasts.Inc()
|
||||
return nil
|
||||
}
|
||||
return errors.New("failed to find peers for subnet")
|
||||
|
||||
savedAttestationBroadcasts.Inc()
|
||||
return nil
|
||||
}(); err != nil {
|
||||
log.WithError(err).Error("Failed to find peers")
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -183,15 +183,12 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
|
||||
if err := func() error {
|
||||
s.subnetLocker(wrappedSubIdx).Lock()
|
||||
defer s.subnetLocker(wrappedSubIdx).Unlock()
|
||||
ok, err := s.FindPeersWithSubnet(ctx, syncCommitteeToTopic(subnet, forkDigest), subnet, 1)
|
||||
if err != nil {
|
||||
return err
|
||||
if err := s.FindAndDialPeersWithSubnets(ctx, SyncCommitteeSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
|
||||
return errors.Wrap(err, "find peers with subnets")
|
||||
}
|
||||
if ok {
|
||||
savedSyncCommitteeBroadcasts.Inc()
|
||||
return nil
|
||||
}
|
||||
return errors.New("failed to find peers for subnet")
|
||||
|
||||
savedSyncCommitteeBroadcasts.Inc()
|
||||
return nil
|
||||
}(); err != nil {
|
||||
log.WithError(err).Error("Failed to find peers")
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -250,15 +247,13 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
|
||||
if err := func() error {
|
||||
s.subnetLocker(wrappedSubIdx).Lock()
|
||||
defer s.subnetLocker(wrappedSubIdx).Unlock()
|
||||
ok, err := s.FindPeersWithSubnet(ctx, blobSubnetToTopic(subnet, forkDigest), subnet, 1)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
if err := s.FindAndDialPeersWithSubnets(ctx, BlobSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
|
||||
return errors.Wrap(err, "find peers with subnets")
|
||||
}
|
||||
if ok {
|
||||
blobSidecarBroadcasts.Inc()
|
||||
return nil
|
||||
}
|
||||
return errors.New("failed to find peers for subnet")
|
||||
|
||||
blobSidecarBroadcasts.Inc()
|
||||
return nil
|
||||
}(); err != nil {
|
||||
log.WithError(err).Error("Failed to find peers")
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -329,7 +324,6 @@ func (s *Service) BroadcastDataColumn(
|
||||
root [fieldparams.RootLength]byte,
|
||||
dataColumnSubnet uint64,
|
||||
dataColumnSidecar *ethpb.DataColumnSidecar,
|
||||
peersCheckedChans ...chan<- bool, // Used for testing purposes to signal when peers are checked.
|
||||
) error {
|
||||
// Add tracing to the function.
|
||||
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumn")
|
||||
@@ -349,7 +343,7 @@ func (s *Service) BroadcastDataColumn(
|
||||
}
|
||||
|
||||
// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
|
||||
go s.internalBroadcastDataColumn(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest, peersCheckedChans)
|
||||
go s.internalBroadcastDataColumn(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -360,7 +354,6 @@ func (s *Service) internalBroadcastDataColumn(
|
||||
columnSubnet uint64,
|
||||
dataColumnSidecar *ethpb.DataColumnSidecar,
|
||||
forkDigest [fieldparams.VersionLength]byte,
|
||||
peersCheckedChans []chan<- bool, // Used for testing purposes to signal when peers are checked.
|
||||
) {
|
||||
// Add tracing to the function.
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
|
||||
@@ -382,7 +375,7 @@ func (s *Service) internalBroadcastDataColumn(
|
||||
wrappedSubIdx := columnSubnet + dataColumnSubnetVal
|
||||
|
||||
// Find peers if needed.
|
||||
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, topic, columnSubnet, peersCheckedChans); err != nil {
|
||||
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, columnSubnet); err != nil {
|
||||
log.WithError(err).Error("Failed to find peers for data column subnet")
|
||||
tracing.AnnotateError(span, err)
|
||||
}
|
||||
@@ -416,35 +409,19 @@ func (s *Service) internalBroadcastDataColumn(
|
||||
func (s *Service) findPeersIfNeeded(
|
||||
ctx context.Context,
|
||||
wrappedSubIdx uint64,
|
||||
topic string,
|
||||
topicFormat string,
|
||||
forkDigest [fieldparams.VersionLength]byte,
|
||||
subnet uint64,
|
||||
peersCheckedChans []chan<- bool, // Used for testing purposes to signal when peers are checked.
|
||||
) error {
|
||||
// Sending a data column sidecar to only one peer is not ideal,
|
||||
// but it ensures at least one peer receives it.
|
||||
s.subnetLocker(wrappedSubIdx).Lock()
|
||||
defer s.subnetLocker(wrappedSubIdx).Unlock()
|
||||
|
||||
// Sending a data column sidecar to only one peer is not ideal,
|
||||
// but it ensures at least one peer receives it.
|
||||
const peerCount = 1
|
||||
|
||||
if s.hasPeerWithSubnet(topic) {
|
||||
// Exit early if we already have peers with this subnet.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Used for testing purposes.
|
||||
if len(peersCheckedChans) > 0 {
|
||||
peersCheckedChans[0] <- true
|
||||
}
|
||||
|
||||
// No peers found, attempt to find peers with this subnet.
|
||||
ok, err := s.FindPeersWithSubnet(ctx, topic, subnet, peerCount)
|
||||
if err != nil {
|
||||
if err := s.FindAndDialPeersWithSubnets(ctx, topicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
|
||||
return errors.Wrap(err, "find peers with subnet")
|
||||
}
|
||||
if !ok {
|
||||
return errors.Errorf("failed to find peers for topic %s with subnet %d", topic, subnet)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
@@ -24,7 +23,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
testpb "github.com/OffchainLabs/prysm/v6/proto/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
@@ -216,9 +214,10 @@ func TestService_BroadcastAttestation(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
const port = uint(2000)
|
||||
|
||||
// Setup bootnode.
|
||||
cfg := &Config{PingInterval: testPingInterval}
|
||||
port := 2000
|
||||
cfg.UDPPort = uint(port)
|
||||
_, pkey := createAddrAndPrivKey(t)
|
||||
ipAddr := net.ParseIP("127.0.0.1")
|
||||
@@ -228,6 +227,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
cfg: cfg,
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
bootListener, err := s.createListener(ipAddr, pkey)
|
||||
require.NoError(t, err)
|
||||
@@ -245,7 +245,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
PingInterval: testPingInterval,
|
||||
}
|
||||
// Setup 2 different hosts
|
||||
for i := 1; i <= 2; i++ {
|
||||
for i := uint(1); i <= 2; i++ {
|
||||
h, pkey, ipAddr := createHost(t, port+i)
|
||||
cfg.UDPPort = uint(port + i)
|
||||
cfg.TCPPort = uint(port + i)
|
||||
@@ -256,6 +256,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
cfg: cfg,
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
listener, err := s.startDiscoveryV5(ipAddr, pkey)
|
||||
// Set for 2nd peer
|
||||
@@ -545,8 +546,7 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
msg, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
|
||||
msg, err := util.MockOptimisticUpdate()
|
||||
require.NoError(t, err)
|
||||
|
||||
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientOptimisticUpdateTopicFormat
|
||||
@@ -612,8 +612,7 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
msg, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
msg, err := util.MockFinalityUpdate()
|
||||
require.NoError(t, err)
|
||||
|
||||
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientFinalityUpdateTopicFormat
|
||||
@@ -687,7 +686,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
// Create a host.
|
||||
_, pkey, ipAddr := createHost(t, port)
|
||||
|
||||
p := &Service{
|
||||
service := &Service{
|
||||
ctx: t.Context(),
|
||||
host: p1.BHost,
|
||||
pubsub: p1.PubSub(),
|
||||
@@ -698,59 +697,48 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
subnetsLock: make(map[uint64]*sync.RWMutex),
|
||||
subnetsLockLock: sync.Mutex{},
|
||||
peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ScorerParams: &scorers.Config{}}),
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
|
||||
// Create a listener.
|
||||
listener, err := p.startDiscoveryV5(ipAddr, pkey)
|
||||
listener, err := service.startDiscoveryV5(ipAddr, pkey)
|
||||
require.NoError(t, err)
|
||||
|
||||
p.dv5Listener = listener
|
||||
service.dv5Listener = listener
|
||||
|
||||
digest, err := p.currentForkDigest()
|
||||
digest, err := service.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
|
||||
topic := fmt.Sprintf(topicFormat, digest, subnet)
|
||||
topic := fmt.Sprintf(topicFormat, digest, subnet) + service.Encoding().ProtocolSuffix()
|
||||
|
||||
roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
|
||||
sidecar := roSidecars[0].DataColumnSidecar
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
peersChecked := make(chan bool, 0)
|
||||
|
||||
go func(tt *testing.T) {
|
||||
defer wg.Done()
|
||||
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Wait for the peers to be checked.
|
||||
<-peersChecked
|
||||
|
||||
// External peer subscribes to the topic.
|
||||
topic += p.Encoding().ProtocolSuffix()
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(tt, err)
|
||||
|
||||
msg, err := sub.Next(ctx)
|
||||
require.NoError(tt, err)
|
||||
|
||||
var result ethpb.DataColumnSidecar
|
||||
require.NoError(tt, p.Encoding().DecodeGossip(msg.Data, &result))
|
||||
require.DeepEqual(tt, &result, sidecar)
|
||||
}(t)
|
||||
|
||||
var emptyRoot [fieldparams.RootLength]byte
|
||||
|
||||
// Attempt to broadcast nil object should fail.
|
||||
err = p.BroadcastDataColumn(emptyRoot, subnet, nil)
|
||||
var emptyRoot [fieldparams.RootLength]byte
|
||||
err = service.BroadcastDataColumn(emptyRoot, subnet, nil)
|
||||
require.ErrorContains(t, "attempted to broadcast nil", err)
|
||||
|
||||
// Broadcast to peers and wait.
|
||||
err = p.BroadcastDataColumn(emptyRoot, subnet, sidecar, peersChecked)
|
||||
// Subscribe to the topic.
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Minute), "Failed to receive pubsub within 1s")
|
||||
|
||||
// libp2p fails without this delay
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Broadcast to peers and wait.
|
||||
err = service.BroadcastDataColumn(emptyRoot, subnet, sidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Receive the message.
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
msg, err := sub.Next(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
var result ethpb.DataColumnSidecar
|
||||
require.NoError(t, service.Encoding().DecodeGossip(msg.Data, &result))
|
||||
require.DeepEqual(t, &result, sidecar)
|
||||
}
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"time"
|
||||
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
)
|
||||
@@ -27,6 +26,7 @@ type Config struct {
|
||||
HostDNS string
|
||||
PrivateKey string
|
||||
DataDir string
|
||||
DiscoveryDir string
|
||||
MetaDataDir string
|
||||
QUICPort uint
|
||||
TCPPort uint
|
||||
@@ -39,7 +39,6 @@ type Config struct {
|
||||
StateNotifier statefeed.Notifier
|
||||
DB db.ReadOnlyDatabase
|
||||
ClockWaiter startup.ClockWaiter
|
||||
CustodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
// validateConfig validates whether the values provided are accurate and will set
|
||||
|
||||
@@ -19,8 +19,7 @@ const (
|
||||
// Burst limit for inbound dials.
|
||||
ipBurst = 8
|
||||
|
||||
// High watermark buffer signifies the buffer till which
|
||||
// we will handle inbound requests.
|
||||
// High watermark buffer signifies the buffer till which we will handle inbound requests.
|
||||
highWatermarkBuffer = 20
|
||||
)
|
||||
|
||||
@@ -53,7 +52,7 @@ func (s *Service) InterceptAccept(n network.ConnMultiaddrs) (allow bool) {
|
||||
"reason": "exceeded dial limit"}).Trace("Not accepting inbound dial from ip address")
|
||||
return false
|
||||
}
|
||||
if s.isPeerAtLimit(true /* inbound */) {
|
||||
if s.isPeerAtLimit(inbound) {
|
||||
log.WithFields(logrus.Fields{"peer": n.RemoteMultiaddr(),
|
||||
"reason": "at peer limit"}).Trace("Not accepting inbound dial")
|
||||
return false
|
||||
|
||||
@@ -3,11 +3,114 @@ package p2p
import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

var _ DataColumnsHandler = (*Service)(nil)
var _ CustodyManager = (*Service)(nil)

// EarliestAvailableSlot returns the earliest available slot.
func (s *Service) EarliestAvailableSlot() (primitives.Slot, error) {
s.custodyInfoLock.RLock()
defer s.custodyInfoLock.RUnlock()

if s.custodyInfo == nil {
return 0, errors.New("no custody info available")
}

return s.custodyInfo.earliestAvailableSlot, nil
}

// CustodyGroupCount returns the custody group count.
func (s *Service) CustodyGroupCount() (uint64, error) {
s.custodyInfoLock.Lock()
defer s.custodyInfoLock.Unlock()

if s.custodyInfo == nil {
return 0, errors.New("no custody info available")
}

return s.custodyInfo.groupCount, nil
}

// UpdateCustodyInfo updates the stored custody group count to the incoming one
// if the incoming one is greater than the stored one. In this case, the
// incoming earliest available slot should be greater than or equal to the
// stored one or an error is returned.
//
// - If there is no stored custody info, or
// - If the incoming earliest available slot is greater than or equal to the
// fulu fork slot and the incoming custody group count is greater than the
// number of samples per slot
//
// then the stored earliest available slot is updated to the incoming one.
//
// This function returns the (possibly updated) earliest available slot and
// custody group count.
//
// Rationale:
// - The custody group count can only be increased (specification)
// - If the custody group count is increased before Fulu, we can still serve
// all the data, since there is no sharding before Fulu. As a consequence
// we do not need to update the earliest available slot in this case.
// - If the custody group count is increased after Fulu, but to a value less
// than or equal to the number of samples per slot, we can still serve all
// the data, since we store all sampled data column sidecars in all cases.
// As a consequence, we do not need to update the earliest available slot
// - If the custody group count is increased after Fulu to a value higher than
// the number of samples per slot, then, until the backfill is complete, we
// are unable to serve the data column sidecars corresponding to the new
// custody groups. As a consequence, we need to update the earliest
// available slot to inform the peers that we are not able to serve data
// column sidecars before this point.
func (s *Service) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot

s.custodyInfoLock.Lock()
defer s.custodyInfoLock.Unlock()

if s.custodyInfo == nil {
s.custodyInfo = &custodyInfo{
earliestAvailableSlot: earliestAvailableSlot,
groupCount: custodyGroupCount,
}
return earliestAvailableSlot, custodyGroupCount, nil
}

inMemory := s.custodyInfo
if custodyGroupCount <= inMemory.groupCount {
return inMemory.earliestAvailableSlot, inMemory.groupCount, nil
}

if earliestAvailableSlot < inMemory.earliestAvailableSlot {
return 0, 0, errors.Errorf(
"earliest available slot %d is less than the current one %d. (custody group count: %d, current one: %d)",
earliestAvailableSlot, inMemory.earliestAvailableSlot, custodyGroupCount, inMemory.groupCount,
)
}

if custodyGroupCount <= samplesPerSlot {
inMemory.groupCount = custodyGroupCount
return inMemory.earliestAvailableSlot, custodyGroupCount, nil
}

fuluForkSlot, err := fuluForkSlot()
if err != nil {
return 0, 0, errors.Wrap(err, "fulu fork slot")
}

if earliestAvailableSlot < fuluForkSlot {
inMemory.groupCount = custodyGroupCount
return inMemory.earliestAvailableSlot, custodyGroupCount, nil
}

inMemory.earliestAvailableSlot = earliestAvailableSlot
inMemory.groupCount = custodyGroupCount
return earliestAvailableSlot, custodyGroupCount, nil
}

// CustodyGroupCountFromPeer retrieves custody group count from a peer.
// It first tries to get the custody group count from the peer's metadata,
@@ -72,3 +175,19 @@ func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {

return custodyGroupCount
}

func fuluForkSlot() (primitives.Slot, error) {
beaconConfig := params.BeaconConfig()

fuluForkEpoch := beaconConfig.FuluForkEpoch
if fuluForkEpoch == beaconConfig.FarFutureEpoch {
return beaconConfig.FarFutureSlot, nil
}

forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
if err != nil {
return 0, errors.Wrap(err, "epoch start")
}

return forkFuluSlot, nil
}
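For readers following the custody flow, here is a minimal, hypothetical usage sketch of the accessors above. `reconcileCustody` is not part of the diff; it only relies on the `UpdateCustodyInfo` signature shown in this hunk, plus the package-level `log` and the imports already listed.

// reconcileCustody is an illustrative helper (not in the tree) showing how a
// caller is expected to drive the API: pass the candidate values in, then log
// whatever the service actually stored after the update rules above ran.
func reconcileCustody(s *Service, slot primitives.Slot, groupCount uint64) error {
	storedSlot, storedCount, err := s.UpdateCustodyInfo(slot, groupCount)
	if err != nil {
		return errors.Wrap(err, "update custody info")
	}

	log.WithFields(logrus.Fields{
		"earliestAvailableSlot": storedSlot,
		"custodyGroupCount":     storedCount,
	}).Debug("Custody info reconciled")

	return nil
}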
@@ -1,12 +1,15 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
|
||||
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
|
||||
@@ -15,6 +18,174 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
)
|
||||
|
||||
func TestEarliestAvailableSlot(t *testing.T) {
|
||||
t.Run("No custody info available", func(t *testing.T) {
|
||||
service := &Service{
|
||||
custodyInfo: nil,
|
||||
}
|
||||
|
||||
_, err := service.EarliestAvailableSlot()
|
||||
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Valid custody info", func(t *testing.T) {
|
||||
const expected primitives.Slot = 100
|
||||
|
||||
service := &Service{
|
||||
custodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: expected,
|
||||
},
|
||||
}
|
||||
|
||||
slot, err := service.EarliestAvailableSlot()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, slot)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCustodyGroupCount(t *testing.T) {
|
||||
t.Run("No custody info available", func(t *testing.T) {
|
||||
service := &Service{
|
||||
custodyInfo: nil,
|
||||
}
|
||||
|
||||
_, err := service.CustodyGroupCount()
|
||||
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, true, strings.Contains(err.Error(), "no custody info available"))
|
||||
})
|
||||
|
||||
t.Run("Valid custody info", func(t *testing.T) {
|
||||
const expected uint64 = 5
|
||||
|
||||
service := &Service{
|
||||
custodyInfo: &custodyInfo{
|
||||
groupCount: expected,
|
||||
},
|
||||
}
|
||||
|
||||
count, err := service.CustodyGroupCount()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, count)
|
||||
})
|
||||
}
|
||||
|
||||
func TestUpdateCustodyInfo(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.SamplesPerSlot = 8
|
||||
config.FuluForkEpoch = 10
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
initialCustodyInfo *custodyInfo
|
||||
inputSlot primitives.Slot
|
||||
inputGroupCount uint64
|
||||
expectedUpdated bool
|
||||
expectedSlot primitives.Slot
|
||||
expectedGroupCount uint64
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "First time setting custody info",
|
||||
initialCustodyInfo: nil,
|
||||
inputSlot: 100,
|
||||
inputGroupCount: 5,
|
||||
expectedUpdated: true,
|
||||
expectedSlot: 100,
|
||||
expectedGroupCount: 5,
|
||||
},
|
||||
{
|
||||
name: "Group count decrease - no update",
|
||||
initialCustodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: 50,
|
||||
groupCount: 10,
|
||||
},
|
||||
inputSlot: 60,
|
||||
inputGroupCount: 8,
|
||||
expectedUpdated: false,
|
||||
expectedSlot: 50,
|
||||
expectedGroupCount: 10,
|
||||
},
|
||||
{
|
||||
name: "Earliest slot decrease - error",
|
||||
initialCustodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: 100,
|
||||
groupCount: 5,
|
||||
},
|
||||
inputSlot: 50,
|
||||
inputGroupCount: 10,
|
||||
expectedErr: "earliest available slot 50 is less than the current one 100",
|
||||
},
|
||||
{
|
||||
name: "Group count increase but <= samples per slot",
|
||||
initialCustodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: 50,
|
||||
groupCount: 5,
|
||||
},
|
||||
inputSlot: 60,
|
||||
inputGroupCount: 8,
|
||||
expectedUpdated: true,
|
||||
expectedSlot: 50,
|
||||
expectedGroupCount: 8,
|
||||
},
|
||||
{
|
||||
name: "Group count increase > samples per slot, before Fulu fork",
|
||||
initialCustodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: 50,
|
||||
groupCount: 5,
|
||||
},
|
||||
inputSlot: 60,
|
||||
inputGroupCount: 15,
|
||||
expectedUpdated: true,
|
||||
expectedSlot: 50,
|
||||
expectedGroupCount: 15,
|
||||
},
|
||||
{
|
||||
name: "Group count increase > samples per slot, after Fulu fork",
|
||||
initialCustodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: 50,
|
||||
groupCount: 5,
|
||||
},
|
||||
inputSlot: 500,
|
||||
inputGroupCount: 15,
|
||||
expectedUpdated: true,
|
||||
expectedSlot: 500,
|
||||
expectedGroupCount: 15,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
service := &Service{
|
||||
custodyInfo: tc.initialCustodyInfo,
|
||||
}
|
||||
|
||||
slot, groupCount, err := service.UpdateCustodyInfo(tc.inputSlot, tc.inputGroupCount)
|
||||
|
||||
if tc.expectedErr != "" {
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, true, strings.Contains(err.Error(), tc.expectedErr))
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expectedSlot, slot)
|
||||
require.Equal(t, tc.expectedGroupCount, groupCount)
|
||||
|
||||
if tc.expectedUpdated {
|
||||
require.NotNil(t, service.custodyInfo)
|
||||
require.Equal(t, tc.expectedSlot, service.custodyInfo.earliestAvailableSlot)
|
||||
require.Equal(t, tc.expectedGroupCount, service.custodyInfo.groupCount)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustodyGroupCountFromPeer(t *testing.T) {
|
||||
const (
|
||||
expectedENR uint64 = 7
|
||||
@@ -109,3 +280,59 @@ func TestCustodyGroupCountFromPeer(t *testing.T) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestCustodyGroupCountFromPeerENR(t *testing.T) {
|
||||
const (
|
||||
expectedENR uint64 = 7
|
||||
pid = "test-id"
|
||||
)
|
||||
|
||||
cgc := peerdas.Cgc(expectedENR)
|
||||
custodyRequirement := params.BeaconConfig().CustodyRequirement
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
record *enr.Record
|
||||
expected uint64
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "No ENR record",
|
||||
record: nil,
|
||||
expected: custodyRequirement,
|
||||
},
|
||||
{
|
||||
name: "Empty ENR record",
|
||||
record: &enr.Record{},
|
||||
expected: custodyRequirement,
|
||||
},
|
||||
{
|
||||
name: "Valid ENR with custody group count",
|
||||
record: func() *enr.Record {
|
||||
record := &enr.Record{}
|
||||
record.Set(cgc)
|
||||
return record
|
||||
}(),
|
||||
expected: expectedENR,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
peers := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
|
||||
if tc.record != nil {
|
||||
peers.Add(tc.record, pid, nil, network.DirOutbound)
|
||||
}
|
||||
|
||||
service := &Service{
|
||||
peers: peers,
|
||||
}
|
||||
|
||||
actual := service.custodyGroupCountFromPeerENR(pid)
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,7 +2,9 @@ package p2p
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"math"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -26,42 +28,54 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type ListenerRebooter interface {
|
||||
Listener
|
||||
RebootListener() error
|
||||
}
|
||||
type (
|
||||
// ListenerRebooter is an interface that extends the Listener interface
|
||||
// with the `RebootListener` method.
|
||||
ListenerRebooter interface {
|
||||
Listener
|
||||
RebootListener() error
|
||||
}
|
||||
|
||||
// Listener defines the discovery V5 network interface that is used
|
||||
// to communicate with other peers.
|
||||
type Listener interface {
|
||||
Self() *enode.Node
|
||||
Close()
|
||||
Lookup(enode.ID) []*enode.Node
|
||||
Resolve(*enode.Node) *enode.Node
|
||||
RandomNodes() enode.Iterator
|
||||
Ping(*enode.Node) error
|
||||
RequestENR(*enode.Node) (*enode.Node, error)
|
||||
LocalNode() *enode.LocalNode
|
||||
}
|
||||
// Listener defines the discovery V5 network interface that is used
|
||||
// to communicate with other peers.
|
||||
Listener interface {
|
||||
Self() *enode.Node
|
||||
Close()
|
||||
Lookup(enode.ID) []*enode.Node
|
||||
Resolve(*enode.Node) *enode.Node
|
||||
RandomNodes() enode.Iterator
|
||||
Ping(*enode.Node) error
|
||||
RequestENR(*enode.Node) (*enode.Node, error)
|
||||
LocalNode() *enode.LocalNode
|
||||
}
|
||||
|
||||
const (
|
||||
udp4 = iota
|
||||
udp6
|
||||
quicProtocol uint16
|
||||
|
||||
listenerWrapper struct {
|
||||
mu sync.RWMutex
|
||||
listener *discover.UDPv5
|
||||
listenerCreator func() (*discover.UDPv5, error)
|
||||
}
|
||||
|
||||
connectivityDirection int
|
||||
udpVersion int
|
||||
)
|
||||
|
||||
const quickProtocolEnrKey = "quic"
|
||||
|
||||
type quicProtocol uint16
|
||||
const (
|
||||
udp4 udpVersion = iota
|
||||
udp6
|
||||
)
|
||||
|
||||
const (
|
||||
inbound connectivityDirection = iota
|
||||
all
|
||||
)
|
||||
|
||||
// quicProtocol is the "quic" key, which holds the QUIC port of the node.
|
||||
func (quicProtocol) ENRKey() string { return quickProtocolEnrKey }
|
||||
|
||||
type listenerWrapper struct {
|
||||
mu sync.RWMutex
|
||||
listener *discover.UDPv5
|
||||
listenerCreator func() (*discover.UDPv5, error)
|
||||
}
|
||||
|
||||
func newListener(listenerCreator func() (*discover.UDPv5, error)) (*listenerWrapper, error) {
|
||||
rawListener, err := listenerCreator()
|
||||
if err != nil {
|
||||
@@ -222,22 +236,40 @@ func (s *Service) RefreshPersistentSubnets() {
// Get the sync subnet bitfield in our metadata.
currentBitSInMetadata := s.Metadata().SyncnetsBitfield()

// Is our sync bitvector record up to date?
isBitSUpToDate := bytes.Equal(bitS, inRecordBitS) && bytes.Equal(bitS, currentBitSInMetadata)

// Compare current epoch with the Fulu fork epoch.
fuluForkEpoch := params.BeaconConfig().FuluForkEpoch

custodyGroupCount, inRecordCustodyGroupCount := uint64(0), uint64(0)
if params.FuluEnabled() {
// Get the custody group count we store in our record.
inRecordCustodyGroupCount, err = peerdas.CustodyGroupCountFromRecord(record)
if err != nil {
log.WithError(err).Error("Could not retrieve custody group count")
return
}

custodyGroupCount, err = s.CustodyGroupCount()
if err != nil {
log.WithError(err).Error("Could not retrieve custody group count")
return
}
}

// We add `1` to the current epoch because we want to prepare one epoch before the Fulu fork.
if currentEpoch+1 < fuluForkEpoch {
// Is our custody group count record up to date?
isCustodyGroupCountUpToDate := custodyGroupCount == inRecordCustodyGroupCount

// Altair behaviour.
if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate {
if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate && (!params.FuluEnabled() || isCustodyGroupCountUpToDate) {
// Nothing to do, return early.
return
}

// Some data have changed, update our record and metadata.
s.updateSubnetRecordWithMetadataV2(bitV, bitS)
s.updateSubnetRecordWithMetadataV2(bitV, bitS, custodyGroupCount)

// Ping all peers to inform them of new metadata
s.pingPeersAndLogEnr()
@@ -245,16 +277,6 @@ func (s *Service) RefreshPersistentSubnets() {
return
}

// Get the current custody group count.
custodyGroupCount := s.cfg.CustodyInfo.ActualGroupCount()

// Get the custody group count we store in our record.
inRecordCustodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record)
if err != nil {
log.WithError(err).Error("Could not retrieve custody subnet count")
return
}

// Get the custody group count in our metadata.
inMetadataCustodyGroupCount := s.Metadata().CustodyGroupCount()
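Condensing the gating in the updated early-return line: the record only needs a refresh when one of the tracked values has drifted, and the custody group count only participates once Fulu is enabled. An illustrative predicate over the booleans computed in this hunk (not code from the tree):

// needsRefresh mirrors the updated early-return condition: with Fulu enabled
// the custody group count is part of the freshness check, otherwise only the
// attestation and sync committee bitfields matter.
func needsRefresh(isBitVUpToDate, isBitSUpToDate, isCgcUpToDate, fuluEnabled bool) bool {
	upToDate := isBitVUpToDate && isBitSUpToDate && (!fuluEnabled || isCgcUpToDate)
	return !upToDate
}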
@@ -276,29 +298,10 @@ func (s *Service) RefreshPersistentSubnets() {
// listenForNewNodes watches for new nodes in the network and adds them to the peerstore.
func (s *Service) listenForNewNodes() {
const (
minLogInterval = 1 * time.Minute
thresholdLimit = 5
searchPeriod = 20 * time.Second
)

peersSummary := func(threshold uint) (uint, uint) {
// Retrieve how many active peers we have.
activePeers := s.Peers().Active()
activePeerCount := uint(len(activePeers))

// Compute how many peers we are missing to reach the threshold.
if activePeerCount >= threshold {
return activePeerCount, 0
}

missingPeerCount := threshold - activePeerCount

return activePeerCount, missingPeerCount
}

var lastLogTime time.Time

iterator := s.dv5Listener.RandomNodes()
defer iterator.Close()
connectivityTicker := time.NewTicker(1 * time.Minute)
thresholdCount := 0
@@ -330,74 +333,148 @@ func (s *Service) listenForNewNodes() {
|
||||
continue
|
||||
}
|
||||
|
||||
iterator = s.dv5Listener.RandomNodes()
|
||||
thresholdCount = 0
|
||||
}
|
||||
default:
|
||||
if s.isPeerAtLimit(false /* inbound */) {
|
||||
// Pause the main loop for a period to stop looking
|
||||
// for new peers.
|
||||
if s.isPeerAtLimit(all) {
|
||||
// Pause the main loop for a period to stop looking for new peers.
|
||||
log.Trace("Not looking for peers, at peer limit")
|
||||
time.Sleep(pollingPeriod)
|
||||
continue
|
||||
}
|
||||
|
||||
// Compute the number of new peers we want to dial.
|
||||
activePeerCount, missingPeerCount := peersSummary(s.cfg.MaxPeers)
|
||||
|
||||
fields := logrus.Fields{
|
||||
"currentPeerCount": activePeerCount,
|
||||
"targetPeerCount": s.cfg.MaxPeers,
|
||||
// Return early if the discovery listener isn't set.
|
||||
if s.dv5Listener == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if missingPeerCount == 0 {
|
||||
log.Trace("Not looking for peers, at peer limit")
|
||||
time.Sleep(pollingPeriod)
|
||||
continue
|
||||
}
|
||||
func() {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, searchPeriod)
|
||||
defer cancel()
|
||||
|
||||
if time.Since(lastLogTime) > minLogInterval {
|
||||
lastLogTime = time.Now()
|
||||
log.WithFields(fields).Debug("Searching for new active peers")
|
||||
}
|
||||
|
||||
// Restrict dials if limit is applied.
|
||||
if flags.MaxDialIsActive() {
|
||||
maxConcurrentDials := uint(flags.Get().MaxConcurrentDials)
|
||||
missingPeerCount = min(missingPeerCount, maxConcurrentDials)
|
||||
}
|
||||
|
||||
// Search for new peers.
|
||||
wantedNodes := searchForPeers(iterator, batchPeriod, missingPeerCount, s.filterPeer)
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
for i := 0; i < len(wantedNodes); i++ {
|
||||
node := wantedNodes[i]
|
||||
peerInfo, _, err := convertToAddrInfo(node)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not convert to peer info")
|
||||
continue
|
||||
if err := s.findAndDialPeers(ctx); err != nil && !errors.Is(err, context.DeadlineExceeded) {
|
||||
log.WithError(err).Error("Failed to find and dial peers")
|
||||
}
|
||||
|
||||
if peerInfo == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Make sure that peer is not dialed too often, for each connection attempt there's a backoff period.
|
||||
s.Peers().RandomizeBackOff(peerInfo.ID)
|
||||
wg.Add(1)
|
||||
go func(info *peer.AddrInfo) {
|
||||
if err := s.connectWithPeer(s.ctx, *info); err != nil {
|
||||
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
|
||||
}
|
||||
wg.Done()
|
||||
}(peerInfo)
|
||||
}
|
||||
wg.Wait()
|
||||
}()
|
||||
}
|
||||
}
|
||||
}

// findAndDialPeers ensures that our node is connected to enough peers.
// If the threshold is met, this function returns immediately.
// Otherwise, it searches for new peers and dials them.
// If `ctx` is canceled while searching for peers, the search is stopped, but newly found peers are still dialed.
// In this case, the function returns an error.
func (s *Service) findAndDialPeers(ctx context.Context) error {
// Restrict dials if limit is applied.
maxConcurrentDials := math.MaxInt
if flags.MaxDialIsActive() {
maxConcurrentDials = flags.Get().MaxConcurrentDials
}

missingPeerCount := s.missingPeerCount(s.cfg.MaxPeers)
for missingPeerCount > 0 {
// Stop the search/dialing loop if the context is canceled.
if err := ctx.Err(); err != nil {
return err
}

peersToDial, err := func() ([]*enode.Node, error) {
ctx, cancel := context.WithTimeout(ctx, batchPeriod)
defer cancel()

peersToDial, err := s.findPeers(ctx, missingPeerCount)
if err != nil && !errors.Is(err, context.DeadlineExceeded) {
return nil, errors.Wrap(err, "find peers")
}

return peersToDial, nil
}()

if err != nil {
return err
}

dialedPeerCount := s.dialPeers(s.ctx, maxConcurrentDials, peersToDial)

if dialedPeerCount > missingPeerCount {
missingPeerCount = 0
continue
}

missingPeerCount -= dialedPeerCount
}

return nil
}

// findPeers finds new peers until `missingPeerCount` peers are found, `batchPeriod` is over,
// the peers iterator is exhausted or the context is canceled.
func (s *Service) findPeers(ctx context.Context, missingPeerCount uint) ([]*enode.Node, error) {
// Create a discovery iterator to find new peers.
iterator := s.dv5Listener.RandomNodes()

// `iterator.Next` can block indefinitely. `iterator.Close` unblocks it.
// So it is important to close the iterator when the context is done to ensure
// that the search does not hang indefinitely.
go func() {
<-ctx.Done()
iterator.Close()
}()

// Crawl the network for peers subscribed to the defective subnets.
nodeByNodeID := make(map[enode.ID]*enode.Node)
for missingPeerCount > 0 && iterator.Next() {
if ctx.Err() != nil {
peersToDial := make([]*enode.Node, 0, len(nodeByNodeID))
for _, node := range nodeByNodeID {
peersToDial = append(peersToDial, node)
}

return peersToDial, ctx.Err()
}

// Skip peer not matching the filter.
node := iterator.Node()
if !s.filterPeer(node) {
continue
}

// Remove duplicates, keeping the node with higher seq.
existing, ok := nodeByNodeID[node.ID()]
if ok && existing.Seq() > node.Seq() {
continue
}
nodeByNodeID[node.ID()] = node

// We found a new peer. Decrease the missing peer count.
missingPeerCount--
}

// Convert the map to a slice.
peersToDial := make([]*enode.Node, 0, len(nodeByNodeID))
for _, node := range nodeByNodeID {
peersToDial = append(peersToDial, node)
}

return peersToDial, nil
}

// missingPeerCount computes how many peers we are missing to reach the target peer count.
func (s *Service) missingPeerCount(targetCount uint) uint {
// Retrieve how many active peers we have.
activePeers := s.Peers().Active()
activePeerCount := uint(len(activePeers))

// Compute how many peers we are missing to reach the threshold.
missingPeerCount := uint(0)
if targetCount > activePeerCount {
missingPeerCount = targetCount - activePeerCount
}

return missingPeerCount
}
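As a side note, the iterator-closing trick in `findPeers` is a general pattern for any producer whose `Next` can block. A stripped-down sketch, with a hypothetical `blockingIterator` interface standing in for `enode.Iterator` (illustrative only, not code from this change):

// blockingIterator is a stand-in for enode.Iterator: Next may block until
// Close is called from another goroutine.
type blockingIterator interface {
	Next() bool
	Close()
}

// drainUntilDone visits items until the iterator is exhausted or ctx is done.
// Closing the iterator from the watcher goroutine is what unblocks Next.
func drainUntilDone(ctx context.Context, it blockingIterator, visit func()) {
	go func() {
		<-ctx.Done()
		it.Close()
	}()

	for it.Next() {
		if ctx.Err() != nil {
			return
		}
		visit()
	}
}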
func (s *Service) createListener(
|
||||
ipAddr net.IP,
|
||||
privKey *ecdsa.PrivateKey,
|
||||
@@ -476,7 +553,7 @@ func (s *Service) createLocalNode(
|
||||
ipAddr net.IP,
|
||||
udpPort, tcpPort, quicPort int,
|
||||
) (*enode.LocalNode, error) {
|
||||
db, err := enode.OpenDB("")
|
||||
db, err := enode.OpenDB(s.cfg.DiscoveryDir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not open node's peer database")
|
||||
}
|
||||
@@ -496,11 +573,6 @@ func (s *Service) createLocalNode(
|
||||
localNode.Set(quicEntry)
|
||||
}
|
||||
|
||||
if params.FuluEnabled() {
|
||||
custodyGroupCount := s.cfg.CustodyInfo.ActualGroupCount()
|
||||
localNode.Set(peerdas.Cgc(custodyGroupCount))
|
||||
}
|
||||
|
||||
localNode.SetFallbackIP(ipAddr)
|
||||
localNode.SetFallbackUDP(udpPort)
|
||||
|
||||
@@ -512,6 +584,16 @@ func (s *Service) createLocalNode(
|
||||
localNode = initializeAttSubnets(localNode)
|
||||
localNode = initializeSyncCommSubnets(localNode)
|
||||
|
||||
if params.FuluEnabled() {
|
||||
custodyGroupCount, err := s.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve custody group count")
|
||||
}
|
||||
|
||||
custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
|
||||
localNode.Set(custodyGroupCountEntry)
|
||||
}
|
||||
|
||||
if s.cfg != nil && s.cfg.HostAddress != "" {
|
||||
hostIP := net.ParseIP(s.cfg.HostAddress)
|
||||
if hostIP.To4() == nil && hostIP.To16() == nil {
|
||||
@@ -535,7 +617,10 @@ func (s *Service) createLocalNode(
|
||||
localNode.SetFallbackIP(firstIP)
|
||||
}
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"seq": localNode.Seq(),
|
||||
"id": localNode.ID(),
|
||||
}).Debug("Local node created")
|
||||
return localNode, nil
|
||||
}
|
||||
|
||||
@@ -551,7 +636,11 @@ func (s *Service) startDiscoveryV5(
|
||||
return nil, errors.Wrap(err, "could not create listener")
|
||||
}
|
||||
record := wrappedListener.Self()
|
||||
log.WithField("ENR", record.String()).Info("Started discovery v5")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"ENR": record.String(),
|
||||
"seq": record.Seq(),
|
||||
}).Info("Started discovery v5")
|
||||
return wrappedListener, nil
|
||||
}
|
||||
|
||||
@@ -562,8 +651,7 @@ func (s *Service) startDiscoveryV5(
|
||||
// 2. Peer hasn't been marked as 'bad'.
|
||||
// 3. Peer is not currently active or connected.
|
||||
// 4. Peer is ready to receive incoming connections.
|
||||
// 5. Peer's fork digest in their ENR matches that of
|
||||
// our localnodes.
|
||||
// 5. Peer's fork digest in their ENR matches that of our local node's.
|
||||
func (s *Service) filterPeer(node *enode.Node) bool {
|
||||
// Ignore nil node entries passed in.
|
||||
if node == nil {
|
||||
@@ -628,22 +716,24 @@ func (s *Service) filterPeer(node *enode.Node) bool {
// This checks our set max peers in our config, and
// determines whether our currently connected and
// active peers are above our set max peer limit.
func (s *Service) isPeerAtLimit(inbound bool) bool {
numOfConns := len(s.host.Network().Peers())
func (s *Service) isPeerAtLimit(direction connectivityDirection) bool {
maxPeers := int(s.cfg.MaxPeers)
// If we are measuring the limit for inbound peers
// we apply the high watermark buffer.
if inbound {

// If we are measuring the limit for inbound peers we apply the high watermark buffer.
if direction == inbound {
maxPeers += highWatermarkBuffer
maxInbound := s.peers.InboundLimit() + highWatermarkBuffer
currInbound := len(s.peers.InboundConnected())
// Exit early if we are at the inbound limit.
if currInbound >= maxInbound {
inboundCount := len(s.peers.InboundConnected())

// Return early if we are at the inbound limit.
if inboundCount >= maxInbound {
return true
}
}
activePeers := len(s.Peers().Active())
return activePeers >= maxPeers || numOfConns >= maxPeers

peerCount := len(s.host.Network().Peers())
activePeerCount := len(s.Peers().Active())
return activePeerCount >= maxPeers || peerCount >= maxPeers
}
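To make the updated limit check easier to follow, the new body can be read as a pure function over the counts involved. This is an illustrative restatement of the new `isPeerAtLimit` only (the interleaved lines above include the removed version), not code that exists in the tree:

// atPeerLimit mirrors the new isPeerAtLimit: inbound dials get an extra
// highWatermarkBuffer on top of the inbound limit, while the overall check
// compares active and connected peer counts against MaxPeers.
func atPeerLimit(direction connectivityDirection, maxPeers, inboundLimit, inboundCount, activeCount, connCount int) bool {
	if direction == inbound && inboundCount >= inboundLimit+highWatermarkBuffer {
		return true
	}
	return activeCount >= maxPeers || connCount >= maxPeers
}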
|
||||
// isBelowOutboundPeerThreshold checks if the number of outbound peers that
|
||||
@@ -901,7 +991,7 @@ func multiAddrFromString(address string) (ma.Multiaddr, error) {
|
||||
return ma.NewMultiaddr(address)
|
||||
}
|
||||
|
||||
func udpVersionFromIP(ipAddr net.IP) int {
|
||||
func udpVersionFromIP(ipAddr net.IP) udpVersion {
|
||||
if ipAddr.To4() != nil {
|
||||
return udp4
|
||||
}
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
@@ -65,6 +64,7 @@ func TestCreateListener(t *testing.T) {
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
listener, err := s.createListener(ipAddr, pkey)
|
||||
require.NoError(t, err)
|
||||
@@ -91,6 +91,7 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
|
||||
cfg: &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true},
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
bootListener, err := s.createListener(ipAddr, pkey)
|
||||
require.NoError(t, err)
|
||||
@@ -116,6 +117,7 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
|
||||
cfg: cfg,
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
listener, err := s.startDiscoveryV5(ipAddr, pkey)
|
||||
assert.NoError(t, err, "Could not start discovery for node")
|
||||
@@ -157,27 +159,27 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
name: "valid config",
|
||||
cfg: &Config{CustodyInfo: &peerdas.CustodyInfo{}},
|
||||
cfg: &Config{},
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "invalid host address",
|
||||
cfg: &Config{HostAddress: "invalid", CustodyInfo: &peerdas.CustodyInfo{}},
|
||||
cfg: &Config{HostAddress: "invalid"},
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "valid host address",
|
||||
cfg: &Config{HostAddress: "192.168.0.1", CustodyInfo: &peerdas.CustodyInfo{}},
|
||||
cfg: &Config{HostAddress: "192.168.0.1"},
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "invalid host DNS",
|
||||
cfg: &Config{HostDNS: "invalid", CustodyInfo: &peerdas.CustodyInfo{}},
|
||||
cfg: &Config{HostDNS: "invalid"},
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "valid host DNS",
|
||||
cfg: &Config{HostDNS: "www.google.com", CustodyInfo: &peerdas.CustodyInfo{}},
|
||||
cfg: &Config{HostDNS: "www.google.com"},
|
||||
expectedError: false,
|
||||
},
|
||||
}
|
||||
@@ -191,6 +193,8 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
quicPort = 3000
|
||||
)
|
||||
|
||||
custodyRequirement := params.BeaconConfig().CustodyRequirement
|
||||
|
||||
// Create a private key.
|
||||
address, privKey := createAddrAndPrivKey(t)
|
||||
|
||||
@@ -199,6 +203,7 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: tt.cfg,
|
||||
custodyInfo: &custodyInfo{groupCount: custodyRequirement},
|
||||
}
|
||||
|
||||
localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort, quicPort)
|
||||
@@ -210,7 +215,7 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedAddress := address
|
||||
if tt.cfg.HostAddress != "" {
|
||||
if tt.cfg != nil && tt.cfg.HostAddress != "" {
|
||||
expectedAddress = net.ParseIP(tt.cfg.HostAddress)
|
||||
}
|
||||
|
||||
@@ -250,8 +255,8 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
|
||||
// Check cgc config.
|
||||
custodyGroupCount := new(uint64)
|
||||
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(peerdas.CustodyGroupCountEnrKey, custodyGroupCount)))
|
||||
require.Equal(t, params.BeaconConfig().CustodyRequirement, *custodyGroupCount)
|
||||
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, custodyGroupCount)))
|
||||
require.Equal(t, custodyRequirement, *custodyGroupCount)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -263,7 +268,9 @@ func TestRebootDiscoveryListener(t *testing.T) {
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return s.createListener(ipAddr, pkey)
|
||||
}
|
||||
@@ -293,6 +300,8 @@ func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
|
||||
s := &Service{
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{},
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
node, err := s.createLocalNode(pkey, addr, 0, 0, 0)
|
||||
require.NoError(t, err)
|
||||
@@ -311,6 +320,7 @@ func TestMultiAddrConversion_OK(t *testing.T) {
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
listener, err := s.createListener(ipAddr, pkey)
|
||||
require.NoError(t, err)
|
||||
@@ -323,16 +333,16 @@ func TestMultiAddrConversion_OK(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStaticPeering_PeersAreAdded(t *testing.T) {
|
||||
const port = uint(6000)
|
||||
cs := startup.NewClockSynchronizer()
|
||||
cfg := &Config{
|
||||
MaxPeers: 30,
|
||||
ClockWaiter: cs,
|
||||
}
|
||||
port := 6000
|
||||
var staticPeers []string
|
||||
var hosts []host.Host
|
||||
// setup other nodes
|
||||
for i := 1; i <= 5; i++ {
|
||||
for i := uint(1); i <= 5; i++ {
|
||||
h, _, ipaddr := createHost(t, port+i)
|
||||
staticPeers = append(staticPeers, fmt.Sprintf("/ip4/%s/tcp/%d/p2p/%s", ipaddr, port+i, h.ID()))
|
||||
hosts = append(hosts, h)
|
||||
@@ -370,24 +380,28 @@ func TestStaticPeering_PeersAreAdded(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHostIsResolved(t *testing.T) {
|
||||
// ip.addr.tools - construct domain names that resolve to any given IP address
|
||||
// ex: 192-0-2-1.ip.addr.tools resolves to 192.0.2.1.
|
||||
exampleHost := "96-7-129-13.ip.addr.tools"
|
||||
exampleIP := "96.7.129.13"
|
||||
host := "dns.google"
|
||||
ips := map[string]bool{
|
||||
"8.8.8.8": true,
|
||||
"8.8.4.4": true,
|
||||
"2001:4860:4860::8888": true,
|
||||
"2001:4860:4860::8844": true,
|
||||
}
|
||||
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
HostDNS: exampleHost,
|
||||
HostDNS: host,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
ip, key := createAddrAndPrivKey(t)
|
||||
list, err := s.createListener(ip, key)
|
||||
require.NoError(t, err)
|
||||
|
||||
newIP := list.Self().IP()
|
||||
assert.Equal(t, exampleIP, newIP.String(), "Did not resolve to expected IP")
|
||||
assert.Equal(t, true, ips[newIP.String()], "Did not resolve to expected IP")
|
||||
}
|
||||
|
||||
func TestInboundPeerLimit(t *testing.T) {
|
||||
@@ -406,14 +420,14 @@ func TestInboundPeerLimit(t *testing.T) {
|
||||
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
|
||||
}
|
||||
|
||||
require.Equal(t, true, s.isPeerAtLimit(false), "not at limit for outbound peers")
|
||||
require.Equal(t, false, s.isPeerAtLimit(true), "at limit for inbound peers")
|
||||
require.Equal(t, true, s.isPeerAtLimit(all), "not at limit for outbound peers")
|
||||
require.Equal(t, false, s.isPeerAtLimit(inbound), "at limit for inbound peers")
|
||||
|
||||
for i := 0; i < highWatermarkBuffer; i++ {
|
||||
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
|
||||
}
|
||||
|
||||
require.Equal(t, true, s.isPeerAtLimit(true), "not at limit for inbound peers")
|
||||
require.Equal(t, true, s.isPeerAtLimit(inbound), "not at limit for inbound peers")
|
||||
}
|
||||
|
||||
func TestOutboundPeerThreshold(t *testing.T) {
|
||||
@@ -450,6 +464,7 @@ func TestUDPMultiAddress(t *testing.T) {
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
@@ -492,6 +507,35 @@ func TestMultipleDiscoveryAddresses(t *testing.T) {
|
||||
assert.Equal(t, true, ipv6Found, "IPv6 discovery address not found")
|
||||
}
|
||||
|
||||
func TestDiscoveryV5_SeqNumber(t *testing.T) {
|
||||
db, err := enode.OpenDB(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
_, key := createAddrAndPrivKey(t)
|
||||
node := enode.NewLocalNode(db, key)
|
||||
node.Set(enr.IPv4{127, 0, 0, 1})
|
||||
currentSeq := node.Seq()
|
||||
s := &Service{dv5Listener: mockListener{localNode: node}}
|
||||
_, err = s.DiscoveryAddresses()
|
||||
require.NoError(t, err)
|
||||
newSeq := node.Seq()
|
||||
require.Equal(t, currentSeq+1, newSeq) // node seq should increase when discovery starts
|
||||
|
||||
// see that the keys changing, will change the node seq
|
||||
_, keyTwo := createAddrAndPrivKey(t)
|
||||
nodeTwo := enode.NewLocalNode(db, keyTwo) // use the same db with different key
|
||||
nodeTwo.Set(enr.IPv6{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68})
|
||||
seqTwo := nodeTwo.Seq()
|
||||
assert.NotEqual(t, seqTwo, newSeq)
|
||||
sTwo := &Service{dv5Listener: mockListener{localNode: nodeTwo}}
|
||||
_, err = sTwo.DiscoveryAddresses()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, seqTwo+1, nodeTwo.Seq())
|
||||
|
||||
// see that reloading the same node with same key and db results in same seq number
|
||||
nodeThree := enode.NewLocalNode(db, key)
|
||||
assert.Equal(t, node.Seq(), nodeThree.Seq())
|
||||
}
|
||||
|
||||
func TestCorrectUDPVersion(t *testing.T) {
|
||||
assert.Equal(t, udp4, udpVersionFromIP(net.IPv4zero), "incorrect network version")
|
||||
assert.Equal(t, udp6, udpVersionFromIP(net.IPv6zero), "incorrect network version")
|
||||
@@ -621,7 +665,7 @@ func checkPingCountCacheMetadataRecord(
|
||||
if expected.custodyGroupCount != nil {
|
||||
// Check custody subnet count in ENR.
|
||||
var actualCustodyGroupCount uint64
|
||||
err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(peerdas.CustodyGroupCountEnrKey, &actualCustodyGroupCount))
|
||||
err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, &actualCustodyGroupCount))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, *expected.custodyGroupCount, actualCustodyGroupCount)
|
||||
|
||||
@@ -784,10 +828,11 @@ func TestRefreshPersistentSubnets(t *testing.T) {
|
||||
actualPingCount++
|
||||
return nil
|
||||
},
|
||||
cfg: &Config{UDPPort: 2000, CustodyInfo: &peerdas.CustodyInfo{}},
|
||||
cfg: &Config{UDPPort: 2000},
|
||||
peers: p2p.Peers(),
|
||||
genesisTime: time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
custodyInfo: &custodyInfo{groupCount: custodyGroupCount},
|
||||
}
|
||||
|
||||
// Set the listener and the metadata.
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
Package p2p implements the Ethereum consensus networking specification.
|
||||
|
||||
Canonical spec reference: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md
|
||||
Canonical spec reference: https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md
|
||||
|
||||
Prysm specific implementation design docs
|
||||
- Networking Design Doc: https://docs.google.com/document/d/1VyhobQRkEjEkEPxmmdWvaHfKWn0j6dEae_wLZlrFtfU/view
|
||||
|
||||
@@ -40,6 +40,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
|
||||
},
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
bootListener, err := s.createListener(ipAddr, pkey)
|
||||
require.NoError(t, err)
|
||||
@@ -73,6 +74,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
|
||||
cfg: cfg,
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: root,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
listener, err := s.startDiscoveryV5(ipAddr, pkey)
|
||||
assert.NoError(t, err, "Could not start discovery for node")
|
||||
@@ -134,6 +136,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
|
||||
cfg: &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true},
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
bootListener, err := s.createListener(ipAddr, pkey)
|
||||
require.NoError(t, err)
|
||||
@@ -168,6 +171,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
|
||||
cfg: cfg,
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
listener, err := s.startDiscoveryV5(ipAddr, pkey)
|
||||
assert.NoError(t, err, "Could not start discovery for node")
|
||||
|
||||
@@ -10,13 +10,17 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v6/time"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/patrickmn/go-cache"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
agentVersionKey = "AgentVersion"
|
||||
|
||||
// The time to wait for a status request.
|
||||
timeForStatus = 10 * time.Second
|
||||
)
|
||||
@@ -28,12 +32,15 @@ func peerMultiaddrString(conn network.Conn) string {
|
||||
}
|
||||
|
||||
func (s *Service) connectToPeer(conn network.Conn) {
|
||||
s.peers.SetConnectionState(conn.RemotePeer(), peers.Connected)
|
||||
remotePeer := conn.RemotePeer()
|
||||
|
||||
s.peers.SetConnectionState(remotePeer, peers.Connected)
|
||||
// Go through the handshake process.
|
||||
log.WithFields(logrus.Fields{
|
||||
"direction": conn.Stat().Direction.String(),
|
||||
"multiAddr": peerMultiaddrString(conn),
|
||||
"activePeers": len(s.peers.Active()),
|
||||
"agent": agentString(remotePeer, s.Host()),
|
||||
}).Debug("Initiate peer connection")
|
||||
}
|
||||
|
||||
@@ -61,6 +68,7 @@ func (s *Service) disconnectFromPeerOnError(
|
||||
"multiaddr": peerMultiaddrString(conn),
|
||||
"direction": conn.Stat().Direction.String(),
|
||||
"remainingActivePeers": len(s.peers.Active()),
|
||||
"agent": agentString(remotePeerID, s.Host()),
|
||||
}).
|
||||
Debug("Initiate peer disconnection")
|
||||
|
||||
@@ -72,8 +80,8 @@ func (s *Service) disconnectFromPeerOnError(
|
||||
// and validating the response from the peer.
|
||||
func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Context, id peer.ID) error) {
|
||||
// Peer map and lock to keep track of current connection attempts.
|
||||
var peerLock sync.Mutex
|
||||
peerMap := make(map[peer.ID]bool)
|
||||
peerLock := new(sync.Mutex)
|
||||
|
||||
// This is run at the start of each connection attempt, to ensure
|
||||
// that there aren't multiple inflight connection requests for the
|
||||
@@ -101,6 +109,19 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
s.host.Network().Notify(&network.NotifyBundle{
ConnectedF: func(_ network.Network, conn network.Conn) {
remotePeer := conn.RemotePeer()
log := log.WithField("peer", remotePeer)
direction := conn.Stat().Direction

// For some reason, right after a disconnection, this `ConnectedF` callback
// is called. We want to avoid processing this connection if the peer was
// disconnected too recently and we initiated this connection.
// This is very probably a bug in libp2p.
if direction == network.DirOutbound {
if err := s.wasDisconnectedTooRecently(remotePeer); err != nil {
log.WithError(err).Debug("Skipping connection handler")
return
}
}

// Connection handler must be non-blocking as part of libp2p design.
go func() {
@@ -126,53 +147,56 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
return
}

// Do not perform handshake on inbound dials.
if conn.Stat().Direction == network.DirInbound {
_, err := s.peers.ChainState(remotePeer)
peerExists := err == nil
currentTime := prysmTime.Now()

// Wait for peer to initiate handshake
time.Sleep(timeForStatus)

// Exit if we are disconnected with the peer.
if s.host.Network().Connectedness(remotePeer) != network.Connected {
if direction != network.DirInbound {
s.peers.SetConnectionState(conn.RemotePeer(), peers.Connecting)
if err := reqFunc(context.TODO(), conn.RemotePeer()); err != nil && !errors.Is(err, io.EOF) {
s.disconnectFromPeerOnError(conn, goodByeFunc, err)
return
}

// If peer hasn't sent a status request, we disconnect with them
if _, err := s.peers.ChainState(remotePeer); errors.Is(err, peerdata.ErrPeerUnknown) || errors.Is(err, peerdata.ErrNoPeerStatus) {
statusMessageMissing.Inc()
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state"))
return
}

if peerExists {
updated, err := s.peers.ChainStateLastUpdated(remotePeer)
if err != nil {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state last updated"))
return
}

// Exit if we don't receive any current status messages from peer.
if updated.IsZero() {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("is zero"))
return
}

if updated.Before(currentTime) {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("did not update"))
return
}
}

s.connectToPeer(conn)
return
}

s.peers.SetConnectionState(conn.RemotePeer(), peers.Connecting)
if err := reqFunc(context.TODO(), conn.RemotePeer()); err != nil && !errors.Is(err, io.EOF) {
s.disconnectFromPeerOnError(conn, goodByeFunc, err)
// The connection is inbound.
_, err = s.peers.ChainState(remotePeer)
peerExists := err == nil
currentTime := prysmTime.Now()

// Wait for peer to initiate handshake
time.Sleep(timeForStatus)

// Exit if we are disconnected with the peer.
if s.host.Network().Connectedness(remotePeer) != network.Connected {
return
}

// If peer hasn't sent a status request, we disconnect with them
if _, err := s.peers.ChainState(remotePeer); errors.Is(err, peerdata.ErrPeerUnknown) || errors.Is(err, peerdata.ErrNoPeerStatus) {
statusMessageMissing.Inc()
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state"))
return
}

if !peerExists {
s.connectToPeer(conn)
return
}

updated, err := s.peers.ChainStateLastUpdated(remotePeer)
if err != nil {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state last updated"))
return
}

// Exit if we don't receive any current status messages from peer.
if updated.IsZero() {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("is zero"))
return
}

if updated.Before(currentTime) {
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("did not update"))
return
}

@@ -189,9 +213,10 @@ func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id p
DisconnectedF: func(net network.Network, conn network.Conn) {
peerID := conn.RemotePeer()

log.WithFields(logrus.Fields{
log := log.WithFields(logrus.Fields{
"multiAddr": peerMultiaddrString(conn),
"direction": conn.Stat().Direction.String(),
"agent": agentString(peerID, s.Host()),
})
// Must be handled in a goroutine as this callback cannot be blocking.
go func() {
@@ -212,6 +237,12 @@ func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id p
}

s.peers.SetConnectionState(peerID, peers.Disconnected)
if err := s.peerDisconnectionTime.Add(peerID.String(), time.Now(), cache.DefaultExpiration); err != nil {
// The `DisconnectedF` function was already called for this peer less than `cache.DefaultExpiration` ago. Skip.
// (Very probably a bug in libp2p.)
log.WithError(err).Trace("Failed to set peer disconnection time")
return
}

// Only log disconnections if we were fully connected.
if priorState == peers.Connected {
@@ -222,3 +253,36 @@ func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id p
},
})
}

// wasDisconnectedTooRecently checks if the peer was disconnected within the last second.
func (s *Service) wasDisconnectedTooRecently(peerID peer.ID) error {
const disconnectionDurationThreshold = 1 * time.Second

peerDisconnectionTimeObj, ok := s.peerDisconnectionTime.Get(peerID.String())
if !ok {
return nil
}

peerDisconnectionTime, ok := peerDisconnectionTimeObj.(time.Time)
if !ok {
return errors.New("invalid peer disconnection time type")
}

timeSinceDisconnection := time.Since(peerDisconnectionTime)
if timeSinceDisconnection < disconnectionDurationThreshold {
return errors.Errorf("peer %s was disconnected too recently: %s", peerID, timeSinceDisconnection)
}

return nil
}

func agentString(pid peer.ID, hst host.Host) string {
rawVersion, storeErr := hst.Peerstore().Get(pid, agentVersionKey)

result, ok := rawVersion.(string)
if storeErr != nil || !ok {
result = ""
}

return result
}

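The disconnection guard above relies on github.com/patrickmn/go-cache semantics: Add fails when a non-expired entry already exists, which is what turns the cache into a de-duplication guard for repeated DisconnectedF callbacks. The standalone sketch below illustrates that pattern with hypothetical names (recentlyDisconnected, tooRecent); it is an assumption-level illustration of the technique, not the Prysm implementation.

package main

import (
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

// tooRecent reports whether pid was recorded as disconnected less than a second ago.
// recentlyDisconnected is a hypothetical stand-in for the service-level cache.
func tooRecent(recentlyDisconnected *cache.Cache, pid string) bool {
	raw, ok := recentlyDisconnected.Get(pid)
	if !ok {
		return false
	}
	disconnectedAt, ok := raw.(time.Time)
	if !ok {
		return false
	}
	return time.Since(disconnectedAt) < time.Second
}

func main() {
	// Entries expire after one second and are swept every minute,
	// mirroring the cache.New(1*time.Second, 1*time.Minute) call in the diff.
	recentlyDisconnected := cache.New(1*time.Second, 1*time.Minute)

	// Add returns an error if a live entry already exists, so a duplicate
	// disconnect event within the expiration window is detected here.
	if err := recentlyDisconnected.Add("peer-1", time.Now(), cache.DefaultExpiration); err != nil {
		fmt.Println("duplicate disconnect event:", err)
	}

	fmt.Println("too recent:", tooRecent(recentlyDisconnected, "peer-1"))
}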
@@ -7,6 +7,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
"github.com/ethereum/go-ethereum/p2p/enode"
@@ -32,13 +33,14 @@ type (
ConnectionHandler
PeersProvider
MetadataProvider
DataColumnsHandler
CustodyManager
}

// Accessor provides access to the Broadcaster and PeerManager interfaces.
// Accessor provides access to the Broadcaster, PeerManager and CustodyManager interfaces.
Accessor interface {
Broadcaster
PeerManager
CustodyManager
}

// Broadcaster broadcasts messages to peers over the p2p pubsub protocol.
@@ -49,7 +51,7 @@ type (
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
BroadcastDataColumn(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar, peersChecked ...chan<- bool) error
BroadcastDataColumn(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
}

// SetStreamHandler configures p2p to handle streams of a certain topic ID.
@@ -98,7 +100,7 @@ type (
NodeID() enode.ID
DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
RefreshPersistentSubnets()
FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error
AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
}

@@ -118,8 +120,11 @@ type (
MetadataSeq() uint64
}

// DataColumnsHandler abstracts some data columns related methods.
DataColumnsHandler interface {
// CustodyManager abstracts some data columns related methods.
CustodyManager interface {
EarliestAvailableSlot() (primitives.Slot, error)
CustodyGroupCount() (uint64, error)
UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
CustodyGroupCountFromPeer(peer.ID) uint64
}
)

@@ -54,7 +54,7 @@ type PeerData struct {
NextValidTime time.Time
// Chain related data.
MetaData metadata.Metadata
ChainState *ethpb.Status
ChainState *ethpb.StatusV2
ChainStateLastUpdated time.Time
ChainStateValidationError error
// Scorers internal data.

@@ -101,21 +101,24 @@ func (s *BadResponsesScorer) countNoLock(pid peer.ID) (int, error) {

// Increment increments the number of bad responses we have received from the given remote peer.
// If peer doesn't exist this method is no-op.
func (s *BadResponsesScorer) Increment(pid peer.ID) {
func (s *BadResponsesScorer) Increment(pid peer.ID) int {
if pid == "" {
return
return 0
}

s.store.Lock()
defer s.store.Unlock()

peerData, ok := s.store.PeerData(pid)
if !ok {
s.store.SetPeerData(pid, &peerdata.PeerData{
BadResponses: 1,
})
return
if ok {
peerData.BadResponses++
return peerData.BadResponses
}
peerData.BadResponses++

const badResponses = 1
peerData = &peerdata.PeerData{BadResponses: badResponses}
s.store.SetPeerData(pid, peerData)
return badResponses
}

// IsBadPeer states if the peer is to be considered bad.

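Returning the updated counter from Increment lets a caller react to the new score in the same call, without a second lookup under the lock. A minimal, self-contained sketch of that calling pattern follows; the tracker type and field names are illustrative stand-ins, not Prysm's scorer.

package main

import (
	"fmt"
	"sync"
)

// badResponseTracker is a toy stand-in for a peer scorer keyed by peer ID.
type badResponseTracker struct {
	mu     sync.Mutex
	counts map[string]int
}

// Increment bumps the counter for pid and returns the new value,
// mirroring the `Increment(pid) int` signature introduced in the diff.
func (t *badResponseTracker) Increment(pid string) int {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.counts[pid]++
	return t.counts[pid]
}

func main() {
	const maxBadResponses = 5 // same threshold the service declares

	tracker := &badResponseTracker{counts: make(map[string]int)}
	if newScore := tracker.Increment("peer-1"); newScore >= maxBadResponses {
		fmt.Println("would disconnect peer-1, score:", newScore)
	} else {
		fmt.Println("peer-1 score is now", newScore)
	}
}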
@@ -112,7 +112,7 @@ func (s *PeerStatusScorer) BadPeers() []peer.ID {
}

// SetPeerStatus sets chain state data for a given peer.
func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.Status, validationError error) {
func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.StatusV2, validationError error) {
s.store.Lock()
defer s.store.Unlock()

@@ -130,14 +130,14 @@ func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.Status, val
// PeerStatus gets the chain state of the given remote peer.
// This can return nil if there is no known chain state for the peer.
// This will error if the peer does not exist.
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.Status, error) {
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.StatusV2, error) {
s.store.RLock()
defer s.store.RUnlock()
return s.peerStatusNoLock(pid)
}

// peerStatusNoLock lock-free version of PeerStatus.
func (s *PeerStatusScorer) peerStatusNoLock(pid peer.ID) (*pb.Status, error) {
func (s *PeerStatusScorer) peerStatusNoLock(pid peer.ID) (*pb.StatusV2, error) {
if peerData, ok := s.store.PeerData(pid); ok {
if peerData.ChainState == nil {
return nil, peerdata.ErrNoPeerStatus

@@ -35,7 +35,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
name: "existent bad peer",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(0)
scorer.SetPeerStatus("peer1", &pb.Status{
scorer.SetPeerStatus("peer1", &pb.StatusV2{
HeadRoot: make([]byte, 32),
HeadSlot: 64,
}, p2ptypes.ErrWrongForkDigestVersion)
@@ -48,7 +48,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
name: "existent peer no head slot for the host node is known",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(0)
scorer.SetPeerStatus("peer1", &pb.Status{
scorer.SetPeerStatus("peer1", &pb.StatusV2{
HeadRoot: make([]byte, 32),
HeadSlot: 64,
}, nil)
@@ -61,7 +61,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
name: "existent peer head is before ours",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(128)
scorer.SetPeerStatus("peer1", &pb.Status{
scorer.SetPeerStatus("peer1", &pb.StatusV2{
HeadRoot: make([]byte, 32),
HeadSlot: 64,
}, nil)
@@ -75,12 +75,12 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
update: func(scorer *scorers.PeerStatusScorer) {
headSlot := primitives.Slot(128)
scorer.SetHeadSlot(headSlot)
scorer.SetPeerStatus("peer1", &pb.Status{
scorer.SetPeerStatus("peer1", &pb.StatusV2{
HeadRoot: make([]byte, 32),
HeadSlot: headSlot + 64,
}, nil)
// Set another peer to a higher score.
scorer.SetPeerStatus("peer2", &pb.Status{
scorer.SetPeerStatus("peer2", &pb.StatusV2{
HeadRoot: make([]byte, 32),
HeadSlot: headSlot + 128,
}, nil)
@@ -95,7 +95,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
update: func(scorer *scorers.PeerStatusScorer) {
headSlot := primitives.Slot(128)
scorer.SetHeadSlot(headSlot)
scorer.SetPeerStatus("peer1", &pb.Status{
scorer.SetPeerStatus("peer1", &pb.StatusV2{
HeadRoot: make([]byte, 32),
HeadSlot: headSlot + 64,
}, nil)
@@ -108,7 +108,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
name: "existent peer no max known slot",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(0)
scorer.SetPeerStatus("peer1", &pb.Status{
scorer.SetPeerStatus("peer1", &pb.StatusV2{
HeadRoot: make([]byte, 32),
HeadSlot: 0,
}, nil)
@@ -141,7 +141,7 @@ func TestScorers_PeerStatus_IsBadPeer(t *testing.T) {
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))

peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.StatusV2{}, p2ptypes.ErrWrongForkDigestVersion)
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid))
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
}
@@ -160,9 +160,9 @@ func TestScorers_PeerStatus_BadPeers(t *testing.T) {
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid3))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))

peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.Status{}, nil)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.StatusV2{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.StatusV2{}, nil)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.StatusV2{}, p2ptypes.ErrWrongForkDigestVersion)
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid1))
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
@@ -179,12 +179,12 @@ func TestScorers_PeerStatus_PeerStatus(t *testing.T) {
})
status, err := peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1")
require.ErrorContains(t, peerdata.ErrPeerUnknown.Error(), err)
assert.Equal(t, (*pb.Status)(nil), status)
assert.Equal(t, (*pb.StatusV2)(nil), status)

peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer1", &pb.Status{
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer1", &pb.StatusV2{
HeadSlot: 128,
}, nil)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer2", &pb.Status{
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer2", &pb.StatusV2{
HeadSlot: 128,
}, p2ptypes.ErrInvalidEpoch)
status, err = peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1")

@@ -205,14 +205,14 @@ func (p *Status) ENR(pid peer.ID) (*enr.Record, error) {
}

// SetChainState sets the chain state of the given remote peer.
func (p *Status) SetChainState(pid peer.ID, chainState *pb.Status) {
func (p *Status) SetChainState(pid peer.ID, chainState *pb.StatusV2) {
p.scorers.PeerStatusScorer().SetPeerStatus(pid, chainState, nil)
}

// ChainState gets the chain state of the given remote peer.
// This will error if the peer does not exist.
// This will error if there is no known chain state for the peer.
func (p *Status) ChainState(pid peer.ID) (*pb.Status, error) {
func (p *Status) ChainState(pid peer.ID) (*pb.StatusV2, error) {
return p.scorers.PeerStatusScorer().PeerStatus(pid)
}

@@ -393,7 +393,7 @@ func (p *Status) SetNextValidTime(pid peer.ID, nextTime time.Time) {
peerData.NextValidTime = nextTime
}

// RandomizeBackOff adds extra backoff period during which peer will not be dialed.
// RandomizeBackOff adds extra backoff period during which peer won't be dialed.
func (p *Status) RandomizeBackOff(pid peer.ID) {
p.store.Lock()
defer p.store.Unlock()

@@ -289,7 +289,7 @@ func TestPeerChainState(t *testing.T) {
require.NoError(t, err)

finalizedEpoch := primitives.Epoch(123)
p.SetChainState(id, &pb.Status{FinalizedEpoch: finalizedEpoch})
p.SetChainState(id, &pb.StatusV2{FinalizedEpoch: finalizedEpoch})

resChainState, err := p.ChainState(id)
require.NoError(t, err)
@@ -324,7 +324,7 @@ func TestPeerWithNilChainState(t *testing.T) {

resChainState, err := p.ChainState(id)
require.Equal(t, peerdata.ErrNoPeerStatus, err)
var nothing *pb.Status
var nothing *pb.StatusV2
require.Equal(t, resChainState, nothing)
}

@@ -616,7 +616,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {

// Peer 1
pid1 := addPeer(t, p, peers.Connected)
p.SetChainState(pid1, &pb.Status{
p.SetChainState(pid1, &pb.StatusV2{
HeadSlot: 3 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 3,
FinalizedRoot: mockroot3[:],
@@ -624,7 +624,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {

// Peer 2
pid2 := addPeer(t, p, peers.Connected)
p.SetChainState(pid2, &pb.Status{
p.SetChainState(pid2, &pb.StatusV2{
HeadSlot: 4 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 4,
FinalizedRoot: mockroot4[:],
@@ -632,7 +632,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {

// Peer 3
pid3 := addPeer(t, p, peers.Connected)
p.SetChainState(pid3, &pb.Status{
p.SetChainState(pid3, &pb.StatusV2{
HeadSlot: 5 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 5,
FinalizedRoot: mockroot5[:],
@@ -640,7 +640,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {

// Peer 4
pid4 := addPeer(t, p, peers.Connected)
p.SetChainState(pid4, &pb.Status{
p.SetChainState(pid4, &pb.StatusV2{
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 2,
FinalizedRoot: mockroot2[:],
@@ -648,7 +648,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {

// Peer 5
pid5 := addPeer(t, p, peers.Connected)
p.SetChainState(pid5, &pb.Status{
p.SetChainState(pid5, &pb.StatusV2{
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 2,
FinalizedRoot: mockroot2[:],
@@ -1012,7 +1012,7 @@ func TestStatus_BestPeer(t *testing.T) {
},
})
for _, peerConfig := range tt.peers {
p.SetChainState(addPeer(t, p, peers.Connected), &pb.Status{
p.SetChainState(addPeer(t, p, peers.Connected), &pb.StatusV2{
FinalizedEpoch: peerConfig.finalizedEpoch,
HeadSlot: peerConfig.headSlot,
})
@@ -1039,7 +1039,7 @@ func TestBestFinalized_returnsMaxValue(t *testing.T) {
for i := 0; i <= maxPeers+100; i++ {
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
p.SetChainState(peer.ID(rune(i)), &pb.Status{
p.SetChainState(peer.ID(rune(i)), &pb.StatusV2{
FinalizedEpoch: 10,
})
}
@@ -1062,7 +1062,7 @@ func TestStatus_BestNonFinalized(t *testing.T) {
for i, headSlot := range peerSlots {
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
p.SetChainState(peer.ID(rune(i)), &pb.Status{
p.SetChainState(peer.ID(rune(i)), &pb.StatusV2{
HeadSlot: headSlot,
})
}
@@ -1085,17 +1085,17 @@ func TestStatus_CurrentEpoch(t *testing.T) {
})
// Peer 1
pid1 := addPeer(t, p, peers.Connected)
p.SetChainState(pid1, &pb.Status{
p.SetChainState(pid1, &pb.StatusV2{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
})
// Peer 2
pid2 := addPeer(t, p, peers.Connected)
p.SetChainState(pid2, &pb.Status{
p.SetChainState(pid2, &pb.StatusV2{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 5,
})
// Peer 3
pid3 := addPeer(t, p, peers.Connected)
p.SetChainState(pid3, &pb.Status{
p.SetChainState(pid3, &pb.StatusV2{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
})

@@ -108,6 +108,8 @@ const (
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1

// V2 RPC Topics
// RPCStatusTopicV2 defines the v2 topic for the status rpc method.
RPCStatusTopicV2 = protocolPrefix + StatusMessageName + SchemaVersionV2
// RPCBlocksByRangeTopicV2 defines the v2 topic for the blocks by range rpc method.
RPCBlocksByRangeTopicV2 = protocolPrefix + BeaconBlocksByRangeMessageName + SchemaVersionV2
// RPCBlocksByRootTopicV2 defines the v2 topic for the blocks by root rpc method.
@@ -130,6 +132,7 @@ var (
RPCTopicMappings = map[string]interface{}{
// RPC Status Message
RPCStatusTopicV1: new(pb.Status),
RPCStatusTopicV2: new(pb.StatusV2),

// RPC Goodbye Message
RPCGoodByeTopicV1: new(primitives.SSZUint64),
@@ -201,6 +204,7 @@ var (

// Maps all the RPC messages which are to be updated in fulu.
fuluMapping = map[string]string{
StatusMessageName: SchemaVersionV2,
MetadataMessageName: SchemaVersionV3,
}

@@ -141,6 +141,11 @@ func TestTopicFromMessage_CorrectType(t *testing.T) {
require.NoError(t, err)
require.Equal(t, "/eth2/beacon_chain/req/beacon_blocks_by_range/2", topic)

// Modified in fulu fork.
topic, err = TopicFromMessage(StatusMessageName, fuluForkEpoch)
require.NoError(t, err)
require.Equal(t, "/eth2/beacon_chain/req/status/2", topic)

// Modified both in altair and fulu fork.
topic, err = TopicFromMessage(MetadataMessageName, fuluForkEpoch)
require.NoError(t, err)

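The fuluMapping entry above means that, from the Fulu fork onward, the status RPC resolves to schema version 2 instead of 1. The sketch below shows the general shape of that fork-gated resolution with a made-up fork epoch and a trimmed-down lookup; it is not Prysm's TopicFromMessage, only an illustration of the idea.

package main

import "fmt"

const (
	protocolPrefix  = "/eth2/beacon_chain/req"
	statusMessage   = "/status"
	schemaVersionV1 = "/1"
	schemaVersionV2 = "/2"

	// Hypothetical fork epoch, chosen only for this example.
	fuluForkEpoch = uint64(512)
)

// topicForStatus picks the schema version for the status RPC based on the epoch,
// echoing the fuluMapping override in the diff.
func topicForStatus(epoch uint64) string {
	version := schemaVersionV1
	if epoch >= fuluForkEpoch {
		version = schemaVersionV2
	}
	return protocolPrefix + statusMessage + version
}

func main() {
	fmt.Println(topicForStatus(0))             // /eth2/beacon_chain/req/status/1
	fmt.Println(topicForStatus(fuluForkEpoch)) // /eth2/beacon_chain/req/status/2
}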
@@ -16,6 +16,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
prysmnetwork "github.com/OffchainLabs/prysm/v6/network"
@@ -31,58 +32,73 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/multiformats/go-multiaddr"
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

var _ runtime.Service = (*Service)(nil)

// In the event that we are at our peer limit, we
// stop looking for new peers and instead poll
// for the current peer limit status for the time period
// defined below.
var pollingPeriod = 6 * time.Second
const (
// When looking for new nodes, if not enough nodes are found,
// we stop after this spent time.
batchPeriod = 2 * time.Second

// When looking for new nodes, if not enough nodes are found,
// we stop after this spent time.
var batchPeriod = 2 * time.Second
// maxBadResponses is the maximum number of bad responses from a peer before we stop talking to it.
maxBadResponses = 5
)

// Refresh rate of ENR set at twice per slot.
var refreshRate = slots.DivideSlotBy(2)
var (
// Refresh rate of ENR set at twice per slot.
refreshRate = slots.DivideSlotBy(2)

// maxBadResponses is the maximum number of bad responses from a peer before we stop talking to it.
const maxBadResponses = 5
// maxDialTimeout is the timeout for a single peer dial.
maxDialTimeout = params.BeaconConfig().RespTimeoutDuration()

// maxDialTimeout is the timeout for a single peer dial.
var maxDialTimeout = params.BeaconConfig().RespTimeoutDuration()
// In the event that we are at our peer limit, we
// stop looking for new peers and instead poll
// for the current peer limit status for the time period
// defined below.
pollingPeriod = 6 * time.Second
)

// Service for managing peer to peer (p2p) networking.
type Service struct {
started bool
isPreGenesis bool
pingMethod func(ctx context.Context, id peer.ID) error
pingMethodLock sync.RWMutex
cancel context.CancelFunc
cfg *Config
peers *peers.Status
addrFilter *multiaddr.Filters
ipLimiter *leakybucket.Collector
privKey *ecdsa.PrivateKey
metaData metadata.Metadata
pubsub *pubsub.PubSub
joinedTopics map[string]*pubsub.Topic
joinedTopicsLock sync.RWMutex
subnetsLock map[uint64]*sync.RWMutex
subnetsLockLock sync.Mutex // Lock access to subnetsLock
initializationLock sync.Mutex
dv5Listener ListenerRebooter
startupErr error
ctx context.Context
host host.Host
genesisTime time.Time
genesisValidatorsRoot []byte
activeValidatorCount uint64
}
type (
Service struct {
started bool
isPreGenesis bool
pingMethod func(ctx context.Context, id peer.ID) error
pingMethodLock sync.RWMutex
cancel context.CancelFunc
cfg *Config
peers *peers.Status
addrFilter *multiaddr.Filters
ipLimiter *leakybucket.Collector
privKey *ecdsa.PrivateKey
metaData metadata.Metadata
pubsub *pubsub.PubSub
joinedTopics map[string]*pubsub.Topic
joinedTopicsLock sync.RWMutex
subnetsLock map[uint64]*sync.RWMutex
subnetsLockLock sync.Mutex // Lock access to subnetsLock
initializationLock sync.Mutex
dv5Listener ListenerRebooter
startupErr error
ctx context.Context
host host.Host
genesisTime time.Time
genesisValidatorsRoot []byte
activeValidatorCount uint64
peerDisconnectionTime *cache.Cache
custodyInfo *custodyInfo
custodyInfoLock sync.RWMutex // Lock access to custodyInfo
}

custodyInfo struct {
earliestAvailableSlot primitives.Slot
groupCount uint64
}
)

// NewService initializes a new p2p service compatible with shared.Service interface. No
// connections are made until the Start function is called during the service registry startup.
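custodyInfo is paired with its own RWMutex in the struct above. The sketch below shows the read/write discipline such a pairing implies; the holder type and method names are hypothetical and only loosely echo the CustodyManager methods listed earlier in the interface diff.

package main

import (
	"fmt"
	"sync"
)

type custodyInfo struct {
	earliestAvailableSlot uint64
	groupCount            uint64
}

type custodyHolder struct {
	mu   sync.RWMutex
	info custodyInfo
}

// Snapshot returns a copy of the custody info under a read lock.
func (h *custodyHolder) Snapshot() custodyInfo {
	h.mu.RLock()
	defer h.mu.RUnlock()
	return h.info
}

// Update replaces the custody info under a write lock and returns the previous values.
func (h *custodyHolder) Update(slot, groupCount uint64) custodyInfo {
	h.mu.Lock()
	defer h.mu.Unlock()
	previous := h.info
	h.info = custodyInfo{earliestAvailableSlot: slot, groupCount: groupCount}
	return previous
}

func main() {
	h := &custodyHolder{}
	prev := h.Update(100, 8)
	fmt.Println("previous:", prev, "current:", h.Snapshot())
}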
@@ -111,16 +127,17 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
ipLimiter := leakybucket.NewCollector(ipLimit, ipBurst, 30*time.Second, true /* deleteEmptyBuckets */)

s := &Service{
ctx: ctx,
cancel: cancel,
cfg: cfg,
addrFilter: addrFilter,
ipLimiter: ipLimiter,
privKey: privKey,
metaData: metaData,
isPreGenesis: true,
joinedTopics: make(map[string]*pubsub.Topic, len(gossipTopicMappings)),
subnetsLock: make(map[uint64]*sync.RWMutex),
ctx: ctx,
cancel: cancel,
cfg: cfg,
addrFilter: addrFilter,
ipLimiter: ipLimiter,
privKey: privKey,
metaData: metaData,
isPreGenesis: true,
joinedTopics: make(map[string]*pubsub.Topic, len(gossipTopicMappings)),
subnetsLock: make(map[uint64]*sync.RWMutex),
peerDisconnectionTime: cache.New(1*time.Second, 1*time.Minute),
}

ipAddr := prysmnetwork.IPAddr()
@@ -251,6 +268,7 @@ func (s *Service) Start() {
"inboundTCP": inboundTCPCount,
"outboundTCP": outboundTCPCount,
"total": total,
"target": s.cfg.MaxPeers,
}

if features.Get().EnableQUIC {
@@ -403,7 +421,10 @@ func (s *Service) pingPeersAndLogEnr() {
defer s.pingMethodLock.RUnlock()

localENR := s.dv5Listener.Self()
log.WithField("ENR", localENR).Info("New node record")
log.WithFields(logrus.Fields{
"ENR": localENR,
"seq": localENR.Seq(),
}).Info("New node record")

if s.pingMethod == nil {
return
@@ -481,14 +502,17 @@ func (s *Service) connectWithPeer(ctx context.Context, info peer.AddrInfo) error
if info.ID == s.host.ID() {
return nil
}

if err := s.Peers().IsBad(info.ID); err != nil {
return errors.Wrap(err, "refused to connect to bad peer")
return errors.Wrap(err, "bad peer")
}

ctx, cancel := context.WithTimeout(ctx, maxDialTimeout)
defer cancel()

if err := s.host.Connect(ctx, info); err != nil {
s.Peers().Scorers().BadResponsesScorer().Increment(info.ID)
return err
s.downscorePeer(info.ID, "connectionError")
return errors.Wrap(err, "peer connect")
}
return nil
}
@@ -519,3 +543,8 @@ func (s *Service) connectToBootnodes() error {
func (s *Service) isInitialized() bool {
return !s.genesisTime.IsZero() && len(s.genesisValidatorsRoot) == 32
}

func (s *Service) downscorePeer(peerID peer.ID, reason string) {
newScore := s.Peers().Scorers().BadResponsesScorer().Increment(peerID)
log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
}

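connectWithPeer now bounds every dial with maxDialTimeout and routes failures through downscorePeer. The generic shape of that pattern is sketched below with a placeholder dial function instead of libp2p's host.Connect; the function and parameter names are assumptions made for the example.

package main

import (
	"context"
	"fmt"
	"time"
)

// dialWithTimeout bounds a single dial attempt and penalizes the peer on failure.
// dial and downscore are injected so the sketch stays independent of libp2p.
func dialWithTimeout(
	ctx context.Context,
	peerID string,
	maxDialTimeout time.Duration,
	dial func(ctx context.Context, peerID string) error,
	downscore func(peerID, reason string),
) error {
	ctx, cancel := context.WithTimeout(ctx, maxDialTimeout)
	defer cancel()

	if err := dial(ctx, peerID); err != nil {
		downscore(peerID, "connectionError")
		return fmt.Errorf("peer connect: %w", err)
	}
	return nil
}

func main() {
	// A dial that takes longer than the allowed timeout, so the failure path runs.
	slowDial := func(ctx context.Context, _ string) error {
		select {
		case <-time.After(200 * time.Millisecond):
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	logScore := func(peerID, reason string) { fmt.Println("downscored", peerID, "reason:", reason) }

	err := dialWithTimeout(context.Background(), "peer-1", 50*time.Millisecond, slowDial, logScore)
	fmt.Println("dial error:", err)
}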
@@ -13,6 +13,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
@@ -72,7 +73,7 @@ func (mockListener) RandomNodes() enode.Iterator {

func (mockListener) RebootListener() error { panic("implement me") }

func createHost(t *testing.T, port int) (host.Host, *ecdsa.PrivateKey, net.IP) {
func createHost(t *testing.T, port uint) (host.Host, *ecdsa.PrivateKey, net.IP) {
_, pkey := createAddrAndPrivKey(t)
ipAddr := net.ParseIP("127.0.0.1")
listen, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
@@ -113,6 +114,7 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
s, err := NewService(t.Context(), cfg)
require.NoError(t, err)
s.dv5Listener = &mockListener{}
s.custodyInfo = &custodyInfo{}
exitRoutine := make(chan bool)
go func() {
s.Start()
@@ -185,21 +187,34 @@ func TestService_Start_NoDiscoverFlag(t *testing.T) {
}

func TestListenForNewNodes(t *testing.T) {
const (
port = uint(2000)
testPollingPeriod = 1 * time.Second
peerCount = 5
)

params.SetupTestConfigCleanup(t)

// Setup bootnode.
notifier := &mock.MockStateNotifier{}
cfg := &Config{StateNotifier: notifier, PingInterval: testPingInterval, DisableLivenessCheck: true}
port := 2000
cfg.UDPPort = uint(port)
cfg := &Config{
StateNotifier: &mock.MockStateNotifier{},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
UDPPort: port,
}

_, pkey := createAddrAndPrivKey(t)
ipAddr := net.ParseIP("127.0.0.1")
genesisTime := prysmTime.Now()
var gvr [32]byte
var gvr [fieldparams.RootLength]byte

s := &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: gvr[:],
custodyInfo: &custodyInfo{},
}

bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
defer bootListener.Close()
@@ -210,35 +225,41 @@ func TestListenForNewNodes(t *testing.T) {

// Use shorter period for testing.
currentPeriod := pollingPeriod
pollingPeriod = 1 * time.Second
pollingPeriod = testPollingPeriod
defer func() {
pollingPeriod = currentPeriod
}()

bootNode := bootListener.Self()

var listeners []*listenerWrapper
var hosts []host.Host
// setup other nodes.
// Setup other nodes.
cs := startup.NewClockSynchronizer()
cfg = &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
MaxPeers: 30,
ClockWaiter: cs,
}
for i := 1; i <= 5; i++ {
listeners := make([]*listenerWrapper, 0, peerCount)
hosts := make([]host.Host, 0, peerCount)

for i := uint(1); i <= peerCount; i++ {
cfg = &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
MaxPeers: peerCount,
ClockWaiter: cs,
UDPPort: port + i,
TCPPort: port + i,
}

h, pkey, ipAddr := createHost(t, port+i)
cfg.UDPPort = uint(port + i)
cfg.TCPPort = uint(port + i)

s := &Service{
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: gvr[:],
custodyInfo: &custodyInfo{},
}

listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")
require.NoError(t, err, "Could not start discovery for node")

listeners = append(listeners, listener)
hosts = append(hosts, h)
}
@@ -263,19 +284,27 @@ func TestListenForNewNodes(t *testing.T) {

s, err = NewService(t.Context(), cfg)
require.NoError(t, err)
exitRoutine := make(chan bool)
go func() {
s.Start()
<-exitRoutine
}()
time.Sleep(1 * time.Second)
s.custodyInfo = &custodyInfo{}

require.NoError(t, cs.SetClock(startup.NewClock(genesisTime, gvr)))
go s.Start()

time.Sleep(4 * time.Second)
assert.Equal(t, 5, len(s.host.Network().Peers()), "Not all peers added to peerstore")
require.NoError(t, s.Stop())
exitRoutine <- true
err = cs.SetClock(startup.NewClock(genesisTime, gvr))
require.NoError(t, err, "Could not set clock in service")

actualPeerCount := len(s.host.Network().Peers())
for range 40 {
if actualPeerCount == peerCount {
break
}

time.Sleep(100 * time.Millisecond)
actualPeerCount = len(s.host.Network().Peers())
}

assert.Equal(t, peerCount, actualPeerCount, "Not all peers added to peerstore")

err = s.Stop()
require.NoError(t, err, "Failed to stop service")
}

func TestPeer_Disconnect(t *testing.T) {
@@ -378,7 +407,7 @@ func TestService_connectWithPeer(t *testing.T) {
return ps
}(),
info: peer.AddrInfo{ID: "bad"},
wantErr: "refused to connect to bad peer",
wantErr: "bad peer",
},
}
for _, tt := range tests {

@@ -2,6 +2,7 @@ package p2p

import (
"context"
"fmt"
"math"
"strings"
"sync"
@@ -11,6 +12,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
@@ -23,7 +25,6 @@ import (
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/sirupsen/logrus"
)

var (
@@ -57,249 +58,302 @@ const blobSubnetLockerVal = 110
const dataColumnSubnetVal = 150

// nodeFilter returns a function that filters nodes based on the subnet topic and subnet index.
func (s *Service) nodeFilter(topic string, index uint64) (func(node *enode.Node) bool, error) {
func (s *Service) nodeFilter(topic string, indices map[uint64]int) (func(node *enode.Node) (map[uint64]bool, error), error) {
switch {
case strings.Contains(topic, GossipAttestationMessage):
return s.filterPeerForAttSubnet(index), nil
return s.filterPeerForAttSubnet(indices), nil
case strings.Contains(topic, GossipSyncCommitteeMessage):
return s.filterPeerForSyncSubnet(index), nil
return s.filterPeerForSyncSubnet(indices), nil
case strings.Contains(topic, GossipBlobSidecarMessage):
return s.filterPeerForBlobSubnet(), nil
return s.filterPeerForBlobSubnet(indices), nil
case strings.Contains(topic, GossipDataColumnSidecarMessage):
return s.filterPeerForDataColumnsSubnet(index), nil
return s.filterPeerForDataColumnsSubnet(indices), nil
default:
return nil, errors.Errorf("no subnet exists for provided topic: %s", topic)
}
}

// searchForPeers performs a network search for peers subscribed to a particular subnet.
// It exits as soon as one of these conditions is met:
// - It looped through `batchSize` nodes.
// - It found `peersToFindCount` peers corresponding to the `filter` criteria.
// - Iterator is exhausted.
func searchForPeers(
iterator enode.Iterator,
batchPeriod time.Duration,
peersToFindCount uint,
filter func(node *enode.Node) bool,
) []*enode.Node {
nodeFromNodeID := make(map[enode.ID]*enode.Node)
start := time.Now()
// FindAndDialPeersWithSubnets ensures that our node is connected to at least `minimumPeersPerSubnet`
// peers for each subnet listed in `subnets`.
// If, for all subnets, the threshold is met, then this function immediately returns.
// Otherwise, it searches for new peers for defective subnets, and dials them.
// If `ctx` is canceled while searching for peers, search is stopped, but new found peers are still dialed.
// In this case, the function returns an error.
func (s *Service) FindAndDialPeersWithSubnets(
ctx context.Context,
topicFormat string,
digest [fieldparams.VersionLength]byte,
minimumPeersPerSubnet int,
subnets map[uint64]bool,
) error {
ctx, span := trace.StartSpan(ctx, "p2p.FindAndDialPeersWithSubnet")
defer span.End()

for time.Since(start) < batchPeriod && uint(len(nodeFromNodeID)) < peersToFindCount && iterator.Next() {
// Return early if the discovery listener isn't set.
if s.dv5Listener == nil {
return nil
}

// Restrict dials if limit is applied.
maxConcurrentDials := math.MaxInt
if flags.MaxDialIsActive() {
maxConcurrentDials = flags.Get().MaxConcurrentDials
}

defectiveSubnets := s.defectiveSubnets(topicFormat, digest, minimumPeersPerSubnet, subnets)
for len(defectiveSubnets) > 0 {
// Stop the search/dialing loop if the context is canceled.
if err := ctx.Err(); err != nil {
return err
}

peersToDial, err := func() ([]*enode.Node, error) {
ctx, cancel := context.WithTimeout(ctx, batchPeriod)
defer cancel()

peersToDial, err := s.findPeersWithSubnets(ctx, topicFormat, digest, minimumPeersPerSubnet, defectiveSubnets)
if err != nil && !errors.Is(err, context.DeadlineExceeded) {
return nil, errors.Wrap(err, "find peers with subnets")
}

return peersToDial, nil
}()

if err != nil {
return err
}

// Dial new peers in batches.
s.dialPeers(s.ctx, maxConcurrentDials, peersToDial)

defectiveSubnets = s.defectiveSubnets(topicFormat, digest, minimumPeersPerSubnet, subnets)
}

return nil
}

// findPeersWithSubnets finds peers subscribed to defective subnets in batches
// until enough peers are found or the context is canceled.
// It returns new peers found during the search.
func (s *Service) findPeersWithSubnets(
ctx context.Context,
topicFormat string,
digest [fieldparams.VersionLength]byte,
minimumPeersPerSubnet int,
defectiveSubnetsOrigin map[uint64]int,
) ([]*enode.Node, error) {
// Copy the defective subnets map to avoid modifying the original map.
defectiveSubnets := make(map[uint64]int, len(defectiveSubnetsOrigin))
for k, v := range defectiveSubnetsOrigin {
defectiveSubnets[k] = v
}

// Create a discovery iterator to find new peers.
iterator := s.dv5Listener.RandomNodes()

// `iterator.Next` can block indefinitely. `iterator.Close` unblocks it.
// So it is important to close the iterator when the context is done to ensure
// that the search does not hang indefinitely.
go func() {
<-ctx.Done()
iterator.Close()
}()

// Retrieve the filter function that will be used to filter nodes based on the defective subnets.
filter, err := s.nodeFilter(topicFormat, defectiveSubnets)
if err != nil {
return nil, errors.Wrap(err, "node filter")
}

// Crawl the network for peers subscribed to the defective subnets.
nodeByNodeID := make(map[enode.ID]*enode.Node)
for len(defectiveSubnets) > 0 && iterator.Next() {
if err := ctx.Err(); err != nil {
// Convert the map to a slice.
peersToDial := make([]*enode.Node, 0, len(nodeByNodeID))
for _, node := range nodeByNodeID {
peersToDial = append(peersToDial, node)
}

return peersToDial, err
}

// Get all needed subnets that the node is subscribed to.
// Skip nodes that are not subscribed to any of the defective subnets.
node := iterator.Node()

// Filter out nodes that do not meet the criteria.
if !filter(node) {
if !s.filterPeer(node) {
continue
}

nodeSubnets, err := filter(node)
if err != nil {
return nil, errors.Wrap(err, "filter node")
}
if len(nodeSubnets) == 0 {
continue
}

// Remove duplicates, keeping the node with higher seq.
prevNode, ok := nodeFromNodeID[node.ID()]
if ok && prevNode.Seq() > node.Seq() {
existing, ok := nodeByNodeID[node.ID()]
if ok && existing.Seq() > node.Seq() {
continue
}
nodeByNodeID[node.ID()] = node

nodeFromNodeID[node.ID()] = node
// We found a new peer. Modify the defective subnets map
// and the filter accordingly.
for subnet := range defectiveSubnets {
if !nodeSubnets[subnet] {
continue
}

defectiveSubnets[subnet]--

if defectiveSubnets[subnet] == 0 {
delete(defectiveSubnets, subnet)
}

filter, err = s.nodeFilter(topicFormat, defectiveSubnets)
if err != nil {
return nil, errors.Wrap(err, "node filter")
}
}
}

// Convert the map to a slice.
nodes := make([]*enode.Node, 0, len(nodeFromNodeID))
for _, node := range nodeFromNodeID {
nodes = append(nodes, node)
peersToDial := make([]*enode.Node, 0, len(nodeByNodeID))
for _, node := range nodeByNodeID {
peersToDial = append(peersToDial, node)
}

return nodes
return peersToDial, nil
}

// dialPeer dials a peer in a separate goroutine.
func (s *Service) dialPeer(ctx context.Context, wg *sync.WaitGroup, node *enode.Node) {
info, _, err := convertToAddrInfo(node)
if err != nil {
return
}

if info == nil {
return
}

wg.Add(1)
go func() {
if err := s.connectWithPeer(ctx, *info); err != nil {
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
// defectiveSubnets returns a map of subnets that have fewer than the minimum peer count.
func (s *Service) defectiveSubnets(
topicFormat string,
digest [fieldparams.VersionLength]byte,
minimumPeersPerSubnet int,
subnets map[uint64]bool,
) map[uint64]int {
missingCountPerSubnet := make(map[uint64]int, len(subnets))
for subnet := range subnets {
topic := fmt.Sprintf(topicFormat, digest, subnet) + s.Encoding().ProtocolSuffix()
peers := s.pubsub.ListPeers(topic)
peerCount := len(peers)
if peerCount < minimumPeersPerSubnet {
missingCountPerSubnet[subnet] = minimumPeersPerSubnet - peerCount
}
}

wg.Done()
}()
return missingCountPerSubnet
}

// FindPeersWithSubnet performs a network search for peers
// subscribed to a particular subnet. Then it tries to connect
// with those peers. This method will block until either:
// - the required amount of peers are found, or
// - the context is terminated.
// On some edge cases, this method may hang indefinitely while peers
// are actually found. In such a case, the user should cancel the context
// and re-run the method again.
func (s *Service) FindPeersWithSubnet(
ctx context.Context,
topic string,
index uint64,
threshold int,
) (bool, error) {
const minLogInterval = 1 * time.Minute
// dialPeers dials multiple peers concurrently up to `maxConcurrentDials` at a time.
// In case of a dial failure, it logs the error but continues dialing other peers.
func (s *Service) dialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
var mut sync.Mutex

ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
defer span.End()

span.SetAttributes(trace.Int64Attribute("index", int64(index))) // lint:ignore uintcast -- It's safe to do this for tracing.

if s.dv5Listener == nil {
// Return if discovery isn't set
return false, nil
}

topic += s.Encoding().ProtocolSuffix()
iterator := s.dv5Listener.RandomNodes()
defer iterator.Close()

filter, err := s.nodeFilter(topic, index)
if err != nil {
return false, errors.Wrap(err, "node filter")
}

peersSummary := func(topic string, threshold int) (int, int) {
// Retrieve how many peers we have for this topic.
peerCountForTopic := len(s.pubsub.ListPeers(topic))

// Compute how many peers we are missing to reach the threshold.
missingPeerCountForTopic := max(0, threshold-peerCountForTopic)

return peerCountForTopic, missingPeerCountForTopic
}

// Compute how many peers we are missing to reach the threshold.
peerCountForTopic, missingPeerCountForTopic := peersSummary(topic, threshold)

// Exit early if we have enough peers.
if missingPeerCountForTopic == 0 {
return true, nil
}

log := log.WithFields(logrus.Fields{
"topic": topic,
"targetPeerCount": threshold,
})

log.WithField("currentPeerCount", peerCountForTopic).Debug("Searching for new peers for a subnet - start")

lastLogTime := time.Now()

wg := new(sync.WaitGroup)
for {
// If the context is done, we can exit the loop. This is the unhappy path.
if err := ctx.Err(); err != nil {
return false, errors.Errorf(
"unable to find requisite number of peers for topic %s - only %d out of %d peers available after searching",
topic, peerCountForTopic, threshold,
)
counter := uint(0)
for start := 0; start < len(nodes); start += maxConcurrentDials {
if ctx.Err() != nil {
return counter
}

// Search for new peers in the network.
nodes := searchForPeers(iterator, batchPeriod, uint(missingPeerCountForTopic), filter)

// Restrict dials if limit is applied.
maxConcurrentDials := math.MaxInt
if flags.MaxDialIsActive() {
maxConcurrentDials = flags.Get().MaxConcurrentDials
}

// Dial the peers in batches.
for start := 0; start < len(nodes); start += maxConcurrentDials {
stop := min(start+maxConcurrentDials, len(nodes))
for _, node := range nodes[start:stop] {
s.dialPeer(ctx, wg, node)
var wg sync.WaitGroup
stop := min(start+maxConcurrentDials, len(nodes))
for _, node := range nodes[start:stop] {
log := log.WithField("nodeID", node.ID())
info, _, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Debug("Could not convert node to addr info")
continue
}

// Wait for all dials to be completed.
wg.Wait()
if info == nil {
log.Debug("Nil addr info")
continue
}

wg.Add(1)
go func() {
defer wg.Done()
if err := s.connectWithPeer(ctx, *info); err != nil {
log.WithError(err).WithField("info", info.String()).Debug("Could not connect with peer")
return
}

mut.Lock()
defer mut.Unlock()
counter++
}()
}

peerCountForTopic, missingPeerCountForTopic := peersSummary(topic, threshold)

// If we have enough peers, we can exit the loop. This is the happy path.
if missingPeerCountForTopic == 0 {
break
}

if time.Since(lastLogTime) > minLogInterval {
lastLogTime = time.Now()
log.WithField("currentPeerCount", peerCountForTopic).Debug("Searching for new peers for a subnet - continue")
}
wg.Wait()
}

log.WithField("currentPeerCount", threshold).Debug("Searching for new peers for a subnet - success")
return true, nil
return counter
}

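dialPeers caps concurrency by slicing the node list into windows of maxConcurrentDials and waiting on a WaitGroup per window, while a mutex protects the success counter. Below is a self-contained sketch of the same batching pattern over plain strings instead of enode records; the names and the injected dial function are assumptions for the example, not the production code.

package main

import (
	"context"
	"fmt"
	"sync"
)

// dialInBatches dials at most maxConcurrentDials targets at a time and
// returns how many dials succeeded. dial is injected to keep the sketch generic.
func dialInBatches(ctx context.Context, maxConcurrentDials int, targets []string, dial func(context.Context, string) error) uint {
	var (
		mu      sync.Mutex
		counter uint
	)

	for start := 0; start < len(targets); start += maxConcurrentDials {
		if ctx.Err() != nil {
			return counter
		}

		var wg sync.WaitGroup
		stop := min(start+maxConcurrentDials, len(targets))
		for _, target := range targets[start:stop] {
			wg.Add(1)
			go func(target string) {
				defer wg.Done()
				if err := dial(ctx, target); err != nil {
					return // a failed dial is logged and skipped in the real code
				}
				mu.Lock()
				defer mu.Unlock()
				counter++
			}(target)
		}
		wg.Wait() // finish the current window before starting the next one
	}

	return counter
}

func main() {
	dial := func(context.Context, string) error { return nil }
	n := dialInBatches(context.Background(), 2, []string{"a", "b", "c", "d", "e"}, dial)
	fmt.Println("successful dials:", n)
}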
// returns a method with filters peers specifically for a particular attestation subnet.
func (s *Service) filterPeerForAttSubnet(index uint64) func(node *enode.Node) bool {
return func(node *enode.Node) bool {
// filterPeerForAttSubnet returns a method that filters peers specifically for a particular attestation subnet.
func (s *Service) filterPeerForAttSubnet(indices map[uint64]int) func(node *enode.Node) (map[uint64]bool, error) {
return func(node *enode.Node) (map[uint64]bool, error) {
if !s.filterPeer(node) {
return false
return map[uint64]bool{}, nil
}

subnets, err := attSubnets(node.Record())
subnets, err := attestationSubnets(node.Record())
if err != nil {
return false
return nil, errors.Wrap(err, "attestation subnets")
}

return subnets[index]
return intersect(indices, subnets), nil
}
}

// returns a method with filters peers specifically for a particular sync subnet.
func (s *Service) filterPeerForSyncSubnet(index uint64) func(node *enode.Node) bool {
return func(node *enode.Node) bool {
func (s *Service) filterPeerForSyncSubnet(indices map[uint64]int) func(node *enode.Node) (map[uint64]bool, error) {
return func(node *enode.Node) (map[uint64]bool, error) {
if !s.filterPeer(node) {
return false
return map[uint64]bool{}, nil
}

subnets, err := syncSubnets(node.Record())
if err != nil {
return false
return nil, errors.Wrap(err, "sync subnets")
}
indExists := false
for _, comIdx := range subnets {
if comIdx == index {
indExists = true
break
}
}
return indExists

return intersect(indices, subnets), nil
}
}

// returns a method with filters peers specifically for a particular blob subnet.
// All peers are supposed to be subscribed to all blob subnets.
func (s *Service) filterPeerForBlobSubnet() func(_ *enode.Node) bool {
return func(_ *enode.Node) bool {
return true
func (s *Service) filterPeerForBlobSubnet(indices map[uint64]int) func(_ *enode.Node) (map[uint64]bool, error) {
result := make(map[uint64]bool, len(indices))
for i := range indices {
result[i] = true
}

return func(_ *enode.Node) (map[uint64]bool, error) {
return result, nil
}
}

// returns a method with filters peers specifically for a particular data column subnet.
func (s *Service) filterPeerForDataColumnsSubnet(index uint64) func(node *enode.Node) bool {
return func(node *enode.Node) bool {
func (s *Service) filterPeerForDataColumnsSubnet(indices map[uint64]int) func(node *enode.Node) (map[uint64]bool, error) {
return func(node *enode.Node) (map[uint64]bool, error) {
if !s.filterPeer(node) {
return false
return map[uint64]bool{}, nil
}

subnets, err := dataColumnSubnets(node.ID(), node.Record())
if err != nil {
return false
return nil, errors.Wrap(err, "data column subnets")
}

return subnets[index]
return intersect(indices, subnets), nil
}
}

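The filters above now return the set of wanted subnets a node covers, and the search loop decrements per-subnet deficits as matching peers are found. The deficit bookkeeping on its own looks roughly like the sketch below; the hard-coded peer counts stand in for a pubsub.ListPeers lookup and the function names are illustrative.

package main

import "fmt"

// defectiveSubnets returns, for each wanted subnet, how many peers are still missing
// to reach minimumPeersPerSubnet. peerCount stands in for a pubsub.ListPeers lookup.
func defectiveSubnets(wanted map[uint64]bool, minimumPeersPerSubnet int, peerCount func(subnet uint64) int) map[uint64]int {
	missing := make(map[uint64]int, len(wanted))
	for subnet := range wanted {
		if count := peerCount(subnet); count < minimumPeersPerSubnet {
			missing[subnet] = minimumPeersPerSubnet - count
		}
	}
	return missing
}

// applyFoundPeer decrements the deficit for every subnet the new peer covers,
// dropping subnets whose deficit reaches zero, as the search loop in the diff does.
func applyFoundPeer(missing map[uint64]int, peerSubnets map[uint64]bool) {
	for subnet := range missing {
		if peerSubnets[subnet] {
			missing[subnet]--
			if missing[subnet] == 0 {
				delete(missing, subnet)
			}
		}
	}
}

func main() {
	wanted := map[uint64]bool{1: true, 2: true, 3: true}
	counts := map[uint64]int{1: 2, 2: 0, 3: 1}
	missing := defectiveSubnets(wanted, 2, func(s uint64) int { return counts[s] })
	fmt.Println(missing) // map[2:2 3:1]

	applyFoundPeer(missing, map[uint64]bool{2: true, 3: true})
	fmt.Println(missing) // map[2:1]
}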
@@ -336,11 +390,23 @@ func (s *Service) updateSubnetRecordWithMetadata(bitV bitfield.Bitvector64) {
|
||||
// with a new value for a bitfield of subnets tracked. It also record's
|
||||
// the sync committee subnet in the enr. It also updates the node's
|
||||
// metadata by increasing the sequence number and the subnets tracked by the node.
|
||||
func (s *Service) updateSubnetRecordWithMetadataV2(bitVAtt bitfield.Bitvector64, bitVSync bitfield.Bitvector4) {
|
||||
func (s *Service) updateSubnetRecordWithMetadataV2(
|
||||
bitVAtt bitfield.Bitvector64,
|
||||
bitVSync bitfield.Bitvector4,
|
||||
custodyGroupCount uint64,
|
||||
) {
|
||||
entry := enr.WithEntry(attSubnetEnrKey, &bitVAtt)
|
||||
subEntry := enr.WithEntry(syncCommsSubnetEnrKey, &bitVSync)
|
||||
s.dv5Listener.LocalNode().Set(entry)
|
||||
s.dv5Listener.LocalNode().Set(subEntry)
|
||||
|
||||
localNode := s.dv5Listener.LocalNode()
|
||||
localNode.Set(entry)
|
||||
localNode.Set(subEntry)
|
||||
|
||||
if params.FuluEnabled() {
|
||||
custodyGroupCountEntry := enr.WithEntry(custodyGroupCountEnrKey, custodyGroupCount)
|
||||
localNode.Set(custodyGroupCountEntry)
|
||||
}
|
||||
|
||||
s.metaData = wrapper.WrappedMetadataV1(&pb.MetaDataV1{
|
||||
SeqNumber: s.metaData.SequenceNumber() + 1,
|
||||
Attnets: bitVAtt,
|
||||
@@ -367,10 +433,8 @@ func (s *Service) updateSubnetRecordWithMetadataV3(
|
||||
localNode.Set(syncSubnetsEntry)
|
||||
localNode.Set(custodyGroupCountEntry)
|
||||
|
||||
newSeqNumber := s.metaData.SequenceNumber() + 1
|
||||
|
||||
s.metaData = wrapper.WrappedMetadataV2(&pb.MetaDataV2{
|
||||
SeqNumber: newSeqNumber,
|
||||
SeqNumber: s.metaData.SequenceNumber() + 1,
|
||||
Attnets: bitVAtt,
|
||||
Syncnets: bitVSync,
|
||||
CustodyGroupCount: custodyGroupCount,
|
||||
@@ -475,43 +539,47 @@ func initializeSyncCommSubnets(node *enode.LocalNode) *enode.LocalNode {

// Reads the attestation subnets entry from a node's ENR and determines
// the committee indices of the attestation subnets the node is subscribed to.
func attSubnets(record *enr.Record) (map[uint64]bool, error) {
func attestationSubnets(record *enr.Record) (map[uint64]bool, error) {
	bitV, err := attBitvector(record)
	if err != nil {
		return nil, err
	}
	committeeIdxs := make(map[uint64]bool)
	// lint:ignore uintcast -- subnet count can be safely cast to int.
	if len(bitV) != byteCount(int(attestationSubnetCount)) {
		return committeeIdxs, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
		return nil, errors.Wrap(err, "att bit vector")
	}

	for i := uint64(0); i < attestationSubnetCount; i++ {
	// lint:ignore uintcast -- subnet count can be safely cast to int.
	if len(bitV) != byteCount(int(attestationSubnetCount)) {
		return nil, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
	}

	indices := make(map[uint64]bool, attestationSubnetCount)
	for i := range attestationSubnetCount {
		if bitV.BitAt(i) {
			committeeIdxs[i] = true
			indices[i] = true
		}
	}
	return committeeIdxs, nil

	return indices, nil
}

// Reads the sync subnets entry from a node's ENR and determines
// the committee indices of the sync subnets the node is subscribed to.
func syncSubnets(record *enr.Record) ([]uint64, error) {
func syncSubnets(record *enr.Record) (map[uint64]bool, error) {
	bitV, err := syncBitvector(record)
	if err != nil {
		return nil, err
		return nil, errors.Wrap(err, "sync bit vector")
	}

	// lint:ignore uintcast -- subnet count can be safely cast to int.
	if len(bitV) != byteCount(int(syncCommsSubnetCount)) {
		return []uint64{}, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
		return nil, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
	}
	var committeeIdxs []uint64
	for i := uint64(0); i < syncCommsSubnetCount; i++ {

	indices := make(map[uint64]bool, syncCommsSubnetCount)
	for i := range syncCommsSubnetCount {
		if bitV.BitAt(i) {
			committeeIdxs = append(committeeIdxs, i)
			indices[i] = true
		}
	}
	return committeeIdxs, nil
	return indices, nil
}

// Retrieve the data columns subnets from a node's ENR and node ID.
@@ -585,3 +653,16 @@ func byteCount(bitCount int) int {
	}
	return numOfBytes
}

// intersect intersects two maps and returns a new map containing only the keys
// that are present in both maps.
func intersect(left map[uint64]int, right map[uint64]bool) map[uint64]bool {
	result := make(map[uint64]bool, min(len(left), len(right)))
	for i := range left {
		if right[i] {
			result[i] = true
		}
	}

	return result
}

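attestationSubnets and syncSubnets above now return a map of subscribed subnet indices instead of a slice, after validating the bitvector length. A standalone sketch of that bitvector-to-index mapping, with the subnet count and byteCount helper redefined locally for illustration:

package p2pexample

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

// Assumed value for illustration; the real constant lives in the p2p package above.
const attestationSubnetCount = 64

// byteCount returns the number of bytes needed to hold bitCount bits.
func byteCount(bitCount int) int {
	return (bitCount + 7) / 8
}

// subnetsFromBitvector converts an attnets bitvector into the set of subnet
// indices whose bit is set, matching the map-based return type introduced above.
func subnetsFromBitvector(bitV bitfield.Bitvector64) (map[uint64]bool, error) {
	if len(bitV) != byteCount(attestationSubnetCount) {
		return nil, fmt.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
	}
	indices := make(map[uint64]bool, attestationSubnetCount)
	for i := uint64(0); i < attestationSubnetCount; i++ {
		if bitV.BitAt(i) {
			indices[i] = true
		}
	}
	return indices, nil
}

For example, a bitvector built with bitfield.NewBitvector64() and SetBitAt(3, true) decodes to map[uint64]bool{3: true}.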
@@ -5,7 +5,6 @@ import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"reflect"
	"testing"
	"time"

@@ -22,7 +21,7 @@ import (
	"github.com/prysmaticlabs/go-bitfield"
)

func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
	// Topology of this test:
	//
	//
@@ -37,7 +36,12 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
	// In our case: The node i is subscribed to subnet i, with i = 1, 2, 3

	// Define the genesis validators root, to ensure everybody is on the same network.
	const genesisValidatorRootStr = "0xdeadbeefcafecafedeadbeefcafecafedeadbeefcafecafedeadbeefcafecafe"
	const (
		genesisValidatorRootStr = "0xdeadbeefcafecafedeadbeefcafecafedeadbeefcafecafedeadbeefcafecafe"
		subnetCount             = 3
		minimumPeersPerSubnet   = 1
	)

	genesisValidatorsRoot, err := hex.DecodeString(genesisValidatorRootStr[2:])
	require.NoError(t, err)

@@ -70,6 +74,7 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
		cfg:                   &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000, DisableLivenessCheck: true, PingInterval: testPingInterval},
		genesisTime:           genesisTime,
		genesisValidatorsRoot: genesisValidatorsRoot,
		custodyInfo:           &custodyInfo{},
	}

	bootNodeForkDigest, err := bootNodeService.currentForkDigest()
@@ -87,13 +92,12 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {

	// Create 3 nodes, each subscribed to a different subnet.
	// Each node is connected to the bootstrap node.
	services := make([]*Service, 0, 3)
	services := make([]*Service, 0, subnetCount)

	for i := 1; i <= 3; i++ {
		subnet := uint64(i)
	for i := uint64(1); i <= subnetCount; i++ {
		service, err := NewService(ctx, &Config{
			Discv5BootStrapAddrs: []string{bootNodeENR},
			MaxPeers:             30,
			MaxPeers:             0, // Set to 0 to ensure that peers are discovered via subnets search, and not generic peers discovery.
			UDPPort:              uint(2000 + i),
			TCPPort:              uint(3000 + i),
			QUICPort:             uint(3000 + i),
@@ -105,6 +109,7 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {

		service.genesisTime = genesisTime
		service.genesisValidatorsRoot = genesisValidatorsRoot
		service.custodyInfo = &custodyInfo{}

		nodeForkDigest, err := service.currentForkDigest()
		require.NoError(t, err)
@@ -115,12 +120,13 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {

		// Set the ENR `attnets`, used by Prysm to filter peers by subnet.
		bitV := bitfield.NewBitvector64()
		bitV.SetBitAt(subnet, true)
		bitV.SetBitAt(i, true)
		entry := enr.WithEntry(attSubnetEnrKey, &bitV)
		service.dv5Listener.LocalNode().Set(entry)

		// Join and subscribe to the subnet, needed by libp2p.
		topic, err := service.pubsub.Join(fmt.Sprintf(AttestationSubnetTopicFormat, bootNodeForkDigest, subnet) + "/ssz_snappy")
		topicName := fmt.Sprintf(AttestationSubnetTopicFormat, bootNodeForkDigest, i) + "/ssz_snappy"
		topic, err := service.pubsub.Join(topicName)
		require.NoError(t, err)

		_, err = topic.Subscribe()
@@ -153,6 +159,7 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {

	service.genesisTime = genesisTime
	service.genesisValidatorsRoot = genesisValidatorsRoot
	service.custodyInfo = &custodyInfo{}

	service.Start()
	defer func() {
@@ -160,37 +167,18 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
		require.NoError(t, err)
	}()

	// Look up 3 different subnets.
	exists := make([]bool, 0, 3)
	for i := 1; i <= 3; i++ {
		subnet := uint64(i)
		topic := fmt.Sprintf(AttestationSubnetTopicFormat, bootNodeForkDigest, subnet)
	subnets := map[uint64]bool{1: true, 2: true, 3: true}
	defectiveSubnets := service.defectiveSubnets(AttestationSubnetTopicFormat, bootNodeForkDigest, minimumPeersPerSubnet, subnets)
	require.Equal(t, subnetCount, len(defectiveSubnets))

		exist := false
	ctxWithTimeOut, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

		// This for loop is used to ensure we don't get stuck in `FindPeersWithSubnet`.
		// Read the documentation of `FindPeersWithSubnet` for more details.
		for j := 0; j < 3; j++ {
			ctxWithTimeOut, cancel := context.WithTimeout(ctx, 5*time.Second)
			defer cancel()
	err = service.FindAndDialPeersWithSubnets(ctxWithTimeOut, AttestationSubnetTopicFormat, bootNodeForkDigest, minimumPeersPerSubnet, subnets)
	require.NoError(t, err)

			exist, err = service.FindPeersWithSubnet(ctxWithTimeOut, topic, subnet, 1)
			require.NoError(t, err)

			if exist {
				break
			}
		}

		require.NoError(t, err)
		exists = append(exists, exist)

	}

	// Check if all peers are found.
	for _, exist := range exists {
		require.Equal(t, true, exist, "Peer with subnet doesn't exist")
	}
	defectiveSubnets = service.defectiveSubnets(AttestationSubnetTopicFormat, bootNodeForkDigest, minimumPeersPerSubnet, subnets)
	require.Equal(t, 0, len(defectiveSubnets))
}

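The rewritten test drops the per-subnet FindPeersWithSubnet probe in favour of FindAndDialPeersWithSubnets plus a defectiveSubnets check. A rough sketch of that retry shape against a hypothetical interface (method names and parameters mirror the hunk above, but the interface itself and the helper are illustrative):

package p2pexample

import (
	"context"
	"time"
)

// subnetDialer is a hypothetical stand-in for the p2p service surface used in
// the test above; the real methods live on *Service.
type subnetDialer interface {
	FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [4]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error
	DefectiveSubnets(topicFormat string, digest [4]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) map[uint64]bool
}

// dialUntilCovered keeps dialing until every requested subnet has enough peers
// or the parent context expires, mirroring the shape of the test loop above.
func dialUntilCovered(ctx context.Context, d subnetDialer, topicFormat string, digest [4]byte, minPeers int, subnets map[uint64]bool) error {
	for len(d.DefectiveSubnets(topicFormat, digest, minPeers, subnets)) > 0 {
		attemptCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
		err := d.FindAndDialPeersWithSubnets(attemptCtx, topicFormat, digest, minPeers, subnets)
		cancel()
		if err != nil {
			return err
		}
		if err := ctx.Err(); err != nil {
			return err
		}
	}
	return nil
}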
func Test_AttSubnets(t *testing.T) {
@@ -305,37 +293,34 @@ func Test_AttSubnets(t *testing.T) {
			wantErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			db, err := enode.OpenDB("")
			assert.NoError(t, err)
			require.NoError(t, err)

			priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
			assert.NoError(t, err)
			require.NoError(t, err)

			convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
			assert.NoError(t, err)
			require.NoError(t, err)

			localNode := enode.NewLocalNode(db, convertedKey)
			record := tt.record(localNode)

			got, err := attSubnets(record)
			got, err := attestationSubnets(record)
			if (err != nil) != tt.wantErr {
				t.Errorf("syncSubnets() error = %v, wantErr %v", err, tt.wantErr)
				t.Errorf("attestationSubnets() error = %v, wantErr %v", err, tt.wantErr)
				return
			}

			if tt.wantErr {
				assert.ErrorContains(t, tt.errContains, err)
				require.ErrorContains(t, tt.errContains, err)
			}

			want := make(map[uint64]bool, len(tt.want))
			require.Equal(t, len(tt.want), len(got))
			for _, subnet := range tt.want {
				want[subnet] = true
			}

			if !reflect.DeepEqual(got, want) {
				t.Errorf("syncSubnets() got = %v, want %v", got, want)
				require.Equal(t, true, got[subnet])
			}
		})
	}
@@ -494,11 +479,14 @@ func Test_SyncSubnets(t *testing.T) {
				t.Errorf("syncSubnets() error = %v, wantErr %v", err, tt.wantErr)
				return
			}

			if tt.wantErr {
				assert.ErrorContains(t, tt.errContains, err)
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("syncSubnets() got = %v, want %v", got, tt.want)

			require.Equal(t, len(tt.want), len(got))
			for _, subnet := range tt.want {
				require.Equal(t, true, got[subnet])
			}
		})
	}

@@ -15,6 +15,7 @@ go_library(
    importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing",
    visibility = [
        "//beacon-chain:__subpackages__",
        "//testing:__subpackages__",
    ],
    deps = [
        "//beacon-chain/core/peerdas:go_default_library",
@@ -24,6 +25,7 @@ go_library(
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/metadata:go_default_library",
        "//testing/require:go_default_library",

@@ -7,6 +7,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
	"github.com/ethereum/go-ethereum/p2p/enode"
@@ -68,9 +69,9 @@ func (*FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
	return nil, nil
}

// FindPeersWithSubnet mocks the p2p func.
func (*FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
	return false, nil
// FindAndDialPeersWithSubnets mocks the p2p func.
func (*FakeP2P) FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error {
	return nil
}

// RefreshPersistentSubnets mocks the p2p func.
@@ -167,7 +168,7 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
}

// BroadcastDataColumn -- fake.
func (*FakeP2P) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar, _ ...chan<- bool) error {
func (*FakeP2P) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
	return nil
}

@@ -196,6 +197,22 @@ func (*FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.Disc
	return true, 0
}

// EarliestAvailableSlot -- fake.
func (*FakeP2P) EarliestAvailableSlot() (primitives.Slot, error) {
	return 0, nil
}

// CustodyGroupCount -- fake.
func (*FakeP2P) CustodyGroupCount() (uint64, error) {
	return 0, nil
}

// UpdateCustodyInfo -- fake.
func (s *FakeP2P) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
	return earliestAvailableSlot, custodyGroupCount, nil
}

// CustodyGroupCountFromPeer -- fake.
func (*FakeP2P) CustodyGroupCountFromPeer(peer.ID) uint64 {
	return 0
}

@@ -63,7 +63,7 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
}

// BroadcastDataColumn broadcasts a data column for mock.
func (m *MockBroadcaster) BroadcastDataColumn([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar, ...chan<- bool) error {
func (m *MockBroadcaster) BroadcastDataColumn([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
	m.BroadcastCalled.Store(true)
	return nil
}

@@ -4,6 +4,7 @@ import (
	"context"
	"errors"

	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/libp2p/go-libp2p/core/host"
@@ -56,9 +57,9 @@ func (m *MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
// RefreshPersistentSubnets .
func (*MockPeerManager) RefreshPersistentSubnets() {}

// FindPeersWithSubnet .
func (*MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
	return true, nil
// FindAndDialPeersWithSubnets .
func (*MockPeerManager) FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error {
	return nil
}

// AddPingMethod .

@@ -65,7 +65,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
		}
		m.peers.Add(createENR(), id0, ma0, network.DirInbound)
		m.peers.SetConnectionState(id0, peers.Connected)
		m.peers.SetChainState(id0, &pb.Status{FinalizedEpoch: 10})
		m.peers.SetChainState(id0, &pb.StatusV2{FinalizedEpoch: 10})
		id1, err := peer.Decode(MockRawPeerId1)
		if err != nil {
			log.WithError(err).Debug("Cannot decode")
@@ -76,7 +76,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
		}
		m.peers.Add(createENR(), id1, ma1, network.DirOutbound)
		m.peers.SetConnectionState(id1, peers.Connected)
		m.peers.SetChainState(id1, &pb.Status{FinalizedEpoch: 11})
		m.peers.SetChainState(id1, &pb.StatusV2{FinalizedEpoch: 11})
	}
	return m.peers
}

@@ -6,6 +6,7 @@ import (
	"bytes"
	"context"
	"fmt"
	"sync"
	"sync/atomic"
	"testing"
	"time"
@@ -17,6 +18,7 @@ import (
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
	"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -48,16 +50,19 @@ const (

// TestP2P represents a p2p implementation that can be used for testing.
type TestP2P struct {
	t               *testing.T
	BHost           host.Host
	EnodeID         enode.ID
	pubsub          *pubsub.PubSub
	joinedTopics    map[string]*pubsub.Topic
	BroadcastCalled atomic.Bool
	DelaySend       bool
	Digest          [4]byte
	peers           *peers.Status
	LocalMetadata   metadata.Metadata
	t                     *testing.T
	BHost                 host.Host
	EnodeID               enode.ID
	pubsub                *pubsub.PubSub
	joinedTopics          map[string]*pubsub.Topic
	BroadcastCalled       atomic.Bool
	DelaySend             bool
	Digest                [4]byte
	peers                 *peers.Status
	LocalMetadata         metadata.Metadata
	custodyInfoMut        sync.RWMutex // protects custodyGroupCount and earliestAvailableSlot
	earliestAvailableSlot primitives.Slot
	custodyGroupCount     uint64
}

// NewTestP2P initializes a new p2p test service.
@@ -224,7 +229,7 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
}

// BroadcastDataColumn broadcasts a data column for mock.
func (p *TestP2P) BroadcastDataColumn([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar, ...chan<- bool) error {
func (p *TestP2P) BroadcastDataColumn([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
	p.BroadcastCalled.Store(true)
	return nil
}
@@ -408,9 +413,9 @@ func (p *TestP2P) Peers() *peers.Status {
	return p.peers
}

// FindPeersWithSubnet mocks the p2p func.
func (*TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
	return false, nil
// FindAndDialPeersWithSubnets mocks the p2p func.
func (*TestP2P) FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error {
	return nil
}

// RefreshPersistentSubnets mocks the p2p func.
@@ -461,6 +466,34 @@ func (*TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.Disc
	return true, 0
}

// EarliestAvailableSlot .
func (s *TestP2P) EarliestAvailableSlot() (primitives.Slot, error) {
	s.custodyInfoMut.RLock()
	defer s.custodyInfoMut.RUnlock()

	return s.earliestAvailableSlot, nil
}

// CustodyGroupCount .
func (s *TestP2P) CustodyGroupCount() (uint64, error) {
	s.custodyInfoMut.RLock()
	defer s.custodyInfoMut.RUnlock()

	return s.custodyGroupCount, nil
}

// UpdateCustodyInfo .
func (s *TestP2P) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
	s.custodyInfoMut.Lock()
	defer s.custodyInfoMut.Unlock()

	s.earliestAvailableSlot = earliestAvailableSlot
	s.custodyGroupCount = custodyGroupCount

	return s.earliestAvailableSlot, s.custodyGroupCount, nil
}

// CustodyGroupCountFromPeer .
func (s *TestP2P) CustodyGroupCountFromPeer(pid peer.ID) uint64 {
	// By default, we assume the peer custodies the minimum number of groups.
	custodyRequirement := params.BeaconConfig().CustodyRequirement

@@ -985,6 +985,16 @@ func (s *Service) beaconEndpoints(
			handler: server.GetPendingPartialWithdrawals,
			methods: []string{http.MethodGet},
		},
		{
			template: "/eth/v1/beacon/states/{state_id}/proposer_lookahead",
			name:     namespace + ".GetProposerLookahead",
			middleware: []middleware.Middleware{
				middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
				middleware.AcceptEncodingHeaderHandler(),
			},
			handler: server.GetProposerLookahead,
			methods: []string{http.MethodGet},
		},
	}
}

@@ -1138,7 +1148,6 @@ func (s *Service) eventsEndpoints() []endpoint {
			name: namespace + ".StreamEvents",
			middleware: []middleware.Middleware{
				middleware.AcceptHeaderHandler([]string{api.EventStreamMediaType}),
				middleware.AcceptEncodingHeaderHandler(),
			},
			handler: server.StreamEvents,
			methods: []string{http.MethodGet},

@@ -32,6 +32,7 @@ func Test_endpoints(t *testing.T) {
		"/eth/v1/beacon/states/{state_id}/pending_deposits":            {http.MethodGet},
		"/eth/v1/beacon/states/{state_id}/pending_partial_withdrawals": {http.MethodGet},
		"/eth/v1/beacon/states/{state_id}/pending_consolidations":      {http.MethodGet},
		"/eth/v1/beacon/states/{state_id}/proposer_lookahead":          {http.MethodGet},
		"/eth/v1/beacon/headers":                                       {http.MethodGet},
		"/eth/v1/beacon/headers/{block_id}":                            {http.MethodGet},
		"/eth/v1/beacon/blinded_blocks":                                {http.MethodPost},

@@ -8,6 +8,7 @@ go_library(
        "handlers_state.go",
        "handlers_validator.go",
        "log.go",
        "metrics.go",
        "server.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/beacon",
@@ -61,6 +62,8 @@ go_library(
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
@@ -123,6 +126,7 @@ go_test(
        "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@com_github_stretchr_testify//mock:go_default_library",

@@ -9,6 +9,7 @@ import (
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/OffchainLabs/prysm/v6/api"
	"github.com/OffchainLabs/prysm/v6/api/server/structs"
@@ -658,6 +659,12 @@ func (s *Server) PublishBlock(w http.ResponseWriter, r *http.Request) {
// broadcast all given signed blobs. The broadcast behaviour may be adjusted via the
// `broadcast_validation` query parameter.
func (s *Server) PublishBlockV2(w http.ResponseWriter, r *http.Request) {
	start := time.Now()
	defer func() {
		duration := time.Since(start).Milliseconds()
		publishBlockV2Duration.Observe(float64(duration))
	}()

	ctx, span := trace.StartSpan(r.Context(), "beacon.PublishBlockV2")
	defer span.End()
	if shared.IsSyncing(r.Context(), w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
@@ -1304,7 +1311,7 @@ func (s *Server) GetCommittees(w http.ResponseWriter, r *http.Request) {

	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		helpers.HandleIsOptimisticError(w, err)
		return
	}

@@ -1485,7 +1492,7 @@ func (s *Server) GetFinalityCheckpoints(w http.ResponseWriter, r *http.Request)
	}
	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		helpers.HandleIsOptimisticError(w, err)
		return
	}
	blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -1659,7 +1666,7 @@ func (s *Server) GetPendingConsolidations(w http.ResponseWriter, r *http.Request
	} else {
		isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
		if err != nil {
			httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
			helpers.HandleIsOptimisticError(w, err)
			return
		}
		blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -1715,7 +1722,7 @@ func (s *Server) GetPendingDeposits(w http.ResponseWriter, r *http.Request) {
	} else {
		isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
		if err != nil {
			httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
			helpers.HandleIsOptimisticError(w, err)
			return
		}
		blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -1771,7 +1778,7 @@ func (s *Server) GetPendingPartialWithdrawals(w http.ResponseWriter, r *http.Req
	} else {
		isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
		if err != nil {
			httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
			helpers.HandleIsOptimisticError(w, err)
			return
		}
		blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -1790,6 +1797,63 @@ func (s *Server) GetPendingPartialWithdrawals(w http.ResponseWriter, r *http.Req
	}
}

func (s *Server) GetProposerLookahead(w http.ResponseWriter, r *http.Request) {
	ctx, span := trace.StartSpan(r.Context(), "beacon.GetProposerLookahead")
	defer span.End()

	stateId := r.PathValue("state_id")
	if stateId == "" {
		httputil.HandleError(w, "state_id is required in URL params", http.StatusBadRequest)
		return
	}
	st, err := s.Stater.State(ctx, []byte(stateId))
	if err != nil {
		shared.WriteStateFetchError(w, err)
		return
	}
	if st.Version() < version.Fulu {
		httputil.HandleError(w, "state_id is prior to fulu", http.StatusBadRequest)
		return
	}
	pl, err := st.ProposerLookahead()
	if err != nil {
		httputil.HandleError(w, "Could not get proposer lookahead: "+err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set(api.VersionHeader, version.String(st.Version()))
	if httputil.RespondWithSsz(r) {
		sszLen := (*primitives.ValidatorIndex)(nil).SizeSSZ()
		sszData := make([]byte, len(pl)*sszLen)
		for i, idx := range pl {
			copy(sszData[i*sszLen:(i+1)*sszLen], ssz.MarshalUint64([]byte{}, uint64(idx)))
		}
		httputil.WriteSsz(w, sszData)
	} else {
		isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
		if err != nil {
			helpers.HandleIsOptimisticError(w, err)
			return
		}
		blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
		if err != nil {
			httputil.HandleError(w, "Could not calculate root of latest block header: "+err.Error(), http.StatusInternalServerError)
			return
		}
		isFinalized := s.FinalizationFetcher.IsFinalized(ctx, blockRoot)
		vi := make([]string, len(pl))
		for i, v := range pl {
			vi[i] = strconv.FormatUint(uint64(v), 10)
		}
		resp := structs.GetProposerLookaheadResponse{
			Version:             version.String(st.Version()),
			ExecutionOptimistic: isOptimistic,
			Finalized:           isFinalized,
			Data:                vi,
		}
		httputil.WriteJson(w, resp)
	}
}

// serializeItems serializes a slice of items, each of which implements the MarshalSSZ method,
// into a single byte array.
func serializeItems[T interface{ MarshalSSZ() ([]byte, error) }](items []T) ([]byte, error) {

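When the request above asks for application/octet-stream, the handler packs each proposer index as a fixed-size SSZ uint64, so the body is simply len(pl) * 8 little-endian bytes. A small client-side decoding sketch using only the standard library (the endpoint path is the one registered above; everything else is illustrative):

package beaconclient

import (
	"encoding/binary"
	"fmt"
)

// decodeProposerLookahead parses the SSZ body returned by
// GET /eth/v1/beacon/states/{state_id}/proposer_lookahead when requested
// with Accept: application/octet-stream. Each validator index is a fixed
// 8-byte little-endian uint64.
func decodeProposerLookahead(body []byte) ([]uint64, error) {
	const indexSize = 8
	if len(body)%indexSize != 0 {
		return nil, fmt.Errorf("unexpected body length %d, want a multiple of %d", len(body), indexSize)
	}
	indices := make([]uint64, 0, len(body)/indexSize)
	for off := 0; off < len(body); off += indexSize {
		indices = append(indices, binary.LittleEndian.Uint64(body[off:off+indexSize]))
	}
	return indices, nil
}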
@@ -1103,9 +1103,9 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
		require.NoError(t, err)
		require.Equal(t, 1, len(msgsInPool))
		assert.Equal(t, primitives.Slot(1), msgsInPool[0].Slot)
		assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(msgsInPool[0].BlockRoot))
		assert.Equal(t, "0xbacd20f09da907734434f052bd4c9503aa16bab1960e89ea20610d08d064481c", hexutil.Encode(msgsInPool[0].BlockRoot))
		assert.Equal(t, primitives.ValidatorIndex(1), msgsInPool[0].ValidatorIndex)
		assert.Equal(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505", hexutil.Encode(msgsInPool[0].Signature))
		assert.Equal(t, "0xb591bd4ca7d745b6e027879645d7c014fecb8c58631af070f7607acc0c1c948a5102a33267f0e4ba41a85b254b07df91185274375b2e6436e37e81d2fd46cb3751f5a6c86efb7499c1796c0c17e122a54ac067bb0f5ff41f3241659cceb0c21c", hexutil.Encode(msgsInPool[0].Signature))
		assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
	})
	t.Run("multiple", func(t *testing.T) {
@@ -2497,23 +2497,23 @@ var (
	singleSyncCommitteeMsg = `[
	{
		"slot": "1",
		"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
		"beacon_block_root": "0xbacd20f09da907734434f052bd4c9503aa16bab1960e89ea20610d08d064481c",
		"validator_index": "1",
		"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
		"signature": "0xb591bd4ca7d745b6e027879645d7c014fecb8c58631af070f7607acc0c1c948a5102a33267f0e4ba41a85b254b07df91185274375b2e6436e37e81d2fd46cb3751f5a6c86efb7499c1796c0c17e122a54ac067bb0f5ff41f3241659cceb0c21c"
	}
]`
	multipleSyncCommitteeMsg = `[
	{
		"slot": "1",
		"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
		"beacon_block_root": "0xbacd20f09da907734434f052bd4c9503aa16bab1960e89ea20610d08d064481c",
		"validator_index": "1",
		"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
		"signature": "0xb591bd4ca7d745b6e027879645d7c014fecb8c58631af070f7607acc0c1c948a5102a33267f0e4ba41a85b254b07df91185274375b2e6436e37e81d2fd46cb3751f5a6c86efb7499c1796c0c17e122a54ac067bb0f5ff41f3241659cceb0c21c"
	},
	{
		"slot": "2",
		"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
		"beacon_block_root": "0x2757f6fd8590925cd000a86a3e543f98a93eae23781783a33e34504729a8ad0c",
		"validator_index": "1",
		"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
		"signature": "0x99dfe11b6c8b306d2c72eb891926d37922d226ea8e1e7484d6c30fab746494f192b0daa3e40c13f1e335b35238f3362c113455a329b1fab0bc500bc47f643786f49e151d5b5052afb51af57ba5aa34a6051dc90ee4de83a26eb54a895061d89a"
	}
]`
	// signature is invalid
@@ -2523,6 +2523,18 @@ var (
		"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
		"validator_index": "1",
		"signature": "foo"
	},
	{
		"slot": "1121",
		"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
		"validator_index": "1",
		"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
	},
	{
		"slot": "1121",
		"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
		"validator_index": "2",
		"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
	}
]`
	// signatures are invalid

@@ -56,7 +56,7 @@ func (s *Server) GetStateRoot(w http.ResponseWriter, r *http.Request) {
	}
	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		helpers.HandleIsOptimisticError(w, err)
		return
	}
	blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -125,7 +125,7 @@ func (s *Server) GetRandao(w http.ResponseWriter, r *http.Request) {

	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		helpers.HandleIsOptimisticError(w, err)
		return
	}

@@ -227,7 +227,7 @@ func (s *Server) GetSyncCommittees(w http.ResponseWriter, r *http.Request) {

	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		helpers.HandleIsOptimisticError(w, err)
		return
	}


@@ -43,6 +43,7 @@ import (
	GoKZG "github.com/crate-crypto/go-kzg-4844"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
	ssz "github.com/prysmaticlabs/fastssz"
	"github.com/prysmaticlabs/go-bitfield"
	logTest "github.com/sirupsen/logrus/hooks/test"
	"github.com/stretchr/testify/mock"
@@ -5363,3 +5364,190 @@ func TestGetPendingPartialWithdrawals(t *testing.T) {
		require.Equal(t, true, resp.Finalized)
	})
}

func TestGetProposerLookahead(t *testing.T) {
	numValidators := 50
	// Create a Fulu state with proposer lookahead data
	st, _ := util.DeterministicGenesisStateFulu(t, uint64(numValidators))
	lookaheadSize := int(params.BeaconConfig().MinSeedLookahead+1) * int(params.BeaconConfig().SlotsPerEpoch)
	lookahead := make([]primitives.ValidatorIndex, lookaheadSize)
	for i := 0; i < lookaheadSize; i++ {
		lookahead[i] = primitives.ValidatorIndex(i % numValidators) // Cycle through validators
	}

	require.NoError(t, st.SetProposerLookahead(lookahead))

	chainService := &chainMock.ChainService{
		Optimistic:     false,
		FinalizedRoots: map[[32]byte]bool{},
	}
	server := &Server{
		Stater: &testutil.MockStater{
			BeaconState: st,
		},
		OptimisticModeFetcher: chainService,
		FinalizationFetcher:   chainService,
	}

	t.Run("json response", func(t *testing.T) {
		req := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/proposer_lookahead", nil)
		req.SetPathValue("state_id", "head")
		rec := httptest.NewRecorder()
		rec.Body = new(bytes.Buffer)

		server.GetProposerLookahead(rec, req)
		require.Equal(t, http.StatusOK, rec.Code)
		require.Equal(t, "fulu", rec.Header().Get(api.VersionHeader))

		var resp structs.GetProposerLookaheadResponse
		require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp))

		expectedVersion := version.String(st.Version())
		require.Equal(t, expectedVersion, resp.Version)
		require.Equal(t, false, resp.ExecutionOptimistic)
		require.Equal(t, false, resp.Finalized)

		// Verify the data
		require.Equal(t, lookaheadSize, len(resp.Data))
		for i := 0; i < lookaheadSize; i++ {
			expectedIdx := strconv.FormatUint(uint64(i%numValidators), 10)
			require.Equal(t, expectedIdx, resp.Data[i])
		}
	})

	t.Run("ssz response", func(t *testing.T) {
		req := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/proposer_lookahead", nil)
		req.Header.Set("Accept", "application/octet-stream")
		req.SetPathValue("state_id", "head")
		rec := httptest.NewRecorder()
		rec.Body = new(bytes.Buffer)

		server.GetProposerLookahead(rec, req)
		require.Equal(t, http.StatusOK, rec.Code)
		require.Equal(t, "fulu", rec.Header().Get(api.VersionHeader))
		responseBytes := rec.Body.Bytes()
		validatorIndexSize := (*primitives.ValidatorIndex)(nil).SizeSSZ()
		require.Equal(t, len(responseBytes), validatorIndexSize*lookaheadSize)

		recoveredIndices := make([]primitives.ValidatorIndex, lookaheadSize)
		for i := 0; i < lookaheadSize; i++ {
			start := i * validatorIndexSize
			end := start + validatorIndexSize

			idx := ssz.UnmarshallUint64(responseBytes[start:end])
			recoveredIndices[i] = primitives.ValidatorIndex(idx)
		}
		require.DeepEqual(t, lookahead, recoveredIndices)
	})

	t.Run("pre fulu state", func(t *testing.T) {
		preEplusSt, _ := util.DeterministicGenesisStateElectra(t, 1)
		preFuluServer := &Server{
			Stater: &testutil.MockStater{
				BeaconState: preEplusSt,
			},
			OptimisticModeFetcher: chainService,
			FinalizationFetcher:   chainService,
		}

		// Test JSON request
		req := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/proposer_lookahead", nil)
		req.SetPathValue("state_id", "head")
		rec := httptest.NewRecorder()
		rec.Body = new(bytes.Buffer)

		preFuluServer.GetProposerLookahead(rec, req)
		require.Equal(t, http.StatusBadRequest, rec.Code)

		var errResp struct {
			Code    int    `json:"code"`
			Message string `json:"message"`
		}
		require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &errResp))
		require.Equal(t, "state_id is prior to fulu", errResp.Message)

		// Test SSZ request
		sszReq := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/proposer_lookahead", nil)
		sszReq.Header.Set("Accept", "application/octet-stream")
		sszReq.SetPathValue("state_id", "head")
		sszRec := httptest.NewRecorder()
		sszRec.Body = new(bytes.Buffer)

		preFuluServer.GetProposerLookahead(sszRec, sszReq)
		require.Equal(t, http.StatusBadRequest, sszRec.Code)

		var sszErrResp struct {
			Code    int    `json:"code"`
			Message string `json:"message"`
		}
		require.NoError(t, json.Unmarshal(sszRec.Body.Bytes(), &sszErrResp))
		require.Equal(t, "state_id is prior to fulu", sszErrResp.Message)
	})

	t.Run("missing state_id parameter", func(t *testing.T) {
		req := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/proposer_lookahead", nil)
		rec := httptest.NewRecorder()
		rec.Body = new(bytes.Buffer)

		server.GetProposerLookahead(rec, req)
		require.Equal(t, http.StatusBadRequest, rec.Code)

		var errResp struct {
			Code    int    `json:"code"`
			Message string `json:"message"`
		}
		require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &errResp))
		require.Equal(t, "state_id is required in URL params", errResp.Message)
	})

	t.Run("optimistic node", func(t *testing.T) {
		optimisticChainService := &chainMock.ChainService{
			Optimistic:     true,
			FinalizedRoots: map[[32]byte]bool{},
		}
		optimisticServer := &Server{
			Stater:                server.Stater,
			OptimisticModeFetcher: optimisticChainService,
			FinalizationFetcher:   optimisticChainService,
		}

		req := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/proposer_lookahead", nil)
		req.SetPathValue("state_id", "head")
		rec := httptest.NewRecorder()
		rec.Body = new(bytes.Buffer)

		optimisticServer.GetProposerLookahead(rec, req)
		require.Equal(t, http.StatusOK, rec.Code)

		var resp structs.GetProposerLookaheadResponse
		require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp))
		require.Equal(t, true, resp.ExecutionOptimistic)
	})

	t.Run("finalized node", func(t *testing.T) {
		blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
		require.NoError(t, err)

		finalizedChainService := &chainMock.ChainService{
			Optimistic:     false,
			FinalizedRoots: map[[32]byte]bool{blockRoot: true},
		}
		finalizedServer := &Server{
			Stater:                server.Stater,
			OptimisticModeFetcher: finalizedChainService,
			FinalizationFetcher:   finalizedChainService,
		}

		req := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/proposer_lookahead", nil)
		req.SetPathValue("state_id", "head")
		rec := httptest.NewRecorder()
		rec.Body = new(bytes.Buffer)

		finalizedServer.GetProposerLookahead(rec, req)
		require.Equal(t, http.StatusOK, rec.Code)

		var resp structs.GetProposerLookaheadResponse
		require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp))
		require.Equal(t, true, resp.Finalized)
	})
}

@@ -44,7 +44,7 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {

	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		helpers.HandleIsOptimisticError(w, err)
		return
	}
	blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -222,7 +222,7 @@ func (s *Server) GetValidator(w http.ResponseWriter, r *http.Request) {

	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		helpers.HandleIsOptimisticError(w, err)
		return
	}
	blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -258,7 +258,7 @@ func (s *Server) GetValidatorBalances(w http.ResponseWriter, r *http.Request) {

	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		helpers.HandleIsOptimisticError(w, err)
		return
	}
	blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -419,7 +419,7 @@ func (s *Server) getValidatorIdentitiesJSON(
) {
	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		helpers.HandleIsOptimisticError(w, err)
		return
	}
	blockRoot, err := st.LatestBlockHeader().HashTreeRoot()

16
beacon-chain/rpc/eth/beacon/metrics.go
Normal file
@@ -0,0 +1,16 @@
package beacon

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	publishBlockV2Duration = promauto.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "publish_block_v2_duration_milliseconds",
			Help:    "Duration of publishBlockV2 endpoint processing in milliseconds",
			Buckets: []float64{1, 5, 20, 100, 500, 1000, 2000, 5000},
		},
	)
)
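The histogram above is fed from the PublishBlockV2 hunk earlier in this diff, which records elapsed milliseconds in a defer. A self-contained sketch of that timing pattern (the metric definition mirrors the one above; the handler body is illustrative):

package beaconmetrics

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Mirrors the histogram defined above; registered against the default registry.
var publishDuration = promauto.NewHistogram(prometheus.HistogramOpts{
	Name:    "publish_block_v2_duration_milliseconds",
	Help:    "Duration of publishBlockV2 endpoint processing in milliseconds",
	Buckets: []float64{1, 5, 20, 100, 500, 1000, 2000, 5000},
})

// publishBlockV2 shows the defer-based timing used in the handler hunk above:
// the elapsed wall-clock time is observed in milliseconds regardless of how
// the handler returns.
func publishBlockV2(w http.ResponseWriter, _ *http.Request) {
	start := time.Now()
	defer func() {
		publishDuration.Observe(float64(time.Since(start).Milliseconds()))
	}()

	// ... actual block publishing elided ...
	w.WriteHeader(http.StatusOK)
}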
@@ -12,6 +12,7 @@ go_library(
        "//network/forks:go_default_library",
        "//network/httputil:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

Some files were not shown because too many files have changed in this diff.